PyAnime4K is a simple, fast and powerful Anime4K Python implementation.
PyAnime4K can be installed easily through pip (recommended for Windows):
pip install pyanime4k
For Ubuntu, you can also use setup_wheel_ubuntu.sh
to build a wheel file and install it (especially recommended for Google Colab):
# For Google Colab, you need to add "!" at the beginning
# "setup_wheel_ubuntu.sh" need root permission for installing dependency library by apt
wget https://github.com/TianZerL/pyanime4k/raw/master/setup_wheel_ubuntu.sh && chmod 777 setup_wheel_ubuntu.sh && ./setup_wheel_ubuntu.sh
- Clone Anime4KCPP
- Follow its build instructions; for pyanime4k, only the core and the C wrapper are needed. Make sure the CMake option
Build_C_wrapper
is turned on, and if you want to build the core and the C wrapper into one file, also turn on Build_C_wrapper_with_core
(recommended)
- Clone the repo
- Download from a release, or compile the Anime4KCPP core and the Anime4KCPP C wrapper.
- Copy
ac.dll
and Anime4KCPPCore.dll
(if the Build_C_wrapper_with_core CMake option of Anime4KCPP is turned off) and opencv_world440.dll
to pyanime4k/wrapper (Windows) - Copy
libac.so
to pyanime4k/wrapper (Linux) - Copy
libac.dylib
to pyanime4k/wrapper (macOS)
- Enjoy
# pathlib.Path objects are preferred over plain path strings
import pathlib

# import the pyanime4k library
import pyanime4k

# preview a single image upscaled with Anime4KCPP
pyanime4k.show_upscaled_image(pathlib.Path('image1.png'))

# upscale one image file
pyanime4k.upscale_images(pathlib.Path('image1.png'))

# upscale a batch of images into a common output directory
source_images = [
    pathlib.Path('image1.png'),
    pathlib.Path('image2.png'),
]
pyanime4k.upscale_images(
    input_paths=source_images,
    output_path=pathlib.Path('./output'),
)
# pathlib.Path objects are preferred over plain path strings
import pathlib

# import the pyanime4k library
import pyanime4k

# upscale one video file
pyanime4k.upscale_videos(pathlib.Path('video1.mp4'))

# upscale a batch of videos into a common output directory
source_videos = [
    pathlib.Path('video1.mp4'),
    pathlib.Path('video2.mp4'),
]
pyanime4k.upscale_videos(
    input_paths=source_videos,
    output_path=pathlib.Path('./output'),
)
from pyanime4k import ac
import cv2

# open the source video for frame-by-frame reading
video = cv2.VideoCapture(r"F:\Temp\Anime4K\P1-1.m4v")

# init the Anime4KCPP ACNet processor on OpenCL platform 0, device 0
a = ac.AC(
    managerList=ac.ManagerList([ac.OpenCLACNetManager(pID=0, dID=0)]),
    type=ac.ProcessorType.OpenCL_ACNet
)

while True:
    v, f = video.read()
    if not v:
        # no frame returned: end of stream (or read failure)
        break
    # the default color format of OpenCV is BGR
    # (note: "proccess" is the actual spelling of the pyanime4k API)
    f = a.proccess_image_with_numpy(f)
    cv2.imshow("video", f)
    cv2.waitKey(1)

# release the capture handle and close the preview window
# (the original example leaked both)
video.release()
cv2.destroyAllWindows()
You can specify which GPU to use for processing if you have more than one:
from pyanime4k import ac

# print the GPU list to get the platform ID (pID) and device ID (dID) of each GPU
ac.AC.list_GPUs()

# the GPU to use; the same IDs must be used for the support check and the init
pID, dID = 1, 0

# check GPU support
flag, info = ac.AC.check_GPU_support(GPGPU=ac.GPGPUModel.AC_OpenCL, pID=pID, dID=dID)

# print GPU information from the support check
print(info)

# init the AC core only when the checked GPU is actually supported.
# (the original example checked pID=1 but initialized pID=0, and printed
# a.get_processor_info() even when flag was False, raising NameError)
if flag:
    a = ac.AC(
        managerList=ac.ManagerList([ac.OpenCLACNetManager(pID=pID, dID=dID)]),
        type=ac.ProcessorType.OpenCL_ACNet
    )
    # check the current processor information
    print(a.get_processor_info())
You may also create a low-level AC object and handle each of the steps manually.
from pyanime4k import ac
import pyanime4k

# processing parameters for the upscaler
parameters = ac.Parameters()
# enable HDN for ACNet (denoising mode — see Anime4KCPP docs for details)
parameters.HDN = True

# NOTE(review): `parameters` is never passed to ac.AC below — presumably it
# should be supplied to the constructor (e.g. parameters=parameters);
# confirm against the pyanime4k API.
a = ac.AC(
    managerList=ac.ManagerList([ac.OpenCLACNetManager(pID=0, dID=0)]),
    type=ac.ProcessorType.OpenCL_ACNet
)

# --- image from file ---
# load image from file
a.load_image(r"D:\Temp\anime4k\p1.png")
# start processing
a.process()
# preview upscaled image
a.show_image()
# save image to file
a.save_image('image1_output.png')

# --- image from PIL / numpy ---
import numpy as np
from PIL import Image
img = Image.open(r"D:\Temp\anime4k\p1.png").convert("RGB")
img = np.array(img)
# BGR, RGB and YUV444 input layouts are supported
a.load_image_from_numpy(img, input_type=ac.AC_INPUT_RGB)
# start processing
a.process()
# save image to numpy array
new_img = a.save_image_to_numpy()
new_img = Image.fromarray(new_img)
new_img.show()

# --- image from OpenCV (OpenCV reads images as BGR) ---
import cv2
img = cv2.imread(r"D:\Temp\anime4k\p1.png")
a.load_image_from_numpy(img,input_type=ac.AC_INPUT_BGR)
a.process()
img = a.save_image_to_numpy()
cv2.imshow("opencv", img)
cv2.waitKey(0)
# save image to file
a.save_image('image1_output_1.png')

# --- video processing ---
# let's process video: switch the AC object into video mode
a.set_video_mode(True)
# load video file
a.load_video(r"D:\Temp\anime4k\P1-1.m4v")
# specify output video file name
# note that this needs to be done before processing starts
a.set_save_video_info("output_tmp.mp4", codec=ac.Codec.MP4V)
# start processing with a built-in progress display
a.process_with_progress()
def print_progress_time(v, t):
    """Progress callback: v is the completion fraction (0-1), t the elapsed seconds.

    Prints percent complete, elapsed time, and an estimated remaining time,
    overwriting the same console line.
    """
    if v > 0:
        remaining = t / v - t
    else:
        # at the very start v == 0; the original formula divided by zero here
        remaining = 0.0
    print("%.2f%% elapsed: %.2f remaining: %.2f" % (v * 100, t, remaining), end="\r")

# or, a simpler callback that receives only the progress value:
# def print_progress(v):
#     print("%.2f%%" % (v * 100), end="\r")
# load video file
a.load_video(r"D:\Temp\anime4k\P1-1.m4v")
# specify output video file name
# note that this needs to be done before processing starts
a.set_save_video_info("output_tmp_.mp4", codec=ac.Codec.MP4V)
# start processing, reporting progress fraction and elapsed time to the callback
a.process_with_progress_time_callback(print_progress_time)
'''
#or
# start processing with progress value callback
a.process_with_progress_callback(print_progress)
'''
# save video to file
a.save_video()
# remux the audio streams from the source into the upscaled video,
# and automatically delete the temporary file
pyanime4k.migrate_audio_streams("output_tmp.mp4",r"D:\Temp\anime4k\P1-1.m4v","output.mp4")
from pyanime4k import ac
import cv2
import time
import threading
import queue

# init VideoCapture (reader) and VideoWriter (2x upscaled output)
videoReader = cv2.VideoCapture(r"D:\Temp\anime4k\P1-1.m4v")
fps = videoReader.get(cv2.CAP_PROP_FPS)
h = videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)
w = videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)
videoWriter = cv2.VideoWriter(
    "output.mp4",
    cv2.VideoWriter_fourcc("m", "p", "4", "v"),
    fps,
    (int(w * 2), int(h * 2)),  # ACNet upscales 2x in each dimension
)

# init Anime4KCPP ACNet on OpenCL platform 0, device 0
a = ac.AC(
    managerList=ac.ManagerList([ac.OpenCLACNetManager(pID=0, dID=0)]),
    type=ac.ProcessorType.OpenCL_ACNet
)

# bounded frame queue so processing cannot run far ahead of disk writes
q = queue.Queue(12)

# consumer: write queued frames to disk
def writeFrames():
    while True:
        f = q.get()
        videoWriter.write(f)
        q.task_done()

# daemon thread so the process can exit even though writeFrames never returns
t = threading.Thread(target=writeFrames, daemon=True)
t.start()

s = time.time()
while True:
    v, f = videoReader.read()
    if not v:
        break
    # note: "proccess" is the actual spelling of the pyanime4k API
    f = a.proccess_image_with_numpy(f)
    q.put(f)
e = time.time()
print("time:", e - s, "s")

# wait until every queued frame has been written, then release both handles
# (the original example never released the reader)
q.join()
videoReader.release()
videoWriter.release()