Commit 1e0d3f4
Parent(s): c4a4fb7

update limits

Files changed:
- LHM/utils/ffmpeg_utils.py  +12 -11
- app.py  +17 -18
LHM/utils/ffmpeg_utils.py
CHANGED

@@ -48,19 +48,20 @@ def images_to_video(images, output_path, fps, gradio_codec: bool, verbose=False,
                 f"Frame value out of range: {frame.min()} ~ {frame.max()}"
         else:
             frame = images[i]
-
+        width, height = frame.shape[1], frame.shape[0]
         # reshape to limit the export time
-
-
+        if width > 1200 or height > 1200 or images.shape[0] > 200:
+            frames.append(cv2.resize(frame, (width // 2, height // 2)))
         frames.append(frame)
-
-    frames = frames[:
+    # limit the frames directly @NOTE huggingface only!
+    frames = frames[:200]
 
     frames = np.stack(frames)
 
-    try:
-        imageio.mimwrite(output_path, frames, fps=30, codec="libx264")
-        print("save {} using imageio.".format(output_path))
-    except:
-        print("start saving {} using imageio.v3 .".format(output_path))
-        iio.imwrite(output_path,frames,fps=fps,codec="libx264",pixelformat="yuv420p",bitrate=bitrate, macro_block_size=32)
+    # try:
+    #     imageio.mimwrite(output_path, frames, fps=30, codec="libx264")
+    #     print("save {} using imageio.".format(output_path))
+    # except:
+    print("start saving {} using imageio.v3 .".format(output_path))
+    iio.imwrite(output_path,frames,fps=fps,codec="libx264",pixelformat="yuv420p",bitrate=bitrate, macro_block_size=32)
+    print("saved {} using imageio.v3 .".format(output_path))
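Read as a whole, the new export path in images_to_video downscales oversized input, hard-caps the clip at 200 frames, and writes directly with imageio.v3 instead of trying imageio.mimwrite first. A minimal standalone sketch of that flow, assuming the two append calls form an if/else (the branch line is not visible in the hunk) and using synthetic frames, a placeholder output name, and an explicit plugin="FFMPEG" selection:

import cv2
import imageio.v3 as iio
import numpy as np

# Synthetic input for illustration: 240 frames of 1280x768 RGB, uint8.
images = np.random.randint(0, 256, size=(240, 768, 1280, 3), dtype=np.uint8)

frames = []
for frame in images:
    width, height = frame.shape[1], frame.shape[0]
    # Downscale when the clip is large or long, using the commit's thresholds.
    if width > 1200 or height > 1200 or images.shape[0] > 200:
        frames.append(cv2.resize(frame, (width // 2, height // 2)))
    else:  # assumed branch; the diff's surviving lines imply it
        frames.append(frame)

# Hard cap on frame count (the "@NOTE huggingface only!" limit).
frames = np.stack(frames[:200])

# Direct imageio.v3 write through the legacy FFMPEG plugin, mirroring the
# kwargs the commit passes (bitrate omitted here; fps fixed at 30).
iio.imwrite("out.mp4", frames, plugin="FFMPEG", fps=30, codec="libx264",
            pixelformat="yuv420p", macro_block_size=32)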
app.py
CHANGED
@@ -589,11 +589,6 @@ def demo_lhm(pose_estimator, face_detector, lhm, cfg):
 
         os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)
 
-        return rgb, dump_image_path, dump_video_path
-
-    def core_fn_export(image, video_params, working_dir):
-        rgb, dump_image_path, dump_video_path = core_fn(image=image, video_params=video_params, working_dir=working_dir)
-        print("start to export the video.")
         images_to_video(
             rgb,
             output_path=dump_video_path,
@@ -601,8 +596,22 @@
             gradio_codec=False,
             verbose=True,
         )
-
+
         return dump_image_path, dump_video_path
+        # return rgb, dump_image_path, dump_video_path
+
+    # def core_fn_export(image, video_params, working_dir):
+    #     rgb, dump_image_path, dump_video_path = core_fn(image=image, video_params=video_params, working_dir=working_dir)
+    #     print("start to export the video.")
+    #     images_to_video(
+    #         rgb,
+    #         output_path=dump_video_path,
+    #         fps=30,
+    #         gradio_codec=False,
+    #         verbose=True,
+    #     )
+
+    #     return dump_image_path, dump_video_path
 
 _TITLE = '''LHM: Large Animatable Human Model'''
 
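The net effect of this hunk is that core_fn_export is retired and core_fn both renders and exports, returning only the artifact paths. A sketch of the consolidated shape under stated assumptions: the rendering step is stubbed out, the dump paths are illustrative names, and fps=30 is taken from the commented-out block rather than a visible context line:

import os
import numpy as np

from LHM.utils.ffmpeg_utils import images_to_video  # the util patched above

def core_fn(image, video_params, working_dir):
    # Stand-in for the real pipeline (pose estimation, SMPL params, LHM
    # inference); everything below mirrors the consolidated export flow.
    rgb = np.zeros((10, 384, 384, 3), dtype=np.uint8)             # rendered frames (stub)
    dump_image_path = os.path.join(working_dir, "processed.png")  # assumed name
    dump_video_path = os.path.join(working_dir, "output.mp4")     # assumed name

    os.makedirs(os.path.dirname(dump_video_path), exist_ok=True)
    images_to_video(
        rgb,
        output_path=dump_video_path,
        fps=30,  # assumed; this line sits between the two hunks
        gradio_codec=False,
        verbose=True,
    )
    # core_fn now returns only the paths that the Gradio outputs consume.
    return dump_image_path, dump_video_path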
@@ -728,7 +737,7 @@ def demo_lhm(pose_estimator, face_detector, lhm, cfg):
                 outputs=[working_dir],
                 queue=False,
             ).success(
-                fn=
+                fn=core_fn,
                 inputs=[input_image, video_input, working_dir], # video_params refer to smpl dir
                 outputs=[processed_image, output_video],
             )
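On the UI side, the .success() chain now points at core_fn directly, so the callback that runs after the working directory is prepared must itself return the processed image and the video path. A generic sketch of this Gradio chaining pattern; the component set and prepare_working_dir are stand-ins, not the app's actual definitions:

import gradio as gr

def prepare_working_dir():
    # Stand-in for the app's working-directory setup step.
    return "/tmp/session"

def core_fn(image, video_params, working_dir):
    # Stand-in: the real core_fn returns (processed_image_path, video_path).
    return image, None

with gr.Blocks() as demo:
    input_image = gr.Image(type="filepath")
    video_input = gr.Video()
    working_dir = gr.State()
    processed_image = gr.Image()
    output_video = gr.Video()
    run = gr.Button("Run")

    # The first stage seeds the working dir; .success() runs core_fn only
    # if that stage raised no error -- the same wiring as in the diff.
    run.click(
        fn=prepare_working_dir,
        outputs=[working_dir],
        queue=False,
    ).success(
        fn=core_fn,
        inputs=[input_image, video_input, working_dir],
        outputs=[processed_image, output_video],
    )

if __name__ == "__main__":
    demo.launch()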
@@ -771,17 +780,7 @@ def launch_gradio_app():
 
     demo_lhm(pose_estimator, facedetector, lhm, cfg)
 
-
-
 if __name__ == '__main__':
     launch_pretrained()
     # launch_env_not_compile_with_cuda()
-    launch_gradio_app()
-
-    # import gradio as gr
-
-    # def greet(name):
-    #     return "Hello " + name + "!!"
-
-    # demo = gr.Interface(fn=greet, inputs="text", outputs="text")
-    # demo.launch()
+    launch_gradio_app()