chore: refine the doc and code
- .gitattributes +2 -0
- app.py +6 -4
- assets/examples/driving/d14.mp4 +3 -0
- assets/examples/source/s12.jpg +3 -0
- assets/gradio_description_retargeting.md +4 -1
- src/config/argument_config.py +2 -1
- src/gradio_pipeline.py +0 -2
.gitattributes CHANGED
@@ -47,3 +47,5 @@ pretrained_weights/liveportrait/base_models/warping_module.pth filter=lfs diff=l
 pretrained_weights/insightface/models/buffalo_l/2d106det.onnx filter=lfs diff=lfs merge=lfs -text
 pretrained_weights/insightface/models/buffalo_l/det_10g.onnx filter=lfs diff=lfs merge=lfs -text
 pretrained_weights/liveportrait/landmark.onnx filter=lfs diff=lfs merge=lfs -text
+assets/examples/driving/d14.mp4 filter=lfs diff=lfs merge=lfs -text
+assets/examples/source/s12.jpg filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -46,14 +46,14 @@ def gpu_wrapped_execute_image(*args, **kwargs):
 
 def is_square_video(video_path):
     video = cv2.VideoCapture(video_path)
-
+
     width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
     height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
-
+
     video.release()
     if width != height:
         raise gr.Error("Error: the video does not have a square aspect ratio. We currently only support square videos")
-
+
     return gr.update(visible=True)
 
 # assets
@@ -91,6 +91,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 [osp.join(example_portrait_dir, "s10.jpg")],
                 [osp.join(example_portrait_dir, "s5.jpg")],
                 [osp.join(example_portrait_dir, "s7.jpg")],
+                [osp.join(example_portrait_dir, "s12.jpg")],
             ],
             inputs=[image_input],
             cache_examples=False,
@@ -100,6 +101,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Examples(
             examples=[
                 [osp.join(example_video_dir, "d0.mp4")],
+                [osp.join(example_video_dir, "d14.mp4")],
                 [osp.join(example_video_dir, "d5.mp4")],
                 [osp.join(example_video_dir, "d6.mp4")],
                 [osp.join(example_video_dir, "d7.mp4")],
@@ -128,7 +130,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         output_video_concat.render()
     with gr.Row():
         # Examples
-        gr.Markdown("## You could choose the examples below ⬇️")
+        gr.Markdown("## You could also choose the examples below by one click ⬇️")
     with gr.Row():
         gr.Examples(
             examples=data_examples,
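
For context, a minimal sketch of how the square-video check above can be hooked into a Gradio app, assuming the driving video is a `gr.Video` component named `video_input` and that the check toggles a button's visibility; the actual component names and wiring in app.py may differ:

```python
import cv2
import gradio as gr


def is_square_video(video_path):
    # Same check as in app.py: reject driving videos that are not square.
    video = cv2.VideoCapture(video_path)
    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video.release()
    if width != height:
        raise gr.Error("Error: the video does not have a square aspect ratio. We currently only support square videos")
    return gr.update(visible=True)


with gr.Blocks() as demo:
    video_input = gr.Video()                                 # assumed component name
    animate_button = gr.Button("🚀 Animate", visible=False)  # assumed component
    # Validate on every upload: gr.Error aborts with a message for non-square
    # videos, otherwise gr.update(visible=True) reveals the button.
    video_input.upload(fn=is_square_video, inputs=video_input, outputs=animate_button)

demo.launch()
```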
assets/examples/driving/d14.mp4 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:465e72fbf26bf4ed46d1adf7aab8a7344aac54a2f92c4d82a1d53127f0170472
+size 891025
assets/examples/source/s12.jpg ADDED
Git LFS Details
assets/gradio_description_retargeting.md CHANGED
@@ -1 +1,4 @@
-<
+<br>
+
+## Retargeting
+<span style="font-size: 1.2em;">🔥 To edit the eyes and lip open ratio of the source portrait, drag the sliders and click the <strong>🚗 Retargeting</strong> button. You can try running it multiple times. <strong>😊 Set both ratios to 0.8 to see what's going on!</strong> </span>
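
The retargeting text above describes a slider-plus-button workflow. The sketch below shows one plausible way such controls are wired in Gradio; the handler name `execute_image_retargeting` and the component names are assumptions, not the exact ones used in app.py:

```python
import gradio as gr


def execute_image_retargeting(eye_ratio, lip_ratio, source_image_path):
    # Placeholder for the retargeting call in GradioPipeline (assumed here);
    # it would return the edited portrait for the given open ratios.
    ...


with gr.Blocks() as demo:
    source_image = gr.Image(type="filepath", label="Source Portrait")
    eye_ratio = gr.Slider(minimum=0.0, maximum=0.8, step=0.01, label="target eyes-open ratio")
    lip_ratio = gr.Slider(minimum=0.0, maximum=0.8, step=0.01, label="target lip-open ratio")
    output_image = gr.Image(label="Retargeting Result")
    retargeting_button = gr.Button("🚗 Retargeting")
    # Each click re-runs retargeting with the current slider values, which is
    # why the description says it can be run multiple times.
    retargeting_button.click(
        fn=execute_image_retargeting,
        inputs=[eye_ratio, lip_ratio, source_image],
        outputs=[output_image],
    )

demo.launch()
```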
src/config/argument_config.py CHANGED
@@ -8,6 +8,7 @@ import os.path as osp
 from dataclasses import dataclass
 import tyro
 from typing_extensions import Annotated
+from typing import Optional
 from .base_config import PrintableConfig, make_abs_path
 
 
@@ -41,4 +42,4 @@ class ArgumentConfig(PrintableConfig):
     ########## gradio arguments ##########
     server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])] = 7860
     share: bool = False
-    server_name: str = None
+    server_name: Optional[str] = None  # one can set "0.0.0.0" on local
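
The `Optional[str]` annotation matches the `None` default, which a bare `str` annotation did not (an inconsistency for type checkers and for annotation-driven CLI parsers such as tyro). A minimal sketch of how these gradio arguments might reach `demo.launch()`, assuming a trimmed stand-in config and entry point; the project's actual launcher may differ:

```python
from dataclasses import dataclass
from typing import Optional

import gradio as gr
import tyro
from typing_extensions import Annotated


@dataclass
class GradioArgs:  # trimmed stand-in for ArgumentConfig's gradio arguments
    server_port: Annotated[int, tyro.conf.arg(aliases=["-p"])] = 7860
    share: bool = False
    server_name: Optional[str] = None  # e.g. "0.0.0.0" to expose on the local network


if __name__ == "__main__":
    # tyro builds a CLI from the dataclass fields, e.g. `-p 8080 --server-name 0.0.0.0`.
    args = tyro.cli(GradioArgs)
    with gr.Blocks() as demo:
        gr.Markdown("placeholder UI")
    # launch() accepts server_name=None and then falls back to its own default host.
    demo.launch(server_name=args.server_name, server_port=args.server_port, share=args.share)
```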
src/gradio_pipeline.py CHANGED
@@ -10,7 +10,6 @@ from .utils.io import load_img_online
 from .utils.rprint import rlog as log
 from .utils.crop import prepare_paste_back, paste_back
 from .utils.camera import get_rotation_matrix
-from .utils.retargeting_utils import calc_eye_close_ratio, calc_lip_close_ratio
 
 def update_args(args, user_args):
     """update the args according to user inputs
@@ -111,4 +110,3 @@ class GradioPipeline(LivePortraitPipeline):
         else:
             # when press the clear button, go here
             raise gr.Error("The retargeting input hasn't been prepared yet 💥!", duration=5)
-