update inference
app.py CHANGED
@@ -129,15 +129,6 @@ if __name__ == "__main__":
     inject_motion_embeddings_combinations = ['down 1280','up 1280','down 640','up 640']
     default_motion_embeddings_combinations = ['down 1280','up 1280']

-    examples_train = [
-        'assets/train/car_turn.mp4',
-        'assets/train/pan_up.mp4',
-        'assets/train/run_up.mp4',
-        'assets/train/train_ride.mp4',
-        'assets/train/orbit_shot.mp4',
-        'assets/train/dolly_zoom_out.mp4',
-        'assets/train/santa_dance.mp4',
-    ]

     examples_inference = [
         ['results/pan_up/source.mp4', 'A flora garden.', 'camera', 'pan_up/checkpoint'],
@@ -210,59 +201,29 @@ if __name__ == "__main__":
             </a>
             """
         )
-
-
-
-
-
-
-
-
-        with gr.Accordion("Advanced Settings", open=False):
-            with gr.Row():
-                motion_embeddings_combinations = gr.Dropdown(label="Motion Embeddings Combinations", choices=inject_motion_embeddings_combinations, multiselect=True,value=default_motion_embeddings_combinations)
-                unet_dropdown = gr.Dropdown(label="Unet", choices=["videoCrafter2", "zeroscope_v2_576w"], value="videoCrafter2")
-                checkpointing_steps = gr.Dropdown(label="Checkpointing Steps",choices=[100,50],value=100)
-                max_train_steps = gr.Slider(label="Max Train Steps", minimum=200,maximum=500,value=200,step=50)
+
+        with gr.Row():
+            with gr.Column():
+                preview_video = gr.Video(label="Preview Video")
+                text_input = gr.Textbox(label="Input Text")
+                checkpoint_dropdown = gr.Dropdown(label="Select Checkpoint", choices=get_checkpoints('results'))
+                seed = gr.Number(label="Seed", value=0)
+                inference_button = gr.Button("Generate Video")

-
-        gr.Examples(examples=examples_train,inputs=[video_input])
-
-
-        train_button.click(
-            lambda video, mec, u, cs, mts: train_model(video, generate_config_train(mec, u, cs, mts)),
-            inputs=[video_input, motion_embeddings_combinations, unet_dropdown, checkpointing_steps, max_train_steps],
-            outputs=checkpoint_output
-        )
-
-        with gr.Tab("Inference"):
-            with gr.Row():
-                with gr.Column():
-                    preview_video = gr.Video(label="Preview Video")
-                    text_input = gr.Textbox(label="Input Text")
-                    checkpoint_dropdown = gr.Dropdown(label="Select Checkpoint", choices=get_checkpoints('results'))
-                    seed = gr.Number(label="Seed", value=0)
-                    inference_button = gr.Button("Generate Video")
+            with gr.Column():

-
-
-                output_video = gr.Video(label="Output Video")
-                generated_prompt = gr.Textbox(label="Generated Prompt")
+                output_video = gr.Video(label="Output Video")
+                generated_prompt = gr.Textbox(label="Generated Prompt")

-                with gr.Accordion("Advanced Settings", open=False):
-                    with gr.Row():
-                        inference_steps = gr.Number(label="Inference Steps", value=30)
-                        motion_type = gr.Dropdown(label="Motion Type", choices=["camera", "object"], value="object")
+        with gr.Accordion("Advanced Settings", open=False):
+            with gr.Row():
+                inference_steps = gr.Number(label="Inference Steps", value=30)
+                motion_type = gr.Dropdown(label="Motion Type", choices=["camera", "object"], value="object")

-
-            return gr.update(choices=get_checkpoints('results'))
+        gr.Examples(examples=examples_inference,inputs=[preview_video,text_input,motion_type,checkpoint_dropdown])

-
-
-
-            output_video.change(fn=update_generated_prompt, inputs=[text_input], outputs=generated_prompt)
+        checkpoint_dropdown.change(fn=update_preview_video, inputs=checkpoint_dropdown, outputs=preview_video)
+        inference_button.click(inference_model, inputs=[text_input, checkpoint_dropdown,inference_steps,motion_type, seed], outputs=output_video)
+        output_video.change(fn=update_generated_prompt, inputs=[text_input], outputs=generated_prompt)

     demo.launch()
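For orientation, below is a minimal, self-contained sketch of the inference-only page that the added lines assemble. The helpers get_checkpoints, update_preview_video, inference_model, and update_generated_prompt are defined elsewhere in app.py and are replaced here by clearly labelled stand-ins, so this is an illustration of the wiring under those assumptions, not the Space's actual code.

import os
import gradio as gr

def get_checkpoints(results_dir):
    # Stand-in: list entries under the results directory as selectable checkpoints.
    if not os.path.isdir(results_dir):
        return []
    return sorted(os.listdir(results_dir))

def update_preview_video(checkpoint):
    # Stand-in: point the preview player at the source clip stored with the checkpoint.
    return os.path.join("results", checkpoint, "source.mp4") if checkpoint else None

def inference_model(prompt, checkpoint, steps, motion_type, seed):
    # Stand-in: the real function runs the motion-transfer inference pipeline
    # and returns the path of the generated video.
    return None

def update_generated_prompt(prompt):
    # Stand-in: surface the prompt that produced the current output video.
    return prompt

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            preview_video = gr.Video(label="Preview Video")
            text_input = gr.Textbox(label="Input Text")
            checkpoint_dropdown = gr.Dropdown(label="Select Checkpoint",
                                              choices=get_checkpoints("results"))
            seed = gr.Number(label="Seed", value=0)
            inference_button = gr.Button("Generate Video")
        with gr.Column():
            output_video = gr.Video(label="Output Video")
            generated_prompt = gr.Textbox(label="Generated Prompt")

    with gr.Accordion("Advanced Settings", open=False):
        with gr.Row():
            inference_steps = gr.Number(label="Inference Steps", value=30)
            motion_type = gr.Dropdown(label="Motion Type",
                                      choices=["camera", "object"], value="object")

    # Selecting a checkpoint previews its source clip; the button runs inference;
    # a new output video refreshes the prompt display.
    checkpoint_dropdown.change(fn=update_preview_video,
                               inputs=checkpoint_dropdown, outputs=preview_video)
    inference_button.click(inference_model,
                           inputs=[text_input, checkpoint_dropdown,
                                   inference_steps, motion_type, seed],
                           outputs=output_video)
    output_video.change(fn=update_generated_prompt,
                        inputs=[text_input], outputs=generated_prompt)

if __name__ == "__main__":
    demo.launch()

In the committed code, gr.Examples additionally fills preview_video, text_input, motion_type, and checkpoint_dropdown from each row of examples_inference, for example ['results/pan_up/source.mp4', 'A flora garden.', 'camera', 'pan_up/checkpoint']; the sketch omits that call because the example media files are not present here.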