Spaces:
Running
on
Zero
Running
on
Zero
Update cogvideo_pipeline.py
Browse files- cogvideo_pipeline.py +5 -5
cogvideo_pipeline.py
CHANGED
|
@@ -770,14 +770,14 @@ if __name__ == "__main__":
|
|
| 770 |
# py_parser.add_argument("--interp-duration", type=float, default=-1) # -1是顺序生成,0是超分,0.5/1/2是插帧
|
| 771 |
# py_parser.add_argument("--total-duration", type=float, default=4.0) # 整个的时间
|
| 772 |
py_parser.add_argument('--use-guidance-stage1', action='store_true')
|
| 773 |
-
py_parser.add_argument('--use-guidance-stage2', action='store_true')
|
| 774 |
py_parser.add_argument('--guidance-alpha', type=float, default=3.0)
|
| 775 |
py_parser.add_argument('--stage-1', action='store_true') # stage 1: sequential generation
|
| 776 |
-
py_parser.add_argument('--stage-2', action='store_true') # stage 2: interp + dsr
|
| 777 |
-
py_parser.add_argument('--both-stages', action='store_true') # stage 1&2: sequential generation; interp + dsr
|
| 778 |
py_parser.add_argument('--parallel-size', type=int, default=1)
|
| 779 |
-
py_parser.add_argument('--stage1-max-inference-batch-size', type=int, default=-1) # -1: use max-inference-batch-size
|
| 780 |
-
py_parser.add_argument('--multi-gpu', action='store_true')
|
| 781 |
|
| 782 |
CogVideoCacheModel.add_model_specific_args(py_parser)
|
| 783 |
|
|
|
|
| 770 |
# py_parser.add_argument("--interp-duration", type=float, default=-1) # -1是顺序生成,0是超分,0.5/1/2是插帧
|
| 771 |
# py_parser.add_argument("--total-duration", type=float, default=4.0) # 整个的时间
|
| 772 |
py_parser.add_argument('--use-guidance-stage1', action='store_true')
|
| 773 |
+
py_parser.add_argument('--use-guidance-stage2', action='store_false')
|
| 774 |
py_parser.add_argument('--guidance-alpha', type=float, default=3.0)
|
| 775 |
py_parser.add_argument('--stage-1', action='store_true') # stage 1: sequential generation
|
| 776 |
+
py_parser.add_argument('--stage-2', action='store_false') # stage 2: interp + dsr
|
| 777 |
+
py_parser.add_argument('--both-stages', action='store_false') # stage 1&2: sequential generation; interp + dsr
|
| 778 |
py_parser.add_argument('--parallel-size', type=int, default=1)
|
| 779 |
+
py_parser.add_argument('--stage1-max-inference-batch-size', type=int, default=1) # -1: use max-inference-batch-size
|
| 780 |
+
py_parser.add_argument('--multi-gpu', action='store_false')
|
| 781 |
|
| 782 |
CogVideoCacheModel.add_model_specific_args(py_parser)
|
| 783 |
|