Commit f33e2ba · Parent(s): 23d1284
Upload code
- app.py +6 -5
- imgs/1.jpg +0 -0
- imgs/2.jpg +0 -0
- imgs/3.jpg +0 -0
    	
app.py CHANGED

@@ -111,13 +111,14 @@ def resize_without_crop(image, target_width, target_height):
 
 
 @torch.inference_mode()
-@spaces.GPU
+@spaces.GPU(duration=1200)
 def interrogator_process(x):
-    …
+    image_description = wd14tagger.default_interrogator(x)
+    return image_description, image_description
 
 
 @torch.inference_mode()
-@spaces.GPU
+@spaces.GPU(duration=1200)
 def process(input_fg, prompt, input_undo_steps, image_width, image_height, seed, steps, n_prompt, cfg,
             progress=gr.Progress()):
     rng = torch.Generator(device=memory_management.gpu).manual_seed(int(seed))
@@ -214,7 +215,7 @@ def process_video_inner(image_1, image_2, prompt, seed=123, steps=25, cfg_scale=
 
 
 @torch.inference_mode()
-@spaces.GPU(duration=…)
+@spaces.GPU(duration=1200)
 def process_video(keyframes, prompt, steps, cfg, fps, seed, progress=gr.Progress()):
     result_frames = []
     cropped_images = []
@@ -292,7 +293,7 @@ with block:
     prompt_gen_button.click(
         fn=interrogator_process,
         inputs=[input_fg],
-        outputs=[prompt]
+        outputs=[prompt, i2v_input_text]
     ).then(lambda: [gr.update(interactive=True), gr.update(interactive=True), gr.update(interactive=False)],
            outputs=[prompt_gen_button, key_gen_button, i2v_end_btn])
 
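For reference, below is a minimal runnable sketch of the two patterns this commit applies: raising the ZeroGPU time budget with @spaces.GPU(duration=1200), and returning the interrogator result twice so that a single click fills both prompt and i2v_input_text. It assumes the standard Hugging Face `spaces` package and Gradio; the placeholder caption, the component labels, and anything else not shown in the diff are illustrative and not the Space's actual code.

# Minimal sketch of the patterns in this commit (not this Space's actual app.py).
import gradio as gr
import spaces  # Hugging Face ZeroGPU helper; the decorator is a no-op off Spaces hardware
import torch


@torch.inference_mode()
@spaces.GPU(duration=1200)  # reserve the GPU for up to 1200 seconds per call
def interrogator_process(x):
    # stands in for wd14tagger.default_interrogator(x) in the real app
    image_description = "a placeholder caption"
    # returning the value twice fills both output components wired below
    return image_description, image_description


with gr.Blocks() as block:
    input_fg = gr.Image(type="numpy")
    prompt = gr.Textbox(label="Prompt")
    i2v_input_text = gr.Textbox(label="Video prompt")
    prompt_gen_button = gr.Button("Generate prompt")
    # one click handler now writes the same description to both textboxes
    prompt_gen_button.click(
        fn=interrogator_process,
        inputs=[input_fg],
        outputs=[prompt, i2v_input_text],
    ).then(
        lambda: gr.update(interactive=True),
        outputs=[prompt_gen_button],
    )

block.launch()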
    	
imgs/1.jpg ADDED

imgs/2.jpg ADDED

imgs/3.jpg ADDED