File validated locally - no syntax errors
- app.py (+1 -1)
- app_fixed.py (+1451 -0)

app.py  CHANGED
@@ -777,7 +777,7 @@ def generate_image(
 
                 result = pipe(**generation_kwargs)
                 image = result.images[0]
-
+            else:
                 print("✅ Image generated successfully")
         else:
             print("❌ Error: empty image")
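The one-line fix replaces the blank line 780 with an `else:`, moving the success message into the alternative branch of the inner conditional instead of letting it run unconditionally after the pipeline call. For context, a defensive version of this result check might look like the sketch below (an illustration only, not the Space's actual code; `generation_kwargs` is built earlier in `generate_image`):

    result = pipe(**generation_kwargs)  # run the diffusion pipeline
    if result.images and result.images[0] is not None:
        image = result.images[0]
        print("✅ Image generated successfully")
    else:
        image = None
        print("❌ Error: empty image")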
    	
app_fixed.py  ADDED

@@ -0,0 +1,1451 @@
+# IMPORTANT: import spaces BEFORE torch for ZeroGPU
+import spaces  # to use ZeroGPU H200
+
+import gradio as gr
+import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from diffusers import StableDiffusionPipeline, DiffusionPipeline
+import requests
+from PIL import Image
+import io
+import base64
+import os
+import time
+import numpy as np
+import random
+from huggingface_hub import login
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel
+
+print("🚀 Starting NTIA Space with ZeroGPU H200...")
+print(f"📁 Current directory: {os.getcwd()}")
+print(f"🐍 Python version: {os.sys.version}")
+
+# Optimization for ZeroGPU H200
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"🖥️ Detected device: {device}")
+print(f"🔥 CUDA available: {torch.cuda.is_available()}")
+
+if torch.cuda.is_available():
+    print(f"🎮 GPU: {torch.cuda.get_device_name(0)}")
+    print(f"💾 GPU memory: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB")
+    print("🚀 ZeroGPU H200 detected - optimizing for maximum performance")
+
+    # Configuration optimized for the H200
+    torch_dtype = torch.float16  # use float16 for higher throughput
+    print("⚡ Using torch.float16 for H200")
+
+    # Additional H200 optimizations
+    torch.backends.cudnn.benchmark = True
+    torch.backends.cuda.matmul.allow_tf32 = True
+    torch.backends.cudnn.allow_tf32 = True
+    print("🔧 CUDA optimizations enabled for H200")
+else:
+    torch_dtype = torch.float32
+    print("🐌 Using torch.float32 on CPU")
+
+# Configure Hugging Face authentication
+HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGING_FACE_HUB_TOKEN")
+if HF_TOKEN:
+    try:
+        print(f"🔑 Token detected: {HF_TOKEN[:10]}...")
+        login(token=HF_TOKEN)
+        print("✅ Authenticated with Hugging Face")
+        print(f"🔑 Token configured: {HF_TOKEN[:10]}...")
+    except Exception as e:
+        print(f"⚠️ Authentication error: {e}")
+else:
+    print("⚠️ HF_TOKEN not found - gated models will not be available")
+    print("💡 To use FLUX models, set the HF_TOKEN environment variable in the Space")
+
+# Check access to gated models
+def check_gated_model_access():
+    """Check whether we have access to gated models"""
+    if not HF_TOKEN:
+        return False
+
+    try:
+        # Try accessing a gated model to verify permissions
+        from huggingface_hub import model_info
+        info = model_info("black-forest-labs/FLUX.1-dev", token=HF_TOKEN)
+        print(f"✅ Access to FLUX.1-dev verified: {info.modelId}")
+        return True
+    except Exception as e:
+        print(f"❌ Could not verify access to gated models: {e}")
+        return False
+
+# Check access at startup
+GATED_ACCESS = check_gated_model_access()
+
+# Show configuration status at startup
+print("=" * 60)
+print("🚀 NTIA SPACE - CONFIGURATION STATUS")
+print("=" * 60)
+print(f"🔑 HF token configured: {'✅' if HF_TOKEN else '❌'}")
+print(f"🔐 Gated model access: {'✅' if GATED_ACCESS else '❌'}")
+print(f"🎨 FLUX models available: {'✅' if GATED_ACCESS else '❌'}")
+print("=" * 60)
+
+if not GATED_ACCESS:
+    print("⚠️ To use FLUX models:")
+    print("   1. Set HF_TOKEN in the Space's environment variables")
+    print("   2. Request access to the FLUX models on Hugging Face")
+    print("   3. Accept the license terms")
+    print("=" * 60)
+
+# Classes for the API endpoints
+class TextRequest(BaseModel):
+    prompt: str
+    model_name: str
+    max_length: int = 100
+
+class ImageRequest(BaseModel):
+    prompt: str
+    model_name: str
+    negative_prompt: str = ""
+    seed: int = 0
+    randomize_seed: bool = True
+    width: int = 1024
+    height: int = 1024
+    guidance_scale: float = 7.5
+    num_inference_steps: int = 20
+    eta: float = 0
+    strength: float = 1
+    num_images: int = 1
+    safety_checker: bool = True
+
+class VideoRequest(BaseModel):
+    prompt: str
+    model_name: str
+    num_frames: int = 16
+    num_inference_steps: int = 20
+
+class ChatRequest(BaseModel):
+    message: str
+    history: list
+    model_name: str
+
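The four request classes above define the API surface of the Space. Given the FastAPI, HTTPException, and CORSMiddleware imports at the top of the file, they are presumably bound to POST endpoints along the lines of the sketch below (the route path and the `generate_text` helper are assumptions, not code from this commit):

    # Illustrative wiring only; generate_text() is a hypothetical helper.
    app = FastAPI()
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # permissive CORS, as in a public demo Space
        allow_methods=["*"],
        allow_headers=["*"],
    )

    @app.post("/api/text")
    def api_text(req: TextRequest):
        try:
            return {"text": generate_text(req.prompt, req.model_name, req.max_length)}
        except Exception as e:
            raise HTTPException(status_code=500, detail=str(e))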
+# Free (non-gated) model configuration
+MODELS = {
+    "text": {
+        "microsoft/DialoGPT-medium": "Conversational chat",
+        "microsoft/DialoGPT-large": "Advanced conversational chat",
+        "microsoft/DialoGPT-small": "Fast conversational chat",
+        "gpt2": "Text generation",
+        "gpt2-medium": "Medium GPT-2",
+        "gpt2-large": "Large GPT-2",
+        "distilgpt2": "Optimized GPT-2",
+        "EleutherAI/gpt-neo-125M": "Small GPT-Neo",
+        "EleutherAI/gpt-neo-1.3B": "Medium GPT-Neo",
+        "facebook/opt-125m": "Small OPT",
+        "facebook/opt-350m": "Medium OPT",
+        "bigscience/bloom-560m": "Multilingual BLOOM",
+        "bigscience/bloom-1b1": "Large BLOOM",
+        "Helsinki-NLP/opus-mt-es-en": "Spanish-to-English translator",
+        "Helsinki-NLP/opus-mt-en-es": "English-to-Spanish translator",
+        # ✅ New text models
+        "mistralai/Voxtral-Mini-3B-2507": "Voxtral Mini 3B - multimodal",
+        "tiiuae/falcon-7b-instruct": "Falcon 7B Instruct",
+        "google/flan-t5-base": "Flan-T5 Base - multi-task"
+    },
+    "image": {
+        "CompVis/stable-diffusion-v1-4": "Stable Diffusion v1.4 (free)",
+        "stabilityai/stable-diffusion-2-1": "Stable Diffusion 2.1",
+        "stabilityai/stable-diffusion-xl-base-1.0": "SDXL Base",
+        "stabilityai/stable-diffusion-3-medium": "SD 3 Medium",
+        "prompthero/openjourney": "Midjourney style",
+        "WarriorMama777/OrangeMixs": "Orange Mixs",
+        "hakurei/waifu-diffusion": "Waifu Diffusion",
+        "black-forest-labs/FLUX.1-schnell": "FLUX.1 Schnell (requires access)",
+        "black-forest-labs/FLUX.1-dev": "FLUX.1 Dev (requires access)",
+        # ✅ New image models
+        "CompVis/ldm-text2im-large-256": "Latent Diffusion Model 256",
+        # ⚡ Turbo models (fast)
+        "stabilityai/sdxl-turbo": "⚡ SDXL Turbo",
+        "stabilityai/sd-turbo": "⚡ SD Turbo",
+        "ByteDance/SDXL-Lightning": "⚡ SDXL Lightning",
+        # 🎨 Additional models
+        "KBlueLeaf/kohaku-v2.1": "Kohaku V2.1"
+    },
+    "video": {
+        "damo-vilab/text-to-video-ms-1.7b": "Text-to-Video MS 1.7B (free)",
+        "ali-vilab/text-to-video-ms-1.7b": "Text-to-Video MS 1.7B alt",
+        "cerspense/zeroscope_v2_576w": "Zeroscope v2 576w (free)",
+        "cerspense/zeroscope_v2_XL": "Zeroscope v2 XL (free)",
+        "ByteDance/AnimateDiff-Lightning": "AnimateDiff Lightning (free)",
+        "THUDM/CogVideoX-5b": "CogVideoX 5B (free)",
+        "rain1011/pyramid-flow-sd3": "Pyramid Flow SD3 (free)",
+        # ✅ New video models
+        "ali-vilab/modelscope-damo-text-to-video-synthesis": "ModelScope Text-to-Video"
+    },
+    "chat": {
+        "microsoft/DialoGPT-medium": "Conversational chat",
+        "microsoft/DialoGPT-large": "Advanced conversational chat",
+        "microsoft/DialoGPT-small": "Fast conversational chat",
+        "facebook/opt-350m": "Conversational OPT",
+        "bigscience/bloom-560m": "Multilingual BLOOM",
+        # ✅ New chat models
+        "mistralai/Voxtral-Mini-3B-2507": "Voxtral Mini 3B - multimodal",
+        "tiiuae/falcon-7b-instruct": "Falcon 7B Instruct"
+    }
+}
+
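The registry maps Hugging Face model ids to short descriptions, one sub-dict per task. A registry like this typically drives both the UI and request validation; a minimal sketch of that dual use (the dropdown label and the validator are assumptions, not code from this commit):

    # Sketch: build Gradio dropdown choices and validate API requests
    # against the same MODELS registry.
    image_choices = [(f"{desc} ({mid})", mid) for mid, desc in MODELS["image"].items()]
    model_dropdown = gr.Dropdown(choices=image_choices, label="Image model")

    def validate_model(task: str, model_name: str) -> None:
        if model_name not in MODELS.get(task, {}):
            raise HTTPException(status_code=400, detail=f"Unknown {task} model: {model_name}")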
+# Cache for loaded models
+model_cache = {}
+
+def load_text_model(model_name):
+    """Load a text model, with support for several model families"""
+    if model_name not in model_cache:
+        print(f"Loading text model: {model_name}")
+
+        try:
+            # Detect the model type
+            if "opus-mt" in model_name.lower():
+                # Translation model
+                from transformers import MarianMTModel, MarianTokenizer
+                tokenizer = MarianTokenizer.from_pretrained(model_name)
+                model = MarianMTModel.from_pretrained(model_name)
+
+            elif "flan-t5" in model_name.lower():
+                # Flan-T5 model
+                from transformers import T5Tokenizer, T5ForConditionalGeneration
+                tokenizer = T5Tokenizer.from_pretrained(model_name)
+                model = T5ForConditionalGeneration.from_pretrained(model_name)
+
+            elif "falcon" in model_name.lower():
+                # Falcon model
+                from transformers import AutoTokenizer, AutoModelForCausalLM
+                tokenizer = AutoTokenizer.from_pretrained(model_name)
+                model = AutoModelForCausalLM.from_pretrained(model_name)
+                # Falcon setup
+                if tokenizer.pad_token is None:
+                    tokenizer.pad_token = tokenizer.eos_token
+
+            elif "voxtral" in model_name.lower():
+                # Voxtral model (multimodal)
+                from transformers import AutoTokenizer, AutoModelForCausalLM
+                tokenizer = AutoTokenizer.from_pretrained(model_name)
+                model = AutoModelForCausalLM.from_pretrained(model_name)
+                # Voxtral setup
+                if tokenizer.pad_token is None:
+                    tokenizer.pad_token = tokenizer.eos_token
+
+            else:
+                # Standard text-generation model
+                from transformers import AutoTokenizer, AutoModelForCausalLM
+                tokenizer = AutoTokenizer.from_pretrained(model_name)
+                model = AutoModelForCausalLM.from_pretrained(model_name)
+
+                # Chat setup for DialoGPT
+                if "dialogpt" in model_name.lower():
+                    tokenizer.pad_token = tokenizer.eos_token
+                    model.config.pad_token_id = model.config.eos_token_id
+
+            model_cache[model_name] = {
+                "tokenizer": tokenizer,
+                "model": model,
+                "type": "text"
+            }
+
+        except Exception as e:
+            print(f"Error loading text model {model_name}: {e}")
+            # Fall back to a basic model
+            from transformers import AutoTokenizer, AutoModelForCausalLM
+            tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+            model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
+            tokenizer.pad_token = tokenizer.eos_token
+            model.config.pad_token_id = model.config.eos_token_id
+
+            model_cache[model_name] = {
+                "tokenizer": tokenizer,
+                "model": model,
+                "type": "text"
+            }
+
+    return model_cache[model_name]
+
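Each cache entry bundles the tokenizer, the model, and a type tag, so callers pay the download cost only once per model. Assumed usage, sketched for one DialoGPT chat turn (the prompt and generation settings are illustrative, not from this file):

    # Sketch: one chat turn with a cached text-model entry.
    entry = load_text_model("microsoft/DialoGPT-medium")
    tokenizer, model = entry["tokenizer"], entry["model"]

    # DialoGPT expects the EOS token appended to each user message.
    inputs = tokenizer("Hello, how are you?" + tokenizer.eos_token, return_tensors="pt")
    output_ids = model.generate(
        **inputs,
        max_length=100,
        pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning
    )
    # Decode only the newly generated tokens, not the echoed prompt.
    reply_ids = output_ids[:, inputs["input_ids"].shape[-1]:]
    print(tokenizer.decode(reply_ids[0], skip_special_tokens=True))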
+def load_image_model(model_name):
+    """Load an image model, optimized for the H200"""
+    global model_cache
+
+    if model_name not in model_cache:
+        print(f"\n🔄 Starting model load: {model_name}")
+
+        try:
+            start_time = time.time()
+
+            # Decide whether to use the fp16 variant for this model
+            use_fp16_variant = False
+            if torch.cuda.is_available():
+                # Only use the fp16 variant for models that ship one
+                fp16_supported_models = [
+                    "stabilityai/sdxl-turbo",
+                    "stabilityai/sd-turbo",
+                    "stabilityai/stable-diffusion-xl-base-1.0",
+                    "runwayml/stable-diffusion-v1-5",
+                    "CompVis/stable-diffusion-v1-4"
+                ]
+                use_fp16_variant = any(model in model_name for model in fp16_supported_models)
+                print(f"🔧 FP16 variant: {'✅ enabled' if use_fp16_variant else '❌ disabled'} for {model_name}")
+
+            # Special handling for FLUX
+            if "flux" in model_name.lower():
+                if not GATED_ACCESS:
+                    print("❌ No access to gated models. Set HF_TOKEN in the Space.")
+                    raise Exception("Access to FLUX models denied. Set HF_TOKEN in the Space's environment variables.")
+
+                try:
+                    from diffusers import FluxPipeline
+                    print("🚀 Loading FLUX pipeline...")
+                    print(f"🔧 Model: {model_name}")
+                    print(f"🔑 Using auth token: {'yes' if HF_TOKEN else 'no'}")
+
+                    # For FLUX models, do not use the fp16 variant
+                    pipe = FluxPipeline.from_pretrained(
+                        model_name,
+                        torch_dtype=torch_dtype,
+                        use_auth_token=HF_TOKEN,
+                        variant="fp16" if use_fp16_variant else None
+                    )
+
+                    print("✅ FLUX pipeline loaded successfully")
+
+                except Exception as e:
+                    print(f"❌ Error loading FLUX: {e}")
+                    print(f"🔍 Error type: {type(e).__name__}")
+
+                    # On an authentication error, give specific instructions
+                    if "401" in str(e) or "unauthorized" in str(e).lower():
+                        print("🔐 Authentication error. Make sure that:")
+                        print("   1. You have access to the FLUX model on Hugging Face")
+                        print("   2. HF_TOKEN is set in the Space's environment variables")
+                        print("   3. The token has permission to access gated models")
+
+                    # Fall back to Stable Diffusion
+                    print("🔄 Falling back to Stable Diffusion...")
+                    pipe = StableDiffusionPipeline.from_pretrained(
            +
                                    "CompVis/stable-diffusion-v1-4",
         
     | 
| 329 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 330 | 
         
            +
                                    safety_checker=None
         
     | 
| 331 | 
         
            +
                                )
         
     | 
| 332 | 
         
            +
                        
         
     | 
| 333 | 
         
            +
                        # Configuración especial para SD 2.1 (problemático)
         
     | 
| 334 | 
         
            +
                        elif "stable-diffusion-2-1" in model_name:
         
     | 
| 335 | 
         
            +
                            try:
         
     | 
| 336 | 
         
            +
                                pipe = StableDiffusionPipeline.from_pretrained(
         
     | 
| 337 | 
         
            +
                                    model_name,
         
     | 
| 338 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 339 | 
         
            +
                                    safety_checker=None,
         
     | 
| 340 | 
         
            +
                                    requires_safety_checker=False,
         
     | 
| 341 | 
         
            +
                                    variant="fp16" if use_fp16_variant else None
         
     | 
| 342 | 
         
            +
                                )
         
     | 
| 343 | 
         
            +
                            except Exception as e:
         
     | 
| 344 | 
         
            +
                                print(f"Error cargando SD 2.1: {e}")
         
     | 
| 345 | 
         
            +
                                # Fallback a SD 1.4
         
     | 
| 346 | 
         
            +
                                pipe = StableDiffusionPipeline.from_pretrained(
         
     | 
| 347 | 
         
            +
                                    "CompVis/stable-diffusion-v1-4",
         
     | 
| 348 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 349 | 
         
            +
                                    safety_checker=None
         
     | 
| 350 | 
         
            +
                                )
         
     | 
| 351 | 
         
            +
                        
         
     | 
| 352 | 
         
            +
                        # Configuración especial para LDM
         
     | 
| 353 | 
         
            +
                        elif "ldm-text2im" in model_name:
         
     | 
| 354 | 
         
            +
                            try:
         
     | 
| 355 | 
         
            +
                                from diffusers import DiffusionPipeline
         
     | 
| 356 | 
         
            +
                                pipe = DiffusionPipeline.from_pretrained(
         
     | 
| 357 | 
         
            +
                                    model_name,
         
     | 
| 358 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 359 | 
         
            +
                                    safety_checker=None
         
     | 
| 360 | 
         
            +
                                )
         
     | 
| 361 | 
         
            +
                            except Exception as e:
         
     | 
| 362 | 
         
            +
                                print(f"Error cargando LDM: {e}")
         
     | 
| 363 | 
         
            +
                                # Fallback a SD 1.4
         
     | 
| 364 | 
         
            +
                                pipe = StableDiffusionPipeline.from_pretrained(
         
     | 
| 365 | 
         
            +
                                    "CompVis/stable-diffusion-v1-4",
         
     | 
| 366 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 367 | 
         
            +
                                    safety_checker=None
         
     | 
| 368 | 
         
            +
                                )
         
     | 
| 369 | 
         
            +
                        
         
     | 
| 370 | 
         
            +
                        # Configuración estándar para otros modelos
         
     | 
| 371 | 
         
            +
                        else:
         
     | 
| 372 | 
         
            +
                            try:
         
     | 
| 373 | 
         
            +
                                pipe = StableDiffusionPipeline.from_pretrained(
         
     | 
| 374 | 
         
            +
                                    model_name,
         
     | 
| 375 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 376 | 
         
            +
                                    safety_checker=None,
         
     | 
| 377 | 
         
            +
                                    variant="fp16" if use_fp16_variant else None
         
     | 
| 378 | 
         
            +
                                )
         
     | 
| 379 | 
         
            +
                            except Exception as e:
         
     | 
| 380 | 
         
            +
                                print(f"Error cargando {model_name}: {e}")
         
     | 
| 381 | 
         
            +
                                # Fallback a SD 1.4
         
     | 
| 382 | 
         
            +
                                pipe = StableDiffusionPipeline.from_pretrained(
         
     | 
| 383 | 
         
            +
                                    "CompVis/stable-diffusion-v1-4",
         
     | 
| 384 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 385 | 
         
            +
                                    safety_checker=None
         
     | 
| 386 | 
         
            +
                                )
         
     | 
| 387 | 
         
            +
                        
         
     | 
| 388 | 
         
            +
                        load_time = time.time() - start_time
         
     | 
| 389 | 
         
            +
                        print(f"⏱️ Tiempo de carga: {load_time:.2f} segundos")
         
     | 
| 390 | 
         
            +
                        
         
     | 
| 391 | 
         
            +
                        print(f"🚀 Moviendo modelo a dispositivo: {device}")
         
     | 
| 392 | 
         
            +
                        pipe = pipe.to(device)
         
     | 
| 393 | 
         
            +
                        
         
     | 
| 394 | 
         
            +
                        # Optimizaciones específicas para H200
         
     | 
| 395 | 
         
            +
                        if torch.cuda.is_available():
         
     | 
| 396 | 
         
            +
                            print("🔧 Aplicando optimizaciones para H200...")
         
     | 
| 397 | 
         
            +
                            
         
     | 
| 398 | 
         
            +
                            # Habilitar optimizaciones de memoria (más conservadoras)
         
     | 
| 399 | 
         
            +
                            if hasattr(pipe, 'enable_attention_slicing'):
         
     | 
| 400 | 
         
            +
                                pipe.enable_attention_slicing()
         
     | 
| 401 | 
         
            +
                                print("✅ Attention slicing habilitado")
         
     | 
| 402 | 
         
            +
                            
         
     | 
| 403 | 
         
            +
                            # Deshabilitar CPU offload temporalmente (causa problemas con ZeroGPU)
         
     | 
| 404 | 
         
            +
                            # if hasattr(pipe, 'enable_model_cpu_offload') and "sdxl" in model_name.lower():
         
     | 
| 405 | 
         
            +
                            #     pipe.enable_model_cpu_offload()
         
     | 
| 406 | 
         
            +
                            #     print("✅ CPU offload habilitado (modelo grande)")
         
     | 
| 407 | 
         
            +
                            
         
     | 
| 408 | 
         
            +
                            if hasattr(pipe, 'enable_vae_slicing'):
         
     | 
| 409 | 
         
            +
                                pipe.enable_vae_slicing()
         
     | 
| 410 | 
         
            +
                                print("✅ VAE slicing habilitado")
         
     | 
| 411 | 
         
            +
                            
         
     | 
| 412 | 
         
            +
                            # XFormers solo si está disponible y el modelo lo soporta
         
     | 
| 413 | 
         
            +
                            if hasattr(pipe, 'enable_xformers_memory_efficient_attention'):
         
     | 
| 414 | 
         
            +
                                # FLUX models tienen problemas con XFormers, deshabilitar
         
     | 
| 415 | 
         
            +
                                if "flux" in model_name.lower() or "black-forest" in model_name.lower():
         
     | 
| 416 | 
         
            +
                                    print("⚠️ XFormers deshabilitado para modelos FLUX (incompatible)")
         
     | 
| 417 | 
         
            +
                                else:
         
     | 
| 418 | 
         
            +
                                    try:
         
     | 
| 419 | 
         
            +
                                        pipe.enable_xformers_memory_efficient_attention()
         
     | 
| 420 | 
         
            +
                                        print("✅ XFormers memory efficient attention habilitado")
         
     | 
| 421 | 
         
            +
                                    except Exception as e:
         
     | 
| 422 | 
         
            +
                                        print(f"⚠️ XFormers no disponible: {e}")
         
     | 
| 423 | 
         
            +
                                        print("🔄 Usando atención estándar")
         
     | 
| 424 | 
         
            +
                        
         
     | 
| 425 | 
         
            +
                        print(f"✅ Modelo {model_name} cargado exitosamente")
         
     | 
| 426 | 
         
            +
                        
         
     | 
| 427 | 
         
            +
                        if torch.cuda.is_available():
         
     | 
| 428 | 
         
            +
                            memory_used = torch.cuda.memory_allocated() / 1024**3
         
     | 
| 429 | 
         
            +
                            memory_reserved = torch.cuda.memory_reserved() / 1024**3
         
     | 
| 430 | 
         
            +
                            print(f"💾 Memoria GPU utilizada: {memory_used:.2f} GB")
         
     | 
| 431 | 
         
            +
                            print(f"💾 Memoria GPU reservada: {memory_reserved:.2f} GB")
         
     | 
| 432 | 
         
            +
                            
         
     | 
| 433 | 
         
            +
                            # Verificar si la memoria es sospechosamente baja
         
     | 
| 434 | 
         
            +
                            if memory_used < 0.1:
         
     | 
| 435 | 
         
            +
                                print("⚠️ ADVERTENCIA: Memoria GPU muy baja - posible problema de carga")
         
     | 
| 436 | 
         
            +
                        else:
         
     | 
| 437 | 
         
            +
                            print("💾 Memoria CPU")
         
     | 
| 438 | 
         
            +
                        
         
     | 
| 439 | 
         
            +
                        # Guardar en cache
         
     | 
| 440 | 
         
            +
                        model_cache[model_name] = pipe
         
     | 
| 441 | 
         
            +
                            
         
     | 
| 442 | 
         
            +
                    except Exception as e:
         
     | 
| 443 | 
         
            +
                        print(f"❌ Error cargando modelo {model_name}: {e}")
         
     | 
| 444 | 
         
            +
                        print(f"🔍 Tipo de error: {type(e).__name__}")
         
     | 
| 445 | 
         
            +
                        
         
     | 
| 446 | 
         
            +
                        # Intentar cargar sin variant fp16 si falló
         
     | 
| 447 | 
         
            +
                        if "variant" in str(e) and "fp16" in str(e):
         
     | 
| 448 | 
         
            +
                            print("🔄 Reintentando sin variant fp16...")
         
     | 
| 449 | 
         
            +
                            try:
         
     | 
| 450 | 
         
            +
                                pipe = StableDiffusionPipeline.from_pretrained(
         
     | 
| 451 | 
         
            +
                                    model_name, 
         
     | 
| 452 | 
         
            +
                                    torch_dtype=torch_dtype,
         
     | 
| 453 | 
         
            +
                                    use_auth_token=HF_TOKEN if HF_TOKEN and ("flux" in model_name.lower() or "black-forest" in model_name.lower()) else None
         
     | 
| 454 | 
         
            +
                                )
         
     | 
| 455 | 
         
            +
                                pipe = pipe.to(device)
         
     | 
| 456 | 
         
            +
                                model_cache[model_name] = pipe
         
     | 
| 457 | 
         
            +
                                print(f"✅ Modelo {model_name} cargado exitosamente (sin fp16 variant)")
         
     | 
| 458 | 
         
            +
                            except Exception as e2:
         
     | 
| 459 | 
         
            +
                                print(f"❌ Error en segundo intento: {e2}")
         
     | 
| 460 | 
         
            +
                                raise e2
         
     | 
| 461 | 
         
            +
                        else:
         
     | 
| 462 | 
         
            +
                            raise e
         
     | 
| 463 | 
         
            +
                else:
         
     | 
| 464 | 
         
            +
                    print(f"♻️ Modelo {model_name} ya está cargado, reutilizando...")
         
     | 
| 465 | 
         
            +
                
         
     | 
| 466 | 
         
            +
                return model_cache[model_name]
         
     | 
| 467 | 
         
            +
             
     | 
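# Usage sketch (illustrative only; the model id and prompt are assumptions,
# not values taken from this file). The pipeline is cached, so repeated calls
# with the same id are cheap.
# pipe = load_image_model("CompVis/stable-diffusion-v1-4")
# image = pipe("a watercolor fox", num_inference_steps=20, guidance_scale=7.5).images[0]
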
def load_video_model(model_name):
    """Load a video model with support for different model types"""
    if model_name not in model_cache:
        print(f"Loading video model: {model_name}")
        
        try:
            from diffusers import DiffusionPipeline
            
            # Text-to-video checkpoints ship an fp16 variant; all remaining
            # families (ModelScope, Zeroscope, AnimateDiff, CogVideo,
            # Pyramid Flow and the generic fallback) load identically with
            # the default variant, so the branches collapse into one.
            if "text-to-video" in model_name.lower():
                pipe = DiffusionPipeline.from_pretrained(
                    model_name,
                    torch_dtype=torch.float32,
                    variant="fp16"
                )
            else:
                pipe = DiffusionPipeline.from_pretrained(
                    model_name,
                    torch_dtype=torch.float32
                )
            
            # Basic optimizations
            pipe.enable_attention_slicing()
            if hasattr(pipe, 'enable_model_cpu_offload'):
                pipe.enable_model_cpu_offload()
            
            model_cache[model_name] = {
                "pipeline": pipe,
                "type": "video"
            }
            
        except Exception as e:
            print(f"Error loading video model {model_name}: {e}")
            # Fall back to a basic model
            try:
                from diffusers import DiffusionPipeline
                pipe = DiffusionPipeline.from_pretrained(
                    "damo-vilab/text-to-video-ms-1.7b",
                    torch_dtype=torch.float32
                )
                pipe.enable_attention_slicing()
                
                model_cache[model_name] = {
                    "pipeline": pipe,
                    "type": "video"
                }
            except Exception as fallback_error:
                print(f"Critical error in video fallback: {fallback_error}")
                raise
    
    return model_cache[model_name]

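# Usage sketch (illustrative only; the model id, prompt and fps are
# assumptions, not values taken from this file). Depending on the diffusers
# version, the pipeline output exposes frames as `result.frames` or
# `result.frames[0]`.
# from diffusers.utils import export_to_video
# video_pipe = load_video_model("damo-vilab/text-to-video-ms-1.7b")["pipeline"]
# result = video_pipe("a rocket launch at sunset", num_inference_steps=20, num_frames=16)
# export_to_video(result.frames[0], "rocket.mp4", fps=8)
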
def generate_text(prompt, model_name, max_length=100):
    """Generate text with the selected model - improved for different model types"""
    try:
        model_data = load_text_model(model_name)
        tokenizer = model_data["tokenizer"]
        model = model_data["model"]
        
        # Detect translation models
        if "opus-mt" in model_name.lower():
            # Translation
            inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=512, truncation=True)
            with torch.no_grad():
                outputs = model.generate(inputs, max_length=max_length, num_beams=4, early_stopping=True)
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        else:
            # Text generation
            inputs = tokenizer.encode(prompt, return_tensors="pt")
            
            # Generate
            with torch.no_grad():
                outputs = model.generate(
                    inputs,
                    max_length=max_length,
                    num_return_sequences=1,
                    temperature=0.7,
                    do_sample=True,
                    pad_token_id=tokenizer.eos_token_id
                )
            
            # Decode the response
            response = tokenizer.decode(outputs[0], skip_special_tokens=True)
            
            # For DialoGPT, keep only the assistant's reply
            if "dialogpt" in model_name.lower():
                response = response.replace(prompt, "").strip()
        
        return response
        
    except Exception as e:
        return f"Error generating text: {str(e)}"

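# Usage sketch (illustrative only; the model ids below are assumptions):
# the same entry point serves chat-style and translation checkpoints.
# print(generate_text("Hello, how are you?", "microsoft/DialoGPT-medium", max_length=60))
# print(generate_text("The weather is nice today.", "Helsinki-NLP/opus-mt-en-es", max_length=60))
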
# @spaces.GPU #[uncomment to use ZeroGPU]
@spaces.GPU
def generate_image(
    prompt,
    model_name,
    negative_prompt="",
    seed=0,
    randomize_seed=True,
    width=1024,
    height=1024,
    guidance_scale=7.5,
    num_inference_steps=20,
    eta=0,
    strength=1,
    num_images=1,
    safety_checker=True
):
    """Generate an image optimized for the H200 with advanced parameters"""
    try:
        print("\n🎨 Starting image generation on the H200...")
        print(f"📝 Prompt: {prompt}")
        print(f"🚫 Negative prompt: {negative_prompt}")
        print(f"🎯 Selected model: {model_name}")
        print(f"🔄 Inference steps: {num_inference_steps}")
        print(f"🎲 Seed: {seed} (randomize: {randomize_seed})")
        print(f"📐 Dimensions: {width}x{height}")
        print(f"🎯 Guidance scale: {guidance_scale}")
        print(f"🎯 Eta: {eta}")
        print(f"💪 Strength: {strength}")
        print(f"🖼️ Images per prompt: {num_images}")
        print(f"🛡️ Safety checker: {safety_checker}")
        
        start_time = time.time()
        
        # Coerce parameters to the correct types; on failure, report the
        # original value before overwriting it with the default
        if isinstance(num_inference_steps, str):
            try:
                num_inference_steps = int(num_inference_steps)
            except ValueError:
                print(f"⚠️ Could not convert '{num_inference_steps}' to int, using 20")
                num_inference_steps = 20
        
        if isinstance(seed, str):
            try:
                seed = int(seed)
            except ValueError:
                print(f"⚠️ Could not convert '{seed}' to int, using 0")
                seed = 0
        
        if isinstance(width, str):
            try:
                width = int(width)
            except ValueError:
                print(f"⚠️ Could not convert '{width}' to int, using 1024")
                width = 1024
        
        if isinstance(height, str):
            try:
                height = int(height)
            except ValueError:
                print(f"⚠️ Could not convert '{height}' to int, using 1024")
                height = 1024
        
        if isinstance(guidance_scale, str):
            try:
                guidance_scale = float(guidance_scale)
            except ValueError:
                print(f"⚠️ Could not convert '{guidance_scale}' to float, using 7.5")
                guidance_scale = 7.5
        
        if isinstance(eta, str):
            try:
                eta = float(eta)
            except ValueError:
                print(f"⚠️ Could not convert '{eta}' to float, using 0")
                eta = 0
        
        if isinstance(strength, str):
            try:
                strength = float(strength)
            except ValueError:
                print(f"⚠️ Could not convert '{strength}' to float, using 1")
                strength = 1
        
        if isinstance(num_images, str):
            try:
                num_images = int(num_images)
            except ValueError:
                print(f"⚠️ Could not convert '{num_images}' to int, using 1")
                num_images = 1
        
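        # The eight coercion blocks above could be collapsed into a single
        # helper; a minimal sketch (hypothetical, not part of this app):
        #
        # def _coerce(value, cast, default):
        #     try:
        #         return cast(value) if isinstance(value, str) else value
        #     except ValueError:
        #         print(f"⚠️ Could not convert '{value}', using {default}")
        #         return default
        #
        # width = _coerce(width, int, 1024)  # and likewise for the others
        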
        # Load the model
        pipe = load_image_model(model_name)
        
        # Configure the seed
        if randomize_seed:
            seed = random.randint(0, 2147483647)
            print(f"🎲 Randomized seed: {seed}")
        
        generator = torch.Generator(device=device).manual_seed(seed)
        
        # Adjust parameters according to the model type
        if "turbo" in model_name.lower():
            guidance_scale = min(guidance_scale, 1.0)
            num_inference_steps = min(num_inference_steps, 4)
            print(f"⚡ Turbo model - adjusting parameters: guidance={guidance_scale}, steps={num_inference_steps}")
        elif "lightning" in model_name.lower():
            guidance_scale = min(guidance_scale, 1.0)
            num_inference_steps = max(num_inference_steps, 4)
            print(f"⚡ Lightning model - adjusting parameters: guidance={guidance_scale}, steps={num_inference_steps}")
        elif "flux" in model_name.lower():
            guidance_scale = max(3.5, min(guidance_scale, 7.5))
            num_inference_steps = max(15, num_inference_steps)
            print(f"🔐 FLUX model - adjusting parameters: guidance={guidance_scale}, steps={num_inference_steps}")
        
        print("⚙️ Final parameters (after model-specific adjustments):")
        print(f"   - Guidance scale: {guidance_scale}")
        print(f"   - Inference steps: {num_inference_steps}")
        print(f"   - Width: {width}, Height: {height}")
        print(f"   - Seed: {seed}")
        print(f"   - Eta: {eta}")
        print(f"   - Strength: {strength}")
        print(f"   - Images per prompt: {num_images}")
        
        print("🎨 Starting image generation on the H200...")
        inference_start = time.time()
        
        # H200-specific optimizations
        if torch.cuda.is_available():
            print("🚀 Applying H200-specific optimizations...")
            
            # Clear the GPU cache before inference
            torch.cuda.empty_cache()
            
            # Generate the image
            print("⚡ Generating image on the H200...")
            
            # Build the generation parameters
            generation_kwargs = {
                "prompt": prompt,
                "height": height,
                "width": width,
                "guidance_scale": guidance_scale,
                "num_inference_steps": num_inference_steps,
                "generator": generator,
                "num_images_per_prompt": num_images
            }
            
            # Add optional parameters
            if negative_prompt and negative_prompt.strip():
                generation_kwargs["negative_prompt"] = negative_prompt.strip()
            
            if eta > 0:
                generation_kwargs["eta"] = eta
            
            if strength < 1:
                generation_kwargs["strength"] = strength
            
            # Generate the image
            result = pipe(**generation_kwargs)
            
            # Check that the image was generated correctly
            if hasattr(result, 'images') and len(result.images) > 0:
                # If several images were generated, return the first one
                image = result.images[0]
                
                # Check that the image is not completely black
                if image is not None:
                    # Convert to numpy for the check
                    img_array = np.array(image)
                    if img_array.size > 0:
                        # Detect a completely (or almost completely) black image
                        if np.all(img_array == 0) or np.all(img_array < 10):
                            print("⚠️ WARNING: the generated image is completely black")
                            print("🔄 Retrying with adjusted parameters...")
                            
                            # Retry with more conservative parameters
                            generation_kwargs["guidance_scale"] = max(1.0, guidance_scale * 0.8)
                            generation_kwargs["num_inference_steps"] = max(10, num_inference_steps)
                            
                            result = pipe(**generation_kwargs)
                            image = result.images[0]
                        else:
                            print("✅ Image generated correctly")
                    else:
                        print("❌ Error: empty image")
                        raise Exception("Empty image generated")
                else:
                    print("❌ Error: image is None")
                    raise Exception("Image is None")
            else:
                print("❌ Error: no images were generated")
                raise Exception("No images were generated")
        else:
            # CPU fallback
            generation_kwargs = {
                "prompt": prompt,
                "height": height,
                "width": width,
                "guidance_scale": guidance_scale,
                "num_inference_steps": num_inference_steps,
                "generator": generator,
                "num_images_per_prompt": num_images
            }
            
            if negative_prompt and negative_prompt.strip():
                generation_kwargs["negative_prompt"] = negative_prompt.strip()
            
            if eta > 0:
                generation_kwargs["eta"] = eta
            
            if strength < 1:
                generation_kwargs["strength"] = strength
            
            result = pipe(**generation_kwargs)
            image = result.images[0]
        
        inference_time = time.time() - inference_start
        total_time = time.time() - start_time
        
        print("✅ Image generated successfully on the H200!")
        print(f"⏱️ Inference time: {inference_time:.2f} seconds")
        print(f"⏱️ Total time: {total_time:.2f} seconds")
        print(f"🎲 Final seed: {seed}")
        
        if torch.cuda.is_available():
            print(f"💾 GPU memory allocated: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
            print(f"💾 GPU memory reserved: {torch.cuda.memory_reserved() / 1024**3:.2f} GB")
            print(f"🚀 H200 speed: {num_inference_steps/inference_time:.1f} steps/second")
        else:
            print("💾 CPU memory")
        
        return image
        
    except Exception as e:
        print(f"❌ Error during inference: {e}")
        print(f"🔍 Error type: {type(e).__name__}")
        print(f"📋 Error details: {str(e)}")
        # Create an error image
        error_image = Image.new('RGB', (512, 512), color='red')
        return error_image

| 840 | 
         
            +
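# Usage sketch (illustrative values; the parameter order follows what is
# wired into image_btn.click further below):
#
#     img = generate_image(
#         "a lighthouse at dawn, detailed, 8k",
#         "CompVis/stable-diffusion-v1-4",
#         "blurry, low quality",   # negative prompt
#         42, False,               # seed, randomize_seed
#         512, 512,                # width, height
#         7.5, 25,                 # guidance_scale, num_inference_steps
#         0.0, 1.0,                # eta, strength
#         1, True,                 # num_images, safety_checker
#     )
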
@spaces.GPU
def generate_video(prompt, model_name, num_frames=16, num_inference_steps=20):
    """Generate a video with the selected model"""
    try:
        print(f"Generando video con modelo: {model_name}")
        print(f"Prompt: {prompt}")
        print(f"Frames: {num_frames}")
        print(f"Pasos: {num_inference_steps}")
        
        model_data = load_video_model(model_name)
        pipeline = model_data["pipeline"]
        
        # Per-model-family configuration
        if "zeroscope" in model_name.lower():
            # Zeroscope models
            result = pipeline(
                prompt,
                num_inference_steps=num_inference_steps,
                num_frames=num_frames,
                height=256,
                width=256
            )
        elif "animatediff" in model_name.lower():
            # AnimateDiff models
            result = pipeline(
                prompt,
                num_inference_steps=num_inference_steps,
                num_frames=num_frames
            )
        else:
            # Text-to-video models (default)
            result = pipeline(
                prompt,
                num_inference_steps=num_inference_steps,
                num_frames=num_frames
            )
        
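        # Note: the animatediff and default branches above are currently
        # identical; only the zeroscope branch pins an explicit 256x256
        # resolution to keep memory use modest.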
                    print("Video generado exitosamente")
         
     | 
| 879 | 
         
            +
                    
         
     | 
| 880 | 
         
            +
                    # Manejar diferentes tipos de respuesta
         
     | 
| 881 | 
         
            +
                    if hasattr(result, 'frames'):
         
     | 
| 882 | 
         
            +
                        video_frames = result.frames
         
     | 
| 883 | 
         
            +
                    elif hasattr(result, 'videos'):
         
     | 
| 884 | 
         
            +
                        video_frames = result.videos
         
     | 
| 885 | 
         
            +
                    else:
         
     | 
| 886 | 
         
            +
                        video_frames = result
         
     | 
| 887 | 
         
            +
                    
         
     | 
| 888 | 
         
            +
                    # Convertir a formato compatible con Gradio
         
     | 
| 889 | 
         
            +
                    if isinstance(video_frames, list):
         
     | 
| 890 | 
         
            +
                        if len(video_frames) == 1:
         
     | 
| 891 | 
         
            +
                            return video_frames[0]
         
     | 
| 892 | 
         
            +
                        else:
         
     | 
| 893 | 
         
            +
                            return video_frames
         
     | 
| 894 | 
         
            +
                    else:
         
     | 
| 895 | 
         
            +
                        # Si es un tensor numpy, convertirlo a formato de video
         
     | 
| 896 | 
         
            +
                        if hasattr(video_frames, 'shape'):
         
     | 
| 897 | 
         
            +
                            import numpy as np
         
     | 
| 898 | 
         
            +
                            print(f"Forma del video: {video_frames.shape}")
         
     | 
| 899 | 
         
            +
                            
         
     | 
| 900 | 
         
            +
                            # Convertir a formato de video compatible con Gradio
         
     | 
| 901 | 
         
            +
                            if len(video_frames.shape) == 4:  # (frames, height, width, channels)
         
     | 
| 902 | 
         
            +
                                # Convertir frames a formato de video
         
     | 
| 903 | 
         
            +
                                frames_list = []
         
     | 
| 904 | 
         
            +
                                for i in range(video_frames.shape[0]):
         
     | 
| 905 | 
         
            +
                                    frame = video_frames[i]
         
     | 
| 906 | 
         
            +
                                    # Asegurar que el frame esté en el rango correcto (0-255)
         
     | 
| 907 | 
         
            +
                                    if frame.dtype == np.float32 or frame.dtype == np.float16:
         
     | 
| 908 | 
         
            +
                                        frame = (frame * 255).astype(np.uint8)
         
     | 
| 909 | 
         
            +
                                    frames_list.append(frame)
         
     | 
| 910 | 
         
            +
                                
         
     | 
| 911 | 
         
            +
                                # Crear video a partir de frames
         
     | 
| 912 | 
         
            +
                                import imageio
         
     | 
| 913 | 
         
            +
                                import tempfile
         
     | 
| 914 | 
         
            +
                                import os
         
     | 
| 915 | 
         
            +
                                
         
     | 
| 916 | 
         
            +
                                # Crear archivo temporal
         
     | 
| 917 | 
         
            +
                                with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
         
     | 
| 918 | 
         
            +
                                    temp_path = tmp_file.name
         
     | 
| 919 | 
         
            +
                                
         
     | 
| 920 | 
         
            +
                                # Guardar frames como video
         
     | 
| 921 | 
         
            +
                                imageio.mimsave(temp_path, frames_list, fps=8)
         
     | 
| 922 | 
         
            +
                                
         
     | 
| 923 | 
         
            +
                                print(f"Video guardado en: {temp_path}")
         
     | 
| 924 | 
         
            +
                                return temp_path
         
     | 
| 925 | 
         
            +
                                
         
     | 
| 926 | 
         
            +
                            elif len(video_frames.shape) == 5:  # (batch, frames, height, width, channels)
         
     | 
| 927 | 
         
            +
                                # Tomar el primer batch
         
     | 
| 928 | 
         
            +
                                frames = video_frames[0]
         
     | 
| 929 | 
         
            +
                                return generate_video(prompt, model_name, num_frames, num_inference_steps)
         
     | 
| 930 | 
         
            +
                            else:
         
     | 
| 931 | 
         
            +
                                print(f"Forma no reconocida: {video_frames.shape}")
         
     | 
| 932 | 
         
            +
                                return None
         
     | 
| 933 | 
         
            +
                        else:
         
     | 
| 934 | 
         
            +
                            return video_frames
         
     | 
| 935 | 
         
            +
                    
         
     | 
| 936 | 
         
            +
                except Exception as e:
         
     | 
| 937 | 
         
            +
                    print(f"Error generando video: {str(e)}")
         
     | 
| 938 | 
         
            +
                    print(f"Tipo de error: {type(e).__name__}")
         
     | 
| 939 | 
         
            +
                    import traceback
         
     | 
| 940 | 
         
            +
                    traceback.print_exc()
         
     | 
| 941 | 
         
            +
                    return f"Error generando video: {str(e)}"
         
     | 
| 942 | 
         
            +
             
     | 
| 943 | 
         
            +
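# Hypothetical refactor sketch (defined here for illustration, not referenced
# by the app): generate_video and generate_video_with_info repeat the same
# frames -> mp4 conversion, which could be factored out like this. Assumes
# imageio v2's mimsave(path, frames, fps=...) signature.
def _frames_to_mp4(frames_list, fps=8):
    import imageio
    import tempfile
    # Reserve a temporary .mp4 path, then write the frames to it
    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
        temp_path = tmp_file.name
    imageio.mimsave(temp_path, frames_list, fps=fps)
    return temp_path
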
@spaces.GPU
def generate_video_with_info(prompt, model_name, optimization_level="balanced", input_image=None):
    """Generate a video with additional info - API-facing function"""
    try:
        print(f"Generando video con modelo: {model_name}")
        print(f"Prompt: {prompt}")
        print(f"Optimization level: {optimization_level}")
        print(f"Input image: {'Sí' if input_image else 'No'}")
        
        # Map the optimization level to generation parameters
        if optimization_level == "speed":
            num_frames = 8
            num_inference_steps = 10
        elif optimization_level == "quality":
            num_frames = 16
            num_inference_steps = 30
        else:  # balanced
            num_frames = 12
            num_inference_steps = 20
        
        model_data = load_video_model(model_name)
        pipeline = model_data["pipeline"]
        
        # Per-model-family configuration
        if "zeroscope" in model_name.lower():
            # Zeroscope models
            result = pipeline(
                prompt,
                num_inference_steps=num_inference_steps,
                num_frames=num_frames,
                height=256,
                width=256
            )
        elif "animatediff" in model_name.lower():
            # AnimateDiff models
            result = pipeline(
                prompt,
                num_inference_steps=num_inference_steps,
                num_frames=num_frames
            )
        else:
            # Text-to-video models (default)
            result = pipeline(
                prompt,
                num_inference_steps=num_inference_steps,
                num_frames=num_frames
            )
        
        print("Video generado exitosamente")
        
        # Handle the different result types pipelines can return
        if hasattr(result, 'frames'):
            video_frames = result.frames
        elif hasattr(result, 'videos'):
            video_frames = result.videos
        else:
            video_frames = result
        
        # Convert to a format Gradio can display
        if isinstance(video_frames, list):
            if len(video_frames) == 1:
                return video_frames[0]
            else:
                return video_frames
        else:
            # If it is a numpy tensor, convert it to a video file
            if hasattr(video_frames, 'shape'):
                import numpy as np
                print(f"Forma del video: {video_frames.shape}")
                
                if len(video_frames.shape) == 5:  # (batch, frames, height, width, channels)
                    # Take the first batch and fall through to the 4-dim case
                    video_frames = video_frames[0]
                
                if len(video_frames.shape) == 4:  # (frames, height, width, channels)
                    # Normalize each frame to uint8 in the 0-255 range
                    frames_list = []
                    for i in range(video_frames.shape[0]):
                        frame = video_frames[i]
                        if frame.dtype == np.float32 or frame.dtype == np.float16:
                            frame = (frame * 255).astype(np.uint8)
                        frames_list.append(frame)
                    
                    # Write the frames to a temporary mp4 file
                    import imageio
                    import tempfile
                    
                    with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as tmp_file:
                        temp_path = tmp_file.name
                    
                    imageio.mimsave(temp_path, frames_list, fps=8)
                    
                    print(f"Video guardado en: {temp_path}")
                    return temp_path
                else:
                    print(f"Forma no reconocida: {video_frames.shape}")
                    return None
            else:
                return video_frames
        
    except Exception as e:
        print(f"Error generando video: {str(e)}")
        print(f"Tipo de error: {type(e).__name__}")
        import traceback
        traceback.print_exc()
        return f"Error generando video: {str(e)}"

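# Usage sketch (illustrative values; assumes the default checkpoint can be
# downloaded in this environment):
#
#     path_or_frames = generate_video_with_info(
#         "a paper boat drifting down a stream",
#         "damo-vilab/text-to-video-ms-1.7b",
#         optimization_level="speed",  # 8 frames, 10 steps per the mapping above
#     )
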
def chat_with_model(message, history, model_name):
    """Chat function for DialoGPT using the updated messages format"""
    try:
        model_data = load_text_model(model_name)
        tokenizer = model_data["tokenizer"]
        model = model_data["model"]
        
        # Rebuild the conversation from the messages-format history
        conversation = ""
        for msg in history:
            if msg["role"] == "user":
                conversation += f"User: {msg['content']}\n"
            elif msg["role"] == "assistant":
                conversation += f"Assistant: {msg['content']}\n"
        
        conversation += f"User: {message}\nAssistant:"
        
        # Generate the response
        inputs = tokenizer.encode(conversation, return_tensors="pt", truncation=True, max_length=512)
        
        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=inputs.shape[1] + 50,
                temperature=0.7,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )
        
            +
                    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         
     | 
| 1087 | 
         
            +
                    
         
     | 
| 1088 | 
         
            +
                    # Extraer solo la respuesta del asistente
         
     | 
| 1089 | 
         
            +
                    response = response.split("Assistant:")[-1].strip()
         
     | 
| 1090 | 
         
            +
                    
         
     | 
| 1091 | 
         
            +
                    # Retornar el historial actualizado con el nuevo formato
         
     | 
| 1092 | 
         
            +
                    history.append({"role": "user", "content": message})
         
     | 
| 1093 | 
         
            +
                    history.append({"role": "assistant", "content": response})
         
     | 
| 1094 | 
         
            +
                    
         
     | 
| 1095 | 
         
            +
                    return history
         
     | 
| 1096 | 
         
            +
                    
         
     | 
| 1097 | 
         
            +
                except Exception as e:
         
     | 
| 1098 | 
         
            +
                    error_msg = f"Error en el chat: {str(e)}"
         
     | 
| 1099 | 
         
            +
                    history.append({"role": "user", "content": message})
         
     | 
| 1100 | 
         
            +
                    history.append({"role": "assistant", "content": error_msg})
         
     | 
| 1101 | 
         
            +
                    return history
         
     | 
| 1102 | 
         
            +
             
     | 
| 1103 | 
         
            +
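# Sketch of the messages-format history chat_with_model consumes and returns
# (the type="messages" Chatbot below uses the same structure):
#
#     history = [
#         {"role": "user", "content": "Hola"},
#         {"role": "assistant", "content": "¡Hola! ¿En qué puedo ayudarte?"},
#     ]
#     history = chat_with_model("Cuéntame un chiste", history, "microsoft/DialoGPT-medium")
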
# Gradio interface
with gr.Blocks(title="Modelos Libres de IA", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 Modelos Libres de IA")
    gr.Markdown("### Genera texto e imágenes sin límites de cuota")
    
    with gr.Tabs():
        # Text generation tab
        with gr.TabItem("📝 Generación de Texto"):
            with gr.Row():
                with gr.Column():
                    text_model = gr.Dropdown(
                        choices=list(MODELS["text"].keys()),
                        value="microsoft/DialoGPT-medium",
                        label="Modelo de Texto"
                    )
                    text_prompt = gr.Textbox(
                        label="Prompt",
                        placeholder="Escribe tu prompt aquí...",
                        lines=3
                    )
                    max_length = gr.Slider(
                        minimum=50,
                        maximum=200,
                        value=100,
                        step=10,
                        label="Longitud máxima"
                    )
                    text_btn = gr.Button("Generar Texto", variant="primary")
                
                with gr.Column():
                    text_output = gr.Textbox(
                        label="Resultado",
                        lines=10,
                        interactive=False
                    )
            
            text_btn.click(
                generate_text,
                inputs=[text_prompt, text_model, max_length],
                outputs=text_output
            )
        
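        # Note: as wired above, generate_text receives (prompt, model_name,
        # max_length); the translation tab below reuses the same function
        # with a fixed max_length.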
        # Chat tab
        with gr.TabItem("💬 Chat"):
            with gr.Row():
                with gr.Column():
                    chat_model = gr.Dropdown(
                        choices=list(MODELS["chat"].keys()),
                        value="microsoft/DialoGPT-medium",
                        label="Modelo de Chat"
                    )
                
                with gr.Column():
                    chatbot = gr.Chatbot(
                        label="Chat",
                        height=400,
                        type="messages"
                    )
                    chat_input = gr.Textbox(
                        label="Mensaje",
                        placeholder="Escribe tu mensaje...",
                        lines=2
                    )
                    chat_btn = gr.Button("Enviar", variant="primary")
            
            chat_btn.click(
                chat_with_model,
                inputs=[chat_input, chatbot, chat_model],
                outputs=[chatbot]
            )
            
            chat_input.submit(
                chat_with_model,
                inputs=[chat_input, chatbot, chat_model],
                outputs=[chatbot]
            )
        
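            # Sketch: the textbox is not cleared after sending; Gradio event
            # listeners can be chained, so one option would be:
            #     chat_btn.click(chat_with_model, [chat_input, chatbot, chat_model],
            #                    [chatbot]).then(lambda: "", None, chat_input)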
        # Translation tab
        with gr.TabItem("🌐 Traducción"):
            with gr.Row():
                with gr.Column():
                    translate_model = gr.Dropdown(
                        choices=["Helsinki-NLP/opus-mt-es-en", "Helsinki-NLP/opus-mt-en-es"],
                        value="Helsinki-NLP/opus-mt-es-en",
                        label="Modelo de Traducción"
                    )
                    translate_text = gr.Textbox(
                        label="Texto a traducir",
                        placeholder="Escribe el texto que quieres traducir...",
                        lines=3
                    )
                    translate_btn = gr.Button("Traducir", variant="primary")
                
                with gr.Column():
                    translate_output = gr.Textbox(
                        label="Traducción",
                        lines=3,
                        interactive=False
                    )
            
            translate_btn.click(
                generate_text,
                inputs=[translate_text, translate_model, gr.Slider(value=100, visible=False)],
                outputs=translate_output
            )
        
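            # Note: the invisible gr.Slider above only exists to feed a
            # constant max_length of 100 into generate_text; it is never
            # rendered in the UI.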
                    # Tab de Generación de Imágenes
         
     | 
| 1210 | 
         
            +
                    with gr.TabItem("🎨 Generación de Imágenes"):
         
     | 
| 1211 | 
         
            +
                        with gr.Row():
         
     | 
| 1212 | 
         
            +
                            with gr.Column():
         
     | 
| 1213 | 
         
            +
                                # Modelo
         
     | 
| 1214 | 
         
            +
                                image_model = gr.Dropdown(
         
     | 
| 1215 | 
         
            +
                                    choices=list(MODELS["image"].keys()),
         
     | 
| 1216 | 
         
            +
                                    value="CompVis/stable-diffusion-v1-4",
         
     | 
| 1217 | 
         
            +
                                    label="Modelo",
         
     | 
| 1218 | 
         
            +
                                    info="Select a high-quality model (FLUX models require HF_TOKEN)"
         
     | 
| 1219 | 
         
            +
                                )
         
     | 
| 1220 | 
         
            +
                                
         
     | 
| 1221 | 
         
            +
                                # Prompt principal
         
     | 
| 1222 | 
         
            +
                                image_prompt = gr.Textbox(
         
     | 
| 1223 | 
         
            +
                                    label="Prompt",
         
     | 
| 1224 | 
         
            +
                                    placeholder="Describe la imagen que quieres generar...",
         
     | 
| 1225 | 
         
            +
                                    lines=3
         
     | 
| 1226 | 
         
            +
                                )
         
     | 
| 1227 | 
         
            +
                                
         
     | 
| 1228 | 
         
            +
                                # Negative prompt
         
     | 
| 1229 | 
         
            +
                                negative_prompt = gr.Textbox(
         
     | 
| 1230 | 
         
            +
                                    label="Negative prompt",
         
     | 
| 1231 | 
         
            +
                                    placeholder="Enter a negative prompt (optional)",
         
     | 
| 1232 | 
         
            +
                                    lines=2
         
     | 
| 1233 | 
         
            +
                                )
         
     | 
| 1234 | 
         
            +
                                
         
     | 
| 1235 | 
         
            +
                                # Advanced Settings
         
     | 
| 1236 | 
         
            +
                                with gr.Accordion("Advanced Settings", open=False):
         
     | 
| 1237 | 
         
            +
                                    with gr.Row():
         
     | 
| 1238 | 
         
            +
                                        with gr.Column():
         
     | 
| 1239 | 
         
            +
                                            seed = gr.Slider(
         
     | 
| 1240 | 
         
            +
                                                minimum=0,
         
     | 
| 1241 | 
         
            +
                                                maximum=2147483647,
         
     | 
| 1242 | 
         
            +
                                                value=324354329,
         
     | 
| 1243 | 
         
            +
                                                step=1,
         
     | 
| 1244 | 
         
            +
                                                label="Seed",
         
     | 
| 1245 | 
         
            +
                                                info="Random seed for generation"
         
     | 
| 1246 | 
         
            +
                                            )
         
     | 
| 1247 | 
         
            +
                                            randomize_seed = gr.Checkbox(
         
     | 
| 1248 | 
         
            +
                                                value=True,
         
     | 
| 1249 | 
         
            +
                                                label="Randomize seed"
         
     | 
| 1250 | 
         
            +
                                            )
         
     | 
| 1251 | 
         
            +
                                        
         
     | 
| 1252 | 
         
            +
                                        with gr.Column():
         
     | 
| 1253 | 
         
            +
                                            width = gr.Slider(
         
     | 
| 1254 | 
         
            +
                                                minimum=256,
         
     | 
| 1255 | 
         
            +
                                                maximum=1024,
         
     | 
| 1256 | 
         
            +
                                                value=1024,
         
     | 
| 1257 | 
         
            +
                                                step=64,
         
     | 
| 1258 | 
         
            +
                                                label="Width"
         
     | 
| 1259 | 
         
            +
                                            )
         
     | 
| 1260 | 
         
            +
                                            height = gr.Slider(
         
     | 
| 1261 | 
         
            +
                                                minimum=256,
         
     | 
| 1262 | 
         
            +
                                                maximum=1024,
         
     | 
| 1263 | 
         
            +
                                                value=1024,
         
     | 
| 1264 | 
         
            +
                                                step=64,
         
     | 
| 1265 | 
         
            +
                                                label="Height"
         
     | 
| 1266 | 
         
            +
                                            )
         
     | 
| 1267 | 
         
            +
                                    
         
     | 
| 1268 | 
         
            +
                                    with gr.Row():
         
     | 
| 1269 | 
         
            +
                                        with gr.Column():
         
     | 
| 1270 | 
         
            +
                                            guidance_scale = gr.Slider(
         
     | 
| 1271 | 
         
            +
                                                minimum=0,
         
     | 
| 1272 | 
         
            +
                                                maximum=20,
         
     | 
| 1273 | 
         
            +
                                                value=8.5,
         
     | 
| 1274 | 
         
            +
                                                step=0.1,
         
     | 
| 1275 | 
         
            +
                                                label="Guidance scale",
         
     | 
| 1276 | 
         
            +
                                                info="Controls how closely the image follows the prompt (higher = more adherence)"
         
     | 
| 1277 | 
         
            +
                                            )
         
     | 
| 1278 | 
         
            +
                                            num_inference_steps = gr.Slider(
         
     | 
| 1279 | 
         
            +
                                                minimum=1,
         
     | 
| 1280 | 
         
            +
                                                maximum=100,
         
     | 
| 1281 | 
         
            +
                                                value=31,
         
     | 
| 1282 | 
         
            +
                                                step=1,
         
     | 
| 1283 | 
         
            +
                                                label="Number of inference steps",
         
     | 
| 1284 | 
         
            +
                                                info="More steps = higher quality but slower generation"
         
     | 
| 1285 | 
         
            +
                                            )
         
     | 
| 1286 | 
         
            +
                                        
         
     | 
| 1287 | 
         
            +
                                        with gr.Column():
         
     | 
| 1288 | 
         
            +
                                            eta = gr.Slider(
         
     | 
| 1289 | 
         
            +
                                                minimum=0,
         
     | 
| 1290 | 
         
            +
                                                maximum=1,
         
     | 
| 1291 | 
         
            +
                                                value=0,
         
     | 
| 1292 | 
         
            +
                                                step=0.1,
         
     | 
| 1293 | 
         
            +
                                                label="Eta (DDIM)",
         
     | 
| 1294 | 
         
            +
                                                info="DDIM eta parameter (0 = deterministic, 1 = stochastic)"
         
     | 
| 1295 | 
         
            +
                                            )
         
     | 
| 1296 | 
         
            +
                                            strength = gr.Slider(
         
     | 
| 1297 | 
         
            +
                                                minimum=0,
         
     | 
| 1298 | 
         
            +
                                                maximum=1,
         
     | 
| 1299 | 
         
            +
                                                value=1,
         
     | 
| 1300 | 
         
            +
                                                step=0.1,
         
     | 
| 1301 | 
         
            +
                                                label="Strength",
         
     | 
| 1302 | 
         
            +
                                                info="Strength of the transformation (for img2img models)"
         
     | 
| 1303 | 
         
            +
                                            )
         
     | 
| 1304 | 
         
            +
                                    
         
     | 
| 1305 | 
         
            +
                                    with gr.Row():
         
     | 
| 1306 | 
         
            +
                                        num_images = gr.Slider(
         
     | 
| 1307 | 
         
            +
                                            minimum=1,
         
     | 
| 1308 | 
         
            +
                                            maximum=4,
         
     | 
| 1309 | 
         
            +
                                            value=1,
         
     | 
| 1310 | 
         
            +
                                            step=1,
         
     | 
| 1311 | 
         
            +
                                            label="Images per prompt",
         
     | 
| 1312 | 
         
            +
                                            info="Number of images to generate (may slow down generation)"
         
     | 
| 1313 | 
         
            +
                                        )
         
     | 
| 1314 | 
         
            +
                                        safety_checker = gr.Checkbox(
         
     | 
| 1315 | 
         
            +
                                            value=True,
         
     | 
| 1316 | 
         
            +
                                            label="Enable content safety filtering"
         
     | 
| 1317 | 
         
            +
                                        )
         
     | 
| 1318 | 
         
            +
                                
         
     | 
| 1319 | 
         
            +
                                # Botón de generación
         
     | 
| 1320 | 
         
            +
                                image_btn = gr.Button("Generar Imagen", variant="primary")
         
     | 
| 1321 | 
         
            +
                            
         
     | 
| 1322 | 
         
            +
                            with gr.Column():
         
     | 
| 1323 | 
         
            +
                                # Información del modelo
         
     | 
| 1324 | 
         
            +
                                model_info = gr.Markdown(
         
     | 
| 1325 | 
         
            +
                                    value="**Model Info:** CompVis/stable-diffusion-v1-4\n\n"
         
     | 
| 1326 | 
         
            +
                                          "🎨 Stable Diffusion v1.4 • Recommended steps: 20-50 • "
         
     | 
| 1327 | 
         
            +
                                          "Guidance scale: 7.5-15 • Best for: General purpose\n\n"
         
     | 
| 1328 | 
         
            +
                                          "**Status:** ✅ Available"
         
     | 
| 1329 | 
         
            +
                                )
         
     | 
| 1330 | 
         
            +
                                
         
     | 
| 1331 | 
         
            +
                                # Ejemplos
         
     | 
| 1332 | 
         
            +
                                examples = gr.Examples(
         
     | 
| 1333 | 
         
            +
                                    examples=[
         
     | 
| 1334 | 
         
            +
                                        ["Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"],
         
     | 
| 1335 | 
         
            +
                                        ["An astronaut riding a green horse"],
         
     | 
| 1336 | 
         
            +
                                        ["A delicious ceviche cheesecake slice"],
         
     | 
| 1337 | 
         
            +
                                        ["Futuristic AI assistant in a glowing galaxy, neon lights, sci-fi style, cinematic"],
         
     | 
| 1338 | 
         
            +
                                        ["Portrait of a beautiful woman, realistic, high quality, detailed"],
         
     | 
| 1339 | 
         
            +
                                        ["Anime girl with blue hair, detailed, high quality"],
         
     | 
| 1340 | 
         
            +
                                        ["Cyberpunk city at night, neon lights, detailed, 8k"],
         
     | 
| 1341 | 
         
            +
                                        ["Fantasy landscape with mountains and dragons, epic, detailed"]
         
     | 
| 1342 | 
         
            +
                                    ],
         
     | 
| 1343 | 
         
            +
                                    inputs=image_prompt
         
     | 
| 1344 | 
         
            +
                                )
         
     | 
| 1345 | 
         
            +
                                
         
     | 
| 1346 | 
         
            +
                                # Output de imagen
         
     | 
| 1347 | 
         
            +
                                image_output = gr.Image(
         
     | 
| 1348 | 
         
            +
                                    label="Imagen Generada",
         
     | 
| 1349 | 
         
            +
                                    type="pil"
         
     | 
| 1350 | 
         
            +
                                )
         
     | 
| 1351 | 
         
            +
                        
         
     | 
| 1352 | 
         
            +
            # Function to update the model info panel
            def update_model_info(model_name):
                model_descriptions = {
                    "CompVis/stable-diffusion-v1-4": "🎨 Stable Diffusion v1.4 • Recommended steps: 20-50 • Guidance scale: 7.5-15 • Best for: General purpose",
                    "stabilityai/stable-diffusion-2-1": "🎨 Stable Diffusion 2.1 • Recommended steps: 20-50 • Guidance scale: 7.5-15 • Best for: High quality",
                    "stabilityai/stable-diffusion-xl-base-1.0": "🎨 SDXL Base • Recommended steps: 25-50 • Guidance scale: 7.5-15 • Best for: High resolution",
                    "stabilityai/sdxl-turbo": "⚡ SDXL Turbo • Recommended steps: 1-4 • Guidance scale: 1.0 • Best for: Fast generation",
                    "stabilityai/sd-turbo": "⚡ SD Turbo • Recommended steps: 1-4 • Guidance scale: 1.0 • Best for: Fast generation",
                    "black-forest-labs/FLUX.1-dev": "🔐 FLUX Model - High quality • Recommended steps: 20-50 • Guidance scale: 3.5-7.5 • Best for: Professional results",
                    "black-forest-labs/FLUX.1-schnell": "🔐 FLUX Schnell - Fast quality • Recommended steps: 15-30 • Guidance scale: 3.5-7.5 • Best for: Quick professional results"
                }

                description = model_descriptions.get(model_name, "🎨 Model • Recommended steps: 20-50 • Guidance scale: 7.5-15 • Best for: General purpose")
                return f"**Model Info:** {model_name}\n\n{description}\n\n**Status:** ✅ Available"

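            # update_model_info returns a Markdown string that the change()
            # listener below renders into the model_info component created
            # earlier in the UI.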
            # Events
            image_model.change(
                update_model_info,
                inputs=[image_model],
                outputs=[model_info]
            )

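            # Gradio passes component values to generate_image positionally, so
            # this inputs list must match the parameter order of the
            # generate_image signature.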
            image_btn.click(
                generate_image,
                inputs=[
                    image_prompt,
                    image_model,
                    negative_prompt,
                    seed,
                    randomize_seed,
                    width,
                    height,
                    guidance_scale,
                    num_inference_steps,
                    eta,
                    strength,
                    num_images,
                    safety_checker
                ],
                outputs=image_output
            )

        # Video generation tab
        with gr.TabItem("🎬 Generación de Videos"):
            with gr.Row():
                with gr.Column():
                    video_model = gr.Dropdown(
                        choices=list(MODELS["video"].keys()),
                        value="damo-vilab/text-to-video-ms-1.7b",
                        label="Modelo de Video"
                    )
                    video_prompt = gr.Textbox(
                        label="Prompt de Video",
                        placeholder="Describe el video que quieres generar...",
                        lines=3
                    )
                    num_frames = gr.Slider(
                        minimum=8,
                        maximum=32,
                        value=16,
                        step=4,
                        label="Número de frames"
                    )
                    video_steps = gr.Slider(
                        minimum=10,
                        maximum=50,
                        value=20,
                        step=5,
                        label="Pasos de inferencia"
                    )
                    video_btn = gr.Button("Generar Video", variant="primary")

                with gr.Column():
                    video_output = gr.Video(
                        label="Video Generado",
                        format="mp4"
                    )

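            # Every denoising step touches all frames, so generation time grows
            # roughly with num_frames x video_steps; the defaults (16 frames,
            # 20 steps) keep runs short.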
            video_btn.click(
                generate_video,
                inputs=[video_prompt, video_model, num_frames, video_steps],
                outputs=video_output
            )

    # Add the generate_video_with_info endpoint; demo.load must be called
    # while the gr.Blocks context is still open.
    demo.load(
        generate_video_with_info,
        inputs=[
            gr.Textbox(label="Prompt", placeholder="Describe el video..."),
            gr.Dropdown(choices=list(MODELS["video"].keys()), label="Modelo"),
            gr.Dropdown(choices=["speed", "balanced", "quality"], value="balanced", label="Optimización"),
            gr.Image(label="Imagen de entrada (opcional)", type="pil")
        ],
        outputs=gr.Video(label="Video Generado", format="mp4"),
        api_name="generate_video_with_info"
    )

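# Usage sketch (illustrative; the prompt, URL, and variable names below are
# assumptions, not part of this app): the api_name above exposes the endpoint
# as /generate_video_with_info, callable remotely with gradio_client:
#
#   from gradio_client import Client
#   client = Client("http://localhost:7860")  # hypothetical deployment URL
#   video_path = client.predict(
#       "A rocket launching at dawn",          # prompt
#       "damo-vilab/text-to-video-ms-1.7b",    # video model
#       "balanced",                            # optimization preset
#       None,                                  # optional input image
#       api_name="/generate_video_with_info",
#   )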
# Entry point for Hugging Face Spaces
if __name__ == "__main__":
    demo.launch()
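# Optional hardening (assumption, not in the original launch line): on Spaces,
# long-running GPU jobs are usually queued so concurrent requests wait instead
# of timing out, e.g.:
#
#   demo.queue(max_size=20).launch()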