optimize FLUX model for memory and speed - reduce resolution and steps
Browse files
app.py
CHANGED
|
@@ -191,7 +191,7 @@ def load_image_model(model_name):
|
|
| 191 |
print("🚀 Cargando FLUX Pipeline...")
|
| 192 |
pipe = FluxPipeline.from_pretrained(
|
| 193 |
model_name,
|
| 194 |
-
torch_dtype=torch.
|
| 195 |
use_auth_token=HF_TOKEN if HF_TOKEN else None
|
| 196 |
)
|
| 197 |
# Solo usar enable_model_cpu_offload si hay acelerador disponible
|
|
@@ -201,6 +201,12 @@ def load_image_model(model_name):
|
|
| 201 |
except Exception as offload_error:
|
| 202 |
print(f"⚠️ No se pudo habilitar CPU offload: {offload_error}")
|
| 203 |
print("✅ FLUX cargado sin CPU offload")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
except Exception as e:
|
| 205 |
print(f"❌ Error cargando FLUX: {e}")
|
| 206 |
# Fallback a Stable Diffusion
|
|
@@ -420,7 +426,7 @@ def generate_text(prompt, model_name, max_length=100):
|
|
| 420 |
return f"Error generando texto: {str(e)}"
|
| 421 |
|
| 422 |
def generate_image(prompt, model_name, num_inference_steps=20):
|
| 423 |
-
"""Generar imagen con el modelo seleccionado - versión
|
| 424 |
try:
|
| 425 |
print(f"Generando imagen con modelo: {model_name}")
|
| 426 |
print(f"Prompt: {prompt}")
|
|
@@ -437,21 +443,22 @@ def generate_image(prompt, model_name, num_inference_steps=20):
|
|
| 437 |
model_data = load_image_model(model_name)
|
| 438 |
pipeline = model_data["pipeline"]
|
| 439 |
|
| 440 |
-
# Configuración específica para FLUX
|
| 441 |
if "flux" in model_name.lower():
|
| 442 |
import random
|
| 443 |
# Generar un seed aleatorio para cada imagen
|
| 444 |
random_seed = random.randint(0, 999999)
|
| 445 |
print(f"🎲 Usando seed aleatorio para FLUX: {random_seed}")
|
| 446 |
-
print(f"🔧 Parámetros FLUX: guidance_scale=3.5, steps=
|
| 447 |
|
|
|
|
| 448 |
image = pipeline(
|
| 449 |
prompt,
|
| 450 |
-
height=1024
|
| 451 |
-
width=1024
|
| 452 |
guidance_scale=3.5, # ✅ Valor recomendado por la documentación
|
| 453 |
-
num_inference_steps=
|
| 454 |
-
max_sequence_length=
|
| 455 |
generator=torch.Generator("cpu").manual_seed(random_seed) # ✅ Seed aleatorio
|
| 456 |
).images[0]
|
| 457 |
else:
|
|
@@ -462,11 +469,11 @@ def generate_image(prompt, model_name, num_inference_steps=20):
|
|
| 462 |
guidance_scale=7.5
|
| 463 |
).images[0]
|
| 464 |
|
| 465 |
-
print("Imagen generada exitosamente")
|
| 466 |
return image
|
| 467 |
|
| 468 |
except Exception as e:
|
| 469 |
-
print(f"Error generando imagen: {str(e)}")
|
| 470 |
return f"Error generando imagen: {str(e)}"
|
| 471 |
|
| 472 |
def generate_video(prompt, model_name, num_frames=16, num_inference_steps=20):
|
|
|
|
| 191 |
print("🚀 Cargando FLUX Pipeline...")
|
| 192 |
pipe = FluxPipeline.from_pretrained(
|
| 193 |
model_name,
|
| 194 |
+
torch_dtype=torch.float32,  # ⚠️ float32 NO ahorra memoria (usa el doble que float16/bfloat16); presumiblemente necesario por compatibilidad con CPU — confirmar
|
| 195 |
use_auth_token=HF_TOKEN if HF_TOKEN else None
|
| 196 |
)
|
| 197 |
# Solo usar enable_model_cpu_offload si hay acelerador disponible
|
|
|
|
| 201 |
except Exception as offload_error:
|
| 202 |
print(f"⚠️ No se pudo habilitar CPU offload: {offload_error}")
|
| 203 |
print("✅ FLUX cargado sin CPU offload")
|
| 204 |
+
|
| 205 |
+
# Optimizaciones adicionales para ahorrar memoria
|
| 206 |
+
pipe.enable_attention_slicing()
|
| 207 |
+
pipe.enable_vae_slicing()
|
| 208 |
+
print("✅ Optimizaciones de memoria aplicadas a FLUX")
|
| 209 |
+
|
| 210 |
except Exception as e:
|
| 211 |
print(f"❌ Error cargando FLUX: {e}")
|
| 212 |
# Fallback a Stable Diffusion
|
|
|
|
| 426 |
return f"Error generando texto: {str(e)}"
|
| 427 |
|
| 428 |
def generate_image(prompt, model_name, num_inference_steps=20):
|
| 429 |
+
"""Generar imagen con el modelo seleccionado - versión optimizada para Spaces"""
|
| 430 |
try:
|
| 431 |
print(f"Generando imagen con modelo: {model_name}")
|
| 432 |
print(f"Prompt: {prompt}")
|
|
|
|
| 443 |
model_data = load_image_model(model_name)
|
| 444 |
pipeline = model_data["pipeline"]
|
| 445 |
|
| 446 |
+
# Configuración específica para FLUX - OPTIMIZADA para Spaces
|
| 447 |
if "flux" in model_name.lower():
|
| 448 |
import random
|
| 449 |
# Generar un seed aleatorio para cada imagen
|
| 450 |
random_seed = random.randint(0, 999999)
|
| 451 |
print(f"🎲 Usando seed aleatorio para FLUX: {random_seed}")
|
| 452 |
+
print(f"🔧 Parámetros FLUX OPTIMIZADOS: guidance_scale=3.5, steps=25, max_seq=256, height=512")
|
| 453 |
|
| 454 |
+
# Parámetros optimizados para reducir memoria y tiempo
|
| 455 |
image = pipeline(
|
| 456 |
prompt,
|
| 457 |
+
height=512, # ✅ Reducido de 1024 a 512 para ahorrar memoria
|
| 458 |
+
width=512, # ✅ Reducido de 1024 a 512 para ahorrar memoria
|
| 459 |
guidance_scale=3.5, # ✅ Valor recomendado por la documentación
|
| 460 |
+
num_inference_steps=25, # ✅ Reducido de 50 a 25 para ahorrar tiempo (nota: ignora el parámetro num_inference_steps de la función)
|
| 461 |
+
max_sequence_length=256, # ✅ Reducido de 512 a 256 para ahorrar memoria
|
| 462 |
generator=torch.Generator("cpu").manual_seed(random_seed) # ✅ Seed aleatorio
|
| 463 |
).images[0]
|
| 464 |
else:
|
|
|
|
| 469 |
guidance_scale=7.5
|
| 470 |
).images[0]
|
| 471 |
|
| 472 |
+
print("✅ Imagen generada exitosamente")
|
| 473 |
return image
|
| 474 |
|
| 475 |
except Exception as e:
|
| 476 |
+
print(f"❌ Error generando imagen: {str(e)}")
|
| 477 |
return f"Error generando imagen: {str(e)}"
|
| 478 |
|
| 479 |
def generate_video(prompt, model_name, num_frames=16, num_inference_steps=20):
|