Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -46,6 +46,7 @@ def deactivate_loras(pipe):
     pipe.unload_lora_weights()
     return pipe

+
 def calculate_optimal_dimensions(image):
     original_width, original_height = image.size
     MIN_ASPECT_RATIO = 9 / 16
@@ -70,13 +71,13 @@ def calculate_optimal_dimensions(image):

     return width, height

+
 @spaces.GPU(duration=45)
 def inpaint(
     image,
     mask,
     prompt: str = "",
     preserve_unmasked: bool = True,
-    feather_radius: int = 0,  # NEW: 0 = no blur, >0 = feathering in pixels
     seed: int = 0,
     num_inference_steps: int = 28,
     guidance_scale: int = 40,
@@ -90,7 +91,7 @@ def inpaint(
     mask = mask.convert("L")
     width, height = calculate_optimal_dimensions(image)

-    # Save the resized original image
+    # Save the resized original image BEFORE any processing
     original_resized = image.resize((width, height), Image.Resampling.LANCZOS)
     original_array = np.array(original_resized).astype(np.float32)

@@ -98,15 +99,14 @@ def inpaint(
     mask_resized = mask.resize((width, height), Image.Resampling.LANCZOS)

     if preserve_unmasked:
-
-
-
-
-
-
-
-
-        mask_array = (mask_array > 0.5).astype(np.float32)
+        # Option 1: strict binary mask (no blur, perfect preservation)
+        mask_array = np.array(mask_resized).astype(np.float32) / 255.0
+        # Binarize the mask with a threshold to remove gray values
+        mask_array = (mask_array > 0.5).astype(np.float32)
+
+        # Option 2: minimal feathering only at the edge (uncomment if you prefer this)
+        # mask_blurred = mask_resized.filter(ImageFilter.GaussianBlur(radius=1))
+        # mask_array = np.array(mask_blurred).astype(np.float32) / 255.0
     else:
         mask_array = np.array(mask_resized).astype(np.float32) / 255.0

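The commit drops the feather_radius path in favor of a hard 0.5 threshold, so every pixel outside the mask can later be copied back exactly. A minimal standalone sketch of the two mask treatments the comments describe, assuming a PIL "L"-mode mask and a target size; prepare_mask and its feather argument are illustrative names, not part of the commit:

import numpy as np
from PIL import Image, ImageFilter

def prepare_mask(mask: Image.Image, size: tuple, feather: int = 0) -> np.ndarray:
    # Returns a float32 mask in [0, 1]; feather=0 reproduces the strict binary path.
    mask = mask.convert("L").resize(size, Image.Resampling.LANCZOS)
    if feather > 0:
        # Soft variant: a small Gaussian blur keeps a thin transition band
        blurred = mask.filter(ImageFilter.GaussianBlur(radius=feather))
        return np.array(blurred).astype(np.float32) / 255.0
    # Strict variant: threshold at 0.5 so unmasked pixels stay exactly 0
    arr = np.array(mask).astype(np.float32) / 255.0
    return (arr > 0.5).astype(np.float32)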
@@ -125,65 +125,42 @@ def inpaint(
         generator=torch.Generator(device="cuda").manual_seed(seed),
     ).images[0]

-    #
+    # PERFECT preservation of unmasked areas
     if preserve_unmasked:
         result_array = np.array(result).astype(np.float32)
+
+        # Expand the mask to 3 RGB channels
         mask_3channel = np.stack([mask_array] * 3, axis=-1)

-        #
+        # EXACT pixel-by-pixel replacement:
+        # where mask=1.0 (white / area to inpaint)  -> use the model output
+        # where mask=0.0 (black / area to preserve) -> use the EXACT original image
         blended = result_array * mask_3channel + original_array * (1.0 - mask_3channel)
+
+        # Convert back without loss
         result = Image.fromarray(np.clip(blended, 0, 255).astype(np.uint8), mode='RGB')

     return result.convert("RGBA"), prompt, seed


-# ============================================
-# UPDATE of inpaint_api
-# ============================================
 def inpaint_api(
-
-
-
-
-
-
-
-
-
-
-    loras_selected: List[Tuple[str, float]] = None
+    image,
+    mask,
+    prompt: str = "",
+    preserve_unmasked: bool = True,
+    seed: int = 0,
+    num_inference_steps: int = 28,
+    guidance_scale: int = 40,
+    strength: float = 1.0,
+    flux_keywords: List[str] = None,
+    loras_selected: List[Tuple[str, float]] = None
 ):
     flux_keywords = flux_keywords or []
     loras_selected = loras_selected or []

-    #
-    if seed is None or not isinstance(seed, (int, float)):
-        seed = -1
-    seed = int(seed)
-
-    if num_inference_steps is None:
-        num_inference_steps = 28
-    num_inference_steps = int(num_inference_steps)
-
-    if guidance_scale is None:
-        guidance_scale = 40
-    guidance_scale = float(guidance_scale)
-
-    if strength is None:
-        strength = 1.0
-    strength = float(strength)
-
-    if feather_radius is None:
-        feather_radius = 0
-    feather_radius = max(0, min(10, int(feather_radius)))  # Clamp between 0-10
-
-    # Valid ranges
-    num_inference_steps = max(1, min(100, num_inference_steps))
-    guidance_scale = max(0.0, min(50.0, guidance_scale))
-    strength = max(0.0, min(1.0, strength))
-
-    # LoRAs
+    # Convert names to LoRA objects
     selected_loras_with_weights = []
+
     for name, weight in loras_selected:
         lora_obj = next((l for l in loras if l.display_name == name), None)
         if lora_obj and weight != 0:
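With a strictly binary mask, the composite above is an exact per-pixel selection between the model output and the untouched original. A sketch of that step in isolation, assuming HxWx3 float32 arrays and an HxW mask of 0.0/1.0 values (composite is an illustrative name, not part of the commit):

import numpy as np

def composite(result_array, original_array, mask_array):
    # mask=1.0 -> take the model output, mask=0.0 -> keep the original pixel
    mask_3channel = np.stack([mask_array] * 3, axis=-1)
    blended = result_array * mask_3channel + original_array * (1.0 - mask_3channel)
    return np.clip(blended, 0, 255).astype(np.uint8)

# With a binary mask this is equivalent to
# np.where(mask_3channel > 0.5, result_array, original_array).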
@@ -193,21 +170,24 @@ def inpaint_api(
     if selected_loras_with_weights:
         activate_loras(pipe, selected_loras_with_weights)

-    # Build prompt
+    # Build the final prompt
     final_prompt = ""
+
     if flux_keywords:
         final_prompt += ", ".join(flux_keywords) + ", "
+
     for lora, _ in selected_loras_with_weights:
         if lora.keyword:
             if isinstance(lora.keyword, str):
                 final_prompt += lora.keyword + ", "
             else:
                 final_prompt += ", ".join(lora.keyword) + ", "
+
     if final_prompt:
         final_prompt += "\n\n"
     final_prompt += prompt

-    if seed < 0:
+    if not isinstance(seed, int) or seed < 0:
         seed = random.randint(0, MAX_SEED)

     return inpaint(
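The prompt assembly in this hunk simply concatenates the global Flux keywords, then each selected LoRA's trigger keyword(s), then the user prompt. A sketch with stand-in data; LoraStub and the sample strings are invented for illustration only:

from collections import namedtuple

LoraStub = namedtuple("LoraStub", "keyword")

def build_prompt(prompt, flux_keywords, selected_loras_with_weights):
    final_prompt = ""
    if flux_keywords:
        final_prompt += ", ".join(flux_keywords) + ", "
    for lora, _ in selected_loras_with_weights:
        if lora.keyword:
            # keyword may be a single string or a list of trigger words
            if isinstance(lora.keyword, str):
                final_prompt += lora.keyword + ", "
            else:
                final_prompt += ", ".join(lora.keyword) + ", "
    if final_prompt:
        final_prompt += "\n\n"
    return final_prompt + prompt

# build_prompt("a red car", ["photorealistic"], [(LoraStub("chrome style"), 0.8)])
# returns "photorealistic, chrome style, \n\na red car"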
@@ -215,7 +195,6 @@ def inpaint_api(
         mask=mask,
         prompt=final_prompt,
         preserve_unmasked=preserve_unmasked,
-        feather_radius=feather_radius,
         seed=seed,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
@@ -223,9 +202,9 @@ def inpaint_api(
     )


-#
-#
-#
+# ========================
+# DIRECT UI TO inpaint_api
+# ========================
 with gr.Blocks(title="Flux.1 Fill dev Inpainting with LoRAs", theme=gr.themes.Soft()) as demo:
     gr.api(get_loras, api_name="get_loras")
     with gr.Row():
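gr.api(get_loras, api_name="get_loras") keeps that helper reachable over the Space's API even though it has no widget of its own. A hedged sketch of calling it with gradio_client, assuming the endpoint is exposed as "/get_loras" and using a placeholder Space id:

from gradio_client import Client

client = Client("username/space-name")   # placeholder Space id, not the real one
loras = client.predict(api_name="/get_loras")
print(loras)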
@@ -234,57 +213,14 @@ with gr.Blocks(title="Flux.1 Fill dev Inpainting with LoRAs", theme=gr.themes.Soft()) as demo:
             preserve_unmasked_checkbox = gr.Checkbox(
                 label="Preserve unmasked areas", value=True
             )
-
-
-
-
-
-                maximum=10,
-                step=1,
-                value=0,
-                info="0 = Sharp edges (perfect preservation), higher = smoother blend",
-                interactive=True
-            )
-
-            seed_slider = gr.Slider(
-                label="Seed",
-                minimum=-1,
-                maximum=MAX_SEED,
-                step=1,
-                value=-1,
-                info="(-1 = Random)",
-                interactive=True
-            )
-            num_inference_steps_input = gr.Number(
-                label="Inference steps",
-                value=28,
-                minimum=1,
-                maximum=100,
-                step=1,
-                interactive=True
-            )
-            guidance_scale_input = gr.Number(
-                label="Guidance scale",
-                value=40,
-                minimum=0,
-                maximum=50,
-                step=0.5,
-                interactive=True
-            )
-            strength_input = gr.Number(
-                label="Strength",
-                value=1.0,
-                minimum=0.0,
-                maximum=1.0,
-                step=0.05,
-                interactive=True
-            )
+            seed_slider = gr.Slider(label="Seed", minimum=-1, maximum=MAX_SEED, step=1,
+                                    value=-1, info="(-1 = Random)", interactive=True)
+            num_inference_steps_input = gr.Number(label="Inference steps", value=40, interactive=True)
+            guidance_scale_input = gr.Number(label="Guidance scale", value=28, interactive=True)
+            strength_input = gr.Number(label="Strength", value=1.0, interactive=True, maximum=1.0)

             gr.Markdown("### Flux Keywords")
-            flux_keywords_input = gr.CheckboxGroup(
-                choices=flux_keywords_available,
-                label="Flux Keywords"
-            )
+            flux_keywords_input = gr.CheckboxGroup(choices=flux_keywords_available, label="Flux Keywords")

     if loras:
         gr.Markdown("### Available LoRAs")
@@ -293,8 +229,8 @@ with gr.Blocks(title="Flux.1 Fill dev Inpainting with LoRAs", theme=gr.themes.Soft()) as demo:
                 type="array",
                 headers=["LoRA", "Weight"],
                 value=[[name, 0.0] for name in lora_names],
-                datatype=["str", "number"],
-                interactive=[False, True],
+                datatype=["str", "number"],  # first column is a string, the second a number
+                interactive=[False, True],   # only the second column is editable
                 static_columns=[0],
                 label="LoRA selection (Weight 0 = disable)"
             )
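The Dataframe above submits its value as rows of [name, weight], while inpaint_api consumes loras_selected as (name, weight) pairs and skips anything left at weight 0. A sketch of that mapping; rows_to_loras_selected and the sample LoRA names are illustrative only:

from typing import List, Tuple

def rows_to_loras_selected(rows: List[list]) -> List[Tuple[str, float]]:
    # Drop rows left at weight 0, matching "Weight 0 = disable"
    return [(str(name), float(weight)) for name, weight in rows if float(weight) != 0]

# rows_to_loras_selected([["Some LoRA", 0.0], ["Another LoRA", 0.7]])
# returns [("Another LoRA", 0.7)]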
@@ -316,7 +252,6 @@ with gr.Blocks(title="Flux.1 Fill dev Inpainting with LoRAs", theme=gr.themes.Soft()) as demo:
             mask_input,
             prompt_input,
             preserve_unmasked_checkbox,
-            feather_radius_slider,  # NEW input
             seed_slider,
             num_inference_steps_input,
             guidance_scale_input,