Update app.py
app.py CHANGED
@@ -313,7 +313,6 @@ def generate_image_to_image(prompt_mash, image_input_path, image_strength, steps
     ).images[0]
     return final_image
 
-
 def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_indices, lora_scale_1, lora_scale_2, randomize_seed, seed, width, height, loras_state, progress=gr.Progress(track_tqdm=True)):
     # Detect Korean (Hangul) input and translate it
     if any('\u3131' <= char <= '\u318E' or '\uAC00' <= char <= '\uD7A3' for char in prompt):
@@ -325,6 +324,8 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
     if not selected_indices:
         raise gr.Error("You must select at least one LoRA before proceeding.")
 
+    selected_loras = [loras_state[idx] for idx in selected_indices]
+
     # Build the prompt with trigger words
     prepends = []
     appends = []
@@ -337,6 +338,7 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
             appends.append(trigger_word)
     prompt_mash = " ".join(prepends + [prompt] + appends)
     print("Prompt Mash: ", prompt_mash)
+
     # Unload previous LoRA weights
     with calculateDuration("Unloading LoRA"):
         pipe.unload_lora_weights()
@@ -390,6 +392,10 @@ def run_lora(prompt, image_input, image_strength, cfg_scale, steps, selected_ind
         final_image = image
         progress_bar = f'<div class="progress-container"><div class="progress-bar" style="--current: {step_counter}; --total: {steps};"></div></div>'
         yield image, seed, gr.update(value=progress_bar, visible=True)
+
+    if final_image is None:
+        raise gr.Error("Failed to generate image")
+
     yield final_image, seed, gr.update(value=progress_bar, visible=False)
 
 run_lora.zerogpu = True
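The functional change in this commit is twofold: run_lora now resolves the chosen LoRAs into selected_loras right after validating selected_indices, and it guards against the case where the streaming loop never produces an image, raising gr.Error("Failed to generate image") instead of yielding None to the UI. The guard matters because run_lora is a generator. Below is a minimal sketch of that pattern under simplified assumptions; run_sketch, the placeholder frame strings, and RuntimeError (standing in for gr.Error) are hypothetical and not code from app.py.

def run_sketch(steps):
    # Simplified stand-in for run_lora's streaming generation loop.
    final_image = None
    for step in range(steps):
        image = f"frame-{step}"        # stand-in for a decoded preview image
        final_image = image
        yield image                    # intermediate result streamed to the UI
    if final_image is None:
        # Mirrors the commit's guard; app.py raises gr.Error("Failed to generate image").
        raise RuntimeError("Failed to generate image")
    yield final_image                  # final result

print(list(run_sketch(2)))             # ['frame-0', 'frame-1', 'frame-1']
try:
    list(run_sketch(0))                # loop never runs, so the guard fires
except RuntimeError as err:
    print("guard fired:", err)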