Spaces: Running on Zero
Fix flux memory handling issue.
pipe.enable_model_cpu_offload() vs manual pipe.to
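For context, the two approaches the description contrasts look roughly like this in diffusers (a minimal sketch; the model id and dtype are illustrative assumptions, not values taken from app.py):

import torch
from diffusers import FluxPipeline

# Assumed model id, for illustration only.
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# Option A: enable_model_cpu_offload() keeps sub-models on the CPU and moves each
# one to the GPU only while it is actually running, trading speed for lower peak VRAM.
pipe.enable_model_cpu_offload()

# Option B: manage device placement manually with pipe.to(...).
# pipe.to("cuda")            # move the whole pipeline onto the GPU before inference
# ...generate...
# pipe.to("cpu")             # park it back on the CPU afterwards
# torch.cuda.empty_cache()   # release the cached VRAM

In the Flux branch of the diff below, the commit ends up using both ideas: offload during generation, then an explicit pipe.to("cpu") plus cache clearing once the image is finished.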
app.py CHANGED
@@ -403,7 +403,7 @@ def set_pipeline(
     pbar.close()
 
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=230, progress=gr.Progress(track_tqdm=True))
 def generate_image_lowmem(
     text,
     neg_prompt=None,
@@ -431,17 +431,18 @@ def generate_image_lowmem(
         #Enable xFormers memory-efficient attention (optional)
         #pipe.enable_xformers_memory_efficient_attention()
         print("\nEnabled xFormers memory-efficient attention.\n")
-    else:
+    else:
         pipe.attn_implementation="flash_attention_2"
         print("\nEnabled flash_attention_2.\n")
     # alternative version that may be more efficient
     # pipe.enable_sequential_cpu_offload()
     if pipeline_name == "FluxPipeline":
         pipe.enable_model_cpu_offload()
-        pipe.vae.enable_slicing()
-
+        #pipe.vae.enable_slicing()
+        pipe.vae.enable_tiling()
     else:
-        pipe.enable_model_cpu_offload()
+        #pipe.enable_model_cpu_offload()
+        pipe.vae.enable_tiling()
 
 
     mask_parameters = {}
@@ -478,7 +479,7 @@ def generate_image_lowmem(
             "image": conditioned_image,
         }
     else:
-        additional_parameters ={
+        additional_parameters ={
            "image": conditioned_image,
        }
    additional_parameters.update(mask_parameters)
@@ -526,7 +527,7 @@ def generate_image_lowmem(
    #del conditions
    del generator
    # Move the pipeline and clear cache
-
+   pipe.to("cpu")
    torch.cuda.empty_cache()
    torch.cuda.ipc_collect()
    print(torch.cuda.memory_summary(device=None, abbreviated=False))
@@ -780,7 +781,7 @@ def composite_with_control_sync(input_image, new_image, slider_value):
 
    # Load the images using open_image() if they are provided as file paths.
    new_img_path, _ = get_image_from_dict(new_image)
-   if input_image is None:
+   if (input_image is None) or (input_image["composite"] is None):
        return new_img_path
    in_img = open_image(input_image) if isinstance(input_image, (dict,str)) else input_image
    new_img = open_image(new_img_path)
@@ -1330,7 +1331,7 @@ with gr.Blocks(css_paths="style_20250314.css", title=title, theme='Surn/beeuty',
    hura_alpha_composite_slider = gr.Slider(0,100,50,0.5, label="HURA Transparancy", elem_id="hura_alpha_composite_slider", interactive=True)
    with gr.Row():
        hura_button = gr.Button("Composite Input Image", elem_classes="solid")
-       hura_sketch_button = gr.Button("Composite
+       hura_sketch_button = gr.Button("Composite Sketch Image", elem_classes="solid")
 
    hura_sketch_button.click(
        fn=composite_with_control_sync,
@@ -1564,7 +1565,7 @@ with gr.Blocks(css_paths="style_20250314.css", title=title, theme='Surn/beeuty',
        "Replace Input Image with Template",
        elem_id="prerendered_replace_input_image_button",
        elem_classes="solid"
-   )
+   )
    with gr.Row():
        with gr.Accordion("Template Images", open = False):
            with gr.Row():
@@ -1573,7 +1574,7 @@ with gr.Blocks(css_paths="style_20250314.css", title=title, theme='Surn/beeuty',
            prerendered_image_gallery = gr.Gallery(label="Image Gallery", show_label=True, value=build_prerendered_images_by_quality(3,'thumbnail'), elem_id="gallery",
                elem_classes="solid", type="filepath", columns=[3], rows=[3], preview=False ,object_fit="contain", height="auto", format="png",allow_preview=False)
            with gr.Row():
-               image_guidance_stength = gr.Slider(label="Image Guidance Strength (prompt percentage)", info="applies to Input, Sketch and Template Image",minimum=0, maximum=1.0, value=0.
+               image_guidance_stength = gr.Slider(label="Image Guidance Strength (prompt percentage)", info="applies to Input, Sketch and Template Image",minimum=0, maximum=1.0, value=0.92, step=0.01, interactive=True)
 
    with gr.Tab("Add Margins", id="margins") as margins_tab:
        with gr.Row():
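Condensed out of the hunks above, the low-memory pattern generate_image_lowmem now follows is roughly the sketch below (the helper name run_lowmem and the bare **call_kwargs are simplifications; the real function takes many more parameters):

import torch

def run_lowmem(pipe, **call_kwargs):
    # Keep only the currently active sub-model on the GPU while the pipeline runs.
    pipe.enable_model_cpu_offload()
    # Decode latents tile by tile so the VAE never holds the full image in VRAM;
    # the diff swaps enable_slicing() for enable_tiling().
    pipe.vae.enable_tiling()
    try:
        result = pipe(**call_kwargs)
    finally:
        # Mirror the new cleanup lines: park the pipeline on the CPU and
        # release cached VRAM once generation is done.
        pipe.to("cpu")
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
    return result

For non-Flux pipelines the diff comments out enable_model_cpu_offload() and keeps only the VAE tiling, so only the cleanup half of this sketch applies there.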
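The composite_with_control_sync change guards against an empty sketch canvas: a gr.ImageEditor value reaches the callback as a dict with "background", "layers" and "composite" entries, and "composite" can be None when nothing has been drawn or uploaded. The same check in isolation (the helper name is made up for illustration):

def has_usable_image(editor_value) -> bool:
    # None: the editor component was never populated at all.
    if editor_value is None:
        return False
    # Dict values come from gr.ImageEditor; an empty canvas leaves "composite" unset.
    if isinstance(editor_value, dict):
        return editor_value.get("composite") is not None
    # A plain filepath string or PIL image is already usable as-is.
    return True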