linoyts HF Staff commited on
Commit
dbfd737
·
verified ·
1 Parent(s): 09a6cb1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +83 -82
app.py CHANGED
@@ -4,9 +4,9 @@ import spaces
4
  import torch
5
  import random
6
  import os
 
 
7
 
8
- # from diffusers import QwenImageEditInpaintPipeline
9
- from optimization import optimize_pipeline_
10
  from diffusers.utils import load_image
11
 
12
  from diffusers import QwenImageControlNetModel, QwenImageControlNetInpaintPipeline
@@ -186,14 +186,14 @@ def infer(edit_images,
186
  prompt=prompt,
187
  negative_prompt=negative_prompt,
188
  control_image=image,
189
- control_mask=mask_image,
190
  controlnet_conditioning_scale=strength,
191
  num_inference_steps=num_inference_steps,
192
  true_cfg_scale=true_cfg_scale,
193
  generator=torch.Generator(device="cuda").manual_seed(seed)
194
  ).images[0]
195
 
196
- return [image,result_image], seed
197
 
198
  examples = [
199
  "change the hat to red",
@@ -217,87 +217,88 @@ css = """
217
 
218
 
219
  with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
220
- gr.HTML("<h1 style='text-align: center'>Qwen-Image with InstantX Inpainting ControlNet</style>")
221
- gr.Markdown(
222
- "Generate images with the [InstantX/Qwen-Image-ControlNet-Inpainting](https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting) that takes depth, pose and canny conditionings"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
223
  )
224
- with gr.Row():
225
- with gr.Column():
226
- edit_image = gr.ImageEditor(
227
- label='Upload and draw mask for inpainting',
228
- type='pil',
229
- sources=["upload", "webcam"],
230
- image_mode='RGB',
231
- layers=False,
232
- brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
233
- height=600
234
- )
235
- prompt = gr.Text(
236
- label="Prompt",
237
- show_label=False,
238
- max_lines=1,
239
- placeholder="Enter your prompt (e.g., 'change the hat to red')",
240
- container=False,
241
- )
242
- negative_prompt = gr.Text(
243
- label="Negative Prompt",
244
- show_label=True,
245
- max_lines=1,
246
- placeholder="Enter what you don't want (optional)",
247
- container=False,
248
- value="",
249
- visible=False
250
- )
251
- run_button = gr.Button("Run")
252
-
253
- with gr.Column():
254
- result = gr.ImageSlider(label="Result", show_label=False, interactive=False)
255
- use_as_input_button = gr.Button("🔄 Use as Input Image", visible=False, variant="secondary")
256
 
257
- with gr.Accordion("Advanced Settings", open=False):
258
-
259
- seed = gr.Slider(
260
- label="Seed",
261
- minimum=0,
262
- maximum=MAX_SEED,
263
- step=1,
264
- value=42,
 
 
 
265
  )
266
 
267
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
268
-
269
-
270
- with gr.Row():
271
- strength = gr.Slider(
272
- label="Conditioning Scale",
273
- minimum=0.0,
274
- maximum=1.0,
275
- step=0.1,
276
- value=1.0,
277
- info="Controls how much the inpainted region should change"
278
- )
279
-
280
- true_cfg_scale = gr.Slider(
281
- label="True CFG Scale",
282
- minimum=1.0,
283
- maximum=10.0,
284
- step=0.5,
285
- value=4.0,
286
- info="Classifier-free guidance scale"
287
- )
288
-
289
- num_inference_steps = gr.Slider(
290
- label="Number of inference steps",
291
- minimum=1,
292
- maximum=50,
293
- step=1,
294
- value=30,
295
- )
296
- rewrite_prompt = gr.Checkbox(
297
- label="Enhance prompt (using HF Inference)",
298
- value=True
299
  )
300
 
 
 
 
 
 
 
 
 
 
 
 
 
 
301
  # Event handlers for reuse functionality
302
  use_as_input_button.click(
303
  fn=use_output_as_input,
@@ -314,9 +315,9 @@ with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
314
  outputs=result,
315
  show_api=False
316
  ).then(
317
- fn = infer,
318
- inputs = [edit_image, prompt, negative_prompt, seed, randomize_seed, strength, num_inference_steps, true_cfg_scale, rewrite_prompt],
319
- outputs = [result, seed]
320
  ).then(
321
  fn=lambda: gr.update(visible=True),
322
  inputs=None,
 
4
  import torch
5
  import random
6
  import os
7
+ import json
8
+
9
 
 
 
10
  from diffusers.utils import load_image
11
 
12
  from diffusers import QwenImageControlNetModel, QwenImageControlNetInpaintPipeline
 
186
  prompt=prompt,
187
  negative_prompt=negative_prompt,
188
  control_image=image,
189
+ control_mask=mask,
190
  controlnet_conditioning_scale=strength,
191
  num_inference_steps=num_inference_steps,
192
  true_cfg_scale=true_cfg_scale,
193
  generator=torch.Generator(device="cuda").manual_seed(seed)
194
  ).images[0]
195
 
196
+ return [image, result_image], seed
197
 
198
  examples = [
199
  "change the hat to red",
 
217
 
218
 
219
  with gr.Blocks(css=css, theme=gr.themes.Citrus()) as demo:
220
+ gr.HTML("<h1 style='text-align: center'>Qwen-Image with InstantX Inpainting ControlNet</h1>")
221
+ gr.Markdown(
222
+ "Inpaint images with the [InstantX/Qwen-Image-ControlNet-Inpainting](https://huggingface.co/InstantX/Qwen-Image-ControlNet-Inpainting) ControlNet — upload an image, paint a mask over the region to change, and describe the edit"
223
+ )
224
+ with gr.Row():
225
+ with gr.Column():
226
+ edit_image = gr.ImageEditor(
227
+ label='Upload and draw mask for inpainting',
228
+ type='pil',
229
+ sources=["upload", "webcam"],
230
+ image_mode='RGB',
231
+ layers=False,
232
+ brush=gr.Brush(colors=["#FFFFFF"], color_mode="fixed"),
233
+ height=600
234
+ )
235
+ prompt = gr.Text(
236
+ label="Prompt",
237
+ show_label=False,
238
+ max_lines=1,
239
+ placeholder="Enter your prompt (e.g., 'change the hat to red')",
240
+ container=False,
241
+ )
242
+ negative_prompt = gr.Text(
243
+ label="Negative Prompt",
244
+ show_label=True,
245
+ max_lines=1,
246
+ placeholder="Enter what you don't want (optional)",
247
+ container=False,
248
+ value="",
249
+ visible=False
250
+ )
251
+ run_button = gr.Button("Run")
252
+
253
+ with gr.Column():
254
+ result = gr.ImageSlider(label="Result", show_label=False, interactive=False)
255
+ use_as_input_button = gr.Button("🔄 Use as Input Image", visible=False, variant="secondary")
256
+
257
+ with gr.Accordion("Advanced Settings", open=False):
258
+
259
+ seed = gr.Slider(
260
+ label="Seed",
261
+ minimum=0,
262
+ maximum=MAX_SEED,
263
+ step=1,
264
+ value=42,
265
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
266
 
267
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
268
+
269
+
270
+ with gr.Row():
271
+ strength = gr.Slider(
272
+ label="Conditioning Scale",
273
+ minimum=0.0,
274
+ maximum=1.0,
275
+ step=0.1,
276
+ value=1.0,
277
+ info="Controls how much the inpainted region should change"
278
  )
279
 
280
+ true_cfg_scale = gr.Slider(
281
+ label="True CFG Scale",
282
+ minimum=1.0,
283
+ maximum=10.0,
284
+ step=0.5,
285
+ value=4.0,
286
+ info="Classifier-free guidance scale"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
287
  )
288
 
289
+ num_inference_steps = gr.Slider(
290
+ label="Number of inference steps",
291
+ minimum=1,
292
+ maximum=50,
293
+ step=1,
294
+ value=30,
295
+ )
296
+
297
+ rewrite_prompt = gr.Checkbox(
298
+ label="Enhance prompt (using HF Inference)",
299
+ value=True
300
+ )
301
+
302
  # Event handlers for reuse functionality
303
  use_as_input_button.click(
304
  fn=use_output_as_input,
 
315
  outputs=result,
316
  show_api=False
317
  ).then(
318
+ fn=infer,
319
+ inputs=[edit_image, prompt, negative_prompt, seed, randomize_seed, strength, num_inference_steps, true_cfg_scale, rewrite_prompt],
320
+ outputs=[result, seed]
321
  ).then(
322
  fn=lambda: gr.update(visible=True),
323
  inputs=None,