LPX55 committed on
Commit
502a379
·
verified ·
1 Parent(s): a21161b

Update app_local.py

Browse files
Files changed (1) hide show
  1. app_local.py +66 -59
app_local.py CHANGED
@@ -55,9 +55,9 @@ def get_fresh_presets():
55
  ORIGINAL_PRESETS = deepcopy(PRESETS)
56
 
57
  # Preload enhancement model at startup
58
- print("🔄 Loading prompt enhancement model...")
59
  rewriter_tokenizer = AutoTokenizer.from_pretrained(REWRITER_MODEL)
60
- print("✅ Enhancement model loaded and ready!")
61
 
62
  SYSTEM_PROMPT_EDIT = '''
63
  # Edit Instruction Rewriter
@@ -113,7 +113,7 @@ def extract_json_response(model_output: str) -> str:
113
  end_idx = model_output.rfind('}')
114
  # Fix the condition - check if brackets were found
115
  if start_idx == -1 or end_idx == -1 or start_idx >= end_idx:
116
- print(f"No valid JSON structure found in output. Start: {start_idx}, End: {end_idx}")
117
  return None
118
  # Expand to the full object including outer braces
119
  end_idx += 1 # Include the closing brace
@@ -153,8 +153,8 @@ def extract_json_response(model_output: str) -> str:
153
  if str_values:
154
  return str_values[0].strip()
155
  except Exception as e:
156
- print(f"JSON parse error: {str(e)}")
157
- print(f"Model output was: {model_output}")
158
  return None
159
 
160
  def polish_prompt(original_prompt: str) -> str:
@@ -186,8 +186,8 @@ def polish_prompt(original_prompt: str) -> str:
186
  generated_ids[0][model_inputs.input_ids.shape[1]:],
187
  skip_special_tokens=True
188
  ).strip()
189
- print(f"Original Prompt: {original_prompt}")
190
- print(f"Model raw output: {enhanced}") # Debug logging
191
  # Try to extract JSON content
192
  rewritten_prompt = extract_json_response(enhanced)
193
  if rewritten_prompt:
@@ -256,7 +256,7 @@ pipe.fuse_lora()
256
  try:
257
  pipe.enable_vae_slicing()
258
  except Exception as e:
259
- print(f"VAE Slicing Failed: {e}")
260
 
261
 
262
  def toggle_output_count(preset_type):
@@ -297,39 +297,26 @@ def update_prompt_preview(preset_type, base_prompt):
297
  return preview_text
298
  else:
299
  return "Select a preset above to see how your base prompt will be modified for batch generation."
300
-
301
- def update_preset_prompt_textbox(preset_type, prompt_1, prompt_2, prompt_3, prompt_4):
302
- """Update preset prompts based on user input - now works with session copy"""
303
- if preset_type and preset_type in ORIGINAL_PRESETS:
304
- # Update each prompt in the preset copy (this won't persist globally)
305
- new_prompts = [prompt_1, prompt_2, prompt_3, prompt_4]
306
- # Create a working copy for preview purposes
307
- working_presets = get_fresh_presets()
308
- for i, new_prompt in enumerate(new_prompts):
309
- if i < len(working_presets[preset_type]["prompts"]):
310
- working_presets[preset_type]["prompts"][i] = new_prompt.strip()
311
- else:
312
- working_presets[preset_type]["prompts"].append(new_prompt.strip())
313
- # Return updated preset info for preview
314
- return update_prompt_preview_with_presets(preset_type, "your subject", working_presets)
315
- return "Select a preset first to edit its prompts."
316
 
317
  def update_prompt_preview_with_presets(preset_type, base_prompt, custom_presets):
318
- """Update the prompt preview display with custom presets"""
319
  if preset_type and preset_type in custom_presets:
320
  preset = custom_presets[preset_type]
321
  non_empty_prompts = [p for p in preset["prompts"] if p.strip()]
322
  if not non_empty_prompts:
323
  return "No prompts defined. Please enter at least one prompt in the editor."
324
- preview_text = f"**Preset: {preset_type}**\n\n"
325
- preview_text += f"*{preset['description']}*\n\n"
326
- preview_text += f"**Generating {len(non_empty_prompts)} image{'s' if len(non_empty_prompts) > 1 else ''}:**\n"
327
- for i, preset_prompt in enumerate(non_empty_prompts, 1):
328
- full_prompt = f"{base_prompt}, {preset_prompt}"
329
- preview_text += f"{i}. {full_prompt}\n"
330
- return preview_text
331
- else:
332
- return "Select a preset above to see how your base prompt will be modified for batch generation."
333
 
334
  @spaces.GPU()
335
  def infer(
@@ -346,6 +333,8 @@ def infer(
346
  ):
347
  """Image editing endpoint with optimized prompt handling - now uses fresh presets"""
348
  # Resize image to max 1024px on longest side
 
 
349
  def resize_image(pil_image, max_size=1024):
350
  """Resize image to maximum dimension of 1024px while maintaining aspect ratio"""
351
  try:
@@ -361,10 +350,10 @@ def infer(
361
  new_height = int(height * scale)
362
  # Resize image
363
  resized_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
364
- print(f"📝 Image resized from {width}x{height} to {new_width}x{new_height}")
365
  return resized_image
366
  except Exception as e:
367
- print(f"⚠️ Image resize failed: {e}")
368
  return pil_image # Return original if resize fails
369
 
370
  # Add noise function for batch variation
@@ -382,7 +371,7 @@ def infer(
382
  noisy_array = (noisy_array * 255).astype(np.uint8)
383
  return Image.fromarray(noisy_array)
384
  except Exception as e:
385
- print(f"Warning: Could not add noise to image: {e}")
386
  return pil_image # Return original if noise addition fails
387
 
388
  # Get fresh presets for this session
@@ -409,7 +398,7 @@ def infer(
409
  f"<p>Generating {len(non_empty_preset_prompts)} image{'s' if len(non_empty_preset_prompts) > 1 else ''}</p>"
410
  f"</div>"
411
  )
412
- print(f"Using preset: {preset_type} with {len(batch_prompts)} variations")
413
  else:
414
  # Fallback to manual if no valid prompts
415
  batch_prompts = [prompt]
@@ -487,7 +476,7 @@ def infer(
487
  num_images_per_prompt=2
488
  ).images
489
  edited_images.extend(result)
490
- print(f"Generated image {i+1}/{len(batch_prompts)} with prompt: {current_prompt}...")
491
  # Clear cache after generation
492
  # if device == "cuda":
493
  # torch.cuda.empty_cache()
@@ -509,7 +498,8 @@ def infer(
509
  with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Lightning Mode]") as demo:
510
  preset_prompts_state = gr.State(value=[])
511
  # preset_prompts_state = gr.State(value=["", "", "", ""])
512
-
 
513
  gr.Markdown("""
514
  <div style="text-align: center; background: linear-gradient(to right, #3a7bd5, #00d2ff); color: white; padding: 20px; border-radius: 8px;">
515
  <h1 style="margin-bottom: 5px;">⚡️ Qwen-Image-Edit Lightning</h1>
@@ -626,31 +616,26 @@ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Ligh
626
  "Hint: depending on the original image, prompt quality, and complexity, you can often get away with 3 steps, even 2 steps without much loss in quality. </div>"
627
  )
628
 
629
- # Fix the show_preset_editor function to use ORIGINAL_PRESETS:
630
  def show_preset_editor(preset_type):
631
- if preset_type and preset_type in ORIGINAL_PRESETS: # Changed from PRESETS to ORIGINAL_PRESETS
632
- preset = ORIGINAL_PRESETS[preset_type]
633
- prompts = preset["prompts"]
634
- # Pad prompts to 4 items if needed
635
- while len(prompts) < 4:
636
- prompts.append("")
637
- return gr.Group(visible=True), prompts[0], prompts[1], prompts[2], prompts[3]
638
  return gr.Group(visible=False), "", "", "", ""
639
-
640
- # Fix the update_preset_count function to use ORIGINAL_PRESETS:
641
- def update_preset_count(preset_type, prompt_1, prompt_2, prompt_3, prompt_4):
642
- """Update the output count slider based on non-empty preset prompts"""
643
- if preset_type and preset_type in ORIGINAL_PRESETS: # Changed from PRESETS to ORIGINAL_PRESETS
644
- non_empty_count = len([p for p in [prompt_1, prompt_2, prompt_3, prompt_4] if p.strip()])
645
- return gr.Slider(value=max(1, min(4, non_empty_count)), interactive=False)
646
- return gr.Slider(interactive=True, visible=False)
647
 
648
  # Update the preset_dropdown.change handlers to use ORIGINAL_PRESETS
649
  preset_dropdown.change(
650
- fn=toggle_output_count,
651
- inputs=preset_dropdown,
652
- outputs=[preset_editor, num_images_per_prompt, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4]
653
  )
 
654
 
655
  preset_dropdown.change(
656
  fn=update_prompt_preview,
@@ -658,6 +643,28 @@ with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Ligh
658
  outputs=prompt_preview
659
  )
660
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
661
  preset_prompt_1.change(
662
  fn=update_preset_count,
663
  inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
 
55
  ORIGINAL_PRESETS = deepcopy(PRESETS)
56
 
57
  # Preload enhancement model at startup
58
+ logger.info("🔄 Loading prompt enhancement model...")
59
  rewriter_tokenizer = AutoTokenizer.from_pretrained(REWRITER_MODEL)
60
+ logger.info("✅ Enhancement model loaded and ready!")
61
 
62
  SYSTEM_PROMPT_EDIT = '''
63
  # Edit Instruction Rewriter
 
113
  end_idx = model_output.rfind('}')
114
  # Fix the condition - check if brackets were found
115
  if start_idx == -1 or end_idx == -1 or start_idx >= end_idx:
116
+ logger.warning(f"No valid JSON structure found in output. Start: {start_idx}, End: {end_idx}")
117
  return None
118
  # Expand to the full object including outer braces
119
  end_idx += 1 # Include the closing brace
 
153
  if str_values:
154
  return str_values[0].strip()
155
  except Exception as e:
156
+ logger.warning(f"JSON parse error: {str(e)}")
157
+ logger.warning(f"Model output was: {model_output}")
158
  return None
159
 
160
  def polish_prompt(original_prompt: str) -> str:
 
186
  generated_ids[0][model_inputs.input_ids.shape[1]:],
187
  skip_special_tokens=True
188
  ).strip()
189
+ logger.info(f"Original Prompt: {original_prompt}")
190
+ logger.info(f"Model raw output: {enhanced}") # Debug logging
191
  # Try to extract JSON content
192
  rewritten_prompt = extract_json_response(enhanced)
193
  if rewritten_prompt:
 
256
  try:
257
  pipe.enable_vae_slicing()
258
  except Exception as e:
259
+ logger.info(f"VAE Slicing Failed: {e}")
260
 
261
 
262
  def toggle_output_count(preset_type):
 
297
  return preview_text
298
  else:
299
  return "Select a preset above to see how your base prompt will be modified for batch generation."
300
+
301
+ def update_preset_prompt_textbox(preset_type, p1, p2, p3, p4):
302
+ if preset_type and preset_type in preset_state.value:
303
+ preset_state.value[preset_type]["prompts"] = [p1, p2, p3, p4]
304
+ # Re-render preview with updated data
305
+ return update_prompt_preview_with_presets(preset_type, prompt.value, preset_state.value)
306
+ return "Select a preset first."
 
 
 
 
 
 
 
 
 
307
 
308
  def update_prompt_preview_with_presets(preset_type, base_prompt, custom_presets):
 
309
  if preset_type and preset_type in custom_presets:
310
  preset = custom_presets[preset_type]
311
  non_empty_prompts = [p for p in preset["prompts"] if p.strip()]
312
  if not non_empty_prompts:
313
  return "No prompts defined. Please enter at least one prompt in the editor."
314
+ preview = f"**Preset: {preset_type}**\n\n{preset['description']}\n\n"
315
+ preview += f"**Generating {len(non_empty_prompts)} image{'s' if len(non_empty_prompts)>1 else ''}:**\n"
316
+ for i, pp in enumerate(non_empty_prompts, 1):
317
+ preview += f"{i}. {base_prompt}, {pp}\n"
318
+ return preview
319
+ return "Select a preset to see the preview."
 
 
 
320
 
321
  @spaces.GPU()
322
  def infer(
 
333
  ):
334
  """Image editing endpoint with optimized prompt handling - now uses fresh presets"""
335
  # Resize image to max 1024px on longest side
336
+ session_presets = preset_state.value
337
+
338
  def resize_image(pil_image, max_size=1024):
339
  """Resize image to maximum dimension of 1024px while maintaining aspect ratio"""
340
  try:
 
350
  new_height = int(height * scale)
351
  # Resize image
352
  resized_image = pil_image.resize((new_width, new_height), Image.LANCZOS)
353
+ logger.info(f"📝 Image resized from {width}x{height} to {new_width}x{new_height}")
354
  return resized_image
355
  except Exception as e:
356
+ logger.warning(f"⚠️ Image resize failed: {e}")
357
  return pil_image # Return original if resize fails
358
 
359
  # Add noise function for batch variation
 
371
  noisy_array = (noisy_array * 255).astype(np.uint8)
372
  return Image.fromarray(noisy_array)
373
  except Exception as e:
374
+ logger.warning(f"Warning: Could not add noise to image: {e}")
375
  return pil_image # Return original if noise addition fails
376
 
377
  # Get fresh presets for this session
 
398
  f"<p>Generating {len(non_empty_preset_prompts)} image{'s' if len(non_empty_preset_prompts) > 1 else ''}</p>"
399
  f"</div>"
400
  )
401
+ logger.info(f"Using preset: {preset_type} with {len(batch_prompts)} variations")
402
  else:
403
  # Fallback to manual if no valid prompts
404
  batch_prompts = [prompt]
 
476
  num_images_per_prompt=2
477
  ).images
478
  edited_images.extend(result)
479
+ logger.info(f"Generated image {i+1}/{len(batch_prompts)} with prompt: {current_prompt}...")
480
  # Clear cache after generation
481
  # if device == "cuda":
482
  # torch.cuda.empty_cache()
 
498
  with gr.Blocks(title="'Qwen Image Edit' Model Playground & Showcase [4-Step Lightning Mode]") as demo:
499
  preset_prompts_state = gr.State(value=[])
500
  # preset_prompts_state = gr.State(value=["", "", "", ""])
501
+ preset_state = gr.State(value=get_fresh_presets())
502
+
503
  gr.Markdown("""
504
  <div style="text-align: center; background: linear-gradient(to right, #3a7bd5, #00d2ff); color: white; padding: 20px; border-radius: 8px;">
505
  <h1 style="margin-bottom: 5px;">⚡️ Qwen-Image-Edit Lightning</h1>
 
616
  "Hint: depending on the original image, prompt quality, and complexity, you can often get away with 3 steps, even 2 steps without much loss in quality. </div>"
617
  )
618
 
 
619
  def show_preset_editor(preset_type):
620
+ if preset_type and preset_type in preset_state.value:
621
+ preset = preset_state.value[preset_type]
622
+ prompts = preset["prompts"] + [""] * (4 - len(preset["prompts"]))
623
+ return gr.Group(visible=True), *prompts[:4]
 
 
 
624
  return gr.Group(visible=False), "", "", "", ""
625
+
626
+ def update_preset_count(preset_type, p1, p2, p3, p4):
627
+ if preset_type and preset_type in preset_state.value:
628
+ count = len([p for p in (p1,p2,p3,p4) if p.strip()])
629
+ return gr.Slider(value=max(1, min(4, count)), interactive=False)
630
+ return gr.Slider(interactive=True)
 
 
631
 
632
  # Update the preset_dropdown.change handlers to use ORIGINAL_PRESETS
633
  preset_dropdown.change(
634
+ fn=show_preset_editor,
635
+ inputs=[preset_dropdown],
636
+ outputs=[preset_editor, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4]
637
  )
638
+
639
 
640
  preset_dropdown.change(
641
  fn=update_prompt_preview,
 
643
  outputs=prompt_preview
644
  )
645
 
646
+ preset_prompt_1.change(
647
+ fn=update_preset_prompt_textbox,
648
+ inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
649
+ outputs=prompt_preview
650
+ )
651
+
652
+ preset_prompt_2.change(
653
+ fn=update_preset_prompt_textbox,
654
+ inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
655
+ outputs=prompt_preview
656
+ )
657
+ preset_prompt_3.change(
658
+ fn=update_preset_prompt_textbox,
659
+ inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
660
+ outputs=prompt_preview
661
+ )
662
+ preset_prompt_4.change(
663
+ fn=update_preset_prompt_textbox,
664
+ inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],
665
+ outputs=prompt_preview
666
+ )
667
+
668
  preset_prompt_1.change(
669
  fn=update_preset_count,
670
  inputs=[preset_dropdown, preset_prompt_1, preset_prompt_2, preset_prompt_3, preset_prompt_4],