akhaliq HF Staff commited on
Commit
86cad56
·
verified ·
1 Parent(s): 19b3ff6

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +150 -0
app.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import torch
from PIL import Image
from diffusers import QwenImageEditPlusPipeline
import spaces

# Load pipeline at startup.
# NOTE(review): this runs at import time, so the Space blocks until the
# model weights are downloaded and moved to the GPU — presumably intended
# for HF Spaces ZeroGPU startup; confirm against the Space hardware config.
pipeline = QwenImageEditPlusPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509",
    torch_dtype=torch.bfloat16  # bf16 halves memory vs fp32
)
pipeline.to('cuda')
# disable=None keeps the library's default progress-bar behavior
pipeline.set_progress_bar_config(disable=None)
@spaces.GPU(duration=120)
def edit_images(image1, image2, prompt, seed, true_cfg_scale, negative_prompt, num_steps, guidance_scale):
    """Run the Qwen image-edit pipeline on a pair of reference images.

    Args:
        image1, image2: Input images (PIL.Image or numpy array); both required.
        prompt: Text description of the desired combined/edited result.
        seed: RNG seed for reproducible generation.
        true_cfg_scale: True classifier-free-guidance scale for the pipeline.
        negative_prompt: Text describing what to avoid.
        num_steps: Number of denoising inference steps.
        guidance_scale: Standard guidance scale.

    Returns:
        The first generated PIL image, or None (with a UI warning) if either
        input image is missing.
    """
    if image1 is None or image2 is None:
        gr.Warning("Please upload both images")
        return None

    # Gradio may hand back numpy arrays depending on component config;
    # the pipeline expects PIL images.
    if not isinstance(image1, Image.Image):
        image1 = Image.fromarray(image1)
    if not isinstance(image2, Image.Image):
        image2 = Image.fromarray(image2)

    inputs = {
        "image": [image1, image2],
        "prompt": prompt,
        # int(): values arriving through the web UI can be floats, and
        # torch.manual_seed requires an integer seed.
        "generator": torch.manual_seed(int(seed)),
        "true_cfg_scale": true_cfg_scale,
        "negative_prompt": negative_prompt,
        # int(): sliders can deliver floats; step counts must be integral.
        "num_inference_steps": int(num_steps),
        "guidance_scale": guidance_scale,
        "num_images_per_prompt": 1,
    }

    # inference_mode disables autograd bookkeeping for speed/memory.
    with torch.inference_mode():
        output = pipeline(**inputs)
    return output.images[0]
# Canned prompts surfaced in the UI's examples panel; the first one doubles
# as the prompt box's initial value.
_EXAMPLE_TEXTS = (
    "The magician bear is on the left, the alchemist bear is on the right, facing each other in the central park square.",
    "Two characters standing side by side in a beautiful garden with flowers blooming",
    "The hero on the left and the villain on the right, facing off in an epic battle scene",
    "Two friends sitting together on a park bench, enjoying the sunset",
)
example_prompts = list(_EXAMPLE_TEXTS)
# --- UI layout: two input images on the left, result on the right,
# prompt + examples below, advanced knobs in a collapsed accordion. ---
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown(
        """
        # Qwen Image Edit Plus

        Upload two images and describe how you want them combined or edited together.

        [Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
        """
    )

    with gr.Row():
        with gr.Column():
            # type="pil" so edit_images receives PIL images directly.
            image1_input = gr.Image(
                label="First Image",
                type="pil",
                height=300
            )
            image2_input = gr.Image(
                label="Second Image",
                type="pil",
                height=300
            )

        with gr.Column():
            # Taller than the inputs so it spans both stacked input images.
            output_image = gr.Image(
                label="Edited Result",
                type="pil",
                height=620
            )

    with gr.Group():
        prompt_input = gr.Textbox(
            label="Prompt",
            placeholder="Describe how you want the images combined or edited...",
            value=example_prompts[0],
            lines=3
        )

    # Clicking an example fills the prompt box only; images stay as uploaded.
    gr.Examples(
        examples=[[p] for p in example_prompts],
        inputs=[prompt_input],
        label="Example Prompts"
    )

    with gr.Accordion("Advanced Settings", open=False):
        with gr.Row():
            # precision=0 constrains the seed to integer values in the UI.
            seed_input = gr.Number(
                label="Seed",
                value=0,
                precision=0
            )
            num_steps = gr.Slider(
                label="Number of Inference Steps",
                minimum=20,
                maximum=100,
                value=40,
                step=1
            )

        with gr.Row():
            true_cfg_scale = gr.Slider(
                label="True CFG Scale",
                minimum=1.0,
                maximum=10.0,
                value=4.0,
                step=0.5
            )
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=1.0,
                maximum=5.0,
                value=1.0,
                step=0.1
            )

        # Default is a single space, not empty — presumably deliberate so the
        # pipeline always receives a non-empty negative prompt; confirm.
        negative_prompt = gr.Textbox(
            label="Negative Prompt",
            value=" ",
            placeholder="What to avoid in the generation..."
        )

    generate_btn = gr.Button("Generate Edited Image", variant="primary", size="lg")

    # Input order here must match edit_images' parameter order exactly.
    generate_btn.click(
        fn=edit_images,
        inputs=[
            image1_input,
            image2_input,
            prompt_input,
            seed_input,
            true_cfg_scale,
            negative_prompt,
            num_steps,
            guidance_scale
        ],
        outputs=output_image,
        show_progress="full"
    )

demo.launch()