Update app.py
app.py (CHANGED)
@@ -83,7 +83,12 @@ def enhance_prompt(input_prompt, model_choice):
     return enhanced_text
 
 @spaces.GPU(duration=120)
-def generate_image(additional_positive_prompt, additional_negative_prompt, height, width, num_inference_steps,
+def generate_image(additional_positive_prompt, additional_negative_prompt, height, width, num_inference_steps,
+                   guidance_scale, num_images_per_prompt, use_random_seed, seed, sampler, clip_skip,
+                   use_florence2, use_medium_enhancer, use_long_enhancer,
+                   use_positive_prefix, use_positive_suffix, use_negative_prefix, use_negative_suffix,
+                   input_image=None, progress=gr.Progress(track_tqdm=True)):
+
     if use_random_seed:
         seed = random.randint(0, 2**32 - 1)
     else:
@@ -95,8 +100,8 @@ def generate_image(additional_positive_prompt, additional_negative_prompt, heigh
     # Set clip skip
     pipe.text_encoder.config.num_hidden_layers -= (clip_skip - 1)
 
-    # Start with the default positive prompt prefix
-    full_positive_prompt = DEFAULT_POSITIVE_PREFIX
+    # Start with the default positive prompt prefix if enabled
+    full_positive_prompt = DEFAULT_POSITIVE_PREFIX + ", " if use_positive_prefix else ""
 
     # Add Florence-2 caption if enabled and image is provided
     if use_florence2 and input_image is not None:
@@ -115,13 +120,19 @@ def generate_image(additional_positive_prompt, additional_negative_prompt, heigh
         long_enhanced = enhance_prompt(enhanced_prompt, "Long")
         long_enhanced = long_enhanced.lower().replace('.', ',')
         enhanced_prompt = f"{enhanced_prompt}, {long_enhanced}"
-    full_positive_prompt +=
+    full_positive_prompt += enhanced_prompt
 
-    # Add the default positive suffix
-
+    # Add the default positive suffix if enabled
+    if use_positive_suffix:
+        full_positive_prompt += f", {DEFAULT_POSITIVE_SUFFIX}"
 
     # Combine default negative prompt with additional negative prompt
-    full_negative_prompt =
+    full_negative_prompt = ""
+    if use_negative_prefix:
+        full_negative_prompt += f"{DEFAULT_NEGATIVE_PREFIX}, "
+    full_negative_prompt += additional_negative_prompt if additional_negative_prompt else ""
+    if use_negative_suffix:
+        full_negative_prompt += f", {DEFAULT_NEGATIVE_SUFFIX}"
 
     try:
         image = pipe(
@@ -134,21 +145,17 @@ def generate_image(additional_positive_prompt, additional_negative_prompt, heigh
             num_images_per_prompt=num_images_per_prompt,
             generator=torch.Generator(pipe.device).manual_seed(seed)
         ).images
-        return image, seed, full_positive_prompt
+        return image, seed, full_positive_prompt, full_negative_prompt
     except Exception as e:
         print(f"Error during image generation: {str(e)}")
-        return None, seed, full_positive_prompt
+        return None, seed, full_positive_prompt, full_negative_prompt
 
 # Gradio interface
 with gr.Blocks(theme='bethecloud/storj_theme') as demo:
     gr.HTML("""
     <h1 align="center">Pony Realism v21 SDXL - Text-to-Image Generation</h1>
     <p align="center">
-        <a href="https://huggingface.co/John6666/pony-realism-v21main-sdxl/" target="_blank">[
-        <a href="https://civitai.com/models/372465/pony-realism" target="_blank">[civitai Model Page]</a>
-        <a href="https://huggingface.co/microsoft/Florence-2-base" target="_blank">[Florence-2 Model]</a>
-        <a href="https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance-Long" target="_blank">[Prompt Enhancer Long]</a>
-        <a href="https://huggingface.co/gokaygokay/Lamini-Prompt-Enchance" target="_blank">[Prompt Enhancer Medium]</a>
+        <a href="https://huggingface.co/John6666/pony-realism-v21main-sdxl/" target="_blank">[Model Page]</a>
     </p>
     """)
 
@@ -174,21 +181,46 @@ with gr.Blocks(theme='bethecloud/storj_theme') as demo:
             use_medium_enhancer = gr.Checkbox(label="Use Medium Prompt Enhancer", value=False)
             use_long_enhancer = gr.Checkbox(label="Use Long Prompt Enhancer", value=False)
 
+            with gr.Accordion("Prefix and Suffix Settings", open=False):
+                use_positive_prefix = gr.Checkbox(
+                    label="Use Positive Prefix",
+                    value=True,
+                    info=f"Prefix: {DEFAULT_POSITIVE_PREFIX}"
+                )
+                use_positive_suffix = gr.Checkbox(
+                    label="Use Positive Suffix",
+                    value=True,
+                    info=f"Suffix: {DEFAULT_POSITIVE_SUFFIX}"
+                )
+                use_negative_prefix = gr.Checkbox(
+                    label="Use Negative Prefix",
+                    value=True,
+                    info=f"Prefix: {DEFAULT_NEGATIVE_PREFIX}"
+                )
+                use_negative_suffix = gr.Checkbox(
+                    label="Use Negative Suffix",
+                    value=True,
+                    info=f"Suffix: {DEFAULT_NEGATIVE_SUFFIX}"
+                )
+
             generate_btn = gr.Button("Generate Image")
 
         with gr.Column(scale=1):
             output_gallery = gr.Gallery(label="Result", elem_id="gallery", show_label=False)
             seed_used = gr.Number(label="Seed Used")
-
+            full_positive_prompt_used = gr.Textbox(label="Full Positive Prompt Used")
+            full_negative_prompt_used = gr.Textbox(label="Full Negative Prompt Used")
 
     generate_btn.click(
         fn=generate_image,
         inputs=[
            positive_prompt, negative_prompt, height, width, num_inference_steps,
            guidance_scale, num_images_per_prompt, use_random_seed, seed, sampler,
-            clip_skip, use_florence2, use_medium_enhancer, use_long_enhancer,
+            clip_skip, use_florence2, use_medium_enhancer, use_long_enhancer,
+            use_positive_prefix, use_positive_suffix, use_negative_prefix, use_negative_suffix,
+            input_image
        ],
-        outputs=[output_gallery, seed_used,
+        outputs=[output_gallery, seed_used, full_positive_prompt_used, full_negative_prompt_used]
    )
 
 demo.launch(debug=True)
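
For reference, the prefix/suffix handling added in this commit can be exercised on its own, without loading the SDXL pipeline or the Florence-2 captioner. The sketch below is illustrative only: it collapses the Florence-2 caption and enhancer steps into a single enhanced_prompt argument, and the DEFAULT_* strings are placeholders standing in for the constants defined earlier in app.py (not shown in this diff).

# Standalone sketch of the prompt assembly introduced in this commit.
# The DEFAULT_* values below are placeholders; the real ones live in app.py.
DEFAULT_POSITIVE_PREFIX = "score_9, score_8_up"   # placeholder
DEFAULT_POSITIVE_SUFFIX = "highly detailed"       # placeholder
DEFAULT_NEGATIVE_PREFIX = "score_4, score_3"      # placeholder
DEFAULT_NEGATIVE_SUFFIX = "blurry, lowres"        # placeholder

def assemble_prompts(enhanced_prompt, additional_negative_prompt,
                     use_positive_prefix, use_positive_suffix,
                     use_negative_prefix, use_negative_suffix):
    # Positive prompt: optional prefix, then the (possibly enhanced) prompt,
    # then an optional suffix, mirroring the new generate_image() logic.
    full_positive_prompt = DEFAULT_POSITIVE_PREFIX + ", " if use_positive_prefix else ""
    full_positive_prompt += enhanced_prompt
    if use_positive_suffix:
        full_positive_prompt += f", {DEFAULT_POSITIVE_SUFFIX}"

    # Negative prompt: optional prefix, user-supplied negatives, optional suffix.
    full_negative_prompt = ""
    if use_negative_prefix:
        full_negative_prompt += f"{DEFAULT_NEGATIVE_PREFIX}, "
    full_negative_prompt += additional_negative_prompt if additional_negative_prompt else ""
    if use_negative_suffix:
        full_negative_prompt += f", {DEFAULT_NEGATIVE_SUFFIX}"

    return full_positive_prompt, full_negative_prompt

if __name__ == "__main__":
    pos, neg = assemble_prompts(
        enhanced_prompt="a photo of a red fox in the snow",
        additional_negative_prompt="blurry hands",
        use_positive_prefix=True, use_positive_suffix=True,
        use_negative_prefix=True, use_negative_suffix=False,
    )
    print(pos)  # score_9, score_8_up, a photo of a red fox in the snow, highly detailed
    print(neg)  # score_4, score_3, blurry hands

One detail worth noting in the real change: in full_positive_prompt = DEFAULT_POSITIVE_PREFIX + ", " if use_positive_prefix else "", the conditional expression binds looser than +, so the whole DEFAULT_POSITIVE_PREFIX + ", " is selected when the prefix is enabled, which is the intended reading.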
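
The click wiring at the bottom of the diff relies on Gradio's positional mapping: components listed in inputs=[...] are passed to generate_image in order, and the values the function returns are written to the components in outputs=[...] in order, so the four new checkboxes and the two new textboxes must line up with the new parameters and the extended return tuple. A minimal, self-contained illustration of that contract (toy component names, unrelated to app.py):

import gradio as gr

def toy_fn(text, flag):
    # Returned values map positionally onto the outputs list below.
    return (text.upper() if flag else text), f"flag={flag}"

with gr.Blocks() as toy_demo:
    text_in = gr.Textbox(label="Text")
    flag_in = gr.Checkbox(label="Uppercase", value=True)
    btn = gr.Button("Run")
    text_out = gr.Textbox(label="Result")
    info_out = gr.Textbox(label="Info")

    # inputs order == toy_fn parameter order; outputs order == return-tuple order.
    btn.click(fn=toy_fn, inputs=[text_in, flag_in], outputs=[text_out, info_out])

# toy_demo.launch()  # uncomment to try it locally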