Update app.py
app.py CHANGED
@@ -1,23 +1,20 @@
 import gradio as gr
-from diffusers import
+from diffusers import AutoPipelineForText2Image
 from PIL import Image, ImageDraw, ImageFont
 import torch
 import random
 
-# Load model
 device = "cuda" if torch.cuda.is_available() else "cpu"
-torch_dtype = torch.float16 if device == "cuda" else torch.float32
 
-pipe =
-    "
-    torch_dtype=
-
+pipe = AutoPipelineForText2Image.from_pretrained(
+    "stabilityai/sdxl-turbo",
+    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
+    variant="fp16" if device == "cuda" else None
 )
 pipe = pipe.to(device)
 
 MAX_SEED = 2**32 - 1
 
-# Add "SelamGPT" watermark to image
 def add_watermark(image):
     draw = ImageDraw.Draw(image)
     font = ImageFont.load_default()
@@ -28,37 +25,40 @@
     draw.text((x, y), text, font=font, fill=(255, 255, 255))
     return image
 
-# Main generation function
 def generate(prompt, seed, randomize_seed):
     if randomize_seed or seed == 0:
         seed = random.randint(0, MAX_SEED)
-    generator = torch.Generator(device).manual_seed(seed)
+    generator = torch.Generator(device=device).manual_seed(seed)
+
+    image = pipe(
+        prompt=prompt,
+        num_inference_steps=2,
+        guidance_scale=0.0,
+        generator=generator,
+    ).images[0]
 
-    image = pipe(prompt=prompt, generator=generator).images[0]
     image = add_watermark(image)
     return image, seed
 
 examples = [
-    "
-    "
-    "α αα΅ α¨αα α α΅α«α α α°α«α« α α³α½",
+    "Futuristic Ethiopian city at sunset, detailed, cinematic",
+    "α αα΅ α«α« αα΅α₯ α¨α°α°αα¨ α¨α³αα£α α¨α°α α αα΅α α¨α³α°α¨ αα α₯ααα",
 ]
 
 with gr.Blocks() as demo:
-    gr.Markdown("
+    gr.Markdown("## 🎨 SelamGPT - Super Fast Text-to-Image Generator")
 
-    prompt = gr.Textbox(label="
-
+    prompt = gr.Textbox(label="Prompt", placeholder="Type your idea in English or Amharic")
+    run = gr.Button("Generate")
 
     result = gr.Image(label="Generated Image")
-
-    with gr.Accordion("⚙️ Advanced Settings", open=False):
+    with gr.Accordion("Advanced Settings", open=False):
         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-        randomize_seed = gr.Checkbox(label="
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
     gr.Examples(examples=examples, inputs=[prompt])
 
-
+    run.click(fn=generate, inputs=[prompt, seed, randomize_seed], outputs=[result, seed])
 
 if __name__ == "__main__":
     demo.launch()
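For reference, a minimal standalone sketch of the generation path this commit configures: SDXL-Turbo loaded through AutoPipelineForText2Image and called with two inference steps and no classifier-free guidance, as in the updated app.py. The prompt, seed value, and output filename below are arbitrary examples, not part of the app.

import torch
from diffusers import AutoPipelineForText2Image

device = "cuda" if torch.cuda.is_available() else "cpu"

# Same checkpoint and dtype/variant selection as the updated app.py
pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    variant="fp16" if device == "cuda" else None,
).to(device)

# SDXL-Turbo is distilled for very few denoising steps without guidance,
# which is why the commit passes num_inference_steps=2 and guidance_scale=0.0.
generator = torch.Generator(device=device).manual_seed(42)  # example seed
image = pipe(
    prompt="Futuristic Ethiopian city at sunset, detailed, cinematic",
    num_inference_steps=2,
    guidance_scale=0.0,
    generator=generator,
).images[0]
image.save("sdxl_turbo_sample.png")  # hypothetical output path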