Hugging Face Space — app.py (Space status at capture: Runtime error)
| import os | |
| import torch | |
| import gradio as gr | |
| from diffusers import DiffusionPipeline | |
| from PIL import Image, ImageDraw, ImageFont | |
# ===== FREE-TIER CONFIG =====
# Text stamped onto every generated image (see add_watermark).
WATERMARK_TEXT = "SelamGPT"
# Hugging Face Hub model id loaded by load_model().
MODEL_NAME = "DeepFloyd/IF-II-L-v1.0"
# Initialize pipeline (lazy load later)
# Module-level singleton; populated on the first generation request.
pipe = None
def load_model():
    """Lazily load the diffusion pipeline on first use.

    Populates the module-level ``pipe`` singleton; subsequent calls are
    no-ops. The original unconditionally ran ``pipe.to("cuda")`` with fp16
    weights, which raises on CPU-only (free-tier) hosts — device and dtype
    are now chosen based on CUDA availability.
    """
    global pipe
    if pipe is None:
        if torch.cuda.is_available():
            # fp16 halves memory but is only reliable on GPU.
            pipe = DiffusionPipeline.from_pretrained(
                MODEL_NAME,
                torch_dtype=torch.float16,
                variant="fp16",
            )
            pipe.to("cuda")
        else:
            # CPU fallback: full-precision weights, stays on CPU.
            pipe = DiffusionPipeline.from_pretrained(MODEL_NAME)
# ===== OPTIMIZED WATERMARK =====
def add_watermark(image):
    """Stamp WATERMARK_TEXT in the bottom-right corner of *image*.

    Best effort: any drawing failure returns the image unmodified rather
    than failing the whole generation.

    Args:
        image: a PIL.Image.Image; mutated in place.

    Returns:
        The same image object, watermarked when possible.
    """
    try:
        draw = ImageDraw.Draw(image)
        try:
            # Sized default font needs Pillow >= 10.1; older versions
            # raise TypeError on the size argument.
            font = ImageFont.load_default(20)
        except TypeError:
            # Fall back to the fixed-size bitmap font instead of
            # silently skipping the watermark on older Pillow.
            font = ImageFont.load_default()
        text_width = draw.textlength(WATERMARK_TEXT, font=font)
        draw.text(
            (image.width - text_width - 15, image.height - 30),
            WATERMARK_TEXT,
            font=font,
            fill=(255, 255, 255),
        )
        return image
    except Exception:
        # Deliberate best-effort: never let watermarking break generation.
        return image
# ===== GENERATION FUNCTION =====
def generate_image(prompt):
    """Generate one watermarked image from *prompt*.

    Args:
        prompt: free-text description; None or blank input is rejected.

    Returns:
        (image_or_None, status_message) — the two Gradio outputs.
    """
    # Guard None as well as whitespace: Gradio can deliver None for an
    # empty textbox, and None.strip() would raise AttributeError.
    if not prompt or not prompt.strip():
        return None, "⚠️ Please enter a prompt"
    try:
        load_model()
        # Seed on whatever device is actually available; the original
        # hard-coded "cuda", which raises on CPU-only hosts.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        result = pipe(
            prompt=prompt,
            output_type="pil",
            generator=torch.Generator(device=device).manual_seed(42),
            num_inference_steps=30,
            guidance_scale=7.0
        )
        return add_watermark(result.images[0]), "✔️ Generation successful"
    except torch.cuda.OutOfMemoryError:
        return None, "⚠️ Out of memory - Try a simpler prompt"
    except Exception as e:
        # Surface a truncated error message to the UI instead of crashing.
        return None, f"⚠️ Error: {str(e)[:200]}"
# ===== GRADIO INTERFACE =====
# Declarative UI: prompt + examples on the left, image + status on the right.
with gr.Blocks(title="SelamGPT Pro") as demo:
    gr.Markdown(
        """
    # 🎨 SelamGPT (DeepFloyd IF-II-L)
    *Free Tier Optimized - May take 2-3 minutes for first generation*
    """
    )

    with gr.Row():
        # Left column: input controls.
        with gr.Column():
            prompt_box = gr.Textbox(
                label="Describe your image",
                placeholder="A traditional Ethiopian market scene...",
                lines=3,
            )
            run_btn = gr.Button("Generate", variant="primary")
            sample_prompts = [
                ["Habesha cultural dress with gold embroidery, studio lighting"],
                ["Lalibela churches at sunrise, foggy morning"],
                ["Futuristic Addis Ababa with Ethiopian architecture"],
            ]
            gr.Examples(examples=sample_prompts, inputs=prompt_box)

        # Right column: outputs.
        with gr.Column():
            image_out = gr.Image(
                label="Generated Image",
                type="pil",
                format="webp",
                height=400,
            )
            status_box = gr.Textbox(label="Status", interactive=False)

    # Wire the button to the generation function (image + status outputs).
    run_btn.click(
        fn=generate_image,
        inputs=prompt_box,
        outputs=[image_out, status_box],
    )
if __name__ == "__main__":
    # Bind on all interfaces at the standard Spaces port so the
    # platform's reverse proxy can reach the server.
    demo.launch(server_name="0.0.0.0", server_port=7860)