# AI Image Generator — Gradio app (Hugging Face Space)
# Standard library
import os
import time
from datetime import datetime
from io import BytesIO

# Third-party
import gradio as gr
import requests
from huggingface_hub import InferenceClient
from PIL import Image
# Custom CSS for a modern, attractive interface: full-page purple gradient
# background, frosted-glass (blurred, translucent) input/output cards, a
# gradient "generate" button with hover lift, pill-shaped prompt tags, a
# shimmering animated progress bar, and the default Gradio footer hidden.
custom_css = """
.gradio-container {
    font-family: 'Inter', sans-serif;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    min-height: 100vh;
}
.container {
    max-width: 1200px !important;
    margin: auto;
    padding: 20px;
}
#title {
    text-align: center;
    color: white;
    font-size: 3em;
    font-weight: 800;
    margin-bottom: 10px;
    text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
    letter-spacing: -1px;
}
#subtitle {
    text-align: center;
    color: rgba(255,255,255,0.9);
    font-size: 1.2em;
    margin-bottom: 30px;
    font-weight: 400;
}
.input-container {
    background: rgba(255,255,255,0.95);
    border-radius: 20px;
    padding: 30px;
    box-shadow: 0 20px 60px rgba(0,0,0,0.3);
    backdrop-filter: blur(10px);
    margin-bottom: 30px;
}
.output-container {
    background: rgba(255,255,255,0.95);
    border-radius: 20px;
    padding: 30px;
    box-shadow: 0 20px 60px rgba(0,0,0,0.3);
    backdrop-filter: blur(10px);
}
.generate-btn {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
    color: white !important;
    font-size: 1.2em !important;
    font-weight: 600 !important;
    padding: 15px 40px !important;
    border-radius: 10px !important;
    border: none !important;
    cursor: pointer !important;
    transition: transform 0.2s !important;
    box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
}
.generate-btn:hover {
    transform: translateY(-2px) !important;
    box-shadow: 0 6px 20px rgba(102, 126, 234, 0.5) !important;
}
.prompt-ideas {
    display: flex;
    flex-wrap: wrap;
    gap: 10px;
    margin-top: 15px;
}
.prompt-tag {
    background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
    color: white;
    padding: 8px 15px;
    border-radius: 20px;
    font-size: 0.9em;
    cursor: pointer;
    transition: transform 0.2s;
}
.prompt-tag:hover {
    transform: scale(1.05);
}
footer {
    display: none !important;
}
.progress-bar {
    background: linear-gradient(90deg, #667eea, #764ba2, #667eea) !important;
    background-size: 200% 100% !important;
    animation: shimmer 2s infinite !important;
}
@keyframes shimmer {
    0% { background-position: 0% 50%; }
    100% { background-position: 200% 50%; }
}
"""
# Initialize the Hugging Face client
def init_client():
    """Build a Hugging Face ``InferenceClient`` from the ``HF_TOKEN`` env var.

    Returns:
        InferenceClient: a client configured with ``provider="auto"``.

    Raises:
        RuntimeError: if ``HF_TOKEN`` is not set. The original code fell back
            to the literal placeholder string ``"HF_TOKEN"`` as the API key,
            which can never authenticate and only fails later with a cryptic
            API error; failing fast here gives an actionable message instead.
    """
    token = os.environ.get('HF_TOKEN')
    if not token:
        raise RuntimeError(
            "HF_TOKEN environment variable is not set. "
            "Add your Hugging Face access token to the environment "
            "(or the Space's secrets) before generating images."
        )
    return InferenceClient(
        provider="auto",
        api_key=token,
    )
# Sample prompts for inspiration, shown in the "Need Inspiration?" dropdown.
# NOTE(review): the leading token of each entry looks like mojibake (an emoji
# that was UTF-8 encoded and then decoded as Latin-1, e.g. "π"). The original
# emoji cannot be recovered with certainty from here — confirm against the
# upstream source. use_sample_prompt() drops the first space-delimited token,
# so the garbling only affects how the dropdown choices are displayed.
SAMPLE_PROMPTS = [
    "π Cyberpunk city at night with neon lights",
    "πΈ Japanese garden in cherry blossom season",
    "ποΈ Majestic mountain landscape at sunset",
    "π Futuristic space station orbiting Earth",
    "π§ββοΈ Wizard's tower in a magical forest",
    "ποΈ Tropical beach with crystal clear water",
    "π¨ Abstract colorful art explosion",
    "ποΈ Ancient Greek temple at golden hour",
    "π Galaxy with vibrant nebula colors",
    "π¦ Macro shot of butterfly on flower"
]
def generate_image(prompt, style_preset, negative_prompt, num_steps, guidance_scale, progress=gr.Progress()):
    """Generate an image from a text prompt via the Hugging Face Inference API.

    Args:
        prompt: User description of the desired image.
        style_preset: Preset name (or "None") whose keywords are appended to
            the prompt.
        negative_prompt: Text describing content to exclude from the image.
        num_steps: Number of diffusion inference steps.
        guidance_scale: Classifier-free guidance strength.
        progress: Gradio progress tracker (injected by Gradio at call time).

    Returns:
        tuple: ``(image, status_markdown)`` where ``image`` is a PIL image on
        success or ``None`` on failure, and ``status_markdown`` is a Markdown
        message describing the result or the error.
    """
    # Guard against empty AND whitespace-only prompts.
    if not prompt or not prompt.strip():
        return None, "⚠️ Please enter a prompt to generate an image."
    try:
        progress(0, desc="🎨 Initializing AI model...")
        client = init_client()

        # Append style keywords when a known preset is selected. Using .get()
        # instead of direct indexing avoids a KeyError on an unknown preset.
        style_additions = {
            "Photorealistic": "photorealistic, highly detailed, professional photography, 8k resolution",
            "Artistic": "artistic, painterly, creative, expressive brushstrokes",
            "Anime": "anime style, manga art, japanese animation, vibrant colors",
            "Digital Art": "digital art, concept art, highly detailed, artstation trending",
            "Oil Painting": "oil painting, classical art, museum quality, masterpiece",
            "Watercolor": "watercolor painting, soft colors, artistic, flowing",
            "3D Render": "3d render, octane render, unreal engine, ray tracing",
            "Vintage": "vintage style, retro, nostalgic, old photograph aesthetic",
        }
        style_suffix = style_additions.get(style_preset)
        enhanced_prompt = f"{prompt}, {style_suffix}" if style_suffix else prompt

        progress(0.3, desc="🚀 Sending request to AI...")
        start_time = time.time()

        # Forward the advanced settings to the API. The original code accepted
        # these parameters but silently ignored them; InferenceClient's
        # text_to_image supports negative_prompt / num_inference_steps /
        # guidance_scale keyword arguments.
        extra_kwargs = {}
        if negative_prompt:
            extra_kwargs["negative_prompt"] = negative_prompt
        if num_steps:
            extra_kwargs["num_inference_steps"] = int(num_steps)
        if guidance_scale:
            extra_kwargs["guidance_scale"] = float(guidance_scale)

        image = client.text_to_image(
            enhanced_prompt,
            model="Shakker-Labs/AWPortrait-QW",
            **extra_kwargs,
        )
        generation_time = time.time() - start_time

        progress(0.9, desc="✨ Finalizing your creation...")
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        success_message = f"""
✅ **Image generated successfully!**

📝 **Prompt used:** {enhanced_prompt}
⏱️ **Generation time:** {generation_time:.2f} seconds
📅 **Created at:** {timestamp}
🎨 **Model:** Shakker-Labs/AWPortrait-QW
"""
        progress(1.0, desc="✅ Complete!")
        return image, success_message
    except Exception as e:
        # Best-effort UX: surface the error in the status panel rather than
        # crashing the Gradio event handler.
        error_message = f"""
❌ **Error generating image:**

{str(e)}

**Troubleshooting tips:**
- Check if your HF_TOKEN is valid
- Ensure you have internet connection
- Try a simpler prompt
- Check if the model is available
"""
        return None, error_message
def use_sample_prompt(prompt):
    """Strip the leading emoji token from a sample prompt.

    Returns the text after the first space; if the string contains no space
    it is returned unchanged.
    """
    _, separator, remainder = prompt.partition(' ')
    return remainder if separator else prompt
# Create the Gradio interface. Layout: a two-column row (inputs left,
# output right) between an HTML header and an HTML tips footer, followed by
# the event wiring. Component variables are referenced by the handlers at
# the bottom, so declaration order matters.
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as app:
    # Header (styled by #title / #subtitle rules in custom_css)
    gr.HTML("""
    <div id="title">π¨ AI Image Generator</div>
    <div id="subtitle">Transform your ideas into stunning visuals with AI</div>
    """)
    with gr.Row():
        # Left column: prompt, style preset, advanced settings, samples, button.
        with gr.Column(scale=1, elem_classes="input-container"):
            gr.Markdown("### πΌοΈ Create Your Masterpiece")
            # Main prompt input
            prompt_input = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe what you want to see...",
                lines=3,
                elem_id="prompt-input"
            )
            # Style presets — choices must match the keys handled in
            # generate_image's style handling ("None" means no enhancement).
            style_preset = gr.Dropdown(
                label="π¨ Style Preset",
                choices=["None", "Photorealistic", "Artistic", "Anime", "Digital Art",
                         "Oil Painting", "Watercolor", "3D Render", "Vintage"],
                value="None"
            )
            # Advanced settings (collapsed by default)
            with gr.Accordion("βοΈ Advanced Settings", open=False):
                negative_prompt = gr.Textbox(
                    label="Negative Prompt",
                    placeholder="What you don't want in the image...",
                    lines=2
                )
                with gr.Row():
                    num_steps = gr.Slider(
                        label="Inference Steps",
                        minimum=10,
                        maximum=50,
                        value=30,
                        step=5
                    )
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=20,
                        value=7.5,
                        step=0.5
                    )
            # Sample prompts section — selecting an entry fills prompt_input
            # via the change handler wired below.
            gr.Markdown("### π‘ Need Inspiration?")
            sample_prompts = gr.Dropdown(
                label="Choose a sample prompt",
                choices=SAMPLE_PROMPTS,
                interactive=True
            )
            # Generate button
            generate_btn = gr.Button(
                "π Generate Image",
                elem_classes="generate-btn",
                variant="primary"
            )
        # Right column: generated image and status text.
        with gr.Column(scale=1, elem_classes="output-container"):
            gr.Markdown("### πΌοΈ Generated Image")
            # Output image (PIL, matching generate_image's return type)
            output_image = gr.Image(
                label="Your Creation",
                type="pil",
                elem_id="output-image"
            )
            # Status/Info output (Markdown from generate_image)
            output_status = gr.Markdown(
                value="Ready to generate your first image! π¨"
            )
    # Footer with tips
    gr.HTML("""
    <div style="text-align: center; margin-top: 40px; color: white; opacity: 0.9;">
        <h3>π‘ Pro Tips for Better Results</h3>
        <p>β’ Be specific and descriptive in your prompts</p>
        <p>β’ Include details about lighting, mood, and style</p>
        <p>β’ Experiment with different style presets</p>
        <p>β’ Use negative prompts to exclude unwanted elements</p>
    </div>
    """)
    # Event handlers
    # Selecting a sample copies it (minus the leading token) into the prompt box.
    sample_prompts.change(
        fn=use_sample_prompt,
        inputs=[sample_prompts],
        outputs=[prompt_input]
    )
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_input, style_preset, negative_prompt, num_steps, guidance_scale],
        outputs=[output_image, output_status]
    )
    # Also allow generation by pressing Enter in the prompt field
    prompt_input.submit(
        fn=generate_image,
        inputs=[prompt_input, style_preset, negative_prompt, num_steps, guidance_scale],
        outputs=[output_image, output_status]
    )
# Launch the app when run as a script.
if __name__ == "__main__":
    app.launch(
        share=True,             # NOTE(review): share links are irrelevant on HF Spaces; only matters locally
        show_error=True,        # surface handler exceptions in the browser UI
        server_name="0.0.0.0",  # bind all interfaces so the container can route traffic
        server_port=7860,       # Gradio/Spaces conventional port
        favicon_path=None
    )