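"""AI Image Generator: a Gradio front end for Hugging Face text-to-image inference.

Wraps the Shakker-Labs/AWPortrait-QW model behind a styled Blocks UI with
style presets, sample prompts, and optional advanced generation settings.
"""
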
import gradio as gr
import os
from huggingface_hub import InferenceClient
from PIL import Image
import requests
from io import BytesIO
import time
from datetime import datetime

# Custom CSS for a modern, attractive interface
custom_css = """
    .gradio-container {
        font-family: 'Inter', sans-serif;
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        min-height: 100vh;
    }
    
    .container {
        max-width: 1200px !important;
        margin: auto;
        padding: 20px;
    }
    
    #title {
        text-align: center;
        color: white;
        font-size: 3em;
        font-weight: 800;
        margin-bottom: 10px;
        text-shadow: 2px 2px 4px rgba(0,0,0,0.3);
        letter-spacing: -1px;
    }
    
    #subtitle {
        text-align: center;
        color: rgba(255,255,255,0.9);
        font-size: 1.2em;
        margin-bottom: 30px;
        font-weight: 400;
    }
    
    .input-container {
        background: rgba(255,255,255,0.95);
        border-radius: 20px;
        padding: 30px;
        box-shadow: 0 20px 60px rgba(0,0,0,0.3);
        backdrop-filter: blur(10px);
        margin-bottom: 30px;
    }
    
    .output-container {
        background: rgba(255,255,255,0.95);
        border-radius: 20px;
        padding: 30px;
        box-shadow: 0 20px 60px rgba(0,0,0,0.3);
        backdrop-filter: blur(10px);
    }
    
    .generate-btn {
        background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
        color: white !important;
        font-size: 1.2em !important;
        font-weight: 600 !important;
        padding: 15px 40px !important;
        border-radius: 10px !important;
        border: none !important;
        cursor: pointer !important;
        transition: transform 0.2s !important;
        box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
    }
    
    .generate-btn:hover {
        transform: translateY(-2px) !important;
        box-shadow: 0 6px 20px rgba(102, 126, 234, 0.5) !important;
    }
    
    .prompt-ideas {
        display: flex;
        flex-wrap: wrap;
        gap: 10px;
        margin-top: 15px;
    }
    
    .prompt-tag {
        background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
        color: white;
        padding: 8px 15px;
        border-radius: 20px;
        font-size: 0.9em;
        cursor: pointer;
        transition: transform 0.2s;
    }
    
    .prompt-tag:hover {
        transform: scale(1.05);
    }
    
    footer {
        display: none !important;
    }
    
    .progress-bar {
        background: linear-gradient(90deg, #667eea, #764ba2, #667eea) !important;
        background-size: 200% 100% !important;
        animation: shimmer 2s infinite !important;
    }
    
    @keyframes shimmer {
        0% { background-position: 0% 50%; }
        100% { background-position: 200% 50%; }
    }
"""

# Initialize the Hugging Face client
def init_client():
    token = os.environ.get("HF_TOKEN")
    if not token:
        # Fail fast with a clear message instead of falling back to a
        # placeholder string, which would only surface later as a confusing
        # authentication error from the API.
        raise RuntimeError(
            "HF_TOKEN is not set. Add your Hugging Face access token to the "
            "environment (or your Space secrets) before running the app."
        )

    return InferenceClient(
        provider="auto",
        api_key=token,
    )
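
# Illustrative standalone usage (assumes HF_TOKEN is set in the environment):
#
#     client = init_client()
#     image = client.text_to_image("a lighthouse at dawn", model="Shakker-Labs/AWPortrait-QW")
#     image.save("lighthouse.png")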

# Sample prompts for inspiration
SAMPLE_PROMPTS = [
    "🌃 Cyberpunk city at night with neon lights",
    "🌸 Japanese garden in cherry blossom season",
    "🏔️ Majestic mountain landscape at sunset",
    "πŸš€ Futuristic space station orbiting Earth",
    "πŸ§™β€β™‚οΈ Wizard's tower in a magical forest",
    "🏖️ Tropical beach with crystal clear water",
    "🎨 Abstract colorful art explosion",
    "🏛️ Ancient Greek temple at golden hour",
    "🌌 Galaxy with vibrant nebula colors",
    "πŸ¦‹ Macro shot of butterfly on flower"
]

def generate_image(prompt, style_preset, negative_prompt, num_steps, guidance_scale, progress=gr.Progress()):
    """Generate image using Hugging Face Inference API"""
    
    if not prompt:
        return None, "⚠️ Please enter a prompt to generate an image."
    
    try:
        progress(0, desc="🎨 Initializing AI model...")
        client = init_client()
        
        # Add style to prompt if selected
        enhanced_prompt = prompt
        if style_preset != "None":
            style_additions = {
                "Photorealistic": "photorealistic, highly detailed, professional photography, 8k resolution",
                "Artistic": "artistic, painterly, creative, expressive brushstrokes",
                "Anime": "anime style, manga art, japanese animation, vibrant colors",
                "Digital Art": "digital art, concept art, highly detailed, artstation trending",
                "Oil Painting": "oil painting, classical art, museum quality, masterpiece",
                "Watercolor": "watercolor painting, soft colors, artistic, flowing",
                "3D Render": "3d render, octane render, unreal engine, ray tracing",
                "Vintage": "vintage style, retro, nostalgic, old photograph aesthetic"
            }
            enhanced_prompt = f"{prompt}, {style_additions[style_preset]}"
        
        progress(0.3, desc="πŸš€ Sending request to AI...")
        
        # Generate the image
        start_time = time.time()
        
        # Forward the advanced settings to the API. Recent huggingface_hub
        # releases accept these keyword arguments; exact support may vary by
        # library version and by the provider selected with provider="auto".
        image = client.text_to_image(
            enhanced_prompt,
            model="Shakker-Labs/AWPortrait-QW",
            negative_prompt=negative_prompt or None,
            num_inference_steps=int(num_steps),
            guidance_scale=guidance_scale,
        )
        
        generation_time = time.time() - start_time
        
        progress(0.9, desc="✨ Finalizing your creation...")
        
        # Record when the image was generated (shown in the status message)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        
        # Build the status Markdown flush-left so the renderer doesn't treat
        # the indentation as a code block.
        success_message = f"""βœ… **Image generated successfully!**

- πŸ“ **Prompt used:** {enhanced_prompt}
- ⏱️ **Generation time:** {generation_time:.2f} seconds
- πŸ• **Created at:** {timestamp}
- 🎨 **Model:** Shakker-Labs/AWPortrait-QW
"""
        
        progress(1.0, desc="βœ… Complete!")
        
        return image, success_message
        
    except Exception as e:
        # Keep the Markdown flush-left so it renders as text, not a code block.
        error_message = f"""❌ **Error generating image:** {e}

**Troubleshooting tips:**

- Check that your HF_TOKEN is valid
- Make sure you have a working internet connection
- Try a simpler prompt
- Check that the model is available
"""
        return None, error_message

def use_sample_prompt(prompt):
    """Extract the actual prompt from the sample (remove emoji)"""
    return prompt.split(' ', 1)[1] if ' ' in prompt else prompt
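    # Example: use_sample_prompt("🌃 Cyberpunk city at night with neon lights")
    # returns "Cyberpunk city at night with neon lights".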

# Create the Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as app:
    # Header
    gr.HTML("""
        <div id="title">🎨 AI Image Generator</div>
        <div id="subtitle">Transform your ideas into stunning visuals with AI</div>
    """)
    
    with gr.Row():
        with gr.Column(scale=1, elem_classes="input-container"):
            gr.Markdown("### πŸ–ΌοΈ Create Your Masterpiece")
            
            # Main prompt input
            prompt_input = gr.Textbox(
                label="Enter your prompt",
                placeholder="Describe what you want to see...",
                lines=3,
                elem_id="prompt-input"
            )
            
            # Style presets
            style_preset = gr.Dropdown(
                label="🎨 Style Preset",
                choices=["None", "Photorealistic", "Artistic", "Anime", "Digital Art", 
                        "Oil Painting", "Watercolor", "3D Render", "Vintage"],
                value="None"
            )
            
            # Advanced settings (collapsible)
            with gr.Accordion("βš™οΈ Advanced Settings", open=False):
                negative_prompt = gr.Textbox(
                    label="Negative Prompt",
                    placeholder="What you don't want in the image...",
                    lines=2
                )
                
                with gr.Row():
                    num_steps = gr.Slider(
                        label="Inference Steps",
                        minimum=10,
                        maximum=50,
                        value=30,
                        step=5
                    )
                    
                    guidance_scale = gr.Slider(
                        label="Guidance Scale",
                        minimum=1,
                        maximum=20,
                        value=7.5,
                        step=0.5
                    )
            
            # Sample prompts section
            gr.Markdown("### πŸ’‘ Need Inspiration?")
            sample_prompts = gr.Dropdown(
                label="Choose a sample prompt",
                choices=SAMPLE_PROMPTS,
                interactive=True
            )
            
            # Generate button
            generate_btn = gr.Button(
                "πŸš€ Generate Image",
                elem_classes="generate-btn",
                variant="primary"
            )
        
        with gr.Column(scale=1, elem_classes="output-container"):
            gr.Markdown("### πŸ–ΌοΈ Generated Image")
            
            # Output image
            output_image = gr.Image(
                label="Your Creation",
                type="pil",
                elem_id="output-image"
            )
            
            # Status/Info output
            output_status = gr.Markdown(
                value="Ready to generate your first image! 🎨"
            )
    
    # Footer with tips
    gr.HTML("""
        <div style="text-align: center; margin-top: 40px; color: white; opacity: 0.9;">
            <h3>πŸ’‘ Pro Tips for Better Results</h3>
            <p>β€’ Be specific and descriptive in your prompts</p>
            <p>β€’ Include details about lighting, mood, and style</p>
            <p>β€’ Experiment with different style presets</p>
            <p>β€’ Use negative prompts to exclude unwanted elements</p>
        </div>
    """)
    
    # Event handlers
    sample_prompts.change(
        fn=use_sample_prompt,
        inputs=[sample_prompts],
        outputs=[prompt_input]
    )
    
    generate_btn.click(
        fn=generate_image,
        inputs=[prompt_input, style_preset, negative_prompt, num_steps, guidance_scale],
        outputs=[output_image, output_status]
    )
    
    # Also allow generation by pressing Enter in the prompt field
    prompt_input.submit(
        fn=generate_image,
        inputs=[prompt_input, style_preset, negative_prompt, num_steps, guidance_scale],
        outputs=[output_image, output_status]
    )

# Launch the app
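# Note: gr.Progress updates rely on Gradio's request queue. Gradio 4.x queues
# events by default; on older 3.x versions, call app.queue() before launching
# if the progress bar does not appear.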
if __name__ == "__main__":
    app.launch(
        share=True,
        show_error=True,
        server_name="0.0.0.0",
        server_port=7860,
        favicon_path=None
    )