reab5555 committed on
Commit e30072d · verified · 1 Parent(s): 6c99909

Update app.py

Files changed (1)
  1. app.py +92 -242
app.py CHANGED
@@ -2,7 +2,6 @@ import gradio as gr
  import os
  from huggingface_hub import InferenceClient
  import tempfile
- import shutil
  from pathlib import Path

  # Initialize the client
@@ -12,305 +11,156 @@ client = InferenceClient(
      bill_to="huggingface",
  )

- def text_to_video(prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
+ def text_to_video(prompt, aspect_ratio="16:9", profile: gr.OAuthProfile | None = None):
      """Generate video from text prompt"""
      try:
          if profile is None:
              return None, "❌ Click Sign in with Hugging Face button to use this app for free"
-
+
          if not prompt or prompt.strip() == "":
              return None, "Please enter a text prompt"
-
-         # Generate video from text
+
+         # Generate video from text with aspect ratio parameter
          video = client.text_to_video(
              prompt,
              model="akhaliq/veo3.1-fast",
+             aspect_ratio=aspect_ratio,
          )
-
-         # Save the video to a temporary file
+
+         # Save the video
          with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
              tmp_file.write(video)
              video_path = tmp_file.name
-
+
          return video_path, f"✅ Video generated successfully from prompt: '{prompt[:50]}...'"
-
+
      except Exception as e:
          return None, f"❌ Error generating video: {str(e)}"

- def image_to_video(image, prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
+
+ def image_to_video(image, prompt, aspect_ratio="16:9", profile: gr.OAuthProfile | None = None):
      """Generate video from image and prompt"""
      try:
          if profile is None:
              return None, "❌ Click Sign in with Hugging Face button to use this app for free"
-
+
          if image is None:
              return None, "Please upload an image"
-
+
          if not prompt or prompt.strip() == "":
              return None, "Please enter a prompt describing the motion"
-
-         # Read the image file
-         if isinstance(image, str):
-             # If image is a file path
-             with open(image, "rb") as image_file:
-                 input_image = image_file.read()
+
+         import io
+         from PIL import Image as PILImage
+
+         # Convert image to bytes
+         buffer = io.BytesIO()
+         if isinstance(image, PILImage.Image):
+             image.save(buffer, format='PNG')
          else:
-             # If image is already bytes or similar
-             import io
-             from PIL import Image as PILImage
-
-             # Convert to bytes if necessary
-             if isinstance(image, PILImage.Image):
-                 buffer = io.BytesIO()
-                 image.save(buffer, format='PNG')
-                 input_image = buffer.getvalue()
-             else:
-                 # Assume it's a numpy array or similar
-                 pil_image = PILImage.fromarray(image)
-                 buffer = io.BytesIO()
-                 pil_image.save(buffer, format='PNG')
-                 input_image = buffer.getvalue()
-
-         # Generate video from image
+             PILImage.fromarray(image).save(buffer, format='PNG')
+         input_image = buffer.getvalue()
+
+         # Generate video from image with aspect ratio parameter
          video = client.image_to_video(
              input_image,
              prompt=prompt,
              model="akhaliq/veo3.1-fast-image-to-video",
+             aspect_ratio=aspect_ratio,
          )
-
-         # Save the video to a temporary file
+
+         # Save the video
          with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp_file:
              tmp_file.write(video)
              video_path = tmp_file.name
-
+
          return video_path, f"✅ Video generated successfully with motion: '{prompt[:50]}...'"
-
+
      except Exception as e:
          return None, f"❌ Error generating video: {str(e)}"

- def clear_text_tab():
-     """Clear text-to-video tab"""
-     return "", None, ""
-
- def clear_image_tab():
-     """Clear image-to-video tab"""
-     return None, "", None, ""

- # Custom CSS for better styling
+ # Custom CSS
  custom_css = """
  .container {
      max-width: 1200px;
      margin: auto;
  }
- .header-link {
-     text-decoration: none;
-     color: #2196F3;
-     font-weight: bold;
- }
- .header-link:hover {
-     text-decoration: underline;
- }
  .status-box {
      padding: 10px;
      border-radius: 5px;
      margin-top: 10px;
  }
- .auth-warning {
-     color: #ff6b00;
-     font-weight: bold;
-     text-align: center;
-     margin: 1em 0;
-     padding: 1em;
-     background-color: #fff3e0;
-     border-radius: 5px;
- }
  """

- # Create the Gradio interface
+ # Interface
  with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="AI Video Generator") as demo:
-     gr.Markdown(
-         """
-         # 🎬 AI Video Generator
-         ### Generate stunning videos from text or animate your images with AI
-         #### Powered by VEO 3.1 Fast Model | [Built with anycoder](https://huggingface.co/spaces/akhaliq/anycoder)
-         """
-     )
-
-     gr.HTML(
-         """
-         <div class="auth-warning">
-             ⚠️ You must Sign in with Hugging Face using the button below to use this app.
-         </div>
-         """
-     )
-
-     # Add login button - required for OAuth
+     gr.Markdown("# 🎬 AI Video Generator\n### Generate videos from text or animate images with AI")
+
      gr.LoginButton()
-
+
      with gr.Tabs() as tabs:
-         # Text-to-Video Tab
+         # --- TEXT TO VIDEO TAB ---
          with gr.Tab("📝 Text to Video", id=0):
-             gr.Markdown("### Transform your text descriptions into dynamic videos")
-
+             text_prompt = gr.Textbox(
+                 label="Text Prompt",
+                 placeholder="Describe the video you want to create...",
+                 lines=4
+             )
+             text_aspect = gr.Radio(
+                 choices=["16:9", "9:16"],
+                 value="16:9",
+                 label="Aspect Ratio"
+             )
+
              with gr.Row():
-                 with gr.Column(scale=1):
-                     text_prompt = gr.Textbox(
-                         label="Text Prompt",
-                         placeholder="Describe the video you want to create... (e.g., 'A young man walking on the street during sunset')",
-                         lines=4,
-                         max_lines=6
-                     )
-
-                     with gr.Row():
-                         text_generate_btn = gr.Button("🎬 Generate Video", variant="primary", scale=2)
-                         text_clear_btn = gr.ClearButton(value="🗑️ Clear", scale=1)
-
-                     text_status = gr.Textbox(
-                         label="Status",
-                         interactive=False,
-                         visible=True,
-                         elem_classes=["status-box"]
-                     )
-
-                 with gr.Column(scale=1):
-                     text_video_output = gr.Video(
-                         label="Generated Video",
-                         autoplay=True,
-                         show_download_button=True,
-                         height=400
-                     )
-
-             # Examples for text-to-video
-             gr.Examples(
-                 examples=[
-                     ["A serene beach at sunset with gentle waves"],
-                     ["A bustling city street with neon lights at night"],
-                     ["A majestic eagle soaring through mountain peaks"],
-                     ["An astronaut floating in space near the International Space Station"],
-                     ["Cherry blossoms falling in slow motion in a Japanese garden"],
-                 ],
-                 inputs=text_prompt,
-                 label="Example Prompts"
+                 text_generate_btn = gr.Button("🎬 Generate Video", variant="primary")
+                 text_clear_btn = gr.ClearButton(value="🗑️ Clear")
+
+             text_status = gr.Textbox(label="Status", interactive=False)
+             text_video_output = gr.Video(label="Generated Video", autoplay=True, show_download_button=True)
+
+             text_generate_btn.click(
+                 fn=text_to_video,
+                 inputs=[text_prompt, text_aspect],
+                 outputs=[text_video_output, text_status],
+                 show_progress="full"
+             )
+
+             text_clear_btn.click(
+                 fn=lambda: ("", None, ""),
+                 outputs=[text_prompt, text_video_output, text_status]
              )
-
-         # Image-to-Video Tab
+
+         # --- IMAGE TO VIDEO TAB ---
          with gr.Tab("🖼️ Image to Video", id=1):
-             gr.Markdown("### Bring your static images to life with motion")
-
+             image_input = gr.Image(label="Upload Image", type="pil", height=300)
+             image_prompt = gr.Textbox(label="Motion Prompt", placeholder="Describe how the image should move...")
+             image_aspect = gr.Radio(
+                 choices=["16:9", "9:16"],
+                 value="16:9",
+                 label="Aspect Ratio"
+             )
+
              with gr.Row():
-                 with gr.Column(scale=1):
-                     image_input = gr.Image(
-                         label="Upload Image",
-                         type="pil",
-                         height=300
-                     )
-
-                     image_prompt = gr.Textbox(
-                         label="Motion Prompt",
-                         placeholder="Describe how the image should move... (e.g., 'The cat starts to dance')",
-                         lines=3,
-                         max_lines=5
-                     )
-
-                     with gr.Row():
-                         image_generate_btn = gr.Button("🎬 Animate Image", variant="primary", scale=2)
-                         image_clear_btn = gr.ClearButton(value="🗑️ Clear", scale=1)
-
-                     image_status = gr.Textbox(
-                         label="Status",
-                         interactive=False,
-                         visible=True,
-                         elem_classes=["status-box"]
-                     )
-
-                 with gr.Column(scale=1):
-                     image_video_output = gr.Video(
-                         label="Generated Video",
-                         autoplay=True,
-                         show_download_button=True,
-                         height=400
-                     )
-
-             # Examples for image-to-video
-             gr.Examples(
-                 examples=[
-                     [None, "The person starts walking forward"],
-                     [None, "The animal begins to run"],
-                     [None, "Camera slowly zooms in while the subject smiles"],
-                     [None, "The flowers sway gently in the breeze"],
-                     [None, "The clouds move across the sky in time-lapse"],
-                 ],
-                 inputs=[image_input, image_prompt],
-                 label="Example Motion Prompts"
+                 image_generate_btn = gr.Button("🎬 Animate Image", variant="primary")
+                 image_clear_btn = gr.ClearButton(value="🗑️ Clear")
+
+             image_status = gr.Textbox(label="Status", interactive=False)
+             image_video_output = gr.Video(label="Generated Video", autoplay=True, show_download_button=True)
+
+             image_generate_btn.click(
+                 fn=image_to_video,
+                 inputs=[image_input, image_prompt, image_aspect],
+                 outputs=[image_video_output, image_status],
+                 show_progress="full"
              )
-
-     # How to Use section
-     with gr.Accordion("📖 How to Use", open=False):
-         gr.Markdown(
-             """
-             ### Text to Video:
-             1. Enter a detailed description of the video you want to create
-             2. Optionally adjust advanced settings (duration, aspect ratio, resolution)
-             3. Click "Generate Video" and wait for the AI to create your video
-             4. Download or preview your generated video
-
-             ### Image to Video:
-             1. Upload an image you want to animate
-             2. Describe the motion or action you want to add to the image
-             3. Optionally adjust advanced settings
-             4. Click "Animate Image" to bring your image to life
-             5. Download or preview your animated video
-
-             ### Tips for Better Results:
-             - Be specific and descriptive in your prompts
-             - For image-to-video, describe natural motions that fit the image
-             - Use high-quality input images for better results
-             - Experiment with different prompts to get the desired effect
-             """
-         )
-
-     # Event handlers
-     text_generate_btn.click(
-         fn=text_to_video,
-         inputs=[text_prompt],
-         outputs=[text_video_output, text_status],
-         show_progress="full",
-         queue=False,
-         api_name=False,
-         show_api=False
-     )
-
-     text_clear_btn.click(
-         fn=clear_text_tab,
-         inputs=[],
-         outputs=[text_prompt, text_video_output, text_status],
-         queue=False
-     )
-
-     image_generate_btn.click(
-         fn=image_to_video,
-         inputs=[image_input, image_prompt],
-         outputs=[image_video_output, image_status],
-         show_progress="full",
-         queue=False,
-         api_name=False,
-         show_api=False
-     )
-
-     image_clear_btn.click(
-         fn=clear_image_tab,
-         inputs=[],
-         outputs=[image_input, image_prompt, image_video_output, image_status],
-         queue=False
-     )
-
- # Launch the app
+
+             image_clear_btn.click(
+                 fn=lambda: (None, "", None, ""),
+                 outputs=[image_input, image_prompt, image_video_output, image_status]
+             )
+
+ # Launch
  if __name__ == "__main__":
-     demo.launch(
-         show_api=False,
-         share=False,
-         show_error=True,
-         enable_monitoring=False,
-         quiet=True,
-         ssr_mode=True
-     )
+     demo.launch(share=False, show_api=False, show_error=True, quiet=True)
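
Below is a minimal, standalone sketch of the same Inference API call the updated app.py makes, outside of Gradio. It assumes an HF token is available as the HF_TOKEN environment variable and that the InferenceClient constructor arguments elided from this diff (e.g. a provider setting) are left at their defaults; the aspect_ratio keyword is simply forwarded the way the committed text_to_video() does, so whether it is honored depends on the huggingface_hub version and the serving endpoint.

# Minimal sketch: call the same text-to-video endpoint as app.py, outside of Gradio.
# Assumptions: HF_TOKEN is set in the environment; constructor arguments not shown
# in the diff are left at their defaults (the Space itself authenticates via OAuth).
import os
import tempfile

from huggingface_hub import InferenceClient

client = InferenceClient(
    token=os.environ.get("HF_TOKEN"),  # assumed token source; not part of the diff
    bill_to="huggingface",
)

video_bytes = client.text_to_video(
    "A serene beach at sunset with gentle waves",
    model="akhaliq/veo3.1-fast",
    aspect_ratio="16:9",  # forwarded exactly as in the committed text_to_video()
)

# Persist the returned MP4 bytes the same way the app does.
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as tmp:
    tmp.write(video_bytes)
    print("Video saved to", tmp.name)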