akhaliq HF Staff committed on
Commit
9e7a994
·
verified ·
1 Parent(s): 42f1e7f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +157 -256
app.py CHANGED
@@ -1,49 +1,11 @@
1
  import gradio as gr
2
  import os
3
  from huggingface_hub import InferenceClient
4
- import tempfile
5
- import shutil
6
  from pathlib import Path
7
- from typing import Optional
8
- import time
9
-
10
- # -------------------------
11
- # Utilities
12
- # -------------------------
13
-
14
- def cleanup_temp_files():
15
- """Clean up old temporary video files"""
16
- try:
17
- temp_dir = tempfile.gettempdir()
18
- for file_path in Path(temp_dir).glob("*.mp4"):
19
- try:
20
- if file_path.stat().st_mtime < (time.time() - 300):
21
- file_path.unlink(missing_ok=True)
22
- except Exception:
23
- pass
24
- except Exception as e:
25
- print(f"Cleanup error: {e}")
26
 
27
- def _client_from_token(token: Optional[str]) -> InferenceClient:
28
- """Create InferenceClient from user's OAuth token"""
29
- if not token:
30
- raise gr.Error("Please sign in first. This app requires your Hugging Face login.")
31
- # IMPORTANT: do not set bill_to when using user OAuth tokens
32
- # This ensures the user is billed, not Hugging Face
33
- return InferenceClient(
34
- provider="fal-ai",
35
- api_key=token,
36
- )
37
-
38
- def _save_bytes_as_temp_mp4(data: bytes) -> str:
39
- """Save video bytes to temporary MP4 file"""
40
- temp_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
41
- try:
42
- temp_file.write(data)
43
- temp_file.flush()
44
- return temp_file.name
45
- finally:
46
- temp_file.close()
47
 
48
  def generate_video_with_auth(image, prompt, profile: gr.OAuthProfile | None, progress=gr.Progress()):
49
  """
@@ -56,162 +18,149 @@ def generate_video_with_auth(image, prompt, profile: gr.OAuthProfile | None, pro
56
  progress: Gradio progress tracker
57
 
58
  Returns:
59
- Tuple of (video_path, status_message)
60
  """
 
 
 
 
 
 
 
 
 
61
  try:
62
- # Check authentication
63
- if profile is None:
64
- return None, "❌ Sign in with Hugging Face to continue. This app uses your inference provider credits."
65
-
66
- if image is None:
67
- return None, "❌ Please upload an image first!"
68
-
69
- if not prompt or prompt.strip() == "":
70
- return None, "❌ Please enter a prompt describing the desired motion!"
71
-
72
  progress(0.2, desc="Processing image...")
73
 
74
- cleanup_temp_files()
75
-
76
  # Read the image file
77
  if isinstance(image, str):
78
- # If image is a file path
79
  with open(image, "rb") as image_file:
80
  input_image = image_file.read()
81
  else:
82
- # If image is PIL Image or array
83
- import io
84
- from PIL import Image as PILImage
85
-
86
- if isinstance(image, PILImage.Image):
87
- buffer = io.BytesIO()
88
- image.save(buffer, format='PNG')
89
- input_image = buffer.getvalue()
90
- else:
91
- # Assume it's a numpy array
92
- pil_image = PILImage.fromarray(image)
93
- buffer = io.BytesIO()
94
- pil_image.save(buffer, format='PNG')
95
- input_image = buffer.getvalue()
96
 
97
  progress(0.4, desc="Generating video with AI...")
98
 
99
- # Create client with user's OAuth token (not HF_TOKEN)
100
- # IMPORTANT: Do not use bill_to parameter - this ensures user gets billed
101
  client = InferenceClient(
102
  provider="fal-ai",
103
- api_key=profile.oauth_info.access_token, # Use user's token
104
  )
105
 
106
  # Generate video using the inference client
107
- try:
108
- video = client.image_to_video(
109
- input_image,
110
- prompt=prompt,
111
- model="chetwinlow1/Ovi",
112
- )
113
- except Exception as e:
114
- import requests
115
- if isinstance(e, requests.HTTPError) and getattr(e.response, "status_code", None) == 403:
116
- return None, "❌ Access denied by provider (403). Make sure your HF account has credits/permission for provider 'fal-ai' and model 'chetwinlow1/Ovi'."
117
- raise
118
 
119
  progress(0.9, desc="Finalizing video...")
120
 
121
  # Save the video to a temporary file
122
- video_path = _save_bytes_as_temp_mp4(video)
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
  progress(1.0, desc="Complete!")
125
 
126
- return video_path, f"✅ Video generated successfully! Prompt: '{prompt[:60]}...'"
127
 
128
- except gr.Error as e:
129
- return None, f"❌ {str(e)}"
130
  except Exception as e:
131
- return None, f" Generation failed. If this keeps happening, check your provider quota or try again later. Error: {str(e)}"
132
-
133
- def clear_all():
134
- """Clear all inputs and outputs"""
135
- return None, "", None, ""
136
-
137
- # Custom CSS for better styling
138
- custom_css = """
139
- .container {
140
- max-width: 1200px;
141
- margin: auto;
142
- }
143
- .header-link {
144
- text-decoration: none;
145
- color: #2196F3;
146
- font-weight: bold;
147
- }
148
- .header-link:hover {
149
- text-decoration: underline;
150
- }
151
- .status-box {
152
- padding: 10px;
153
- border-radius: 5px;
154
- margin-top: 10px;
155
- }
156
- .notice {
157
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
158
- color: white;
159
- padding: 14px 16px;
160
- border-radius: 12px;
161
- margin: 18px auto 6px;
162
- max-width: 860px;
163
- text-align: center;
164
- font-size: 0.98rem;
165
- }
166
- .info-box {
167
- background-color: #f0f7ff;
168
- border-left: 4px solid #4285f4;
169
- padding: 1em;
170
- margin: 1em 0;
171
- border-radius: 4px;
172
- }
173
- .special-tokens-box {
174
- background: linear-gradient(135deg, #ffeaa7 0%, #fdcb6e 100%);
175
- padding: 1em;
176
- margin: 1em 0;
177
- border-radius: 8px;
178
- border-left: 4px solid #e17055;
179
- }
180
- """
181
 
182
  # Create the Gradio interface
183
- with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Ovi Image-to-Video Generator (Paid)") as demo:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
184
 
185
- # Header with payment notice
186
  gr.HTML(
187
  """
188
- <div style="text-align:center; padding:2em 1em 1em;">
189
- <h1 style="font-size:2.2em; margin-bottom:6px;">🎬 Ovi: Image-to-Video with Audio</h1>
190
- <p style="color:#777; margin:0 0 8px;">Generate synchronized video and audio from images</p>
191
- <div class="notice">
192
- <b>Heads up:</b> This is a paid app that uses <b>your</b> inference provider credits when you run generations.
193
- Free users get <b>$0.10 in included credits</b>. <b>PRO users</b> get <b>$2 in included credits</b>
194
- and can continue using beyond that (with billing).
195
- <a href='http://huggingface.co/subscribe/pro?source=ovi' target='_blank' style='color:#fff; text-decoration:underline; font-weight:bold;'>Subscribe to PRO</a>
196
- for more credits. Please sign in with your Hugging Face account to continue.
197
- </div>
198
- <p style="font-size: 0.9em; color: #999; margin-top: 10px;">
199
- Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color:#667eea; text-decoration:underline;">anycoder</a>
200
- </p>
201
  </div>
202
  """
203
  )
204
 
205
  gr.Markdown(
206
  """
207
- ### Transform your static images into dynamic videos with synchronized audio using AI!
208
 
209
- Powered by **Ovi: Twin Backbone Cross-Modal Fusion for Audio-Video Generation** via [HuggingFace Inference Providers](https://huggingface.co/docs/huggingface_hub/guides/inference)
 
 
 
 
 
 
 
 
 
 
 
 
 
210
  """
211
  )
212
 
213
  # Add login button - required for OAuth
214
- login_btn = gr.LoginButton("Sign in with Hugging Face")
215
 
216
  gr.HTML(
217
  """
@@ -230,7 +179,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Ovi Image-to-Video
230
 
231
  gr.HTML(
232
  """
233
- <div class="special-tokens-box">
234
  <strong>✨ Special Tokens for Enhanced Control:</strong>
235
  <ul>
236
  <li><strong>Speech:</strong> <code>&lt;S&gt;Your speech content here&lt;E&gt;</code> - Text enclosed in these tags will be converted to speech</li>
@@ -247,36 +196,26 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Ovi Image-to-Video
247
  with gr.Column(scale=1):
248
  image_input = gr.Image(
249
  label="📸 Upload Image",
250
- type="pil",
251
  sources=["upload", "clipboard"],
252
  height=400,
253
  )
254
 
255
  prompt_input = gr.Textbox(
256
  label="✍️ Text Prompt",
257
- placeholder="Describe the motion and audio you want... (e.g., 'A person walking forward while talking')",
258
- lines=4,
259
- max_lines=6
260
  )
261
 
262
- with gr.Row():
263
- generate_btn = gr.Button(
264
- "🎬 Generate Video",
265
- variant="primary",
266
- scale=2
267
- )
268
-
269
- clear_btn = gr.Button(
270
- "🗑️ Clear",
271
- variant="secondary",
272
- scale=1
273
- )
274
 
275
- status_output = gr.Textbox(
276
- label="Status",
277
- interactive=False,
278
- visible=True,
279
- elem_classes=["status-box"]
280
  )
281
 
282
  gr.Examples(
@@ -287,7 +226,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Ovi Image-to-Video
287
  ]
288
  ],
289
  inputs=[image_input, prompt_input],
290
- label="Example Prompts",
291
  )
292
 
293
  with gr.Column(scale=1):
@@ -295,7 +234,6 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Ovi Image-to-Video
295
  label="🎥 Generated Video",
296
  height=400,
297
  autoplay=True,
298
- show_download_button=True
299
  )
300
 
301
  gr.Markdown(
@@ -316,107 +254,70 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="Ovi Image-to-Video
316
 
317
  ---
318
 
319
- ### 💳 Pricing Information
320
 
321
- This app uses the Hugging Face Inference API (provider: fal-ai) which charges based on usage:
322
  - **Free users**: $0.10 in included credits
323
  - **PRO users**: $2 in included credits + ability to continue with billing
 
324
 
325
- [Subscribe to PRO](http://huggingface.co/subscribe/pro?source=ovi) for more credits and features!
326
  """
327
  )
328
 
329
- # How to Use section
330
- with gr.Accordion("📖 How to Use", open=False):
331
- gr.Markdown(
332
- """
333
- ### Getting Started:
334
- 1. **Sign in** with your Hugging Face account using the button above
335
- 2. **Upload** your image - any photo or illustration
336
- 3. **Describe** the motion and audio you want in the prompt
337
- 4. **Use special tokens** for speech and audio descriptions (optional but recommended)
338
- 5. **Generate** and watch your image come to life with synchronized audio!
339
-
340
- ### Special Tokens Guide:
341
-
342
- **Speech Token**: `<S>text<E>`
343
- - Use this to add spoken dialogue to your video
344
- - Example: `The person says <S>Hello, how are you?<E>`
345
-
346
- **Audio Description Token**: `<AUDCAP>description<ENDAUDCAP>`
347
- - Use this to describe background sounds and audio effects
348
- - Example: `<AUDCAP>Birds chirping, gentle wind blowing<ENDAUDCAP>`
349
-
350
- ### Tips for Better Results:
351
- - Be specific and descriptive in your prompts
352
- - Combine visual motion descriptions with audio elements
353
- - Use high-quality input images for better results
354
- - Experiment with different prompts and special tokens
355
- - Processing takes 30-60 seconds per generation
356
-
357
- ### ⚠️ Important Notes:
358
- - This is a **paid app** that uses your inference provider credits
359
- - Each generation consumes credits based on processing time
360
- - Free accounts have limited credits ($0.10)
361
- - PRO accounts get more credits ($2) and can continue with billing
362
- - Videos are 5 seconds long at 24 FPS
363
- - Supports multiple aspect ratios (9:16, 16:9, 1:1, etc)
364
- """
365
- )
366
-
367
- gr.Markdown(
368
- """
369
- ---
370
-
371
- ### 🔗 Resources
372
-
373
- - [Ovi Model Card](https://huggingface.co/chetwinlow1/Ovi)
374
- - [Character AI](https://character.ai)
375
- - [Hugging Face Inference API Docs](https://huggingface.co/docs/huggingface_hub/guides/inference)
376
- - [Subscribe to PRO](http://huggingface.co/subscribe/pro?source=ovi)
377
-
378
- ### 📊 Model Specifications
379
-
380
- - **Provider**: fal-ai
381
- - **Model**: chetwinlow1/Ovi
382
- - **Output**: 5-second videos at 24 FPS with audio
383
- - **Input**: Image + Text prompt
384
- - **Resolution**: 720×720 area (various aspect ratios)
385
- """
386
- )
387
-
388
  # Event handlers with authentication
 
389
  generate_btn.click(
390
  fn=generate_video_with_auth,
391
  inputs=[image_input, prompt_input],
392
- outputs=[video_output, status_output],
393
- show_progress="full",
394
  queue=False,
395
  api_name=False,
396
  show_api=False,
397
  )
398
 
399
  clear_btn.click(
400
- fn=clear_all,
401
- inputs=[],
402
- outputs=[image_input, prompt_input, video_output, status_output],
403
  queue=False,
404
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
405
 
406
  # Launch the app
407
  if __name__ == "__main__":
408
- try:
409
- cleanup_temp_files()
410
- if os.path.exists("gradio_cached_examples"):
411
- shutil.rmtree("gradio_cached_examples", ignore_errors=True)
412
- except Exception as e:
413
- print(f"Initial cleanup error: {e}")
414
-
415
- demo.queue(status_update_rate="auto", api_open=False, default_concurrency_limit=None)
416
  demo.launch(
417
  show_api=False,
418
- share=False,
419
- show_error=True,
420
  enable_monitoring=False,
421
  quiet=True,
422
  )
 
1
  import gradio as gr
2
  import os
3
  from huggingface_hub import InferenceClient
 
 
4
  from pathlib import Path
5
+ import tempfile
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
+ # DO NOT create a global client for paid apps
8
+ # Each user's client will be created using their OAuth token
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  def generate_video_with_auth(image, prompt, profile: gr.OAuthProfile | None, progress=gr.Progress()):
11
  """
 
18
  progress: Gradio progress tracker
19
 
20
  Returns:
21
+ Path to the generated video file
22
  """
23
+ if profile is None:
24
+ raise gr.Error("Please sign in with Hugging Face to use this paid app")
25
+
26
+ if image is None:
27
+ raise gr.Error("Please upload an image first!")
28
+
29
+ if not prompt or prompt.strip() == "":
30
+ raise gr.Error("Please enter a prompt describing the desired motion!")
31
+
32
  try:
 
 
 
 
 
 
 
 
 
 
33
  progress(0.2, desc="Processing image...")
34
 
 
 
35
  # Read the image file
36
  if isinstance(image, str):
 
37
  with open(image, "rb") as image_file:
38
  input_image = image_file.read()
39
  else:
40
+ # If image is a PIL Image, save it temporarily
41
+ temp_image = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
42
+ image.save(temp_image.name)
43
+ with open(temp_image.name, "rb") as image_file:
44
+ input_image = image_file.read()
 
 
 
 
 
 
 
 
 
45
 
46
  progress(0.4, desc="Generating video with AI...")
47
 
48
+ # CRITICAL FOR PAID APPS: Create client with user's OAuth token
49
+ # Do NOT use bill_to parameter - this makes the USER pay, not HuggingFace
50
  client = InferenceClient(
51
  provider="fal-ai",
52
+ api_key=profile.oauth_info.access_token,
53
  )
54
 
55
  # Generate video using the inference client
56
+ video = client.image_to_video(
57
+ input_image,
58
+ prompt=prompt,
59
+ model="chetwinlow1/Ovi",
60
+ )
 
 
 
 
 
 
61
 
62
  progress(0.9, desc="Finalizing video...")
63
 
64
  # Save the video to a temporary file
65
+ output_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4")
66
+
67
+ # Check if video is bytes or a file path
68
+ if isinstance(video, bytes):
69
+ with open(output_path.name, "wb") as f:
70
+ f.write(video)
71
+ elif isinstance(video, str) and os.path.exists(video):
72
+ # If it's a path, copy it
73
+ import shutil
74
+ shutil.copy(video, output_path.name)
75
+ else:
76
+ # Try to write it directly
77
+ with open(output_path.name, "wb") as f:
78
+ f.write(video)
79
 
80
  progress(1.0, desc="Complete!")
81
 
82
+ return output_path.name
83
 
 
 
84
  except Exception as e:
85
+ raise gr.Error(f"Error generating video: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
  # Create the Gradio interface
88
+ with gr.Blocks(
89
+ theme=gr.themes.Soft(
90
+ primary_hue="blue",
91
+ secondary_hue="indigo",
92
+ ),
93
+ css="""
94
+ .header-link {
95
+ font-size: 0.9em;
96
+ color: #666;
97
+ text-decoration: none;
98
+ margin-bottom: 1em;
99
+ display: inline-block;
100
+ }
101
+ .header-link:hover {
102
+ color: #333;
103
+ text-decoration: underline;
104
+ }
105
+ .main-header {
106
+ text-align: center;
107
+ margin-bottom: 2em;
108
+ }
109
+ .info-box {
110
+ background-color: #f0f7ff;
111
+ border-left: 4px solid #4285f4;
112
+ padding: 1em;
113
+ margin: 1em 0;
114
+ border-radius: 4px;
115
+ }
116
+ .auth-warning {
117
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
118
+ color: white;
119
+ padding: 14px 16px;
120
+ border-radius: 12px;
121
+ margin: 18px auto 6px;
122
+ max-width: 860px;
123
+ text-align: center;
124
+ font-size: 0.98rem;
125
+ font-weight: bold;
126
+ }
127
+ """,
128
+ title="Image to Video Generator with Ovi (Paid)",
129
+ ) as demo:
130
 
 
131
  gr.HTML(
132
  """
133
+ <div class="main-header">
134
+ <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" class="header-link">
135
+ Built with anycoder
136
+ </a>
 
 
 
 
 
 
 
 
 
137
  </div>
138
  """
139
  )
140
 
141
  gr.Markdown(
142
  """
143
+ # 🎬 Image to Video Generator with Ovi
144
 
145
+ Transform your static images into dynamic videos with synchronized audio using AI! Upload an image and describe the motion you want to see.
146
+
147
+ Powered by Ovi: Twin Backbone Cross-Modal Fusion for Audio-Video Generation via [HuggingFace Inference Providers](https://huggingface.co/docs/huggingface_hub/guides/inference).
148
+ """
149
+ )
150
+
151
+ gr.HTML(
152
+ """
153
+ <div class="auth-warning">
154
+ 💳 <b>PAID APP:</b> This app uses <b>YOUR</b> inference provider credits.
155
+ Free users get $0.10 in included credits. PRO users get $2 in credits and can continue with billing.
156
+ <a href='http://huggingface.co/subscribe/pro?source=ovi' target='_blank' style='color:#fff; text-decoration:underline;'>Subscribe to PRO</a> for more credits.
157
+ Please sign in below to continue.
158
+ </div>
159
  """
160
  )
161
 
162
  # Add login button - required for OAuth
163
+ gr.LoginButton()
164
 
165
  gr.HTML(
166
  """
 
179
 
180
  gr.HTML(
181
  """
182
+ <div class="info-box">
183
  <strong>✨ Special Tokens for Enhanced Control:</strong>
184
  <ul>
185
  <li><strong>Speech:</strong> <code>&lt;S&gt;Your speech content here&lt;E&gt;</code> - Text enclosed in these tags will be converted to speech</li>
 
196
  with gr.Column(scale=1):
197
  image_input = gr.Image(
198
  label="📸 Upload Image",
199
+ type="filepath",
200
  sources=["upload", "clipboard"],
201
  height=400,
202
  )
203
 
204
  prompt_input = gr.Textbox(
205
  label="✍️ Text Prompt",
206
+ lines=3,
207
+ placeholder="Describe the motion and audio you want to see..."
 
208
  )
209
 
210
+ generate_btn = gr.Button(
211
+ "🎬 Generate Video",
212
+ variant="primary",
213
+ size="lg",
214
+ )
 
 
 
 
 
 
 
215
 
216
+ clear_btn = gr.Button(
217
+ "🗑️ Clear",
218
+ variant="secondary",
 
 
219
  )
220
 
221
  gr.Examples(
 
226
  ]
227
  ],
228
  inputs=[image_input, prompt_input],
229
+ label="Example",
230
  )
231
 
232
  with gr.Column(scale=1):
 
234
  label="🎥 Generated Video",
235
  height=400,
236
  autoplay=True,
 
237
  )
238
 
239
  gr.Markdown(
 
254
 
255
  ---
256
 
257
+ ### 💳 Pricing & Credits
258
 
259
+ This is a **paid app** that charges your HuggingFace inference provider account:
260
  - **Free users**: $0.10 in included credits
261
  - **PRO users**: $2 in included credits + ability to continue with billing
262
+ - Each video generation consumes credits based on processing time
263
 
264
+ [Subscribe to PRO](http://huggingface.co/subscribe/pro?source=ovi) for more credits!
265
  """
266
  )
267
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
268
  # Event handlers with authentication
269
+ # NOTE: Do NOT pass profile as input - Gradio injects it automatically
270
  generate_btn.click(
271
  fn=generate_video_with_auth,
272
  inputs=[image_input, prompt_input],
273
+ outputs=[video_output],
 
274
  queue=False,
275
  api_name=False,
276
  show_api=False,
277
  )
278
 
279
  clear_btn.click(
280
+ fn=lambda: (None, "", None),
281
+ inputs=None,
282
+ outputs=[image_input, prompt_input, video_output],
283
  queue=False,
284
  )
285
+
286
+ gr.Markdown(
287
+ """
288
+ ---
289
+
290
+ ### 🚀 How it works
291
+
292
+ 1. **Sign in** with your Hugging Face account (required for paid app)
293
+ 2. **Upload** your image - any photo or illustration
294
+ 3. **Describe** the motion you want to see in the prompt
295
+ 4. **Generate** and watch your image come to life with synchronized audio!
296
+ 5. **Credits are deducted** from your HuggingFace inference provider account
297
+
298
+ ### ⚠️ Notes
299
+
300
+ - **This is a PAID app** - uses your inference provider credits
301
+ - Video generation may take 30-60 seconds
302
+ - Generates 5-second videos at 24 FPS with synchronized audio
303
+ - Supports multiple aspect ratios (9:16, 16:9, 1:1, etc) at 720×720 area
304
+ - Best results with clear, high-quality images
305
+ - The model works best with realistic subjects and natural motions
306
+ - Free accounts have limited credits - upgrade to PRO for more
307
+
308
+ ### 🔗 Resources
309
+
310
+ - [Ovi Model Card](https://huggingface.co/chetwinlow1/Ovi)
311
+ - [Character AI](https://character.ai)
312
+ - [Subscribe to PRO](http://huggingface.co/subscribe/pro?source=ovi)
313
+ - [Inference API Documentation](https://huggingface.co/docs/huggingface_hub/guides/inference)
314
+ """
315
+ )
316
 
317
  # Launch the app
318
  if __name__ == "__main__":
 
 
 
 
 
 
 
 
319
  demo.launch(
320
  show_api=False,
 
 
321
  enable_monitoring=False,
322
  quiet=True,
323
  )