|
|
import gradio as gr |
|
|
import os |
|
|
from huggingface_hub import InferenceClient |
|
|
import tempfile |
|
|
import shutil |
|
|
from pathlib import Path |
|
|
from typing import Optional, Union |
|
|
import time |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def cleanup_temp_files():
    """Best-effort removal of stale generated videos from the system temp dir.

    Deletes any ``*.mp4`` in the temp directory older than 5 minutes; all
    failures are swallowed so cleanup can never break a generation request.
    """
    try:
        for candidate in Path(tempfile.gettempdir()).glob("*.mp4"):
            try:
                # Only reap files whose mtime is more than 300s in the past.
                if candidate.stat().st_mtime < (time.time() - 300):
                    candidate.unlink(missing_ok=True)
            except Exception:
                # File may be in use or already gone; skip it silently.
                pass
    except Exception as e:
        print(f"Cleanup error: {e}")
|
|
|
|
|
def _client_from_token(token: Optional[str]) -> InferenceClient:
    """Build a fal-ai ``InferenceClient`` authenticated with the user's token.

    Raises:
        gr.Error: when no token is available (user has not signed in).
    """
    if token:
        return InferenceClient(provider="fal-ai", api_key=token)
    raise gr.Error("Please sign in first. This app requires your Hugging Face login.")
|
|
|
|
|
def _save_bytes_as_temp_mp4(data: bytes) -> str: |
|
|
temp_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) |
|
|
try: |
|
|
temp_file.write(data) |
|
|
temp_file.flush() |
|
|
return temp_file.name |
|
|
finally: |
|
|
temp_file.close() |
|
|
|
|
|
def text_to_video(prompt, token: gr.OAuthToken | None, duration=5, aspect_ratio="16:9", resolution="720p", *_):
    """Generate a video from a text prompt using the user's provider credits.

    Args:
        prompt: Text description of the desired video.
        token: OAuth token injected by Gradio; required (generations bill the
            signed-in user's fal-ai credits).
        duration, aspect_ratio, resolution: Accepted for UI compatibility but
            not currently forwarded to the provider.
        *_: Absorbs any extra positional inputs from the UI.

    Returns:
        (video_path, status_message); ``video_path`` is None on any failure.
    """
    try:
        # Require a signed-in user before doing anything billable.
        if token is None or not getattr(token, "token", None):
            return None, "❌ Sign in with Hugging Face to continue. This app uses your inference provider credits."

        if not prompt or not prompt.strip():
            return None, "Please enter a text prompt"

        # Opportunistically reap stale temp videos before creating a new one.
        cleanup_temp_files()

        client = _client_from_token(token.token)

        try:
            video = client.text_to_video(
                prompt,
                model="akhaliq/veo3.1-fast",
            )
        except Exception as e:
            import requests

            # Surface provider-permission problems (HTTP 403) with a clear hint;
            # hub HTTP errors subclass requests.HTTPError.
            if isinstance(e, requests.HTTPError) and getattr(e.response, "status_code", None) == 403:
                return None, "❌ Access denied by provider (403). Make sure your HF account has credits/permission for provider 'fal-ai' and model 'akhaliq/veo3.1-fast'."
            raise

        video_path = _save_bytes_as_temp_mp4(video)

        return video_path, f"✅ Video generated successfully from prompt: '{prompt[:50]}...'"

    except gr.Error as e:
        return None, f"❌ {str(e)}"
    except Exception:
        # Keep internal details out of the UI; failures are usually quota or transient.
        return None, "❌ Generation failed. If this keeps happening, check your provider quota or try again later."
|
|
|
|
|
def _coerce_image_to_bytes(image) -> bytes:
    """Normalize a Gradio image value (filepath / PIL image / numpy array) to bytes."""
    if isinstance(image, str):
        # A filepath: forward the raw file contents untouched.
        with open(image, "rb") as image_file:
            return image_file.read()

    # Lazy imports: only needed for in-memory images.
    import io
    from PIL import Image as PILImage

    if isinstance(image, PILImage.Image):
        pil_image = image
    else:
        # Assume a numpy array (Gradio's other common image representation).
        pil_image = PILImage.fromarray(image)
    buffer = io.BytesIO()
    pil_image.save(buffer, format='PNG')
    return buffer.getvalue()


def image_to_video(image, prompt, token: gr.OAuthToken | None, duration=5, aspect_ratio="16:9", resolution="720p", *_):
    """Animate an uploaded image according to a motion prompt.

    Args:
        image: Filepath, PIL image, or numpy array to animate.
        prompt: Description of the desired motion.
        token: OAuth token injected by Gradio; required (generations bill the
            signed-in user's fal-ai credits).
        duration, aspect_ratio, resolution: Accepted for UI compatibility but
            not currently forwarded to the provider.
        *_: Absorbs any extra positional inputs from the UI.

    Returns:
        (video_path, status_message); ``video_path`` is None on any failure.
    """
    try:
        # Require a signed-in user before doing anything billable.
        if token is None or not getattr(token, "token", None):
            return None, "❌ Sign in with Hugging Face to continue. This app uses your inference provider credits."

        if image is None:
            return None, "Please upload an image"

        if not prompt or not prompt.strip():
            return None, "Please enter a prompt describing the motion"

        # Opportunistically reap stale temp videos before creating a new one.
        cleanup_temp_files()

        input_image = _coerce_image_to_bytes(image)

        client = _client_from_token(token.token)

        try:
            video = client.image_to_video(
                input_image,
                prompt=prompt,
                model="akhaliq/veo3.1-fast-image-to-video",
            )
        except Exception as e:
            import requests

            # Surface provider-permission problems (HTTP 403) with a clear hint;
            # hub HTTP errors subclass requests.HTTPError.
            if isinstance(e, requests.HTTPError) and getattr(e.response, "status_code", None) == 403:
                return None, "❌ Access denied by provider (403). Make sure your HF account has credits/permission for provider 'fal-ai' and model 'akhaliq/veo3.1-fast-image-to-video'."
            raise

        video_path = _save_bytes_as_temp_mp4(video)

        return video_path, f"✅ Video generated successfully with motion: '{prompt[:50]}...'"

    except gr.Error as e:
        return None, f"❌ {str(e)}"
    except Exception:
        # Keep internal details out of the UI; failures are usually quota or transient.
        return None, "❌ Generation failed. If this keeps happening, check your provider quota or try again later."
|
|
|
|
|
def clear_text_tab():
    """Reset the text-to-video tab: prompt, video output, and status message."""
    empty_prompt, empty_video, empty_status = "", None, ""
    return empty_prompt, empty_video, empty_status
|
|
|
|
|
def clear_image_tab():
    """Reset the image-to-video tab: image, prompt, video output, and status."""
    cleared = (None, "", None, "")
    return cleared
|
|
|
|
|
|
|
|
# Custom CSS for the Blocks UI below: page width, header-link styling, status
# boxes, the gradient "paid app" notice banner, and the mobile-version
# call-to-action card.
custom_css = """
.container {
    max-width: 1200px;
    margin: auto;
}
.header-link {
    text-decoration: none;
    color: #2196F3;
    font-weight: bold;
}
.header-link:hover {
    text-decoration: underline;
}
.status-box {
    padding: 10px;
    border-radius: 5px;
    margin-top: 10px;
}
.notice {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    padding: 14px 16px;
    border-radius: 12px;
    margin: 18px auto 6px;
    max-width: 860px;
    text-align: center;
    font-size: 0.98rem;
}
.mobile-link-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    padding: 1.5em;
    border-radius: 10px;
    text-align: center;
    margin: 1em 0;
    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.mobile-link {
    color: white !important;
    font-size: 1.2em;
    font-weight: bold;
    text-decoration: none;
    display: inline-block;
    padding: 0.5em 1.5em;
    background: rgba(255, 255, 255, 0.2);
    border-radius: 25px;
    transition: all 0.3s ease;
}
.mobile-link:hover {
    background: rgba(255, 255, 255, 0.3);
    transform: translateY(-2px);
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2);
}
.mobile-text {
    color: white;
    margin-bottom: 0.5em;
    font-size: 1.1em;
}
"""
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# UI layout. The handler functions defined above are wired to these components
# at the bottom of this block.
# ---------------------------------------------------------------------------
with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="AI Video Generator (Paid)") as demo:
    # App title / subtitle.
    gr.Markdown(
        """
        # π¬ AI Video Generator
        ### Generate stunning videos from text or animate your images with AI
        #### Powered by VEO 3.1 Fast Model via Hugging Face Inference API (provider: fal-ai)
        """
    )

    # Billing banner: generations are charged to the signed-in user's
    # inference-provider credits, so this is called out prominently.
    gr.HTML(
        """
        <div style="text-align:center; max-width:900px; margin:0 auto;">
            <h1 style="font-size:2.2em; margin-bottom:6px;">π¬ Sora-2</h1>
            <p style="color:#777; margin:0 0 8px;">Generate videos via the Hugging Face Inference API (provider: fal-ai)</p>
            <div class="notice">
                <b>Heads up:</b> This is a paid app that uses <b>your</b> inference provider credits when you run generations.
                Free users get <b>$0.10 in included credits</b>. <b>PRO users</b> get <b>$2 in included credits</b>
                and can continue using beyond that (with billing).
                <a href='http://huggingface.co/subscribe/pro?source=veo3' target='_blank' style='color:#fff; text-decoration:underline; font-weight:bold;'>Subscribe to PRO</a>
                for more credits. Please sign in with your Hugging Face account to continue.
            </div>
            <p style="font-size: 0.9em; color: #999; margin-top: 10px;">
                Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color:#fff; text-decoration:underline;">anycoder</a>
            </p>
        </div>
        """
    )

    # Link to the mobile-optimized version of this Space.
    gr.HTML(
        """
        <div class="mobile-link-container">
            <div class="mobile-text">π± On mobile? Use the optimized version:</div>
            <a href="https://akhaliq-veo3-1-fast.hf.space" target="_blank" class="mobile-link">
                π Open Mobile Version
            </a>
        </div>
        """
    )

    # NOTE(review): this duplicates the "Built with anycoder" credit already
    # rendered in the banner above -- confirm whether intentional.
    gr.HTML(
        """
        <p style="text-align: center; font-size: 0.9em; color: #999; margin-top: 10px;">
            Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color:#667eea; text-decoration:underline;">anycoder</a>
        </p>
        """
    )

    # OAuth sign-in; Gradio injects the resulting token into any handler whose
    # signature is annotated with gr.OAuthToken.
    login_btn = gr.LoginButton("Sign in with Hugging Face")

    with gr.Tabs() as tabs:

        # --- Tab 1: text prompt -> video ---------------------------------
        with gr.Tab("π Text to Video", id=0):
            gr.Markdown("### Transform your text descriptions into dynamic videos")

            with gr.Row():
                with gr.Column(scale=1):
                    text_prompt = gr.Textbox(
                        label="Text Prompt",
                        placeholder="Describe the video you want to create... (e.g., 'A young man walking on the street during sunset')",
                        lines=4,
                        max_lines=6
                    )

                    with gr.Row():
                        text_generate_btn = gr.Button("π¬ Generate Video", variant="primary", scale=2)
                        text_clear_btn = gr.ClearButton(value="ποΈ Clear", scale=1)

                    # Fed by text_to_video's second return value.
                    text_status = gr.Textbox(
                        label="Status",
                        interactive=False,
                        visible=True,
                        elem_classes=["status-box"]
                    )

                with gr.Column(scale=1):
                    text_video_output = gr.Video(
                        label="Generated Video",
                        autoplay=True,
                        show_download_button=True,
                        height=400
                    )

            gr.Examples(
                examples=[
                    ["A serene beach at sunset with gentle waves"],
                    ["A bustling city street with neon lights at night"],
                    ["A majestic eagle soaring through mountain peaks"],
                    ["An astronaut floating in space near the International Space Station"],
                    ["Cherry blossoms falling in slow motion in a Japanese garden"],
                ],
                inputs=text_prompt,
                label="Example Prompts"
            )

        # --- Tab 2: image + motion prompt -> video -----------------------
        with gr.Tab("πΌοΈ Image to Video", id=1):
            gr.Markdown("### Bring your static images to life with motion")

            with gr.Row():
                with gr.Column(scale=1):
                    # type="pil" means image_to_video normally receives a PIL
                    # image (it also accepts a filepath string).
                    image_input = gr.Image(
                        label="Upload Image",
                        type="pil",
                        height=300
                    )

                    image_prompt = gr.Textbox(
                        label="Motion Prompt",
                        placeholder="Describe how the image should move... (e.g., 'The cat starts to dance')",
                        lines=3,
                        max_lines=5
                    )

                    with gr.Row():
                        image_generate_btn = gr.Button("π¬ Animate Image", variant="primary", scale=2)
                        image_clear_btn = gr.ClearButton(value="ποΈ Clear", scale=1)

                    # Fed by image_to_video's second return value.
                    image_status = gr.Textbox(
                        label="Status",
                        interactive=False,
                        visible=True,
                        elem_classes=["status-box"]
                    )

                with gr.Column(scale=1):
                    image_video_output = gr.Video(
                        label="Generated Video",
                        autoplay=True,
                        show_download_button=True,
                        height=400
                    )

            # Examples supply only motion prompts; no sample images bundled.
            gr.Examples(
                examples=[
                    [None, "The person starts walking forward"],
                    [None, "The animal begins to run"],
                    [None, "Camera slowly zooms in while the subject smiles"],
                    [None, "The flowers sway gently in the breeze"],
                    [None, "The clouds move across the sky in time-lapse"],
                ],
                inputs=[image_input, image_prompt],
                label="Example Motion Prompts"
            )

    # Collapsible usage instructions.
    with gr.Accordion("π How to Use", open=False):
        gr.Markdown(
            """
            ### Text to Video:
            1. Enter a detailed description of the video you want to create
            2. Optionally adjust advanced settings (duration, aspect ratio, resolution)
            3. Click "Generate Video" and wait for the AI to create your video
            4. Download or preview your generated video

            ### Image to Video:
            1. Upload an image you want to animate
            2. Describe the motion or action you want to add to the image
            3. Optionally adjust advanced settings
            4. Click "Animate Image" to bring your image to life
            5. Download or preview your animated video

            ### Tips for Better Results:
            - Be specific and descriptive in your prompts
            - For image-to-video, describe natural motions that fit the image
            - Use high-quality input images for better results
            - Experiment with different prompts to get the desired effect

            ### Mobile Users:
            - For the best mobile experience, use the optimized version at: https://akhaliq-veo3-1-fast.hf.space
            """
        )

    # ----------------------------------------------------------------------
    # Event wiring. The OAuth token is deliberately absent from `inputs`:
    # Gradio injects it from the gr.OAuthToken annotation on the handlers.
    # api_name=False / show_api=False keep the handlers off the public API.
    # ----------------------------------------------------------------------
    text_generate_btn.click(
        fn=text_to_video,
        inputs=[text_prompt],
        outputs=[text_video_output, text_status],
        show_progress="full",
        queue=False,
        api_name=False,
        show_api=False
    )

    text_clear_btn.click(
        fn=clear_text_tab,
        inputs=[],
        outputs=[text_prompt, text_video_output, text_status],
        queue=False
    )

    image_generate_btn.click(
        fn=image_to_video,
        inputs=[image_input, image_prompt],
        outputs=[image_video_output, image_status],
        show_progress="full",
        queue=False,
        api_name=False,
        show_api=False
    )

    image_clear_btn.click(
        fn=clear_image_tab,
        inputs=[],
        outputs=[image_input, image_prompt, image_video_output, image_status],
        queue=False
    )
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # One-time startup cleanup: stale temp videos plus any leftover Gradio
    # example cache from previous runs.
    try:
        cleanup_temp_files()
        if os.path.exists("gradio_cached_examples"):
            shutil.rmtree("gradio_cached_examples", ignore_errors=True)
    except Exception as e:
        print(f"Initial cleanup error: {e}")

    # Queue is enabled but the programmatic API stays closed (api_open=False),
    # matching the api_name=False/show_api=False settings on the handlers.
    demo.queue(status_update_rate="auto", api_open=False, default_concurrency_limit=None)
    demo.launch(
        show_api=False,
        share=False,  # no public tunnel; this runs as a hosted Space
        show_error=True,
        enable_monitoring=False,
        quiet=True,
        ssr_mode=True
    )