from __future__ import annotations
import os
import random
import tempfile
from typing import Annotated
import gradio as gr
from huggingface_hub import InferenceClient
from app import _log_call_end, _log_call_start, _truncate_for_log
from ._docstrings import autodoc
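
# Prefer a read-scoped token when available; fall back to the general HF token.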
HF_VIDEO_TOKEN = os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN")
# Single source of truth for the LLM-facing tool description
TOOL_SUMMARY = (
"Generate a short MP4 video from a text prompt via Hugging Face serverless inference; "
"control model, steps, guidance, seed, size, fps, and duration; returns a temporary MP4 file path. "
"Return the generated media to the user in this format ``."
)
def _write_video_tmp(data_iter_or_bytes: object, suffix: str = ".mp4") -> str:
    """Write provider video output to a temp file and return its path.

    Accepts raw bytes, file-like objects, response objects exposing
    ``.content``, or iterables of byte chunks; cleans up on failure.
    """
    fd, fname = tempfile.mkstemp(suffix=suffix)
    try:
        with os.fdopen(fd, "wb") as file:
            if isinstance(data_iter_or_bytes, (bytes, bytearray)):
                file.write(data_iter_or_bytes)
            elif hasattr(data_iter_or_bytes, "read"):
                file.write(data_iter_or_bytes.read())
            elif hasattr(data_iter_or_bytes, "content"):
                file.write(data_iter_or_bytes.content)  # type: ignore[attr-defined]
            elif hasattr(data_iter_or_bytes, "__iter__") and not isinstance(data_iter_or_bytes, (str, dict)):
                for chunk in data_iter_or_bytes:  # type: ignore[assignment]
                    if chunk:
                        file.write(chunk)
            else:
                raise gr.Error("Unsupported video data type returned by provider.")
    except Exception:
        # Remove the partially written file before re-raising.
        try:
            os.remove(fname)
        except Exception:
            pass
        raise
    return fname
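
# Illustrative usage sketch (not executed anywhere in this module) showing the
# provider return shapes _write_video_tmp handles; the byte values and file
# names below are placeholders:
#
#     _write_video_tmp(b"<mp4 bytes>")                 # raw bytes
#     _write_video_tmp(open("clip.mp4", "rb"))         # file-like with .read()
#     _write_video_tmp(iter([b"chunk1", b"chunk2"]))   # iterable of byte chunks
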
@autodoc(
    summary=TOOL_SUMMARY,
)
def Generate_Video(
    prompt: Annotated[str, "Text description of the video to generate (e.g., 'a red fox running through a snowy forest at sunrise')."],
    model_id: Annotated[str, "Hugging Face model id in the form 'creator/model-name'. Defaults to Wan-AI/Wan2.2-T2V-A14B."] = "Wan-AI/Wan2.2-T2V-A14B",
    negative_prompt: Annotated[str, "What should NOT appear in the video."] = "",
    steps: Annotated[int, "Number of denoising steps (1–100). Higher can improve quality but is slower."] = 25,
    cfg_scale: Annotated[float, "Guidance scale (1–20). Higher = follow the prompt more closely, lower = more creative."] = 3.5,
    seed: Annotated[int, "Random seed for reproducibility. Use -1 for a random seed per call."] = -1,
    width: Annotated[int, "Output width in pixels (multiples of 8 recommended)."] = 768,
    height: Annotated[int, "Output height in pixels (multiples of 8 recommended)."] = 768,
    fps: Annotated[int, "Frames per second of the output video (e.g., 24)."] = 24,
    duration: Annotated[float, "Target duration in seconds (provider/model dependent, commonly 2–6s)."] = 4.0,
) -> str:
    _log_call_start(
        "Generate_Video",
        prompt=_truncate_for_log(prompt, 160),
        model_id=model_id,
        steps=steps,
        cfg_scale=cfg_scale,
        fps=fps,
        duration=duration,
        size=f"{width}x{height}",
    )
    if not prompt or not prompt.strip():
        _log_call_end("Generate_Video", "error=empty prompt")
        raise gr.Error("Please provide a non-empty prompt.")
    # Try providers in order; keep the last error for diagnostics if all fail.
    providers = ["auto", "replicate", "fal-ai"]
    last_error: Exception | None = None
    parameters = {
        "negative_prompt": negative_prompt or None,
        "num_inference_steps": steps,
        "guidance_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1_000_000_000),
        "width": width,
        "height": height,
        "fps": fps,
        "duration": duration,
    }
    for provider in providers:
        try:
            client = InferenceClient(api_key=HF_VIDEO_TOKEN, provider=provider)
            if hasattr(client, "text_to_video"):
                num_frames = int(duration * fps) if duration and fps else None
                # Size/fps/duration are not first-class text_to_video arguments,
                # so pass them through extra_body for providers that accept them.
                extra_body = {}
                if width:
                    extra_body["width"] = width
                if height:
                    extra_body["height"] = height
                if fps:
                    extra_body["fps"] = fps
                if duration:
                    extra_body["duration"] = duration
                result = client.text_to_video(
                    prompt=prompt,
                    model=model_id,
                    guidance_scale=cfg_scale,
                    negative_prompt=[negative_prompt] if negative_prompt else None,
                    num_frames=num_frames,
                    num_inference_steps=steps,
                    seed=parameters["seed"],
                    extra_body=extra_body if extra_body else None,
                )
            else:
                # Older huggingface_hub clients: fall back to a raw POST payload.
                result = client.post(
                    model=model_id,
                    json={"inputs": prompt, "parameters": {k: v for k, v in parameters.items() if v is not None}},
                )
            path = _write_video_tmp(result, suffix=".mp4")
            try:
                size = os.path.getsize(path)
            except Exception:
                size = -1
            _log_call_end("Generate_Video", f"provider={provider} path={os.path.basename(path)} bytes={size}")
            return path
        except Exception as exc:  # pylint: disable=broad-except
            last_error = exc
            continue
    msg = str(last_error) if last_error else "Unknown error"
    lowered = msg.lower()
    # Log the failure before mapping it to a user-facing error, so every exit
    # path is recorded (the specific raises below previously skipped logging).
    _log_call_end("Generate_Video", f"error={_truncate_for_log(msg, 200)}")
    if "404" in msg:
        raise gr.Error(f"Model not found or unavailable: {model_id}. Check the id and HF token access.")
    if "503" in msg:
        raise gr.Error("The model is warming up. Please try again shortly.")
    if "401" in msg or "403" in msg:
        raise gr.Error("Please duplicate the space and provide a `HF_READ_TOKEN` to enable Image and Video Generation.")
    if ("api_key" in lowered) or ("hf auth login" in lowered) or ("unauthorized" in lowered) or ("forbidden" in lowered):
        raise gr.Error("Please duplicate the space and provide a `HF_READ_TOKEN` to enable Image and Video Generation.")
    raise gr.Error(f"Video generation failed: {msg}")
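
# Hypothetical direct call, for illustration only (requires a valid
# HF_READ_TOKEN/HF_TOKEN and provider access; the returned temp path varies):
#
#     path = Generate_Video(
#         prompt="a red fox running through a snowy forest at sunrise",
#         steps=25,
#         cfg_scale=3.5,
#         fps=24,
#         duration=4.0,
#     )
#     # -> "/tmp/tmpXXXXXXXX.mp4"
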
def build_interface() -> gr.Interface:
    return gr.Interface(
        fn=Generate_Video,
        inputs=[
            gr.Textbox(label="Prompt", placeholder="Enter a prompt for the video", lines=2),
            gr.Textbox(
                label="Model",
                value="Wan-AI/Wan2.2-T2V-A14B",
                placeholder="creator/model-name",
                max_lines=1,
                info="<a href=\"https://huggingface.co/models?pipeline_tag=text-to-video&inference_provider=nebius,cerebras,novita,fireworks-ai,together,fal-ai,groq,featherless-ai,nscale,hyperbolic,sambanova,cohere,replicate,scaleway,publicai,hf-inference&sort=trending\" target=\"_blank\" rel=\"noopener noreferrer\">Browse models</a>",
            ),
            gr.Textbox(label="Negative Prompt", value="", lines=2),
            gr.Slider(minimum=1, maximum=100, value=25, step=1, label="Steps"),
            gr.Slider(minimum=1.0, maximum=20.0, value=3.5, step=0.1, label="CFG Scale"),
            gr.Slider(minimum=-1, maximum=1_000_000_000, value=-1, step=1, label="Seed (-1 = random)"),
            gr.Slider(minimum=64, maximum=1920, value=768, step=8, label="Width"),
            gr.Slider(minimum=64, maximum=1920, value=768, step=8, label="Height"),
            gr.Slider(minimum=4, maximum=60, value=24, step=1, label="FPS"),
            gr.Slider(minimum=1.0, maximum=10.0, value=4.0, step=0.5, label="Duration (s)"),
        ],
        outputs=gr.Video(label="Generated Video", show_download_button=True, format="mp4"),
        title="Generate Video",
        description=(
            "<div style=\"text-align:center\">Generate short videos via Hugging Face serverless inference. "
            "Default model is Wan2.2-T2V-A14B.</div>"
        ),
        api_description=TOOL_SUMMARY,
        flagging_mode="never",
        show_api=bool(os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN")),
    )

__all__ = ["Generate_Video", "build_interface"]