# Hugging Face Spaces page metadata (scrape residue): Space status was "Runtime error".
# app.py (Final version with live logging and corrections)
import gradio as gr
import uuid
import subprocess
import threading
import os
import time
import sys
import re
import traceback
import urllib.request

# Add the project root to the Python path so local modules resolve no matter
# which working directory the app is launched from.
sys.path.insert(0, os.getcwd())

# --- Download Kokoro TTS models on first start if they are missing ---
model_dir = "models"
kokoro_model_path = os.path.join(model_dir, "kokoro-v0_19.onnx")
kokoro_voices_path = os.path.join(model_dir, "voices.bin")

if not os.path.exists(kokoro_model_path) or not os.path.exists(kokoro_voices_path):
    print("Downloading Kokoro TTS models...")
    os.makedirs(model_dir, exist_ok=True)
    # stdlib urllib replaces the previous `os.system("wget -O ...")` calls:
    # portable (no external wget binary) and download failures raise instead
    # of silently returning a nonzero shell status.
    _kokoro_base = "https://github.com/thewh1teagle/kokoro-onnx/releases/download/model-files"
    for _url, _dest in (
        (f"{_kokoro_base}/kokoro-v0_19.onnx", kokoro_model_path),
        (f"{_kokoro_base}/voices.bin", kokoro_voices_path),
    ):
        urllib.request.urlretrieve(_url, _dest)
    print("Model download complete.")

# In-memory registry mapping task_id -> {'status', 'model', 'log', ...}.
# NOTE(review): unsynchronized shared state mutated from worker threads;
# acceptable for a single-process demo app.
tasks = {}
def run_video_generation(task_id: str, topic: str, context: str, model: str):
    """Run generate_video.py in a subprocess and stream its output into the task log.

    Mutates the shared ``tasks[task_id]`` entry in place: sets ``'status'`` to
    ``'running'``, then to ``'completed'`` (adding ``'video_path'``) or
    ``'failed'`` (adding ``'error'``). Intended to run in a background thread.

    Args:
        task_id: Key into the module-level ``tasks`` dict.
        topic:   Video topic; also sanitized into the output file prefix.
        context: Free-form context passed through to the generator script.
        model:   Model identifier passed through to the generator script.
    """
    tasks[task_id]['status'] = 'running'
    tasks[task_id]['log'] = 'Process started...\n'

    # Sanitize the topic into a filesystem-safe prefix (lowercase + underscores).
    file_prefix = re.sub(r'[^a-z0-9_]+', '_', topic.lower())
    # generate_video.py creates this directory inside the general 'output' folder.
    output_dir = os.path.join("output", file_prefix)

    command = [
        # sys.executable (not bare "python"): guarantees the same interpreter /
        # virtualenv as this app. '-u' keeps the child's output unbuffered so
        # log lines arrive in real time.
        sys.executable, "-u", "generate_video.py",
        "--model", model,
        "--topic", topic,
        "--context", context,
        "--output_dir", output_dir,
        # Langfuse stays disabled by not including the --use_langfuse flag.
    ]
    try:
        process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr so one stream carries everything
            text=True,
            bufsize=1,  # line-buffered in text mode: lines surface as printed
        )
        # Stream output line-by-line in real time.
        for line in process.stdout:
            print(line, end='')  # mirror to the Hugging Face console logs
            tasks[task_id]['log'] += line
        process.wait()  # wait for the child to finish and set returncode

        if process.returncode == 0:
            # Success is only declared if the final combined video exists.
            final_video_path = os.path.join(output_dir, f"{file_prefix}_combined.mp4")
            if os.path.exists(final_video_path):
                tasks[task_id]['status'] = 'completed'
                tasks[task_id]['video_path'] = final_video_path
                tasks[task_id]['log'] += f"\nβ Success! Video available at: {final_video_path}"
            else:
                tasks[task_id]['status'] = 'failed'
                tasks[task_id]['error'] = "Script finished, but the final combined video file was not found."
                tasks[task_id]['log'] += f"\nβ Error: Output video not found at {final_video_path}"
        else:
            tasks[task_id]['status'] = 'failed'
            tasks[task_id]['error'] = f"Process failed with return code {process.returncode}."
            tasks[task_id]['log'] += "\nβ Error: Process failed. Check logs for details."
    except Exception as e:
        # Broad catch is deliberate: this is a thread boundary, and the failure
        # must be recorded in the task log rather than killing the thread silently.
        print(f"Caught an exception: {e}")
        tasks[task_id]['status'] = 'failed'
        tasks[task_id]['error'] = str(e)
        tasks[task_id]['log'] += f"\nβ An exception occurred: {traceback.format_exc()}"
def start_generation(topic: str, context: str, model: str):
    """Validate inputs, register a task, and launch generation in a background thread.

    Args:
        topic:   Video topic entered by the user.
        context: Supporting context for the topic.
        model:   Selected model identifier.

    Returns:
        Tuple of (status_message, task_id) feeding the two Gradio output
        textboxes; task_id is '' when validation fails.
    """
    if not all([topic, context, model]):
        return "Topic, Context, and Model cannot be empty.", ""

    task_id = str(uuid.uuid4())
    tasks[task_id] = {'status': 'queued', 'model': model, 'log': ''}

    worker = threading.Thread(
        target=run_video_generation,
        args=(task_id, topic, context, model),
        daemon=True,  # fix: a stuck generation must not block interpreter shutdown
    )
    worker.start()
    return f"β Task started with ID: {task_id}. Go to 'Check Status' tab to monitor progress.", task_id
def check_status(task_id: str):
    """Look up a task and report its progress.

    Args:
        task_id: ID returned by start_generation.

    Returns:
        Tuple of (status_message, video_path_or_None, log_text) feeding the
        status textbox, the video player, and the log textbox respectively.
    """
    if not task_id:
        return "Please provide a Task ID.", None, "Please enter a Task ID above and click 'Check Status'."

    task = tasks.get(task_id)
    if not task:
        return "Task not found.", None, f"No task found with ID: {task_id}"

    status = task.get('status')
    model = task.get('model', 'Unknown')
    log = task.get('log', 'No logs yet...')

    if status == 'completed':
        return f"β Status: {status} (Model: {model})", task.get('video_path'), log
    if status == 'failed':
        # Fix: the recorded error was fetched but never shown to the user.
        error = task.get('error', 'Unknown error')
        return f"β Status: {status} (Model: {model}) - {error}", None, log
    # queued / running
    return f"π Status: {status} (Model: {model})", None, log
# Create the Gradio interface
with gr.Blocks(css="footer {display: none !important}", title="Theorem Explain Agent") as demo:
    gr.Markdown("# π Theorem Explain Agent: Video Generation")
    gr.Markdown("Generate educational videos explaining mathematical theorems and concepts. This may take several minutes.")

    with gr.Tab("π Start Generation"):
        gr.Markdown("### 1. Enter the details for your video")
        model_input = gr.Dropdown(
            label="Model",
            choices=["gemini/gemini-1.5-flash-001", "gemini/gemini-1.5-pro-002"],
            value="gemini/gemini-1.5-flash-001",
            info="Select the AI model for content generation."
        )
        topic_input = gr.Textbox(label="Topic", placeholder="e.g., The Pythagorean Theorem")
        context_input = gr.Textbox(label="Context", placeholder="A short explanation of the theorem.", lines=3)
        start_button = gr.Button("π¬ Generate Video", variant="primary")
        gr.Markdown("### 2. Monitor your task")
        with gr.Row():
            status_output = gr.Textbox(label="Status", interactive=False)
            task_id_output = gr.Textbox(label="Task ID", interactive=False)

    with gr.Tab("π Check Status & View Video"):
        gr.Markdown("### Paste your Task ID to check progress and view the final video")
        with gr.Row():
            task_id_input = gr.Textbox(label="Task ID", placeholder="Enter the Task ID you received")
            check_button = gr.Button("π Check Status", variant="secondary")
        status_display = gr.Textbox(label="Current Status", interactive=False)
        video_output = gr.Video(label="Generated Video", interactive=False)
        log_display = gr.Textbox(label="Live Generation Logs", lines=15, interactive=False)

    # Connect the functions to the interface
    start_button.click(
        fn=start_generation,
        inputs=[topic_input, context_input, model_input],
        outputs=[status_output, task_id_output]
    )
    check_button.click(
        fn=check_status,
        inputs=[task_id_input],
        outputs=[status_display, video_output, log_display],
        # Poll the status every 2 seconds while the client stays connected.
        every=2
    )

# Fix: `every=` on an event listener requires the queue to be enabled;
# without demo.queue() Gradio raises at startup (a likely cause of the
# Space's "Runtime error" status).
demo.queue()
# Launch the app
demo.launch()