# core/script_generator.py
import asyncio
import json
import uuid
from typing import List, Dict

import httpx

from config import OPENROUTER_API_KEY
from core.seed_manager import SeedManager

# Initialize the seed manager shared by all script-generation calls
seed_manager = SeedManager()
# ---------------- OPENROUTER LLM CALL ----------------
async def _call_openrouter_llm(prompt: str) -> str:
    """
    Calls the OpenRouter LLM to generate a proposed video script.
    Returns the raw script text.
    """
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {"Authorization": f"Bearer {OPENROUTER_API_KEY}"}
    payload = {
        # OpenRouter model IDs are provider-prefixed; gpt-4.1-mini balances
        # quality and cost for script generation
        "model": "openai/gpt-4.1-mini",
        "messages": [
            {"role": "system", "content": "You are a professional creative video script writer."},
            {"role": "user", "content": prompt},
        ],
        "max_tokens": 1500,
        "temperature": 0.7,
    }
    async with httpx.AsyncClient(timeout=60) as client:
        response = await client.post(url, json=payload, headers=headers)
        response.raise_for_status()
        data = response.json()
    # OpenRouter follows the OpenAI chat-completions schema: the reply text
    # lives in choices[0].message.content
    return data["choices"][0]["message"]["content"]
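# Usage sketch (illustrative, not part of the original pipeline): call from any
# async context, e.g.
#     raw_script = await _call_openrouter_llm("Create a video script for: a city at night.")
# A non-2xx response raises httpx.HTTPStatusError via raise_for_status().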
# ---------------- SCRIPT PROCESSING ----------------
def parse_script_to_scenes(script_text: str) -> List[Dict]:
    """
    Converts script text into scene + keyframe JSON.
    Each non-empty line is treated as one scene with a single keyframe.
    Assigns unique scene_ids and seeds.
    """
    scenes_json = []
    scene_counter = 1
    keyframe_counter = 1
    lines = [line.strip() for line in script_text.split("\n") if line.strip()]
    for line in lines:
        # Generate a unique scene_id and seed for this scene
        scene_id = scene_counter
        seed = seed_manager.generate_seed(scene_id)
        # Each line becomes one keyframe; keyframe numbering runs across the whole script
        scenes_json.append({
            "scene": scene_counter,
            "scene_id": scene_id,
            "keyframe_number": keyframe_counter,
            "description": line,
            "camera": "default",  # can be refined later (e.g. per-scene camera hints)
            "seed": seed,
        })
        keyframe_counter += 1
        scene_counter += 1
    return scenes_json
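# Illustrative output for a two-line script (values are examples only; the
# actual seed comes from SeedManager and the descriptions from the LLM):
# [
#   {"scene": 1, "scene_id": 1, "keyframe_number": 1,
#    "description": "Drone shot over a misty forest at dawn.",
#    "camera": "default", "seed": 482915},
#   {"scene": 2, "scene_id": 2, "keyframe_number": 2,
#    "description": "Close-up of dew on a spider web in golden light.",
#    "camera": "default", "seed": 901273},
# ]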
# ---------------- MAIN FUNCTION ----------------
async def generate_script_async(idea: str, user_confirmed: bool = True) -> List[Dict]:
    """
    Full pipeline for script generation:
    1. Generates a proposed script from the LLM
    2. If the user has not confirmed yet, returns the proposal for review
    3. Converts the confirmed script into scene + keyframe JSON
    """
    prompt = f"Create a professional video script for: {idea}. Write each scene in one line."
    raw_script = await _call_openrouter_llm(prompt)
    # Actual user confirmation can be integrated in the frontend
    if not user_confirmed:
        return [{"proposed_script": raw_script}]
    # Convert the approved script into structured scene/keyframe JSON
    scenes = parse_script_to_scenes(raw_script)
    return scenes
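# Two-step confirmation flow (sketch of how a frontend might drive this):
#     proposal = await generate_script_async(idea, user_confirmed=False)
#     # ... show proposal[0]["proposed_script"] to the user for approval ...
#     scenes = await generate_script_async(idea, user_confirmed=True)
# Note that the second call regenerates the script; caching the approved text
# and calling parse_script_to_scenes() on it directly avoids a second LLM call.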
def generate_script(idea: str, user_confirmed: bool = True) -> List[Dict]:
    """
    Synchronous wrapper for pipeline integration.
    """
    # asyncio.run() creates and closes its own event loop; use the async
    # variant directly if you are already inside a running loop.
    return asyncio.run(generate_script_async(idea, user_confirmed))
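# Minimal manual test (sketch): assumes OPENROUTER_API_KEY is set in config and
# network access to openrouter.ai is available; the idea string is just an example.
if __name__ == "__main__":
    scenes = generate_script("a 30-second teaser for a specialty coffee brand")
    print(json.dumps(scenes, indent=2, ensure_ascii=False))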