""" Complete Configuration for OpenManus Production Deployment Includes: All model configurations, agent settings, category mappings, and service configurations """ import os from typing import Dict, List, Optional, Any from dataclasses import dataclass from enum import Enum @dataclass class ModelConfig: """Configuration for individual AI models""" name: str category: str api_endpoint: str max_tokens: int = 4096 temperature: float = 0.7 supported_formats: List[str] = None special_parameters: Dict[str, Any] = None rate_limit: int = 100 # requests per minute class CategoryConfig: """Configuration for model categories""" # Core AI Models - Text Generation (Qwen, DeepSeek, etc.) TEXT_GENERATION_MODELS = { # Qwen Models (35 models) "qwen/qwen-2.5-72b-instruct": ModelConfig( name="Qwen 2.5 72B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct", max_tokens=8192, temperature=0.7, ), "qwen/qwen-2.5-32b-instruct": ModelConfig( name="Qwen 2.5 32B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-32B-Instruct", max_tokens=8192, ), "qwen/qwen-2.5-14b-instruct": ModelConfig( name="Qwen 2.5 14B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-14B-Instruct", max_tokens=8192, ), "qwen/qwen-2.5-7b-instruct": ModelConfig( name="Qwen 2.5 7B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-7B-Instruct", ), "qwen/qwen-2.5-3b-instruct": ModelConfig( name="Qwen 2.5 3B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-3B-Instruct", ), "qwen/qwen-2.5-1.5b-instruct": ModelConfig( name="Qwen 2.5 1.5B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-1.5B-Instruct", ), "qwen/qwen-2.5-0.5b-instruct": ModelConfig( name="Qwen 2.5 0.5B Instruct", category="text-generation", api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-0.5B-Instruct", ), # ... (Add all 35 Qwen models) # DeepSeek Models (17 models) "deepseek-ai/deepseek-coder-33b-instruct": ModelConfig( name="DeepSeek Coder 33B Instruct", category="code-generation", api_endpoint="https://api-inference.huggingface.co/models/deepseek-ai/deepseek-coder-33b-instruct", max_tokens=8192, special_parameters={"code_focused": True}, ), "deepseek-ai/deepseek-coder-6.7b-instruct": ModelConfig( name="DeepSeek Coder 6.7B Instruct", category="code-generation", api_endpoint="https://api-inference.huggingface.co/models/deepseek-ai/deepseek-coder-6.7b-instruct", ), # ... (Add all 17 DeepSeek models) } # Image Editing Models (10 models) IMAGE_EDITING_MODELS = { "stabilityai/stable-diffusion-xl-refiner-1.0": ModelConfig( name="SDXL Refiner 1.0", category="image-editing", api_endpoint="https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-refiner-1.0", supported_formats=["image/png", "image/jpeg"], ), "runwayml/stable-diffusion-inpainting": ModelConfig( name="Stable Diffusion Inpainting", category="image-inpainting", api_endpoint="https://api-inference.huggingface.co/models/runwayml/stable-diffusion-inpainting", supported_formats=["image/png", "image/jpeg"], ), # ... 


class CategoryConfig:
    """Configuration for model categories"""

    # Core AI Models - Text Generation (Qwen, DeepSeek, etc.)
    TEXT_GENERATION_MODELS = {
        # Qwen Models (35 models)
        "qwen/qwen-2.5-72b-instruct": ModelConfig(
            name="Qwen 2.5 72B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
            max_tokens=8192,
            temperature=0.7,
        ),
        "qwen/qwen-2.5-32b-instruct": ModelConfig(
            name="Qwen 2.5 32B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-32B-Instruct",
            max_tokens=8192,
        ),
        "qwen/qwen-2.5-14b-instruct": ModelConfig(
            name="Qwen 2.5 14B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-14B-Instruct",
            max_tokens=8192,
        ),
        "qwen/qwen-2.5-7b-instruct": ModelConfig(
            name="Qwen 2.5 7B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-7B-Instruct",
        ),
        "qwen/qwen-2.5-3b-instruct": ModelConfig(
            name="Qwen 2.5 3B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-3B-Instruct",
        ),
        "qwen/qwen-2.5-1.5b-instruct": ModelConfig(
            name="Qwen 2.5 1.5B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-1.5B-Instruct",
        ),
        "qwen/qwen-2.5-0.5b-instruct": ModelConfig(
            name="Qwen 2.5 0.5B Instruct",
            category="text-generation",
            api_endpoint="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-0.5B-Instruct",
        ),
        # ... (Add all 35 Qwen models)
        # DeepSeek Models (17 models)
        "deepseek-ai/deepseek-coder-33b-instruct": ModelConfig(
            name="DeepSeek Coder 33B Instruct",
            category="code-generation",
            api_endpoint="https://api-inference.huggingface.co/models/deepseek-ai/deepseek-coder-33b-instruct",
            max_tokens=8192,
            special_parameters={"code_focused": True},
        ),
        "deepseek-ai/deepseek-coder-6.7b-instruct": ModelConfig(
            name="DeepSeek Coder 6.7B Instruct",
            category="code-generation",
            api_endpoint="https://api-inference.huggingface.co/models/deepseek-ai/deepseek-coder-6.7b-instruct",
        ),
        # ... (Add all 17 DeepSeek models)
    }

    # Image Editing Models (10 models)
    IMAGE_EDITING_MODELS = {
        "stabilityai/stable-diffusion-xl-refiner-1.0": ModelConfig(
            name="SDXL Refiner 1.0",
            category="image-editing",
            api_endpoint="https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-refiner-1.0",
            supported_formats=["image/png", "image/jpeg"],
        ),
        "runwayml/stable-diffusion-inpainting": ModelConfig(
            name="Stable Diffusion Inpainting",
            category="image-inpainting",
            api_endpoint="https://api-inference.huggingface.co/models/runwayml/stable-diffusion-inpainting",
            supported_formats=["image/png", "image/jpeg"],
        ),
        # ... (Add all 10 image editing models)
    }

    # TTS/STT Models (15 models)
    SPEECH_MODELS = {
        "microsoft/speecht5_tts": ModelConfig(
            name="SpeechT5 TTS",
            category="text-to-speech",
            api_endpoint="https://api-inference.huggingface.co/models/microsoft/speecht5_tts",
            supported_formats=["audio/wav", "audio/mp3"],
        ),
        "openai/whisper-large-v3": ModelConfig(
            name="Whisper Large v3",
            category="automatic-speech-recognition",
            api_endpoint="https://api-inference.huggingface.co/models/openai/whisper-large-v3",
            supported_formats=["audio/wav", "audio/mp3", "audio/flac"],
        ),
        # ... (Add all 15 speech models)
    }

    # Face Swap Models (6 models)
    FACE_SWAP_MODELS = {
        "deepinsight/insightface": ModelConfig(
            name="InsightFace",
            category="face-swap",
            api_endpoint="https://api-inference.huggingface.co/models/deepinsight/insightface",
            supported_formats=["image/png", "image/jpeg"],
        ),
        # ... (Add all 6 face swap models)
    }

    # Talking Avatar Models (9 models)
    AVATAR_MODELS = {
        "microsoft/DiT-XL-2-512": ModelConfig(
            name="DiT Avatar Generator",
            category="talking-avatar",
            api_endpoint="https://api-inference.huggingface.co/models/microsoft/DiT-XL-2-512",
            supported_formats=["video/mp4", "image/png"],
        ),
        # ... (Add all 9 avatar models)
    }

    # Arabic-English Interactive Models (12 models)
    ARABIC_ENGLISH_MODELS = {
        "aubmindlab/bert-base-arabertv02": ModelConfig(
            name="AraBERT v02",
            category="arabic-text",
            api_endpoint="https://api-inference.huggingface.co/models/aubmindlab/bert-base-arabertv02",
            special_parameters={"language": "ar-en"},
        ),
        "UBC-NLP/MARBERT": ModelConfig(
            name="MARBERT",
            category="arabic-text",
            api_endpoint="https://api-inference.huggingface.co/models/UBC-NLP/MARBERT",
            special_parameters={"language": "ar-en"},
        ),
        # ... (Add all 12 Arabic-English models)
    }
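

# The registry above only describes models; it does not call them. As an
# illustration of how a ModelConfig's endpoint and sampling settings might be
# consumed, the sketch below posts a text-generation request to the Hugging
# Face Inference API via `requests`. The function name, the HF_TOKEN lookup,
# and the payload shape (which only fits text-generation models) are
# assumptions for illustration, not part of this module's public interface.
def _example_text_generation_call(model: ModelConfig, prompt: str) -> Any:
    """Illustrative only: query a text-generation model through its api_endpoint."""
    import requests  # assumed to be available in the deployment environment

    headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN', '')}"}
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": model.max_tokens,
            "temperature": model.temperature,
        },
    }
    response = requests.post(model.api_endpoint, headers=headers, json=payload, timeout=60)
    response.raise_for_status()
    return response.json()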


class AgentConfig:
    """Configuration for AI Agents"""

    # Manus Agent Configuration
    MANUS_AGENT = {
        "name": "Manus",
        "description": "Versatile AI agent with 200+ models",
        "max_steps": 20,
        "max_observe": 10000,
        "system_prompt_template": """You are Manus, an advanced AI agent with access to 200+ specialized models.

Available categories:
- Text Generation (Qwen, DeepSeek, etc.)
- Image Editing & Generation
- Speech (TTS/STT)
- Face Swap & Avatar Generation
- Arabic-English Interactive Models
- Code Generation & Review
- Multimodal AI
- Document Processing
- 3D Generation
- Video Processing

User workspace: {directory}""",
        "tools": [
            "PythonExecute",
            "BrowserUseTool",
            "StrReplaceEditor",
            "AskHuman",
            "Terminate",
            "HuggingFaceModels",
        ],
        "model_preferences": {
            "text": "qwen/qwen-2.5-72b-instruct",
            "code": "deepseek-ai/deepseek-coder-33b-instruct",
            "image": "stabilityai/stable-diffusion-xl-refiner-1.0",
            "speech": "microsoft/speecht5_tts",
            "arabic": "aubmindlab/bert-base-arabertv02",
        },
    }


class ServiceConfig:
    """Configuration for all services"""

    # Cloudflare Services
    CLOUDFLARE_CONFIG = {
        "d1_database": {
            "enabled": True,
            "tables": ["users", "sessions", "agent_interactions", "model_usage"],
            "auto_migrate": True,
        },
        "r2_storage": {
            "enabled": True,
            "buckets": ["user-files", "generated-content", "model-cache"],
            "max_file_size": "100MB",
        },
        "kv_storage": {
            "enabled": True,
            "namespaces": ["sessions", "model-cache", "user-preferences"],
            "ttl": 86400,  # 24 hours
        },
        "durable_objects": {
            "enabled": True,
            "classes": ["ChatSession", "ModelRouter", "UserContext"],
        },
    }

    # Authentication Configuration
    AUTH_CONFIG = {
        "method": "mobile_password",
        "password_min_length": 8,
        "session_duration": 86400,  # 24 hours
        "max_concurrent_sessions": 5,
        "mobile_validation": {
            "international": True,
            "formats": ["+1234567890", "01234567890"],
        },
    }

    # Model Usage Configuration
    MODEL_CONFIG = {
        "rate_limits": {
            "free_tier": 100,  # requests per day
            "premium_tier": 1000,
            "enterprise_tier": 10000,
        },
        "fallback_models": {
            "text": ["qwen/qwen-2.5-7b-instruct", "qwen/qwen-2.5-3b-instruct"],
            "image": ["runwayml/stable-diffusion-v1-5"],
            "code": ["deepseek-ai/deepseek-coder-6.7b-instruct"],
        },
        "cache_settings": {"enabled": True, "ttl": 3600, "max_size": "1GB"},  # 1 hour
    }


class EnvironmentConfig:
    """Environment-specific configurations"""

    @staticmethod
    def get_production_config():
        """Get production environment configuration"""
        return {
            "environment": "production",
            "debug": False,
            "log_level": "INFO",
            "server": {"host": "0.0.0.0", "port": 7860, "workers": 4},
            "database": {"type": "sqlite", "url": "auth.db", "pool_size": 10},
            "security": {
                "secret_key": os.getenv("SECRET_KEY", "your-secret-key"),
                "cors_origins": ["*"],
                "rate_limiting": True,
            },
            "monitoring": {"metrics": True, "logging": True, "health_checks": True},
        }

    @staticmethod
    def get_development_config():
        """Get development environment configuration"""
        return {
            "environment": "development",
            "debug": True,
            "log_level": "DEBUG",
            "server": {"host": "127.0.0.1", "port": 7860, "workers": 1},
            "database": {"type": "sqlite", "url": "auth_dev.db", "pool_size": 2},
            "security": {
                "secret_key": "dev-secret-key",
                "cors_origins": ["http://localhost:*"],
                "rate_limiting": False,
            },
        }
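

# ServiceConfig.MODEL_CONFIG["fallback_models"] lists backup model ids per
# modality, but this module does not define how they are applied. The sketch
# below shows one possible selection policy (primary first, then fallbacks in
# order). The function name and the `is_available` callable are assumptions
# for illustration, not an existing API.
def _example_resolve_model(modality: str, primary: str, is_available) -> Optional[str]:
    """Illustrative only: pick the primary model if usable, else the first usable fallback."""
    candidates = [primary] + ServiceConfig.MODEL_CONFIG["fallback_models"].get(modality, [])
    for model_id in candidates:
        if is_available(model_id):  # is_available: callable(str) -> bool, supplied by the caller
            return model_id
    return None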


# Global configuration instance
class OpenManusConfig:
    """Main configuration class for OpenManus"""

    def __init__(self, environment: str = "production"):
        self.environment = environment
        self.categories = CategoryConfig()
        self.agent = AgentConfig()
        self.services = ServiceConfig()

        if environment == "production":
            self.env_config = EnvironmentConfig.get_production_config()
        else:
            self.env_config = EnvironmentConfig.get_development_config()

    def get_model_config(self, model_id: str) -> Optional[ModelConfig]:
        """Get configuration for a specific model"""
        all_models = {
            **self.categories.TEXT_GENERATION_MODELS,
            **self.categories.IMAGE_EDITING_MODELS,
            **self.categories.SPEECH_MODELS,
            **self.categories.FACE_SWAP_MODELS,
            **self.categories.AVATAR_MODELS,
            **self.categories.ARABIC_ENGLISH_MODELS,
        }
        return all_models.get(model_id)

    def get_category_models(self, category: str) -> Dict[str, ModelConfig]:
        """Get all models in a category"""
        if category == "text-generation":
            return self.categories.TEXT_GENERATION_MODELS
        elif category == "image-editing":
            return self.categories.IMAGE_EDITING_MODELS
        elif category in ["text-to-speech", "automatic-speech-recognition"]:
            return self.categories.SPEECH_MODELS
        elif category == "face-swap":
            return self.categories.FACE_SWAP_MODELS
        elif category == "talking-avatar":
            return self.categories.AVATAR_MODELS
        elif category == "arabic-text":
            return self.categories.ARABIC_ENGLISH_MODELS
        else:
            return {}

    def validate_config(self) -> bool:
        """Validate the configuration"""
        try:
            # Check required environment variables
            required_env = (
                ["CLOUDFLARE_API_TOKEN", "HF_TOKEN"]
                if self.environment == "production"
                else []
            )
            missing_env = [var for var in required_env if not os.getenv(var)]
            if missing_env:
                print(f"Missing required environment variables: {missing_env}")
                return False

            print(f"Configuration validated for {self.environment} environment")
            return True
        except Exception as e:
            print(f"Configuration validation failed: {e}")
            return False


# Create global config instance
config = OpenManusConfig(environment=os.getenv("ENVIRONMENT", "production"))
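

# A minimal usage sketch of the objects defined above (illustrative only; the
# model id and category shown are entries registered in CategoryConfig):
if __name__ == "__main__":
    config.validate_config()

    qwen = config.get_model_config("qwen/qwen-2.5-72b-instruct")
    if qwen is not None:
        print(f"{qwen.name}: {qwen.api_endpoint} (max_tokens={qwen.max_tokens})")

    speech_models = config.get_category_models("text-to-speech")
    print(f"Registered speech models: {list(speech_models)}")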