import gradio as gr

# Custom CSS for gradient background and styling
custom_css = """
.gradio-container {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 25%, #f093fb 50%, #4facfe 75%, #00f2fe 100%);
    background-size: 400% 400%;
    animation: gradient-animation 15s ease infinite;
    min-height: 100vh;
}

@keyframes gradient-animation {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}

.dark .gradio-container {
    background: linear-gradient(135deg, #1a1a2e 0%, #16213e 25%, #0f3460 50%, #533483 75%, #e94560 100%);
    background-size: 400% 400%;
    animation: gradient-animation 15s ease infinite;
}

/* Style for the main content area */
.main-container {
    background-color: rgba(255, 255, 255, 0.95);
    backdrop-filter: blur(10px);
    border-radius: 20px;
    padding: 20px;
    box-shadow: 0 8px 32px 0 rgba(31, 38, 135, 0.37);
    border: 1px solid rgba(255, 255, 255, 0.18);
}

.dark .main-container {
    background-color: rgba(30, 30, 30, 0.95);
    border: 1px solid rgba(255, 255, 255, 0.1);
}

/* Sidebar styling */
.sidebar {
    background-color: rgba(255, 255, 255, 0.9);
    backdrop-filter: blur(10px);
    border-radius: 15px;
    padding: 20px;
    margin: 10px;
}

.dark .sidebar {
    background-color: rgba(40, 40, 40, 0.9);
}

/* Chat interface styling */
.chat-container {
    height: 600px;
}
"""

def create_chat_interface(model_name):
    """Create a chat interface for the selected model"""
    # This creates the actual chat interface
    return gr.load(
        f"models/{model_name}",
        provider="fireworks-ai"
    )
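
# --- Illustrative sketch (not part of the original Space) --------------------
# create_chat_interface() above is never actually called, and the demo below
# only wires up a placeholder chat fn. One way a real handler *could* talk to
# the selected model is through huggingface_hub's InferenceClient; the provider
# name, token handling, and history format used here are assumptions rather
# than something the original code implements.
from huggingface_hub import InferenceClient

def chat_with_model(message, history, model_name="openai/gpt-oss-120b",
                    token=None, temperature=0.7, max_tokens=512):
    """Send one chat turn to the selected model via an inference provider."""
    client = InferenceClient(provider="fireworks-ai", token=token)

    # Rebuild the conversation, assuming Gradio's tuple-style (user, assistant) history
    messages = []
    for user_msg, assistant_msg in history or []:
        messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    response = client.chat_completion(
        messages=messages,
        model=model_name,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    return response.choices[0].message.content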

with gr.Blocks(fill_height=True, theme="Nymbo/Nymbo_Theme", css=custom_css) as demo:
    # State to track login status
    is_logged_in = gr.State(False)

    with gr.Row():
        with gr.Column(scale=1):
            with gr.Group(elem_classes="sidebar"):
                gr.Markdown("# 🚀 Inference Provider")
                gr.Markdown(
                    "This Space showcases the OpenAI GPT-OSS models, served through the "
                    "Fireworks AI inference provider. "
                    "Sign in with your Hugging Face account to use this API."
                )

                # Model selection dropdown
                model_dropdown = gr.Dropdown(
                    choices=[
                        "openai/gpt-oss-120b",
                        "openai/gpt-oss-20b"
                    ],
                    value="openai/gpt-oss-120b",
                    label="Select Model",
                    info="Choose between different model sizes"
                )

                # Login button
                login_button = gr.LoginButton("Sign in with Hugging Face", size="lg")

                # Status display
                status_text = gr.Markdown("❌ Not logged in", visible=True)

                # Additional options
                with gr.Accordion("⚙️ Advanced Options", open=False):
                    temperature = gr.Slider(
                        minimum=0,
                        maximum=2,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=4096,
                        value=512,
                        step=1,
                        label="Max Tokens"
                    )

                # Load model button
                load_button = gr.Button("🚀 Load Selected Model", variant="primary", size="lg")

        with gr.Column(scale=3):
            with gr.Group(elem_classes="main-container"):
                gr.Markdown("## 💬 Chat Interface")

                # Chat interface placeholder (the Gradio 4-era retry_btn/undo_btn/clear_btn
                # keyword arguments were removed in Gradio 5, so they are omitted here)
                chat_interface = gr.ChatInterface(
                    fn=lambda message, history: "Please sign in and load a model to start chatting.",
                    examples=["Hello! How are you?", "What can you help me with?", "Tell me a joke"],
                    elem_classes="chat-container"
                )
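
                # Note: the temperature and max_tokens sliders in the sidebar are not
                # connected to anything here; a real implementation could pass them to
                # the chat fn through gr.ChatInterface's additional_inputs argument.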

    # Handle login status: Gradio injects the OAuth profile into any event
    # function whose signature includes a `gr.OAuthProfile | None` parameter.
    def update_login_status(profile: gr.OAuthProfile | None):
        if profile is not None:
            return gr.update(value="✅ Logged in", visible=True), True
        return gr.update(value="❌ Not logged in", visible=True), False

    # Load the selected model
    def load_selected_model(model_name, logged_in):
        if not logged_in:
            gr.Warning("Please sign in first to use the models!")
            return

        gr.Info(f"Loading {model_name}... This may take a moment.")

        # Here you would implement the actual model loading. Note that gr.load()
        # cannot inject a new component into the UI from inside an event handler,
        # so this call only checks that the model can be loaded; the placeholder
        # chat interface above is left unchanged.
        try:
            gr.load(
                f"models/{model_name}",
                accept_token=True,
                provider="fireworks-ai"
            )
            gr.Success(f"Successfully loaded {model_name}!")
        except Exception as e:
            # gr.Error is an exception class and must be raised to be displayed
            raise gr.Error(f"Failed to load model: {str(e)}")

    # Refresh the login status whenever the page loads, e.g. after the OAuth
    # redirect triggered by the login button.
    demo.load(
        fn=update_login_status,
        inputs=None,
        outputs=[status_text, is_logged_in]
    )

    # Connect the load button
    load_button.click(
        fn=load_selected_model,
        inputs=[model_dropdown, is_logged_in],
        outputs=[]
    )

# Alternative approach: Direct loading with model selection
# Uncomment this if you want to use the original approach with modifications
"""
with gr.Blocks(fill_height=True, theme="Nymbo/Nymbo_Theme", css=custom_css) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Group(elem_classes="sidebar"):
                gr.Markdown("# 🚀 Inference Provider")
                gr.Markdown("This Space showcases OpenAI GPT-OSS models. Sign in to use.")

                model_choice = gr.Radio(
                    choices=["openai/gpt-oss-120b", "openai/gpt-oss-20b"],
                    value="openai/gpt-oss-120b",
                    label="Select Model"
                )

                button = gr.LoginButton("Sign in")

        with gr.Column(scale=3):
            with gr.Group(elem_classes="main-container"):
                # Default to 120b model
                gr.load("models/openai/gpt-oss-120b", accept_token=button, provider="fireworks-ai")
"""

demo.launch()
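
# Deployment note (not in the original file): gr.LoginButton relies on the
# Hugging Face OAuth integration, which is enabled for a Space by adding
# `hf_oauth: true` to the YAML metadata block of its README.md. Without it,
# signing in (and therefore loading a model) will not work as intended.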