```python
# app.py
import streamlit as st
from models import demo

# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)

# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Sidebar for model selection and parameters
with st.sidebar:
    st.header("Model Configuration")

    # Model selection
    selected_model = st.selectbox(
        "Choose Model",
        [
            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "deepseek-ai/DeepSeek-R1",
            "deepseek-ai/DeepSeek-R1-Zero"
        ],
        index=0
    )

    # System message
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    # Generation parameters
    max_tokens = st.slider(
        "Max Tokens",
        min_value=1,
        max_value=4000,
        value=512,
        step=10
    )
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=4.0,
        value=0.7,
        step=0.1
    )
    top_p = st.slider(
        "Top-p (nucleus sampling)",
        min_value=0.1,
        max_value=1.0,
        value=0.9,
        step=0.1
    )

# Main chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by ruslanmv.com - Choose your model and parameters in the sidebar")

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input
if prompt := st.chat_input("Type your message..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare full prompt with system message
    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"

    try:
        # Generate response using selected model
        with st.spinner("Generating response..."):
            # Updated parameter names to match model expectations
            response = demo.fn(
                full_prompt,
                max_length=max_tokens,  # Changed from max_new_tokens
                temperature=temperature,
                top_p=top_p
            )

        # Display assistant response
        with st.chat_message("assistant"):
            st.markdown(response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})
    except Exception as e:
        st.error(f"Error generating response: {str(e)}")
```
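The app imports `demo` from a local `models` module that is not shown above and calls it directly through `demo.fn(...)`. For orientation, here is a minimal sketch of what such a module might look like, assuming `demo` is a Gradio `Interface` (which exposes its wrapped callable as `.fn`) backed by Hugging Face's `InferenceClient`. The `generate` function name, the hard-coded model ID, and the mapping from the app's `max_length` onto `max_new_tokens` are all assumptions for illustration, not the author's actual implementation; note also that `app.py`'s `selected_model` would need to be threaded through to this module to actually switch models.

```python
# models.py -- hypothetical sketch, not the actual module from this post.
# Assumes `demo` is a Gradio Interface so that app.py can call `demo.fn(...)`.
import gradio as gr
from huggingface_hub import InferenceClient

# Assumed default model; app.py's `selected_model` is not wired in here.
client = InferenceClient("deepseek-ai/DeepSeek-R1-Distill-Qwen-32B")

def generate(prompt: str, max_length: int = 512,
             temperature: float = 0.7, top_p: float = 0.9) -> str:
    # huggingface_hub's text_generation API expects max_new_tokens,
    # so the app's max_length is mapped onto it here.
    return client.text_generation(
        prompt,
        max_new_tokens=max_length,
        temperature=temperature,
        top_p=top_p,
    )

# A Gradio Interface keeps the wrapped callable on `.fn`,
# which is what app.py invokes directly.
demo = gr.Interface(fn=generate, inputs="text", outputs="text")
```

With both files in the same directory, `streamlit run app.py` starts the chatbot locally.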