import streamlit as st
from huggingface_hub import InferenceClient
import os
from typing import Iterator

API_KEY = os.getenv("TOGETHER_API_KEY")
if not API_KEY:
    raise ValueError("API key is missing! Make sure TOGETHER_API_KEY is set in the Secrets.")
# Initialize the client with Together AI provider
def get_client():
    return InferenceClient(
        provider="together",
        api_key=API_KEY,
    )
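# Note: provider routing (here, Together AI) is supported in recent versions
# of huggingface_hub; the InferenceClient forwards chat-completion requests
# to the provider's hosted deployment of the requested model.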
def process_file(file) -> str:
    """Process uploaded file and return its content"""
    if file is None:
        return ""
    try:
        content = file.getvalue().decode('utf-8')
        return content
    except Exception as e:
        return f"Error reading file: {str(e)}"
def generate_response(
    message: str,
    history: list[tuple[str, str]],
    system_message: str,
    max_tokens: int,
    temperature: float,
    top_p: float,
    files=None
) -> Iterator[str]:
    """Generate streaming response from the model"""
    client = get_client()

    # Process multiple files if uploaded
    all_content = ""
    if files:
        file_contents = [process_file(file) for file in files]
        all_content = "\n\n".join([
            f"File {i+1} content:\n{content}"
            for i, content in enumerate(file_contents)
        ])
    if all_content:
        message = f"{all_content}\n\nUser message:\n{message}"
    messages = [{"role": "system", "content": system_message}]

    # Add conversation history
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Add current message
    messages.append({"role": "user", "content": message})
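    # The list now follows the OpenAI-style chat format expected by
    # client.chat.completions.create, e.g.:
    #   [{"role": "system", "content": "You are a friendly Chatbot."},
    #    {"role": "user", "content": "..."},
    #    {"role": "assistant", "content": "..."},
    #    {"role": "user", "content": "..."}]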
    try:
        stream = client.chat.completions.create(
            model="deepseek-ai/DeepSeek-R1",
            messages=messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        )
        for chunk in stream:
            if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                yield chunk.choices[0].delta.content
    except Exception as e:
        yield f"Error: {str(e)}"
def main():
    st.set_page_config(page_title="DeepSeek Chat", page_icon="💭", layout="wide")

    # Initialize session state for chat history
    if "messages" not in st.session_state:
        st.session_state.messages = []

    st.title("DeepSeek Chat with File Upload")
    st.markdown("Chat with the DeepSeek AI model. You can optionally upload files for the model to analyze.")
    # Sidebar for parameters
    with st.sidebar:
        st.header("Settings")
        system_message = st.text_area(
            "System Message",
            value="You are a friendly Chatbot.",
            height=100
        )
        max_tokens = st.slider(
            "Max Tokens",
            min_value=1,
            max_value=8192,
            value=8192,
            step=1
        )
        temperature = st.slider(
            "Temperature",
            min_value=0.0,  # was 0.1, which conflicted with the default value of 0.0
            max_value=4.0,
            value=0.0,
            step=0.1
        )
        top_p = st.slider(
            "Top-p (nucleus sampling)",
            min_value=0.1,
            max_value=1.0,
            value=0.95,
            step=0.05
        )
        uploaded_files = st.file_uploader(
            "Upload File (optional)",
            type=['txt', 'py', 'md', 'swift', 'java', 'js', 'ts', 'rb', 'go',
                  'php', 'c', 'cpp', 'h', 'hpp', 'cs', 'html', 'css', 'kt', 'svelte'],
            accept_multiple_files=True
        )
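        # With accept_multiple_files=True, file_uploader returns a (possibly
        # empty) list of UploadedFile objects rather than a single file, which
        # is why generate_response iterates over its `files` argument.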
    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])
    # Chat input
    if prompt := st.chat_input("What would you like to know?"):
        # Display user message
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)

        # Generate and display assistant response
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            full_response = ""

            # Pair past user/assistant turns for context; zip drops the
            # just-appended current prompt, which has no assistant reply yet
            history = [(msg["content"], next_msg["content"])
                       for msg, next_msg in zip(st.session_state.messages[::2], st.session_state.messages[1::2])]
            # Stream the response
            for response_chunk in generate_response(
                prompt,
                history,
                system_message,
                max_tokens,
                temperature,
                top_p,
                uploaded_files
            ):
                full_response += response_chunk
                response_placeholder.markdown(full_response + "▌")
            response_placeholder.markdown(full_response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": full_response})
if __name__ == "__main__":
    main()
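# To run locally (assuming this file is saved as app.py):
#   TOGETHER_API_KEY=... streamlit run app.py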