import streamlit as st
from bs4 import BeautifulSoup
from openai import OpenAI
from dotenv import load_dotenv
import os

# ==== Load API Key from .env ====
load_dotenv()
# The variable name here must match the entry in your .env file
hf_api_key = os.getenv("HUGGINGFACE_API_KEY")
if not hf_api_key:
    st.error("❌ API key not found. Please set HUGGINGFACE_API_KEY in your .env file.")
    st.stop()

# ==== Initialize Client ====
client = OpenAI(
    base_url="https://router.huggingface.co/v1",
    api_key=hf_api_key,
)

# ==== Streamlit Page Setup ====
st.set_page_config(page_title="OSS ChatGPT", layout="wide")
st.title("🤖 ChatGPT")

# ==== Sidebar ====
st.sidebar.title("🛠️ Settings")
model_choice = st.sidebar.selectbox("Choose a model", [
    "openai/gpt-oss-20b:hyperbolic",
    "openai/gpt-oss-120b:hyperbolic",
])
if st.sidebar.button("🧹 Clear Chat"):
    st.session_state.messages = []
    st.rerun()

# ==== Session Initialization ====
if "messages" not in st.session_state:
    st.session_state.messages = []

# ==== Thinking-Tag Markers ====
# The parsing below assumes the model wraps its reasoning in <think>...</think>
# tags; adjust these constants if your model emits a different format.
THINK_OPEN = "<think>"
THINK_CLOSE = "</think>"

# ==== LaTeX Rendering Helper ====
def render_markdown_with_latex(text: str):
    # Inject a MathJax loader so LaTeX in the markdown can render client-side.
    # The CDN URL below is one common way to load MathJax 3 (an assumption,
    # since the original script tag was not preserved).
    mathjax_script = """
    <script src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js" async></script>
    """
    st.markdown(mathjax_script, unsafe_allow_html=True)
    st.markdown(text, unsafe_allow_html=True)

# ==== Display Chat History ====
for i, msg in enumerate(st.session_state.messages):
    with st.chat_message(msg["role"]):
        if msg["role"] == "assistant":
            # Check if the content contains a thinking block
            if msg["content"].startswith(THINK_OPEN) and THINK_CLOSE in msg["content"]:
                # Extract thinking and response parts
                thinking_part = msg["content"].split(THINK_OPEN)[1].split(THINK_CLOSE)[0].strip()
                response_part = msg["content"].split(THINK_CLOSE)[1].strip()

                # Display the main response
                render_markdown_with_latex(response_part)

                # Create expander for the thinking part
                with st.expander("💭 Show Thinking"):
                    st.markdown(thinking_part, unsafe_allow_html=True)
            else:
                render_markdown_with_latex(msg["content"])
        else:
            # Display user messages
            render_markdown_with_latex(msg["content"])

# ==== Chat Input ====
if prompt := st.chat_input("Type your message...", key="unique_chat_input_key"):
    # Log the incoming prompt to the terminal for debugging
    print(f"=> {prompt}")
    with st.chat_message("user"):
        st.markdown(prompt)

    # Add user message to history
    st.session_state.messages.append({"role": "user", "content": prompt})

    try:
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            with st.spinner("Thinking..."):
                completion = client.chat.completions.create(
                    model=model_choice,
                    messages=[
                        {"role": m["role"], "content": m["content"]}
                        for m in st.session_state.messages
                    ],
                )
            raw = completion.choices[0].message.content

            # Parse thinking and response
            if THINK_OPEN in raw and THINK_CLOSE in raw:
                thinking_part = raw.split(THINK_OPEN)[1].split(THINK_CLOSE)[0].strip()
                response_part = raw.split(THINK_CLOSE)[1].strip()

                # Strip any HTML from the main response before displaying it
                clean_response = BeautifulSoup(response_part, "html.parser").get_text()

                with st.expander("💭 Show Thinking"):
                    st.markdown(thinking_part, unsafe_allow_html=True)
                response_placeholder.markdown(clean_response)

                # Store both parts in history so the display loop above can re-split them
                full_content = f"{THINK_OPEN}{thinking_part}{THINK_CLOSE}{clean_response}"
            else:
                clean_response = BeautifulSoup(raw, "html.parser").get_text()
                response_placeholder.markdown(clean_response)
                full_content = clean_response

        # Add assistant response to history
        st.session_state.messages.append({"role": "assistant", "content": full_content})
    except Exception as e:
        error_msg = f"❌ Error: {str(e)}"
        with st.chat_message("assistant"):
            st.error(error_msg)
        st.session_state.messages.append({"role": "assistant", "content": error_msg})
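
# ==== Running the App (usage sketch) ====
# A minimal sketch of how this script is typically run; the file name "app.py"
# and the placeholder token value are assumptions, not from the source.
#
# 1. Create a .env file next to this script containing the key read above:
#        HUGGINGFACE_API_KEY=hf_xxxxxxxxxxxxxxxxxxxx
# 2. Install the dependencies matching the imports at the top:
#        pip install streamlit beautifulsoup4 openai python-dotenv
# 3. Start the Streamlit server:
#        streamlit run app.py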