import os
import time

import streamlit as st
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
# OpenAI credentials and the embedding model shared by the index and the chain
openai_api_key = os.getenv("openai_token")
embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)

# Load the persisted Chroma index once and reuse it across Streamlit reruns
@st.cache_resource
def get_vectordb():
    return Chroma(persist_directory="./chroma_db", embedding_function=embedding)

vectordb = get_vectordb()
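
# The app assumes an already-built Chroma index under ./chroma_db. Below is a
# minimal sketch (not called by the app) of how such an index could be built
# from a pandas DataFrame, assuming a hypothetical `df` whose support articles
# live in a "text" column:
def build_vectordb(df):
    # Wrap each DataFrame row as a Document, using the "text" column as content
    loader = DataFrameLoader(df, page_content_column="text")
    docs = loader.load()
    # Embed the documents and persist the index to ./chroma_db
    db = Chroma.from_documents(docs, embedding=embedding, persist_directory="./chroma_db")
    db.persist()
    return db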

# Chat model used to answer questions
llm_name = "gpt-3.5-turbo"
llm = ChatOpenAI(model_name=llm_name, temperature=0.7,
                 openai_api_key=openai_api_key)

# Retrieval-augmented QA chain: answer from the 5 most relevant indexed chunks
qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=vectordb.as_retriever(search_kwargs={"k": 5})
)

# Stream the QA chain's answer word by word to mimic typing
def response_generator(prompt):
    response = qa_chain({"query": prompt})['result']
    for word in response.split():
        yield word + " "
        time.sleep(0.05)

st.title("Technical Support Chatbot")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("Enter your question here"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator(prompt))
    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})