Spaces:
Sleeping
Sleeping
| import os | |
| import streamlit as st | |
| import random | |
| import time | |
| from langchain.chains import RetrievalQA | |
| from langchain.chat_models import ChatOpenAI | |
| from langchain.document_loaders import DataFrameLoader | |
| from langchain.embeddings import OpenAIEmbeddings | |
| from langchain.vectorstores import Chroma | |
# --- Retrieval-QA setup -----------------------------------------------------
# Streamlit re-executes this entire script on every user interaction.  The
# embeddings client, Chroma store, LLM, and QA chain are expensive to build,
# so they are constructed once inside a cached factory instead of per rerun.
# (The previous module-level smoke-test query was removed: it issued one paid
# OpenAI call on every rerun.)
@st.cache_resource
def _build_qa_chain():
    """Build and cache the RetrievalQA chain backed by the local Chroma store.

    Returns:
        RetrievalQA: a chain answering queries against the documents
        persisted under ``./chroma_db``.
    """
    # NOTE(review): the key is read from the non-standard env var
    # "openai_token" (not OPENAI_API_KEY) — confirm this matches deployment.
    openai_api_key = os.getenv("openai_token")
    embedding = OpenAIEmbeddings(openai_api_key=openai_api_key)
    # Re-open the vector store persisted on disk by a prior ingestion run.
    vectordb = Chroma(persist_directory='./chroma_db',
                      embedding_function=embedding)
    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0,
                     openai_api_key=openai_api_key)
    return RetrievalQA.from_chain_type(llm, retriever=vectordb.as_retriever())


qa_chain = _build_qa_chain()
def response_generator(prompt):
    """Stream the retrieval-QA answer for *prompt* one word at a time.

    A short pause after each word emulates token-by-token streaming so the
    UI renders the reply progressively via ``st.write_stream``.
    """
    answer = qa_chain({"query": prompt})["result"]
    for token in answer.split():
        yield f"{token} "
        time.sleep(0.05)
st.title("Simple chat")

# Persist the conversation across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay the prior turns so the transcript survives each rerun.
for entry in st.session_state.messages:
    with st.chat_message(entry["role"]):
        st.markdown(entry["content"])

# Handle a new user turn, if one was submitted.
user_text = st.chat_input("What is up?")
if user_text:
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": user_text})
    with st.chat_message("user"):
        st.markdown(user_text)

    # Stream the assistant's reply into the chat container, then record it.
    with st.chat_message("assistant"):
        reply = st.write_stream(response_generator(user_text))
    st.session_state.messages.append({"role": "assistant", "content": reply})