Update app.py

app.py CHANGED
@@ -2,6 +2,8 @@ import streamlit as st
 import os
 import requests
 from dotenv import load_dotenv # Only needed if using a .env file
+import re # To help clean up leading whitespace
+
 
 # Langchain and HuggingFace
 from langchain.vectorstores import Chroma
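The new `re` import is annotated as a whitespace-cleanup helper, though the hunks below end up calling `str.strip()` directly. As a rough sketch of what the import could support (the helper name is hypothetical, not part of the commit):

import re

# Hypothetical helper (not in the commit) showing what the new `re` import
# could be used for: removing leading whitespace left behind after a
# boilerplate prefix is sliced off the model's answer.
def clean_leading_whitespace(text: str) -> str:
    return re.sub(r"^\s+", "", text)  # ^\s+ = whitespace anchored at the start

print(clean_leading_whitespace("   the answer"))  # -> "the answer"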
@@ -68,36 +70,36 @@ if "messages" not in st.session_state:
     st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]
 
 # Function for generating response using the last three conversations
-def generate_response(prompt_input):
-    # Initialize result
-    result = ''
-
-    # Prepare conversation history: get the last 3 user and assistant messages
-    conversation_history = ""
-    recent_messages = st.session_state.messages[-3:] # Last 3 user and assistant exchanges (each exchange is 2 messages)
-
-    for message in recent_messages:
-        conversation_history += f"{message['role']}: {message['content']}\n"
-
-    # Append the current user prompt to the conversation history
-    conversation_history += f"user: {prompt_input}\n"
-
-    # Invoke chain with the truncated conversation history
-    res = st.session_state.chain.invoke(conversation_history)
-
-    # Process response (as in the original code)
-    if res['result'].startswith('According to the provided context, '):
-        res['result'] = res['result'][35:]
-        res['result'] = res['result'][0].upper() + res['result'][1:]
-    elif res['result'].startswith('Based on the provided context, '):
-        res['result'] = res['result'][31:]
-        res['result'] = res['result'][0].upper() + res['result'][1:]
-    elif res['result'].startswith('According to the provided text, '):
-        res['result'] = res['result'][34:]
-        res['result'] = res['result'][0].upper() + res['result'][1:]
-    elif res['result'].startswith('According to the context, '):
-        res['result'] = res['result'][26:]
-        res['result'] = res['result'][0].upper() + res['result'][1:]
+# def generate_response(prompt_input):
+#     # Initialize result
+#     result = ''
+#
+#     # Prepare conversation history: get the last 3 user and assistant messages
+#     conversation_history = ""
+#     recent_messages = st.session_state.messages[-3:] # Last 3 user and assistant exchanges (each exchange is 2 messages)
+#
+#     for message in recent_messages:
+#         conversation_history += f"{message['role']}: {message['content']}\n"
+#
+#     # Append the current user prompt to the conversation history
+#     conversation_history += f"user: {prompt_input}\n"
+#
+#     # Invoke chain with the truncated conversation history
+#     res = st.session_state.chain.invoke(conversation_history)
+#
+#     # Process response (as in the original code)
+#     if res['result'].startswith('According to the provided context, '):
+#         res['result'] = res['result'][35:]
+#         res['result'] = res['result'][0].upper() + res['result'][1:]
+#     elif res['result'].startswith('Based on the provided context, '):
+#         res['result'] = res['result'][31:]
+#         res['result'] = res['result'][0].upper() + res['result'][1:]
+#     elif res['result'].startswith('According to the provided text, '):
+#         res['result'] = res['result'][34:]
+#         res['result'] = res['result'][0].upper() + res['result'][1:]
+#     elif res['result'].startswith('According to the context, '):
+#         res['result'] = res['result'][26:]
+#         res['result'] = res['result'][0].upper() + res['result'][1:]
 
 
 # result += res['result']
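One note on the commented-out history logic above: `st.session_state.messages[-3:]` takes the last three individual messages, not the last three user/assistant exchanges described by the inline comment (three exchanges would be six messages, i.e. `[-6:]`). A self-contained sketch of the windowing, with hypothetical sample data:

# Sketch of the history window from the commented-out code; the sample
# messages are hypothetical, the slice mirrors the old recent_messages line.
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello! How may I help you today?"},
    {"role": "user", "content": "What does the retriever do?"},
    {"role": "assistant", "content": "It fetches relevant chunks."},
]

conversation_history = ""
for message in messages[-3:]:  # last 3 *messages* (~1.5 exchanges), not 3 exchanges
    conversation_history += f"{message['role']}: {message['content']}\n"
print(conversation_history)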
@@ -157,6 +159,7 @@ def generate_response(prompt_input):
 # return result_text
 
 # return res['result']
+
 def generate_response(prompt_input):
     # Retrieve vector database context using ONLY the current user input
     retriever = st.session_state.chain.retriever
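The rewritten `generate_response` (only its opening lines appear as context here) pulls the retriever off the chain so vector search is driven by the current prompt alone rather than by the concatenated history. A minimal sketch of that pattern, assuming a LangChain RetrievalQA-style chain whose retriever exposes the classic `get_relevant_documents` API; the diff itself does not show this call:

# Sketch only: the chain object and the retrieval call are assumptions,
# not shown in the diff.
def retrieve_context(chain, prompt_input: str) -> str:
    retriever = chain.retriever
    # Query the vector store with ONLY the current user input
    docs = retriever.get_relevant_documents(prompt_input)
    return "\n\n".join(doc.page_content for doc in docs)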
@@ -182,14 +185,19 @@ def generate_response(prompt_input):
 
     # Process the response text
     result_text = res['result']
+
+    # Clean up prefixing phrases and capitalize the first letter
     if result_text.startswith('According to the provided context, '):
-        result_text = result_text[35:]
+        result_text = result_text[35:].strip()
     elif result_text.startswith('Based on the provided context, '):
-        result_text = result_text[31:]
+        result_text = result_text[31:].strip()
     elif result_text.startswith('According to the provided text, '):
-        result_text = result_text[34:]
+        result_text = result_text[34:].strip()
     elif result_text.startswith('According to the context, '):
-        result_text = result_text[26:]
+        result_text = result_text[26:].strip()
+
+    # Ensure the first letter is uppercase
+    result_text = result_text[0].upper() + result_text[1:] if result_text else result_text
 
     # Extract and format sources (if available)
     sources = []
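The slice offsets here are hard-coded prefix lengths: 35, 31, and 26 match their strings, but 'According to the provided text, ' is 32 characters, so `result_text[34:]` also drops the first two characters of the actual answer. A hedged alternative sketch (not from the commit) that derives the offset from the prefix itself while keeping the new strip-and-capitalize behavior:

# Alternative sketch: compute the slice from len(prefix) so the offsets
# can never drift out of sync with the prefix strings.
PREFIXES = (
    'According to the provided context, ',
    'Based on the provided context, ',
    'According to the provided text, ',
    'According to the context, ',
)

def strip_boilerplate(result_text: str) -> str:
    for prefix in PREFIXES:
        if result_text.startswith(prefix):
            result_text = result_text[len(prefix):].strip()
            break
    # Capitalize the first letter, as the committed code does
    return result_text[0].upper() + result_text[1:] if result_text else result_text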
@@ -207,8 +215,6 @@ def generate_response(prompt_input):
 
     return result_text
 
-
-
 # Display chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
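The trailing context shows Streamlit's standard chat-rendering loop. The body under the `with` block is cut off by the diff, so this sketch assumes a plain `st.write` of the stored content:

import streamlit as st

# Assumed continuation of the truncated context: render each stored message
# inside a chat bubble keyed by its role.
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you today?"}]

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])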