WIP

- app.py +107 -64
- sandbox/20240310 - CQA - Semantic Routing 1.ipynb +0 -0
- style.css +2 -2
- test.json +0 -0

app.py
CHANGED
@@ -140,79 +140,121 @@ async def chat(query,history,audience,sources,reports):
         "transform_query":("🔄️ Thinking step by step to answer the question",True),
         "retrieve_documents":("🔄️ Searching in the knowledge base",False),
     }
-
+
+    used_documents = []
     try:
         async for event in result:
-            if event["event"] == "on_chat_model_stream" and event["name"] != "transform_query": # if streaming answer
-                if start_streaming == False:
-                    start_streaming = True
-                    # history[-1] = (query,"")
-                    history.append(ChatMessage(role="assistant", content = ""))
-                if type(history[-1].metadata) != dict :
-                    # print("metadata : ", history[-1].metadata)
-                    # history.append(ChatMessage(role="assistant", content = ""))
-
+            if "langgraph_node" in event["metadata"]:
+                node = event["metadata"]["langgraph_node"]
+
+            if event["event"] == "on_chain_end" and event["name"] == "retrieve_documents" :# when documents are retrieved
+                try:
+                    docs = event["data"]["output"]["documents"]
+                    docs_html = []
+                    for i, d in enumerate(docs, 1):
+                        docs_html.append(make_html_source(d, i))
+
+                    used_documents = used_documents + [d.metadata["name"] for d in docs]
+                    # already_used_sources =
+                    history[-1].content = "Adding source :\n" + "\n".join(np.unique(used_documents))
+
+                    docs_html = "".join(docs_html)
+
+                except Exception as e:
+                    print(f"Error getting documents: {e}")
+                    print(event)
+
+            elif event["name"] in steps_display.keys() and event["event"] == "on_chain_start": #display steps
+                event_description,display_output = steps_display[node]
+                if not hasattr(history[-1], 'metadata') or history[-1].metadata["title"] != event_description: # if a new step begins
+                    history.append(ChatMessage(role="assistant", content = "", metadata={'title' :event_description}))
+
+            elif event["name"] != "transform_query" and event["event"] == "on_chat_model_stream" and node in ["answer_rag", "answer_search"]:# if streaming answer
+                if start_streaming == False:
+                    start_streaming = True
+                    history.append(ChatMessage(role="assistant", content = ""))
                 last_message_content = history[-1].content
-                last_message_content += event["data"]["chunk"].content
-                last_message_content = parse_output_llm_with_sources(last_message_content)
-                history[-1] = ChatMessage(role="assistant", content = last_message_content)
-                # new_token = event["data"]["chunk"].content
-                # # time.sleep(0.01)
-                # previous_answer = history[-1][1]
-                # previous_answer = previous_answer if previous_answer is not None else ""
-                # answer_yet = previous_answer + new_token
-                # answer_yet = parse_output_llm_with_sources(answer_yet)
-                # history[-1] = (query,answer_yet)
+                new_message_content = last_message_content + event["data"]["chunk"].content
+                new_message_content = parse_output_llm_with_sources(new_message_content)
+                history[-1] = ChatMessage(role="assistant", content = new_message_content)
 
-            elif event["name"] == "retrieve_documents" and event["event"] == "on_chain_end": # when documents are retrieved
-                try:
-                    docs = event["data"]["output"]["documents"]
-                    docs_html = []
-                    for i, d in enumerate(docs, 1):
-                        docs_html.append(make_html_source(d, i))
-                    docs_html = "".join(docs_html)
-                except Exception as e:
-                    print(f"Error getting documents: {e}")
-                    print(event)
-
-            # elif event["name"] == "retrieve_documents" and event["event"] == "on_chain_start":
-            #     print(event)
-            #     questions = event["data"]["input"]["questions"]
-            #     questions = "\n".join([f"{i+1}. {q['question']} ({q['source']})" for i,q in enumerate(questions)])
-            #     answer_yet = "🔄️ Searching in the knowledge base\n{questions}"
-            #     history[-1] = (query,answer_yet)
-
-            # TODO append step de tool avec les questions qui sont utilisées pour la recherche
-            for event_name,(event_description,display_output) in steps_display.items(): # display steps
-                if event["name"] == event_name:
-                    if event["event"] == "on_chain_start":
-                        # answer_yet = f"<p><span class='loader'></span>{event_description}</p>"
-                        # answer_yet = make_toolbox(event_description, "", checked = False)
-                        answer_yet = event_description
-                        # answer_yet = ChatMessage(role="assistant", content = "processing", metadata={'title' :event_description})
+            if event["name"] == "transform_query" and event["event"] =="on_chain_end":
+                if hasattr(history[-1],"content"):
+                    history[-1].content += "Decompose question : \n\n" + "\n".join([q["question"] for q in event["data"]["output"]["remaining_questions"]])
 
-
-
-
-
-
-
-
-
-            #
-            #
+
+            # if event["event"] == "on_chat_model_stream" and event["name"] != "transform_query": # if streaming answer
+            #     if start_streaming == False:
+            #         start_streaming = True
+            #         # history[-1] = (query,"")
+            #         history.append(ChatMessage(role="assistant", content = ""))
+            #     if type(history[-1].metadata) != dict :
+            #         # print("metadata : ", history[-1].metadata)
+            #         # history.append(ChatMessage(role="assistant", content = ""))
+
+            #     last_message_content = history[-1].content
+            #     last_message_content += event["data"]["chunk"].content
+            #     last_message_content = parse_output_llm_with_sources(last_message_content)
+            #     history[-1] = ChatMessage(role="assistant", content = last_message_content)
+            #     # new_token = event["data"]["chunk"].content
+            #     # # time.sleep(0.01)
+            #     # previous_answer = history[-1][1]
+            #     # previous_answer = previous_answer if previous_answer is not None else ""
+            #     # answer_yet = previous_answer + new_token
+            #     # answer_yet = parse_output_llm_with_sources(answer_yet)
+            #     # history[-1] = (query,answer_yet)
+
+            # elif event["name"] == "retrieve_documents" and event["event"] == "on_chain_end": # when documents are retrieved
             # try:
-            #
-            #
+            #     docs = event["data"]["output"]["documents"]
+            #     docs_html = []
+            #     for i, d in enumerate(docs, 1):
+            #         docs_html.append(make_html_source(d, i))
+            #     docs_html = "".join(docs_html)
             # except Exception as e:
-            #
+            #     print(f"Error getting documents: {e}")
+            #     print(event)
+
+            # # elif event["name"] == "retrieve_documents" and event["event"] == "on_chain_start":
+            # #     print(event)
+            # #     questions = event["data"]["input"]["questions"]
+            # #     questions = "\n".join([f"{i+1}. {q['question']} ({q['source']})" for i,q in enumerate(questions)])
+            # #     answer_yet = "🔄️ Searching in the knowledge base\n{questions}"
+            # #     history[-1] = (query,answer_yet)
+
+            # # TODO append step de tool avec les questions qui sont utilisées pour la recherche
+            # for event_name,(event_description,display_output) in steps_display.items(): # display steps
+            #     if event["name"] == event_name:
+            #         if event["event"] == "on_chain_start":
+            #             # answer_yet = f"<p><span class='loader'></span>{event_description}</p>"
+            #             # answer_yet = make_toolbox(event_description, "", checked = False)
+            #             answer_yet = event_description
+            #             # answer_yet = ChatMessage(role="assistant", content = "processing", metadata={'title' :event_description})
+
+            #             history.append(ChatMessage(role="assistant", content = "", metadata={'title' :event_description}))
+            #             # history[-1] = (query,answer_yet)
+            #         # elif event["event"] == "on_chain_end":
+            #         #     answer_yet = ""
+            #         #     history[-1] = (query,answer_yet)
+            #         #     if display_output:
+            #         #         print(event["data"]["output"])
+
+            # # if op['path'] == path_reformulation: # reforulated question
+            # #     try:
+            # #         output_language = op['value']["language"] # str
+            # #         output_query = op["value"]["question"]
+            # #     except Exception as e:
+            # #         raise gr.Error(f"ClimateQ&A Error: {e} - The error has been noted, try another question and if the error remains, you can contact us :)")
+
+            # # if op["path"] == path_keywords:
+            # #     try:
+            # #         output_keywords = op['value']["keywords"] # str
+            # #         output_keywords = " AND ".join(output_keywords)
+            # #     except Exception as e:
+            # #         pass


@@ -220,6 +262,7 @@ async def chat(query,history,audience,sources,reports):
         yield history,docs_html,output_query,output_language,gallery,output_query,output_keywords
 
     except Exception as e:
+        print(event, "has failed")
         raise gr.Error(f"{e}")
 
 
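For readers following the change: the rewrite replaces the old flat `if`/`elif` chain with dispatch keyed on the `langgraph_node` field that LangGraph attaches to each event's metadata, and it moves step titles into Gradio `ChatMessage` metadata so each pipeline step renders as its own titled bubble. Below is a minimal sketch of that dispatch pattern, assuming an event stream such as LangChain's `app.astream_events(inputs, version="v2")`, LangChain `Document` objects with a `metadata["name"]` field, and a `history` pre-seeded with at least one assistant message; the helper `parse_output_llm_with_sources` stand-in and the function name `stream_events_to_history` are hypothetical simplifications, not the app's actual code:

```python
import numpy as np
from gradio import ChatMessage

STEPS_DISPLAY = {
    "transform_query": ("🔄️ Thinking step by step to answer the question", True),
    "retrieve_documents": ("🔄️ Searching in the knowledge base", False),
}

def parse_output_llm_with_sources(text: str) -> str:
    # Hypothetical stand-in: the real helper in app.py rewrites inline
    # citation markers into source links.
    return text

async def stream_events_to_history(result, history):
    """Map LangGraph streaming events onto a Gradio ChatMessage history."""
    used_documents = []
    start_streaming = False

    async for event in result:
        node = event["metadata"].get("langgraph_node")
        if node is None:
            continue

        if event["event"] == "on_chain_end" and event["name"] == "retrieve_documents":
            docs = event["data"]["output"]["documents"]
            used_documents += [d.metadata["name"] for d in docs]
            # Note: np.unique also sorts, so sources display alphabetically,
            # not in retrieval order.
            history[-1].content = "Adding source:\n" + "\n".join(np.unique(used_documents))

        elif node in STEPS_DISPLAY and event["event"] == "on_chain_start":
            title, _display_output = STEPS_DISPLAY[node]
            # A metadata title makes Gradio render the message as a
            # collapsible "step"; open a new bubble only when the step changes.
            last_title = (getattr(history[-1], "metadata", None) or {}).get("title")
            if last_title != title:
                history.append(ChatMessage(role="assistant", content="", metadata={"title": title}))

        elif event["event"] == "on_chat_model_stream" and node in ("answer_rag", "answer_search"):
            if not start_streaming:
                start_streaming = True
                history.append(ChatMessage(role="assistant", content=""))
            new_content = history[-1].content + event["data"]["chunk"].content
            history[-1] = ChatMessage(role="assistant", content=parse_output_llm_with_sources(new_content))

        yield history
```

One wrinkle worth noting: the commit matches on `event["name"] in steps_display.keys()` but then looks up `steps_display[node]`, which only works because the two step names happen to equal their node names; keying both on `node`, as in the sketch, avoids a `KeyError` if they ever diverge.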
sandbox/20240310 - CQA - Semantic Routing 1.ipynb
CHANGED

The diff for this file is too large to render; see the raw diff.
style.css
CHANGED

@@ -426,7 +426,7 @@ span.chatbot > p > img{
 
 .loader {
     border: 1px solid #d0d0d0 !important; /* Light grey background */
-    border-top: 1px solid #
+    border-top: 1px solid #db3434 !important; /* Red color */
     border-right: 1px solid #3498db !important; /* Blue color */
     border-radius: 50%;
     width: 20px;

@@ -464,4 +464,4 @@ span.chatbot > p > img{
 
 .score-orange{
     color:red !important;
-}
+}
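The `.loader` rule styles the small inline spinner that the step display injected into chat messages (see the commented-out `answer_yet = f"<p><span class='loader'></span>{event_description}</p>"` line in the app.py diff above). A hypothetical sketch of how such a span could be embedded while a step runs, assuming the same class name; `loading_bubble` is not a function in the app:

```python
def loading_bubble(description: str) -> str:
    # Hypothetical: mirrors the commented-out pattern in app.py;
    # the span picks up the .loader CSS rule above.
    return f"<p><span class='loader'></span>{description}</p>"
```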
test.json
ADDED

Added as an empty file.