add graphs in the result and fix sources retrieved
Files changed:
- app.py (+19, -19)
- climateqa/engine/chains/retrieve_documents.py (+6, -4)
- climateqa/engine/llm/openai.py (+1, -1)

app.py
```diff
@@ -89,7 +89,7 @@ user_id = create_user_id()
 # Create vectorstore and retriever
 vectorstore = get_pinecone_vectorstore(embeddings_function)
 llm = get_llm(provider="openai",max_tokens = 1024,temperature = 0.0)
-reranker = get_reranker("
+reranker = get_reranker("large")
 agent = make_graph_agent(llm,vectorstore,reranker)
 
 
```
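Only the reranker changed here: the app now loads the "large" reranker model in place of the previous size (truncated in this view). If the heavier model is unwanted in local runs, one option, not part of this commit, is to key the size off an environment variable:

```python
import os

# Hypothetical tweak, not in this commit: let RERANKER_SIZE override the
# default so local development can fall back to a lighter reranker model.
reranker = get_reranker(os.getenv("RERANKER_SIZE", "large"))
agent = make_graph_agent(llm, vectorstore, reranker)
```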
```diff
@@ -232,24 +232,24 @@ async def chat(query,history,audience,sources,reports):
     except Exception as e:
         print(f"Skipped adding image {i} because of {e}")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if len(image_dict) > 0:
+
+        gallery = [x["img"] for x in list(image_dict.values())]
+        img = list(image_dict.values())[0]
+        img_md = img["md"]
+        img_caption = img["caption"]
+        img_code = img["figure_code"]
+        if img_code != "N/A":
+            img_name = f"{img['key']} - {img['figure_code']}"
+        else:
+            img_name = f"{img['key']}"
+
+        history.append(ChatMessage(role="assistant", content = f"\n\n{img_md}\n<p class='chatbot-caption'><b>{img_name}</b> - {img_caption}</p>"))
+        # answer_yet = history[-1][1] + f"\n\n{img_md}\n<p class='chatbot-caption'><b>{img_name}</b> - {img_caption}</p>"
+        # history[-1] = (history[-1][0],answer_yet)
+        # history = [tuple(x) for x in history]
 
-
+    yield history,docs_html,output_query,output_language,gallery#,output_query,output_keywords
 
 
 def save_feedback(feed: str, user_id):
```
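This hunk renders retrieved figures into the chat: when `image_dict` is non-empty, every image goes to the `gallery` output, and the first one is appended to `history` as a Markdown image with a styled caption. The commented-out lines show the old tuple-based history update being retired in favour of `ChatMessage` objects, the format Gradio's messages-style chatbot expects. A minimal standalone sketch of the same update, assuming `image_dict` values carry the `md`, `caption`, `key` and `figure_code` fields used above:

```python
from gradio import ChatMessage

# Sketch of the commit's history update, outside the streaming generator.
# The image dict layout (md/caption/key/figure_code) is taken from the diff.
def append_figure(history: list, img: dict) -> list:
    if img["figure_code"] != "N/A":
        name = f"{img['key']} - {img['figure_code']}"
    else:
        name = img["key"]
    caption = f"<p class='chatbot-caption'><b>{name}</b> - {img['caption']}</p>"
    history.append(ChatMessage(role="assistant", content=f"\n\n{img['md']}\n{caption}"))
    return history
```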
```diff
@@ -324,7 +324,7 @@ def vote(data: gr.LikeData):
 
 
 
-with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=theme,elem_id = "main-component") as demo:
+with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=theme,elem_id = "main-component", js=js_script.js_code) as demo:
 
     with gr.Tab("ClimateQ&A"):
 
```
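The only change to the UI shell is the new `js=js_script.js_code` argument; Gradio runs the JavaScript string passed to `gr.Blocks(js=...)` once when the page loads. The `js_script` module itself is not shown in this diff; a hypothetical minimal version might look like:

```python
# js_script.py -- hypothetical contents, not part of this diff. Gradio
# executes this string in the browser once the app has loaded.
js_code = """
() => {
    // Example: log the load and restore a saved UI preference.
    console.log("ClimateQ&A loaded");
    const tab = localStorage.getItem("climateqa-active-tab");
    if (tab) { console.log("last active tab:", tab); }
}
"""
```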
climateqa/engine/chains/retrieve_documents.py
```diff
@@ -82,11 +82,13 @@ def make_retriever_node(vectorstore,reranker,llm,rerank_by_question=True, k_final
     # # Option 2 - Get 100/n documents by question and rerank the total
     # if rerank_by_question:
     #     k_by_question = divide_into_parts(k_final,len(questions))
+    if "documents" in state and state["documents"] is not None:
+        docs = state["documents"]
+    else:
+        docs = []
 
-
-
-
-    docs = []
+
+
     k_by_question = k_final // state["n_questions"]
 
     sources = current_question["sources"]
```
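This is the "fix sources retrieved" part of the commit: the node previously started every pass with `docs = []`, so documents retrieved for earlier questions were thrown away and only the last question's sources survived. Seeding `docs` from `state["documents"]` lets each pass accumulate onto what is already in the graph state. A minimal sketch of the pattern, assuming a LangGraph-style dict state (names other than `documents` are illustrative):

```python
# Accumulate-in-state retrieval sketch. fake_search stands in for the real
# vectorstore + reranker lookup; "documents" matches the key in the diff.
def retrieve_node(state: dict) -> dict:
    docs = state["documents"] if state.get("documents") is not None else []
    docs = docs + fake_search(state["remaining_questions"][0])
    return {"documents": docs}  # merged back into the shared state

def fake_search(question: str) -> list:
    return [f"doc for: {question!r}"]
```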
climateqa/engine/llm/openai.py
```diff
@@ -7,7 +7,7 @@ try:
 except Exception:
     pass
 
-def get_llm(model="gpt-
+def get_llm(model="gpt-4o-mini",max_tokens=1024, temperature=0.0, streaming=True,timeout=30, **kwargs):
 
     llm = ChatOpenAI(
         model=model,
```
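The default model is now pinned to `gpt-4o-mini`, with streaming on and a 30-second timeout. The view cuts off after `llm = ChatOpenAI(model=model,`, but a plausible completion (the actual body is not shown here) simply forwards the new defaults to `ChatOpenAI`:

```python
from langchain_openai import ChatOpenAI

# Assumed completion of get_llm; only the signature and the first lines of
# the body appear in the diff, so the pass-through below is an inference.
def get_llm(model="gpt-4o-mini", max_tokens=1024, temperature=0.0,
            streaming=True, timeout=30, **kwargs):
    llm = ChatOpenAI(
        model=model,
        max_tokens=max_tokens,
        temperature=temperature,
        streaming=streaming,
        timeout=timeout,
        **kwargs,
    )
    return llm
```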