diff --git a/app.py b/app.py
index d65d52a13c37a916be68bf4012d6b76ecf301202..d62c847aa1b02a859de6dcb75bada2f7837f25bd 100644
--- a/app.py
+++ b/app.py
@@ -1,13 +1,12 @@
from climateqa.engine.embeddings import get_embeddings_function
embeddings_function = get_embeddings_function()
-from climateqa.knowledge.openalex import OpenAlex
from sentence_transformers import CrossEncoder
# reranker = CrossEncoder("mixedbread-ai/mxbai-rerank-xsmall-v1")
-oa = OpenAlex()
import gradio as gr
+from gradio_modal import Modal
import pandas as pd
import numpy as np
import os
@@ -29,7 +28,9 @@ from utils import create_user_id
from gradio_modal import Modal
+from PIL import Image
+from langchain_core.runnables.schema import StreamEvent
# ClimateQ&A imports
from climateqa.engine.llm import get_llm
@@ -39,13 +40,15 @@ from climateqa.engine.reranker import get_reranker
from climateqa.engine.embeddings import get_embeddings_function
from climateqa.engine.chains.prompts import audience_prompts
from climateqa.sample_questions import QUESTIONS
-from climateqa.constants import POSSIBLE_REPORTS
+from climateqa.constants import POSSIBLE_REPORTS, OWID_CATEGORIES
from climateqa.utils import get_image_from_azure_blob_storage
-from climateqa.engine.keywords import make_keywords_chain
-# from climateqa.engine.chains.answer_rag import make_rag_papers_chain
-from climateqa.engine.graph import make_graph_agent,display_graph
+from climateqa.engine.graph import make_graph_agent
+from climateqa.engine.embeddings import get_embeddings_function
+from climateqa.engine.chains.retrieve_papers import find_papers
+
+from front.utils import serialize_docs,process_figures
-from front.utils import make_html_source, make_html_figure_sources,parse_output_llm_with_sources,serialize_docs,make_toolbox
+from climateqa.event_handler import init_audience, handle_retrieved_documents, stream_answer,handle_retrieved_owid_graphs
# Load environment variables in local mode
try:
@@ -54,6 +57,8 @@ try:
except Exception as e:
pass
+import requests
+
# Set up Gradio Theme
theme = gr.themes.Base(
primary_hue="blue",
@@ -104,52 +109,47 @@ CITATION_TEXT = r"""@misc{climateqa,
# Create vectorstore and retriever
-vectorstore = get_pinecone_vectorstore(embeddings_function)
-llm = get_llm(provider="openai",max_tokens = 1024,temperature = 0.0)
-reranker = get_reranker("large")
-agent = make_graph_agent(llm,vectorstore,reranker)
+vectorstore = get_pinecone_vectorstore(embeddings_function, index_name = os.getenv("PINECONE_API_INDEX"))
+vectorstore_graphs = get_pinecone_vectorstore(embeddings_function, index_name = os.getenv("PINECONE_API_INDEX_OWID"), text_key="description")
+llm = get_llm(provider="openai",max_tokens = 1024,temperature = 0.0)
+reranker = get_reranker("nano")
+agent = make_graph_agent(llm=llm, vectorstore_ipcc=vectorstore, vectorstore_graphs=vectorstore_graphs, reranker=reranker)
+def update_config_modal_visibility(config_open):
+ new_config_visibility_status = not config_open
+ return gr.update(visible=new_config_visibility_status), new_config_visibility_status
-async def chat(query,history,audience,sources,reports):
+async def chat(query, history, audience, sources, reports, relevant_content_sources, search_only):
"""taking a query and a message history, use a pipeline (reformulation, retriever, answering) to yield a tuple of:
(messages in gradio format, messages in langchain format, source documents)"""
date_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f">> NEW QUESTION ({date_now}) : {query}")
- if audience == "Children":
- audience_prompt = audience_prompts["children"]
- elif audience == "General public":
- audience_prompt = audience_prompts["general"]
- elif audience == "Experts":
- audience_prompt = audience_prompts["experts"]
- else:
- audience_prompt = audience_prompts["experts"]
+ audience_prompt = init_audience(audience)
# Prepare default values
- if len(sources) == 0:
- sources = ["IPCC"]
+ if sources is None or len(sources) == 0:
+ sources = ["IPCC", "IPBES", "IPOS"]
- # if len(reports) == 0: # TODO
- reports = []
+ if reports is None or len(reports) == 0:
+ reports = []
- inputs = {"user_input": query,"audience": audience_prompt,"sources_input":sources}
+ inputs = {"user_input": query,"audience": audience_prompt,"sources_input":sources, "relevant_content_sources" : relevant_content_sources, "search_only": search_only}
result = agent.astream_events(inputs,version = "v1")
-
- # path_reformulation = "/logs/reformulation/final_output"
- # path_keywords = "/logs/keywords/final_output"
- # path_retriever = "/logs/find_documents/final_output"
- # path_answer = "/logs/answer/streamed_output_str/-"
+
docs = []
+ used_figures=[]
+ related_contents = []
docs_html = ""
output_query = ""
output_language = ""
output_keywords = ""
- gallery = []
start_streaming = False
+ graphs_html = ""
figures = '
'
steps_display = {
@@ -166,36 +166,29 @@ async def chat(query,history,audience,sources,reports):
node = event["metadata"]["langgraph_node"]
if event["event"] == "on_chain_end" and event["name"] == "retrieve_documents" :# when documents are retrieved
- try:
- docs = event["data"]["output"]["documents"]
- docs_html = []
- textual_docs = [d for d in docs if d.metadata["chunk_type"] == "text"]
- for i, d in enumerate(textual_docs, 1):
- if d.metadata["chunk_type"] == "text":
- docs_html.append(make_html_source(d, i))
-
- used_documents = used_documents + [f"{d.metadata['short_name']} - {d.metadata['name']}" for d in docs]
- history[-1].content = "Adding sources :\n\n - " + "\n - ".join(np.unique(used_documents))
-
- docs_html = "".join(docs_html)
-
- except Exception as e:
- print(f"Error getting documents: {e}")
- print(event)
-
+ docs, docs_html, history, used_documents, related_contents = handle_retrieved_documents(event, history, used_documents)
+
+ elif event["event"] == "on_chain_end" and node == "categorize_intent" and event["name"] == "_write": # when the query is transformed
+
+ intent = event["data"]["output"]["intent"]
+ if "language" in event["data"]["output"]:
+ output_language = event["data"]["output"]["language"]
+ else :
+ output_language = "English"
+ history[-1].content = f"Language identified : {output_language} \n Intent identified : {intent}"
+
+
elif event["name"] in steps_display.keys() and event["event"] == "on_chain_start": #display steps
- event_description,display_output = steps_display[node]
+ event_description, display_output = steps_display[node]
if not hasattr(history[-1], 'metadata') or history[-1].metadata["title"] != event_description: # if a new step begins
history.append(ChatMessage(role="assistant", content = "", metadata={'title' :event_description}))
elif event["name"] != "transform_query" and event["event"] == "on_chat_model_stream" and node in ["answer_rag", "answer_search","answer_chitchat"]:# if streaming answer
- if start_streaming == False:
- start_streaming = True
- history.append(ChatMessage(role="assistant", content = ""))
- answer_message_content += event["data"]["chunk"].content
- answer_message_content = parse_output_llm_with_sources(answer_message_content)
- history[-1] = ChatMessage(role="assistant", content = answer_message_content)
- # history.append(ChatMessage(role="assistant", content = new_message_content))
+ history, start_streaming, answer_message_content = stream_answer(history, event, start_streaming, answer_message_content)
+
+ elif event["name"] in ["retrieve_graphs", "retrieve_graphs_ai"] and event["event"] == "on_chain_end":
+ graphs_html = handle_retrieved_owid_graphs(event, graphs_html)
+
if event["name"] == "transform_query" and event["event"] =="on_chain_end":
if hasattr(history[-1],"content"):
@@ -204,7 +197,7 @@ async def chat(query,history,audience,sources,reports):
if event["name"] == "categorize_intent" and event["event"] == "on_chain_start":
print("X")
- yield history,docs_html,output_query,output_language,gallery, figures #,output_query,output_keywords
+ yield history, docs_html, output_query, output_language, related_contents , graphs_html, #,output_query,output_keywords
except Exception as e:
print(event, "has failed")
@@ -232,68 +225,7 @@ async def chat(query,history,audience,sources,reports):
print(f"Error logging on Azure Blob Storage: {e}")
raise gr.Error(f"ClimateQ&A Error: {str(e)[:100]} - The error has been noted, try another question and if the error remains, you can contact us :)")
-
-
-
- # image_dict = {}
- # for i,doc in enumerate(docs):
-
- # if doc.metadata["chunk_type"] == "image":
- # try:
- # key = f"Image {i+1}"
- # image_path = doc.metadata["image_path"].split("documents/")[1]
- # img = get_image_from_azure_blob_storage(image_path)
-
- # # Convert the image to a byte buffer
- # buffered = BytesIO()
- # img.save(buffered, format="PNG")
- # img_str = base64.b64encode(buffered.getvalue()).decode()
-
- # # Embedding the base64 string in Markdown
- # markdown_image = f""
- # image_dict[key] = {"img":img,"md":markdown_image,"short_name": doc.metadata["short_name"],"figure_code":doc.metadata["figure_code"],"caption":doc.page_content,"key":key,"figure_code":doc.metadata["figure_code"], "img_str" : img_str}
- # except Exception as e:
- # print(f"Skipped adding image {i} because of {e}")
-
- # if len(image_dict) > 0:
-
- # gallery = [x["img"] for x in list(image_dict.values())]
- # img = list(image_dict.values())[0]
- # img_md = img["md"]
- # img_caption = img["caption"]
- # img_code = img["figure_code"]
- # if img_code != "N/A":
- # img_name = f"{img['key']} - {img['figure_code']}"
- # else:
- # img_name = f"{img['key']}"
-
- # history.append(ChatMessage(role="assistant", content = f"\n\n{img_md}\n{img_name} - {img_caption}
"))
-
- docs_figures = [d for d in docs if d.metadata["chunk_type"] == "image"]
- for i, doc in enumerate(docs_figures):
- if doc.metadata["chunk_type"] == "image":
- try:
- key = f"Image {i+1}"
-
- image_path = doc.metadata["image_path"].split("documents/")[1]
- img = get_image_from_azure_blob_storage(image_path)
-
- # Convert the image to a byte buffer
- buffered = BytesIO()
- img.save(buffered, format="PNG")
- img_str = base64.b64encode(buffered.getvalue()).decode()
-
- figures = figures + make_html_figure_sources(doc, i, img_str)
-
- gallery.append(img)
-
- except Exception as e:
- print(f"Skipped adding image {i} because of {e}")
-
-
-
-
- yield history,docs_html,output_query,output_language,gallery, figures#,output_query,output_keywords
+ yield history, docs_html, output_query, output_language, related_contents, graphs_html
def save_feedback(feed: str, user_id):
@@ -317,29 +249,9 @@ def log_on_azure(file, logs, share_client):
file_client.upload_file(logs)
-def generate_keywords(query):
- chain = make_keywords_chain(llm)
- keywords = chain.invoke(query)
- keywords = " AND ".join(keywords["keywords"])
- return keywords
-papers_cols_widths = {
- "doc":50,
- "id":100,
- "title":300,
- "doi":100,
- "publication_year":100,
- "abstract":500,
- "rerank_score":100,
- "is_oa":50,
-}
-
-papers_cols = list(papers_cols_widths.keys())
-papers_cols_widths = list(papers_cols_widths.values())
-
-
# --------------------------------------------------------------------
# Gradio
# --------------------------------------------------------------------
@@ -370,10 +282,23 @@ def vote(data: gr.LikeData):
else:
print(data)
+def save_graph(saved_graphs_state, embedding, category):
+ print(f"\nCategory:\n{saved_graphs_state}\n")
+ if category not in saved_graphs_state:
+ saved_graphs_state[category] = []
+ if embedding not in saved_graphs_state[category]:
+ saved_graphs_state[category].append(embedding)
+ return saved_graphs_state, gr.Button("Graph Saved")
+
with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=theme,elem_id = "main-component") as demo:
+ chat_completed_state = gr.State(0)
+ current_graphs = gr.State([])
+ saved_graphs = gr.State({})
+ config_open = gr.State(False)
+
with gr.Tab("ClimateQ&A"):
with gr.Row(elem_id="chatbot-row"):
@@ -396,12 +321,16 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
with gr.Row(elem_id = "input-message"):
textbox=gr.Textbox(placeholder="Ask me anything here!",show_label=False,scale=7,lines = 1,interactive = True,elem_id="input-textbox")
-
+
+ config_button = gr.Button("",elem_id="config-button")
+ # config_checkbox_button = gr.Checkbox(label = '⚙️', value="show",visible=True, interactive=True, elem_id="checkbox-config")
+
+
- with gr.Column(scale=1, variant="panel",elem_id = "right-panel"):
+ with gr.Column(scale=2, variant="panel",elem_id = "right-panel"):
- with gr.Tabs() as tabs:
+ with gr.Tabs(elem_id = "right_panel_tab") as tabs:
with gr.TabItem("Examples",elem_id = "tab-examples",id = 0):
examples_hidden = gr.Textbox(visible = False)
@@ -427,91 +356,210 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
)
samples.append(group_examples)
+
+ # with gr.Tab("Configuration", id = 10, ) as tab_config:
+ # # gr.Markdown("Reminders: You can talk in any language, ClimateQ&A is multi-lingual!")
+ # pass
+
+ # with gr.Row():
+
+ # dropdown_sources = gr.CheckboxGroup(
+ # ["IPCC", "IPBES","IPOS"],
+ # label="Select source",
+ # value=["IPCC"],
+ # interactive=True,
+ # )
+ # dropdown_external_sources = gr.CheckboxGroup(
+ # ["IPCC figures","OpenAlex", "OurWorldInData"],
+ # label="Select database to search for relevant content",
+ # value=["IPCC figures"],
+ # interactive=True,
+ # )
+
+ # dropdown_reports = gr.Dropdown(
+ # POSSIBLE_REPORTS,
+ # label="Or select specific reports",
+ # multiselect=True,
+ # value=None,
+ # interactive=True,
+ # )
+
+ # search_only = gr.Checkbox(label="Search only without chatting", value=False, interactive=True, elem_id="checkbox-chat")
+
+
+ # dropdown_audience = gr.Dropdown(
+ # ["Children","General public","Experts"],
+ # label="Select audience",
+ # value="Experts",
+ # interactive=True,
+ # )
+
+
+ # after = gr.Slider(minimum=1950,maximum=2023,step=1,value=1960,label="Publication date",show_label=True,interactive=True,elem_id="date-papers", visible=False)
+
- with gr.Tab("Sources",elem_id = "tab-citations",id = 1):
- sources_textbox = gr.HTML(show_label=False, elem_id="sources-textbox")
- docs_textbox = gr.State("")
-
-
-
+ # output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False, visible= False)
+ # output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False, visible= False)
- # with Modal(visible = False) as config_modal:
- with gr.Tab("Configuration",elem_id = "tab-config",id = 2):
- gr.Markdown("Reminder: You can talk in any language, ClimateQ&A is multi-lingual!")
+ # dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after])
+ # # dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after], visible=True)
- dropdown_sources = gr.CheckboxGroup(
- ["IPCC", "IPBES","IPOS"],
- label="Select source",
- value=["IPCC"],
- interactive=True,
- )
+ with gr.Tab("Sources",elem_id = "tab-sources",id = 1) as tab_sources:
+ sources_textbox = gr.HTML(show_label=False, elem_id="sources-textbox")
+
+
+
+ with gr.Tab("Recommended content", elem_id="tab-recommended_content",id=2) as tab_recommended_content:
+ with gr.Tabs(elem_id = "group-subtabs") as tabs_recommended_content:
+
+ with gr.Tab("Figures",elem_id = "tab-figures",id = 3) as tab_figures:
+ sources_raw = gr.State()
+
+ with Modal(visible=False, elem_id="modal_figure_galery") as figure_modal:
+ gallery_component = gr.Gallery(object_fit='scale-down',elem_id="gallery-component", height="80vh")
+
+ show_full_size_figures = gr.Button("Show figures in full size",elem_id="show-figures",interactive=True)
+ show_full_size_figures.click(lambda : Modal(visible=True),None,figure_modal)
+
+ figures_cards = gr.HTML(show_label=False, elem_id="sources-figures")
+
+
+
+ with gr.Tab("Papers",elem_id = "tab-citations",id = 4) as tab_papers:
+ # btn_summary = gr.Button("Summary")
+ # Simulated window for the Summary
+ with gr.Accordion(visible=True, elem_id="papers-summary-popup", label= "See summary of relevant papers", open= False) as summary_popup:
+ papers_summary = gr.Markdown("", visible=True, elem_id="papers-summary")
+
+ # btn_relevant_papers = gr.Button("Relevant papers")
+ # Simulated window for the Relevant Papers
+ with gr.Accordion(visible=True, elem_id="papers-relevant-popup",label= "See relevant papers", open= False) as relevant_popup:
+ papers_html = gr.HTML(show_label=False, elem_id="papers-textbox")
+
+ btn_citations_network = gr.Button("Explore papers citations network")
+ # Simulated window for the Citations Network
+ with Modal(visible=False) as papers_modal:
+ citations_network = gr.HTML("Citations Network Graph
", visible=True, elem_id="papers-citations-network")
+ btn_citations_network.click(lambda: Modal(visible=True), None, papers_modal)
+
+
+
+ with gr.Tab("Graphs", elem_id="tab-graphs", id=5) as tab_graphs:
+
+ graphs_container = gr.HTML("There are no graphs to be displayed at the moment. Try asking another question.
",elem_id="graphs-container")
+ current_graphs.change(lambda x : x, inputs=[current_graphs], outputs=[graphs_container])
+
+ with Modal(visible=False,elem_id="modal-config") as config_modal:
+ gr.Markdown("Reminder: You can talk in any language, ClimateQ&A is multi-lingual!")
- dropdown_reports = gr.Dropdown(
- POSSIBLE_REPORTS,
- label="Or select specific reports",
- multiselect=True,
- value=None,
- interactive=True,
- )
+
+ # with gr.Row():
+
+ dropdown_sources = gr.CheckboxGroup(
+ ["IPCC", "IPBES","IPOS"],
+ label="Select source (by default search in all sources)",
+ value=["IPCC"],
+ interactive=True,
+ )
+
+ dropdown_reports = gr.Dropdown(
+ POSSIBLE_REPORTS,
+ label="Or select specific reports",
+ multiselect=True,
+ value=None,
+ interactive=True,
+ )
+
+ dropdown_external_sources = gr.CheckboxGroup(
+ ["IPCC figures","OpenAlex", "OurWorldInData"],
+ label="Select database to search for relevant content",
+ value=["IPCC figures"],
+ interactive=True,
+ )
- dropdown_audience = gr.Dropdown(
- ["Children","General public","Experts"],
- label="Select audience",
- value="Experts",
- interactive=True,
- )
+ search_only = gr.Checkbox(label="Search only for recommended content without chatting", value=False, interactive=True, elem_id="checkbox-chat")
- output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False)
- output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False)
+ dropdown_audience = gr.Dropdown(
+ ["Children","General public","Experts"],
+ label="Select audience",
+ value="Experts",
+ interactive=True,
+ )
+
+
+ after = gr.Slider(minimum=1950,maximum=2023,step=1,value=1960,label="Publication date",show_label=True,interactive=True,elem_id="date-papers", visible=False)
+
- with gr.Tab("Figures",elem_id = "tab-figures",id = 3):
- with Modal(visible=False, elem_id="modal_figure_galery") as modal:
- gallery_component = gr.Gallery(object_fit='scale-down',elem_id="gallery-component", height="80vh")
-
- show_full_size_figures = gr.Button("Show figures in full size",elem_id="show-figures",interactive=True)
- show_full_size_figures.click(lambda : Modal(visible=True),None,modal)
+ output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False, visible= False)
+ output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False, visible= False)
- figures_cards = gr.HTML(show_label=False, elem_id="sources-figures")
-
+ dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after])
+
+ close_config_modal = gr.Button("Validate and Close",elem_id="close-config-modal")
+ close_config_modal.click(fn=update_config_modal_visibility, inputs=[config_open], outputs=[config_modal, config_open])
+ # dropdown_external_sources.change(lambda x: gr.update(visible = True ) if "OpenAlex" in x else gr.update(visible=False) , inputs=[dropdown_external_sources], outputs=[after], visible=True)
+
+
+ config_button.click(fn=update_config_modal_visibility, inputs=[config_open], outputs=[config_modal, config_open])
+
+ # with gr.Tab("OECD",elem_id = "tab-oecd",id = 6):
+ # oecd_indicator = "RIVER_FLOOD_RP100_POP_SH"
+ # oecd_topic = "climate"
+ # oecd_latitude = "46.8332"
+ # oecd_longitude = "5.3725"
+ # oecd_zoom = "5.6442"
+ # # Create the HTML content with the iframe
+ # iframe_html = f"""
+ #
+ # """
+ # oecd_textbox = gr.HTML(iframe_html, show_label=False, elem_id="oecd-textbox")
+
#---------------------------------------------------------------------------------------
# OTHER TABS
#---------------------------------------------------------------------------------------
+ # with gr.Tab("Settings",elem_id = "tab-config",id = 2):
- # with gr.Tab("Figures",elem_id = "tab-images",elem_classes = "max-height other-tabs"):
- # gallery_component = gr.Gallery(object_fit='cover')
+ # gr.Markdown("Reminder: You can talk in any language, ClimateQ&A is multi-lingual!")
- # with gr.Tab("Papers (beta)",elem_id = "tab-papers",elem_classes = "max-height other-tabs"):
- # with gr.Row():
- # with gr.Column(scale=1):
- # query_papers = gr.Textbox(placeholder="Question",show_label=False,lines = 1,interactive = True,elem_id="query-papers")
- # keywords_papers = gr.Textbox(placeholder="Keywords",show_label=False,lines = 1,interactive = True,elem_id="keywords-papers")
- # after = gr.Slider(minimum=1950,maximum=2023,step=1,value=1960,label="Publication date",show_label=True,interactive=True,elem_id="date-papers")
- # search_papers = gr.Button("Search",elem_id="search-papers",interactive=True)
+ # dropdown_sources = gr.CheckboxGroup(
+ # ["IPCC", "IPBES","IPOS", "OpenAlex"],
+ # label="Select source",
+ # value=["IPCC"],
+ # interactive=True,
+ # )
- # with gr.Column(scale=7):
+ # dropdown_reports = gr.Dropdown(
+ # POSSIBLE_REPORTS,
+ # label="Or select specific reports",
+ # multiselect=True,
+ # value=None,
+ # interactive=True,
+ # )
- # with gr.Tab("Summary",elem_id="papers-summary-tab"):
- # papers_summary = gr.Markdown(visible=True,elem_id="papers-summary")
+ # dropdown_audience = gr.Dropdown(
+ # ["Children","General public","Experts"],
+ # label="Select audience",
+ # value="Experts",
+ # interactive=True,
+ # )
- # with gr.Tab("Relevant papers",elem_id="papers-results-tab"):
- # papers_dataframe = gr.Dataframe(visible=True,elem_id="papers-table",headers = papers_cols)
- # with gr.Tab("Citations network",elem_id="papers-network-tab"):
- # citations_network = gr.HTML(visible=True,elem_id="papers-citations-network")
+ # output_query = gr.Textbox(label="Query used for retrieval",show_label = True,elem_id = "reformulated-query",lines = 2,interactive = False)
+ # output_language = gr.Textbox(label="Language",show_label = True,elem_id = "language",lines = 1,interactive = False)
-
with gr.Tab("About",elem_classes = "max-height other-tabs"):
with gr.Row():
with gr.Column(scale=1):
@@ -519,13 +567,15 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
- gr.Markdown("""
-### More info
-- See more info at [https://climateqa.com](https://climateqa.com/docs/intro/)
-- Feedbacks on this [form](https://forms.office.com/e/1Yzgxm6jbp)
-
-### Citation
-""")
+ gr.Markdown(
+ """
+ ### More info
+ - See more info at [https://climateqa.com](https://climateqa.com/docs/intro/)
+ - Feedback on this [form](https://forms.office.com/e/1Yzgxm6jbp)
+
+ ### Citation
+ """
+ )
with gr.Accordion(CITATION_LABEL,elem_id="citation", open = False,):
# # Display citation label and text)
gr.Textbox(
@@ -538,25 +588,61 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
- def start_chat(query,history):
- # history = history + [(query,None)]
- # history = [tuple(x) for x in history]
+ def start_chat(query,history,search_only):
history = history + [ChatMessage(role="user", content=query)]
- return (gr.update(interactive = False),gr.update(selected=1),history)
+ if search_only:
+ return (gr.update(interactive = False),gr.update(selected=1),history)
+ else:
+ return (gr.update(interactive = False),gr.update(selected=2),history)
def finish_chat():
- return (gr.update(interactive = True,value = ""))
+ return gr.update(interactive = True,value = "")
+
+ # Initialize visibility states
+ summary_visible = False
+ relevant_visible = False
+
+ # Functions to toggle visibility
+ def toggle_summary_visibility():
+ global summary_visible
+ summary_visible = not summary_visible
+ return gr.update(visible=summary_visible)
+
+ def toggle_relevant_visibility():
+ global relevant_visible
+ relevant_visible = not relevant_visible
+ return gr.update(visible=relevant_visible)
+
+ def change_completion_status(current_state):
+ current_state = 1 - current_state
+ return current_state
+
+ def update_sources_number_display(sources_textbox, figures_cards, current_graphs, papers_html):
+ sources_number = sources_textbox.count("")
+ figures_number = figures_cards.count("")
+ graphs_number = current_graphs.count("