Display images before answering
Files changed:
- app.py (+44 -25)
- front/utils.py (+2 -1)
- sandbox/20240702 - CQA - Graph Functionality.ipynb (+0 -0)
- style.css (+10 -1)
app.py
CHANGED
@@ -31,6 +31,7 @@ from langchain_chroma import Chroma
 from collections import defaultdict
 from gradio_modal import Modal
 
+from PIL import Image
 
 
 # ClimateQ&A imports
@@ -134,6 +135,7 @@ async def chat(query,history,audience,sources,reports,current_graphs):
 
     docs = []
     docs_used = True
+    used_figures=[]
     docs_html = ""
     output_query = ""
     output_language = ""
@@ -174,6 +176,11 @@ async def chat(query,history,audience,sources,reports,current_graphs):
             except Exception as e:
                 print(f"Error getting documents: {e}")
                 print(event)
+
+
+
+            used_figures, figures, gallery = process_figures(docs, figures, gallery, used_figures)
+
 
         elif event["name"] in steps_display.keys() and event["event"] == "on_chain_start": #display steps
             event_description,display_output = steps_display[node]
@@ -267,32 +274,48 @@ async def chat(query,history,audience,sources,reports,current_graphs):
         raise gr.Error(f"ClimateQ&A Error: {str(e)[:100]} - The error has been noted, try another question and if the error remains, you can contact us :)")
 
 
-
-
-        if doc.metadata["chunk_type"] == "image":
-            try:
-                key = f"Image {i+1}"
-
-                image_path = doc.metadata["image_path"].split("documents/")[1]
-                img = get_image_from_azure_blob_storage(image_path)
-
-                # Convert the image to a byte buffer
-                buffered = BytesIO()
-                img.save(buffered, format="PNG")
-                img_str = base64.b64encode(buffered.getvalue()).decode()
-
-                figures = figures + make_html_figure_sources(doc, i, img_str)
-
-                gallery.append(img)
+
+    # figures, gallery = process_figures(docs, figures, gallery)
 
-            except Exception as e:
-                print(f"Skipped adding image {i} because of {e}")
 
 
 
 
     yield history, docs_html, output_query, output_language, gallery, figures, graphs_html#,output_query,output_keywords
 
+def process_figures(docs, figures, gallery, used_figures):
+
+    docs_figures = [d for d in docs if d.metadata["chunk_type"] == "image"]
+    for i, doc in enumerate(docs_figures):
+        if doc.metadata["chunk_type"] == "image":
+            if doc.metadata["figure_code"] != "N/A":
+                title = f"{doc.metadata['figure_code']} - {doc.metadata['short_name']}"
+            else:
+                title = f"{doc.metadata['short_name']}"
+
+
+            if title not in used_figures:
+                used_figures.append(title)
+                try:
+                    key = f"Image {i+1}"
+
+                    image_path = doc.metadata["image_path"].split("documents/")[1]
+                    img = get_image_from_azure_blob_storage(image_path)
+
+                    # Convert the image to a byte buffer
+                    buffered = BytesIO()
+                    max_image_length = 500
+                    img_resized = img.resize((max_image_length, int(max_image_length * img.size[1]/img.size[0])))
+                    img_resized.save(buffered, format="PNG")
+
+                    img_str = base64.b64encode(buffered.getvalue()).decode()
+
+                    figures = figures + make_html_figure_sources(doc, i, img_str)
+                    gallery.append(img)
+                except Exception as e:
+                    print(f"Skipped adding image {i} because of {e}")
+
+    return used_figures, figures, gallery
 
 def save_feedback(feed: str, user_id):
     if len(feed) > 1:
@@ -619,16 +642,12 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
 
 
 
-    # config_button = gr.Button("Configuration",elem_id="config-button",interactive=True)
-
     def start_chat(query,history):
-        # history = history + [(query,None)]
-        # history = [tuple(x) for x in history]
         history = history + [ChatMessage(role="user", content=query)]
         return (gr.update(interactive = False),gr.update(selected=1),history)
 
     def finish_chat():
-        return
+        return gr.update(interactive = True,value = "")
 
 
     def change_completion_status(current_state):
@@ -651,7 +670,7 @@ with gr.Blocks(title="Climate Q&A", css_paths=os.getcwd()+ "/style.css", theme=t
     (textbox
         .submit(start_chat, [textbox,chatbot], [textbox,tabs,chatbot],queue = False,api_name = "start_chat_textbox")
         .then(chat, [textbox,chatbot,dropdown_audience, dropdown_sources,dropdown_reports, current_graphs], [chatbot,sources_textbox,output_query,output_language,gallery_component, figures_cards, current_graphs],concurrency_limit = 8,api_name = "chat_textbox")
-        .then(finish_chat, None, [textbox
+        .then(finish_chat, None, [textbox],api_name = "finish_chat_textbox")
         .then(update_sources_number_display, [sources_textbox, figures_cards, current_graphs],[tab_sources, tab_figures, tab_recommended_content] )
     )
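For context, the resize step added in process_figures caps each figure at a fixed width while keeping its aspect ratio (PIL's img.size is (width, height)). A minimal sketch of that arithmetic, using a helper name of our own rather than anything from the diff:

    from PIL import Image

    def resize_to_width(img: Image.Image, max_width: int = 500) -> Image.Image:
        # img.size is (width, height); scale the height by the same factor as the width
        width, height = img.size
        return img.resize((max_width, int(max_width * height / width)))

Note that, as written in the diff, the resize is unconditional, so figures narrower than 500 px get upscaled; a width check before resizing would avoid that.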
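The finish_chat fix matters for the event chain above: it now returns gr.update(interactive = True, value = "") so the textbox is cleared and re-enabled once the chat completes, instead of returning nothing. A minimal, self-contained sketch of that disable-while-running pattern (component names and the answer handler are placeholders; the real handlers stream events):

    import gradio as gr

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot(type="messages")
        textbox = gr.Textbox()

        def start_chat(query, history):
            # lock the input while the answer is being generated
            history = history + [gr.ChatMessage(role="user", content=query)]
            return gr.update(interactive=False), history

        def answer(history):
            # placeholder for the real streaming chat handler
            return history + [gr.ChatMessage(role="assistant", content="...")]

        def finish_chat():
            # clear and unlock the input once the chain is done
            return gr.update(interactive=True, value="")

        (textbox
            .submit(start_chat, [textbox, chatbot], [textbox, chatbot])
            .then(answer, [chatbot], [chatbot])
            .then(finish_chat, None, [textbox]))

    demo.launch()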
front/utils.py
CHANGED
@@ -216,8 +216,9 @@ def make_html_figure_sources(source,i,img_str):
     <div class="card card-image">
         <div class="card-content">
             <h2>Image {i} - {title} - Page {int(meta['page_number'])}</h2>
-            <p class='ai-generated'>AI-generated description</p>
             <img src="data:image/png;base64, { img_str }" alt="Alt text" />
+            <p class='ai-generated'>AI-generated description</p>
+
 
             <p>{content}</p>
 
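The front/utils.py change only moves the "AI-generated description" tag below the image inside the figure card. A simplified sketch of the resulting markup (hypothetical signature; the real make_html_figure_sources derives title, page and content from the document metadata):

    def figure_card_html(i, title, page, img_str, content):
        # card layout after this change: the AI-generated tag sits under the image
        return f"""
        <div class="card card-image">
            <div class="card-content">
                <h2>Image {i} - {title} - Page {page}</h2>
                <img src="data:image/png;base64, {img_str}" alt="Alt text" />
                <p class='ai-generated'>AI-generated description</p>
                <p>{content}</p>
            </div>
        </div>"""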
sandbox/20240702 - CQA - Graph Functionality.ipynb
CHANGED
The diff for this file is too large to render.
style.css
CHANGED
@@ -11,6 +11,10 @@
     margin: 0px;
 }
 
+.gradio-container {
+    width: 100%!important;
+    max-width: 100% !important;
+}
 
 /* fix for huggingface infinite growth*/
 main.flex.flex-1.flex-col {
@@ -93,7 +97,12 @@ body.dark .tip-box * {
     font-size:14px !important;
 
 }
-
+.card-content img {
+    display: block;
+    margin: auto;
+    max-width: 100%; /* Ensures the image is responsive */
+    height: auto;
+}
 
 a {
     text-decoration: none;