ALVHB95 committed on
Commit
f043359
·
1 Parent(s): 77dfcc0
Files changed (1) hide show
  1. app.py +58 -16
app.py CHANGED
@@ -1,6 +1,10 @@
1
  """
2
  =========================================================
3
  app.py — Green Greta (Gradio + TF/Keras 3 + LangChain 0.3)
 
 
 
 
4
  =========================================================
5
  """
6
 
@@ -15,7 +19,7 @@ os.environ.setdefault("GRADIO_ANALYTICS_ENABLED", "False")
15
  os.environ.setdefault("ANONYMIZED_TELEMETRY", "false")
16
  os.environ.setdefault("CHROMA_TELEMETRY_ENABLED", "FALSE")
17
  os.environ.setdefault("USER_AGENT", "green-greta/1.0 (+contact-or-repo)")
18
- # If you want deterministic CPU math from TF (optional):
19
  # os.environ.setdefault("TF_ENABLE_ONEDNN_OPTS", "0")
20
 
21
  import gradio as gr
@@ -31,7 +35,7 @@ except Exception:
31
  user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
32
  header_template = {"User-Agent": user_agent}
33
 
34
- # --- LangChain core ---
35
  from langchain_text_splitters import RecursiveCharacterTextSplitter
36
  from langchain_core.prompts import ChatPromptTemplate
37
  from langchain.chains import ConversationalRetrievalChain
@@ -221,16 +225,14 @@ qa_chain = ConversationalRetrievalChain.from_llm(
221
  combine_docs_chain_kwargs={"prompt": qa_prompt},
222
  get_chat_history=lambda h: h,
223
  rephrase_question=False,
224
- return_source_documents=False, # <- we only need the final answer
225
- # Use default output key "answer" so we don't need to parse JSON
226
  )
227
 
228
  def chat_interface(question, history):
 
229
  try:
230
  result = qa_chain.invoke({"question": question})
231
- # ConversationalRetrievalChain returns {"answer": "...", ...}
232
  answer = result.get("answer", "")
233
- # Safety fallback: if empty, return a friendly default
234
  if not answer:
235
  return "Lo siento, no pude generar una respuesta útil con los fragmentos disponibles."
236
  return answer
@@ -268,26 +270,66 @@ banner_tab_content = """
268
  banner_tab = gr.Markdown(banner_tab_content)
269
 
270
  # ============================
271
- # 7) Gradio app (tabs + run)
272
  # ============================
 
 
273
  custom_css = """
274
- /* Make the chat area taller without using the height arg */
275
- .gr-chatbot { min-height: 700px !important; }
276
- .gr-chatbot > div { min-height: 700px !important; }
277
  .gradio-container { max-width: 1200px !important; }
 
 
 
278
  """
279
 
280
- chatbot_gradio_app = gr.ChatInterface(
281
- fn=chat_interface,
282
- title="<span style='color: rgb(243, 239, 224);'>Green Greta</span>",
283
- theme=theme,
284
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285
 
 
 
 
286
  app = gr.TabbedInterface(
287
  [banner_tab, image_gradio_app, chatbot_gradio_app],
288
  tab_names=["Welcome to Green Greta", "Green Greta Image Classification", "Green Greta Chat"],
289
  theme=theme,
290
- css=custom_css,
291
  )
292
 
293
  app.queue()
 
1
  """
2
  =========================================================
3
  app.py — Green Greta (Gradio + TF/Keras 3 + LangChain 0.3)
4
+ - Chat tab uses Blocks + Chatbot(height=...) ✅
5
+ - LLM: meta-llama/Meta-Llama-3.1-8B-Instruct ✅
6
+ - RAG: e5-base-v2 + (BM25+Vector) with safe fallback + Multi-Query + reranker ✅
7
+ - No JSON output leakage ✅
8
  =========================================================
9
  """
10
 
 
19
  os.environ.setdefault("ANONYMIZED_TELEMETRY", "false")
20
  os.environ.setdefault("CHROMA_TELEMETRY_ENABLED", "FALSE")
21
  os.environ.setdefault("USER_AGENT", "green-greta/1.0 (+contact-or-repo)")
22
+ # Optional: more reproducible CPU math (silences some TF logs)
23
  # os.environ.setdefault("TF_ENABLE_ONEDNN_OPTS", "0")
24
 
25
  import gradio as gr
 
35
  user_agent = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36"
36
  header_template = {"User-Agent": user_agent}
37
 
38
+ # --- LangChain core / RAG ---
39
  from langchain_text_splitters import RecursiveCharacterTextSplitter
40
  from langchain_core.prompts import ChatPromptTemplate
41
  from langchain.chains import ConversationalRetrievalChain
 
225
  combine_docs_chain_kwargs={"prompt": qa_prompt},
226
  get_chat_history=lambda h: h,
227
  rephrase_question=False,
228
+ return_source_documents=False, # we only need the final answer
 
229
  )
230
 
231
  def chat_interface(question, history):
232
+ """Wrap the RAG chain to return a clean text answer."""
233
  try:
234
  result = qa_chain.invoke({"question": question})
 
235
  answer = result.get("answer", "")
 
236
  if not answer:
237
  return "Lo siento, no pude generar una respuesta útil con los fragmentos disponibles."
238
  return answer
 
270
  banner_tab = gr.Markdown(banner_tab_content)
271
 
272
  # ============================
273
+ # 7) Chat tab (Blocks + Chatbot with height) — OPTION A
274
  # ============================
275
+
276
+ # CSS: make chat area taller and widen app a bit
277
  custom_css = """
 
 
 
278
  .gradio-container { max-width: 1200px !important; }
279
+ #greta-chat { height: 700px !important; }
280
+ #greta-chat .gr-chatbot { height: 700px !important; min-height: 700px !important; }
281
+ #greta-chat .overflow-y-auto { height: 660px !important; max-height: 660px !important; }
282
  """
283
 
284
+ def _user_submit(user_msg, history):
285
+ """Append user turn; bot fills later."""
286
+ if not user_msg:
287
+ return "", history
288
+ history = history + [[user_msg, None]]
289
+ return "", history
290
+
291
+ def _bot_respond(history):
292
+ """Generate bot answer for the last user turn."""
293
+ user_msg = history[-1][0]
294
+ # Pass previous history to our RAG function (excluding the current empty bot turn)
295
+ answer = chat_interface(user_msg, history[:-1])
296
+ history[-1][1] = answer
297
+ return history
298
+
299
+ with gr.Blocks(theme=theme, css=custom_css) as chatbot_gradio_app:
300
+ gr.Markdown("<h1 style='text-align:center;color:#f3efe0;'>Green Greta</h1>")
301
+ chat = gr.Chatbot(label="Chatbot", height=700, elem_id="greta-chat", show_copy_button=True)
302
+ with gr.Row():
303
+ msg = gr.Textbox(placeholder="Type a message…", scale=9)
304
+ send = gr.Button("Submit", scale=1)
305
+ with gr.Row():
306
+ retry = gr.Button("↻ Retry")
307
+ undo = gr.Button("↩︎ Undo")
308
+ clear = gr.Button("🗑 Clear")
309
+
310
+ # Submit via button or Enter
311
+ send.click(_user_submit, [msg, chat], [msg, chat], queue=False).then(
312
+ _bot_respond, [chat], [chat]
313
+ )
314
+ msg.submit(_user_submit, [msg, chat], [msg, chat], queue=False).then(
315
+ _bot_respond, [chat], [chat]
316
+ )
317
+
318
+ # Utilities
319
+ clear.click(lambda: [], None, chat, queue=False)
320
+ undo.click(lambda h: h[:-1] if h else h, chat, chat, queue=False)
321
+ retry.click(
322
+ lambda h: (h[:-1] + [[h[-1][0], None]]) if h else h, # re-ask last user msg
323
+ chat, chat, queue=False
324
+ ).then(_bot_respond, [chat], [chat])
325
 
326
+ # ============================
327
+ # 8) Tabs + launch
328
+ # ============================
329
  app = gr.TabbedInterface(
330
  [banner_tab, image_gradio_app, chatbot_gradio_app],
331
  tab_names=["Welcome to Green Greta", "Green Greta Image Classification", "Green Greta Chat"],
332
  theme=theme,
 
333
  )
334
 
335
  app.queue()