vikramvasudevan committed on
Commit
e1e6ddb
·
verified ·
1 Parent(s): 75ef845

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +58 -10
app.py CHANGED
@@ -1,4 +1,6 @@
1
  import os
 
 
2
  import gradio as gr
3
  from config import SanatanConfig
4
  from drive_downloader import ZipDownloader
@@ -17,10 +19,10 @@ graph = generate_graph()
17
  def init():
18
  load_dotenv(override=True)
19
 
20
- # if(os.path.isdir("./chromadb-store")):
21
- # logger.warning("database exists locally. not downloading.")
22
- # return
23
-
24
  downloader = ZipDownloader(
25
  service_account_json=os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON")
26
  )
@@ -58,15 +60,61 @@ def chat(message, history, thread_id):
58
  )
59
  return response["messages"][-1].content
60
 
 
 
 
 
 
61
  async def chat_streaming(message, history, thread_id):
62
  state = {"messages": (history or []) + [{"role": "user", "content": message}]}
63
  config = {"configurable": {"thread_id": thread_id}}
64
- async for step in graph.astream(state,config=config):
65
- # LangGraph yields steps like {"respond": ChatState, "post_process": ChatState}
66
- for node_output in step.values():
67
- yield node_output["messages"][-1].content
68
 
69
- print("received chat message for thread:", thread_id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
70
 
71
  thread_id = gr.State(init_session)
72
  supported_scriptures = "\n - ".join(
@@ -108,7 +156,7 @@ chatbot = gr.Chatbot(
108
  render_markdown=True,
109
  )
110
 
111
- debug_checkbox = gr.Checkbox(label="Debug (Streaming)", value=False)
112
 
113
  chatInterface = gr.ChatInterface(
114
  title="Sanatan-AI",
 
1
  import os
2
+ import random
3
+ import time
4
  import gradio as gr
5
  from config import SanatanConfig
6
  from drive_downloader import ZipDownloader
 
19
  def init():
20
  load_dotenv(override=True)
21
 
22
+ if(os.path.isdir("./chromadb-store")):
23
+ logger.warning("database exists locally. not downloading.")
24
+ return
25
+
26
  downloader = ZipDownloader(
27
  service_account_json=os.getenv("GOOGLE_SERVICE_ACCOUNT_JSON")
28
  )
 
60
  )
61
  return response["messages"][-1].content
62
 
63
+
64
+ import uuid
65
+ import asyncio
66
+ from html import escape
67
+
68
async def chat_streaming(message, history, thread_id):
    """Stream a chat turn through the LangGraph graph, yielding progress HTML.

    For each graph step this yields (1) an animated "thinking" bubble and
    (2) a dimmed, truncated preview of that node's latest message. After the
    graph finishes, the last node's full output is replayed character by
    character as a typewriter-style final answer.

    Args:
        message: The new user message text.
        history: Prior chat messages as a list of role/content dicts (may be None).
        thread_id: Conversation thread id used for LangGraph checkpointing.

    Yields:
        str: HTML/text fragments; each yield replaces the previous one in the
        Gradio chat widget.
    """
    state = {"messages": (history or []) + [{"role": "user", "content": message}]}
    config = {"configurable": {"thread_id": thread_id}}

    # Most recent node output; after the loop this holds the final answer.
    buffered_final = None

    thinking_verbs = [
        "thinking",
        "processing",
        "crunching data",
        "please wait",
        "just a few more seconds",
        "closing in",
        "racing to the finish line",
    ]

    async for step in graph.astream(state, config=config):
        # LangGraph yields steps shaped like {node_name: node_state}.
        for node_name, node_output in step.items():
            messages = node_output.get("messages", [])
            if not messages:
                continue

            last_message = messages[-1]
            content = getattr(last_message, "content", "") or ""

            # html.escape (quote=True by default) also escapes quotes, so the
            # text is safe inside both element bodies and the title attribute.
            full = escape(content)
            truncated = (full[:300] + "…") if len(full) > 300 else full

            # Save latest response in case it's the final one.
            buffered_final = full

            # Unique ID for each step to help the frontend match animation + content.
            thinking_id = str(uuid.uuid4())

            # Show the thinking bubble slightly before the step preview.
            yield f"<div id='{thinking_id}'><em>💭 {random.choice(thinking_verbs)}...</em></div>"
            await asyncio.sleep(0.5)

            # Combine bubble + dimmed node preview in one yield so the frontend
            # renders them as a single stream block.
            html = (
                f"<div><em>💭 {random.choice(thinking_verbs)} ...</em></div>"
                f"<div style='opacity: 0.5' title='{full}'>"
                f"<strong>[{node_name}]</strong> {truncated or '...'}"
                f"</div>"
            )
            yield html

    # Final answer: normal font, no node label, typewriter effect.
    if buffered_final:
        typed = ""
        for char in buffered_final:
            typed += char
            await asyncio.sleep(0.001)  # cooperative sleep keeps the event loop responsive
            yield typed
118
 
119
  thread_id = gr.State(init_session)
120
  supported_scriptures = "\n - ".join(
 
156
  render_markdown=True,
157
  )
158
 
159
+ debug_checkbox = gr.Checkbox(label="Debug (Streaming)", value=True)
160
 
161
  chatInterface = gr.ChatInterface(
162
  title="Sanatan-AI",