Wfafa committed (verified)
Commit e2443b6 · Parent(s): edfac6b

Update app.py

Files changed (1): app.py (+11 -59)

app.py CHANGED
@@ -37,12 +37,11 @@ def save_memory(memory):
 
 memory = load_memory()
 
-# 💬 Chat function (keeps original behavior)
+# 💬 Chat function
 def chat_with_model(message, history, context):
     if not isinstance(history, list):
         history = []
 
-    # 🌍 Web search mode
     if message.lower().startswith("search "):
         query = message[7:]
         search_result = search_web(query)
@@ -50,14 +49,13 @@ def chat_with_model(message, history, context):
         save_memory(history)
         return history, history
 
-    # 🧠 Build conversation
     conversation = [{"role": "system", "content": (
-        "You are EduAI — an educational AI assistant created by Wafa Fazly "
-        "from Fathima Muslim Ladies College. "
-        "You help students learn subjects such as Math, Science, English, and IT. "
-        "EduAI runs on the model 'Qwen/Qwen3-VL-8B-Instruct', which was originally "
-        "trained by Alibaba. Always answer truthfully when asked about your creation."
-    )}]
+        "You are EduAI — an educational AI assistant created by Wafa Fazly "
+        "from Fathima Muslim Ladies College. "
+        "You help students learn subjects such as Math, Science, English, and IT. "
+        "EduAI runs on the model 'Qwen/Qwen3-VL-8B-Instruct', which was originally "
+        "trained by Alibaba. Always answer truthfully when asked about your creation."
+    )}]
 
     for past_user, past_bot in history[-5:]:
         conversation.append({"role": "user", "content": past_user})
@@ -65,7 +63,6 @@ def chat_with_model(message, history, context):
 
     conversation.append({"role": "user", "content": message})
 
-    # 🚀 Send to Hugging Face model
     try:
         response = requests.post(
             "https://router.huggingface.co/v1/chat/completions",
@@ -76,14 +73,12 @@ def chat_with_model(message, history, context):
             json={
                 "model": "Qwen/Qwen3-VL-8B-Instruct:novita",
                 "messages": conversation
-            },
-            timeout=60
+            }
         )
 
         data = response.json()
         reply = data["choices"][0]["message"]["content"]
 
-        # 🧮 Clean up math formatting
         reply = reply.replace("Step", "\n\n**Step")
         reply = reply.replace(":", ":**")
         reply = reply.replace("\\[", "\n\n\\[")
@@ -113,43 +108,7 @@ def clear_memory():
         os.remove(MEMORY_FILE)
     return [], "🧹 Chat memory cleared! Start fresh."
 
-# -----------------------
-# New: Pause functionality
-# -----------------------
-
-# send handler that respects paused state
-def send_handler(message, history, context, paused_state):
-    # If paused, don't send to model — show paused hint
-    if paused_state:
-        # If history uses tuple style, append tuple; your app uses tuples for chat entries
-        history.append((None, "⏸️ Chat is paused. Click Resume to continue."))
-        return history, ""  # update chatbot and clear textbox
-    # Not paused: call original chat handler
-    return chat_with_model(message, history, context)
-
-
-# toggle pause/resume and update UI (state + chat + button text + send button disabled)
-def toggle_pause(paused_state, history):
-    # paused_state is boolean
-    new_state = not bool(paused_state)
-    # append a system message to chat
-    if not isinstance(history, list):
-        history = []
-    if new_state:
-        history.append((None, "⏸️ Chat paused. Send is disabled."))
-        # change pause button label to "▶ Resume" and disable Send
-        pause_btn_update = gr.Button.update(value="▶ Resume")
-        send_btn_update = gr.Button.update(disabled=True)
-    else:
-        history.append((None, "▶️ Chat resumed. You can send messages now."))
-        pause_btn_update = gr.Button.update(value="⏸ Pause")
-        send_btn_update = gr.Button.update(disabled=False)
-    # return new state, updated history for chatbot, and UI updates for pause & send buttons
-    return new_state, history, pause_btn_update, send_btn_update
-
-# -----------------------
-# Build UI (unchanged layout; pause added)
-# -----------------------
+# 🎨 Gradio Interface (UI Improved)
 with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as iface:
     gr.Markdown(
         """
@@ -206,6 +165,7 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as iface:
             {"left": "\\[", "right": "\\]", "display": True}
         ]
     )
+
     msg = gr.Textbox(
         label="💭 Type your question here...",
         placeholder="Ask EduAI anything about your studies..."
@@ -214,20 +174,12 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue="violet")) as iface:
     with gr.Row():
         send = gr.Button("✨ Send Message")
         pause = gr.Button("⏸ Pause", variant="secondary")
-    # state to keep track of pause (False = running, True = paused)
-    pause_state = gr.State(False)
 
     # 🪄 Event handlers
     subj.change(update_context, inputs=subj, outputs=context_display)
    planner.change(update_context, inputs=planner, outputs=context_display)
     lang.change(update_context, inputs=lang, outputs=context_display)
-
-    # send now uses send_handler and respects pause_state; outputs: chatbot and clears textbox
-    send.click(send_handler, inputs=[msg, chatbot, context_display, pause_state], outputs=[chatbot, msg])
-
+    send.click(chat_with_model, inputs=[msg, chatbot, context_display], outputs=[chatbot, chatbot])
     clear_btn.click(clear_memory, outputs=[chatbot, context_display])
 
-    # pause toggles pause_state, updates chatbot with a message, updates pause button label and disables/enables send
-    pause.click(toggle_pause, inputs=[pause_state, chatbot], outputs=[pause_state, chatbot, pause, send])
-
 iface.launch()
 
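For context, the chat handler in the diff above posts the running conversation to the Hugging Face router's OpenAI-compatible chat-completions endpoint. The following is only a minimal standalone sketch of that request shape, not the app's code: the HF_TOKEN environment variable and the Authorization header layout are assumptions (the headers are not visible in this diff), and an explicit timeout is kept here purely for illustration.

# Sketch only: mirrors the request made in chat_with_model.
# Assumptions: HF_TOKEN env var holds the API token; header layout is illustrative.
import os
import requests

def query_router(conversation, timeout=60):
    """POST a chat-completions request to the Hugging Face router and return the reply text."""
    response = requests.post(
        "https://router.huggingface.co/v1/chat/completions",
        headers={"Authorization": f"Bearer {os.environ['HF_TOKEN']}"},
        json={
            "model": "Qwen/Qwen3-VL-8B-Instruct:novita",
            "messages": conversation,
        },
        timeout=timeout,
    )
    response.raise_for_status()
    return response.json()["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(query_router([{"role": "user", "content": "Explain photosynthesis in one sentence."}]))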
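The pause handlers shown as removed above return component updates through gr.Button.update(...), which is the Gradio 3.x style. As a hedged sketch only, the same toggle pattern written against the gr.update() helper of Gradio 4 and later could look like this; every name below is illustrative and none of it is part of this commit.

# Sketch of a pause/resume toggle using gr.update() (Gradio 4+); names are illustrative.
import gradio as gr

def toggle_pause(paused, history):
    """Flip the paused flag, note it in the chat, and update the two buttons."""
    history = history if isinstance(history, list) else []
    paused = not bool(paused)
    if paused:
        history.append((None, "⏸️ Chat paused. Send is disabled."))
        return paused, history, gr.update(value="▶ Resume"), gr.update(interactive=False)
    history.append((None, "▶️ Chat resumed. You can send messages now."))
    return paused, history, gr.update(value="⏸ Pause"), gr.update(interactive=True)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    pause_state = gr.State(False)          # False = running, True = paused
    with gr.Row():
        send = gr.Button("✨ Send Message")
        pause = gr.Button("⏸ Pause", variant="secondary")
    pause.click(toggle_pause, inputs=[pause_state, chatbot],
                outputs=[pause_state, chatbot, pause, send])

demo.launch()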