akhaliq (HF Staff) committed on
Commit 9f0ab40 (verified) · Parent: 000dbdd

Update app.py

Files changed (1):
  1. app.py (+30, -10)
app.py CHANGED
@@ -54,21 +54,31 @@ def tuples_from_messages(messages: List[Any]) -> List[List[str]]:
         return []
     # Already tuples-like
     if isinstance(messages[0], (list, tuple)) and len(messages[0]) == 2:
-        return [list(x) for x in messages]
+        out: List[List[str]] = []
+        for x in messages:
+            try:
+                a, b = x[0], x[1]
+            except Exception:
+                continue
+            out.append([str(a) if a is not None else "", str(b) if b is not None else ""])
+        return out
 
     # Convert from messages-style
     pairs: List[List[str]] = []
     last_user: Optional[str] = None
     for m in messages:
+        if not isinstance(m, dict):
+            # Skip any stray items
+            continue
         role = m.get("role")
         content = m.get("content", "")
         if role == "user":
-            last_user = content
+            last_user = str(content)
         elif role == "assistant":
             if last_user is None:
-                pairs.append(["", content])
+                pairs.append(["", str(content)])
             else:
-                pairs.append([last_user, content])
+                pairs.append([last_user, str(content)])
             last_user = None
     if last_user is not None:
         pairs.append([last_user, ""])
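Below, a quick sanity check of the hardened normalizer (a sketch, not part of the commit; it assumes tuples_from_messages and its typing imports are in scope, e.g. pasted into a REPL):

# Sketch: stray non-dict items are skipped, None halves are coerced to "".
mixed = [
    {"role": "user", "content": "hi"},
    "stray item",  # dropped by the new isinstance(m, dict) guard
    {"role": "assistant", "content": "hello"},
]
assert tuples_from_messages(mixed) == [["hi", "hello"]]
assert tuples_from_messages([("q", None)]) == [["q", ""]]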
@@ -81,7 +91,13 @@ def messages_from_tuples(history_tuples: List[List[str]]) -> List[Dict[str, str]]:
     [{"role": "user", ...}, {"role": "assistant", ...}, ...]
    """
    messages: List[Dict[str, str]] = []
-    for u, a in history_tuples:
+    for pair in history_tuples:
+        if not isinstance(pair, (list, tuple)) or len(pair) != 2:
+            # Skip malformed entries defensively
+            continue
+        u, a = pair
+        u = "" if u is None else str(u)
+        a = "" if a is None else str(a)
         if u:
             messages.append({"role": "user", "content": u})
         if a:
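The converse helper gets the same defensive treatment; a sketch under the same in-scope assumption:

# Sketch: malformed rows are skipped; None halves become "" and are omitted.
history = [
    ["hi", "hello"],
    "not-a-pair",            # fails the isinstance/length guard
    [None, "orphan reply"],  # empty user half is dropped, reply kept
]
assert messages_from_tuples(history) == [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "hello"},
    {"role": "assistant", "content": "orphan reply"},
]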
@@ -235,18 +251,20 @@ def clear_chat():
 
 def chat_fn(message, history, system_prompt, temperature, top_p):
     """Non-streaming chat handler (returns tuples)."""
-    history = tuples_from_messages(history)
+    history = tuples_from_messages(history or [])
     if not chat_model.model_loaded:
         return history + [[message, "Please wait for the model to load or reload the space."]]
 
     formatted_history = messages_from_tuples(history)
     response = chat_model.generate_once(message, formatted_history, system_prompt, temperature, MAX_NEW_TOKENS, top_p)
-    return history + [[message, response]]
+
+    # Always return strict [[str, str], ...]
+    return tuples_from_messages(history + [[message, response]])
 
 
 def chat_stream_fn(message, history, system_prompt, temperature, top_p):
     """Streaming chat handler: yields updated tuples as tokens arrive."""
-    history = tuples_from_messages(history)
+    history = tuples_from_messages(history or [])
     if not chat_model.model_loaded:
         yield history + [[message, "Please wait for the model to load or reload the space."]]
         return
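With the new "history or []" guard the handlers no longer assume Gradio hands them a list, and the final tuples_from_messages pass guarantees strict pairs on the way out. A sketch, assuming a loaded chat_model:

# Sketch: messages-style (or None) history is accepted; output is strict pairs.
out = chat_fn(
    "And 3+3?",
    [{"role": "user", "content": "2+2?"}, {"role": "assistant", "content": "4"}],
    "You are concise.",
    0.2,
    0.9,
)
assert all(isinstance(p, list) and len(p) == 2 for p in out)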
@@ -256,7 +274,8 @@ def chat_stream_fn(message, history, system_prompt, temperature, top_p):
     # Start a new row for the assistant and fill progressively
     base = history + [[message, ""]]
     for chunk in chat_model.stream_generate(message, formatted_history, system_prompt, temperature, MAX_NEW_TOKENS, top_p):
-        yield base[:-1] + [[message, chunk]]
+        yield tuples_from_messages(base[:-1] + [[message, chunk]])
+    # Final state already yielded
     # Ensure completion (in case streamer ended exactly on boundary)
     # No extra yield needed; last chunk already yielded.
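Every streamed yield now passes through tuples_from_messages as well, so consumers always see a complete, normalized snapshot. A sketch under the same assumptions:

# Sketch: each yield is a full history snapshot; the last one is final.
last = None
for snapshot in chat_stream_fn("Tell me a joke", [], "", 0.7, 0.9):
    last = snapshot
print(last[-1][1])  # final assistant text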
@@ -334,6 +353,7 @@ with gr.Blocks(
 
     chatbot = gr.Chatbot(
         type="tuples",
+        value=[],  # ensure initial value is a list of [user, assistant]
         label="Chat History",
         height=500,
         show_copy_button=True,
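In tuples mode the Chatbot value is a list of [user, assistant] pairs, so value=[] pins the empty state explicitly instead of relying on the component default. A minimal sketch (assumes a Gradio version that still accepts type="tuples"):

import gradio as gr

# Sketch: the value shape a tuples-mode Chatbot expects.
cb = gr.Chatbot(type="tuples", value=[["hi", "hello"]], label="Chat History")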
@@ -391,4 +411,4 @@ with gr.Blocks(
 demo.queue()
 
 if __name__ == "__main__":
-    demo.launch(show_error=True, debug=True)
+    demo.launch(show_error=True, debug=True)
 