rdune71 commited on
Commit
6dabfc1
·
verified ·
1 Parent(s): e2a70cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -3
app.py CHANGED
@@ -236,6 +236,12 @@ def generate_with_streaming(messages, model, max_tokens=8192, temperature=0.7, t
236
  except Exception as e:
237
  yield f"Connection error: {str(e)}"
238
 
 
 
 
 
 
 
239
  def respond(message, chat_history, model_choice, max_tokens, temperature, top_p, creativity, precision, system_prompt, use_web_search):
240
  """Main response handler with conversation history"""
241
  if not message:
@@ -252,7 +258,7 @@ def respond(message, chat_history, model_choice, max_tokens, temperature, top_p,
252
  chat_history = [system_message] + chat_history
253
 
254
  # Check if the message contains search results that need analysis
255
- if "SEARCH RESULTS" in message:
256
  # This is search results that need analysis
257
  # Extract the original query and search results
258
  lines = message.split('\n')
@@ -263,6 +269,8 @@ def respond(message, chat_history, model_choice, max_tokens, temperature, top_p,
263
  query = first_line.split("'")[1]
264
  else:
265
  query = message[:100] # Fallback
 
 
266
 
267
  # Perform analysis
268
  analysis_prompt = analyze_search_results(query, message)
@@ -284,8 +292,40 @@ def respond(message, chat_history, model_choice, max_tokens, temperature, top_p,
284
  # Always perform search if web search is enabled
285
  if use_web_search:
286
  search_result = perform_search(message)
287
- yield "", chat_history + [user_message, {"role": "assistant", "content": search_result}], search_result
288
- return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
289
 
290
  # Normal flow - generate response
291
  current_history = chat_history + [user_message]
 
236
  except Exception as e:
237
  yield f"Connection error: {str(e)}"
238
 
239
def is_news_related_query(query):
    """Return True when *query* looks like a request for news content.

    The check is a simple case-insensitive substring match against a
    fixed set of news-flavored keywords.
    """
    news_keywords = ['news', 'headline', 'breaking', 'latest', 'today', 'current event', 'update', 'report']
    lowered = query.lower()
    for keyword in news_keywords:
        if keyword in lowered:
            return True
    return False
244
+
245
  def respond(message, chat_history, model_choice, max_tokens, temperature, top_p, creativity, precision, system_prompt, use_web_search):
246
  """Main response handler with conversation history"""
247
  if not message:
 
258
  chat_history = [system_message] + chat_history
259
 
260
  # Check if the message contains search results that need analysis
261
+ if "SEARCH RESULTS" in message or "SEARCH RESULTS" in message:
262
  # This is search results that need analysis
263
  # Extract the original query and search results
264
  lines = message.split('\n')
 
269
  query = first_line.split("'")[1]
270
  else:
271
  query = message[:100] # Fallback
272
+ else:
273
+ query = "news summary"
274
 
275
  # Perform analysis
276
  analysis_prompt = analyze_search_results(query, message)
 
292
  # Always perform search if web search is enabled
293
  if use_web_search:
294
  search_result = perform_search(message)
295
+
296
+ # If this is a news-related query, automatically analyze the results
297
+ if is_news_related_query(message):
298
+ # Extract the original query for analysis
299
+ lines = search_result.split('\n')
300
+ if len(lines) > 2:
301
+ first_line = lines[0]
302
+ if "'" in first_line:
303
+ query = first_line.split("'")[1]
304
+ else:
305
+ query = message
306
+ else:
307
+ query = message
308
+
309
+ # Perform analysis of the search results
310
+ analysis_prompt = analyze_search_results(query, search_result)
311
+
312
+ # Create history with analysis prompt
313
+ analysis_history = chat_history + [user_message, {"role": "assistant", "content": search_result}, {"role": "user", "content": analysis_prompt}]
314
+
315
+ # Generate analyzed response
316
+ full_response = ""
317
+ search_results_output = search_result # Store raw search results
318
+
319
+ for chunk in generate_with_streaming(analysis_history, model_choice, max_tokens, temperature * creativity, top_p * precision):
320
+ if isinstance(chunk, str):
321
+ full_response = chunk
322
+ # Stream both the analysis and raw search results
323
+ yield "", chat_history + [user_message, {"role": "assistant", "content": search_result}, {"role": "assistant", "content": full_response}], search_results_output
324
+ return
325
+ else:
326
+ # Non-news search, just return the search results
327
+ yield "", chat_history + [user_message, {"role": "assistant", "content": search_result}], search_result
328
+ return
329
 
330
  # Normal flow - generate response
331
  current_history = chat_history + [user_message]