rdune71 committed on
Commit
7b5f176
·
1 Parent(s): ad2999f

Finalize backend integration with chat endpoint and Redis memory

Browse files
Files changed (2) hide show
  1. api/chat.py +11 -6
  2. app.py +1 -1
api/chat.py CHANGED
@@ -22,13 +22,18 @@ async def chat(user_id: str, message: str):
22
 
23
  # Generate AI response
24
  try:
25
- response_stream = llm_client.generate(
26
- prompt=message,
27
- stream=True
28
- )
29
 
30
- # Stream response back
31
- return StreamingResponse(response_stream, media_type="text/event-stream")
 
 
 
 
 
 
 
32
 
33
  except Exception as e:
34
  raise HTTPException(status_code=500, detail=f"LLM generation failed: {e}")
 
22
 
23
  # Generate AI response
24
  try:
25
+ full_response = ""
26
+ response_stream = llm_client.generate(prompt=message, stream=True)
 
 
27
 
28
+ # Collect streamed response
29
+ for chunk in response_stream:
30
+ full_response += chunk
31
+
32
+ # Save updated conversation
33
+ conversation_history.append({"role": "assistant", "content": full_response})
34
+ save_user_state(user_id, {"conversation": json.dumps(conversation_history)})
35
+
36
+ return {"response": full_response}
37
 
38
  except Exception as e:
39
  raise HTTPException(status_code=500, detail=f"LLM generation failed: {e}")
app.py CHANGED
@@ -51,7 +51,7 @@ else:
51
  json={"user_id": user, "message": user_input}
52
  )
53
  if response.status_code == 200:
54
- ai_response = response.text # Adjust based on actual response format
55
  st.markdown(f"**AI Coach:** {ai_response}")
56
  else:
57
  st.error("Failed to get response from AI Coach.")
 
51
  json={"user_id": user, "message": user_input}
52
  )
53
  if response.status_code == 200:
54
+ ai_response = response.text
55
  st.markdown(f"**AI Coach:** {ai_response}")
56
  else:
57
  st.error("Failed to get response from AI Coach.")