Finalize backend integration with chat endpoint and Redis memory
Files changed:

- api/chat.py (+11 -6)
- app.py (+1 -1)
api/chat.py

@@ -22,13 +22,18 @@ async def chat(user_id: str, message: str):
 
     # Generate AI response
     try:
-
-
-            stream=True
-        )
+        full_response = ""
+        response_stream = llm_client.generate(prompt=message, stream=True)
 
-        #
-
+        # Collect streamed response
+        for chunk in response_stream:
+            full_response += chunk
+
+        # Save updated conversation
+        conversation_history.append({"role": "assistant", "content": full_response})
+        save_user_state(user_id, {"conversation": json.dumps(conversation_history)})
+
+        return {"response": full_response}
 
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"LLM generation failed: {e}")
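For context, a minimal sketch of how the full endpoint could look after this change. It assumes a FastAPI router, an llm_client wrapper exposing generate(prompt=..., stream=True), and Redis-backed get_user_state / save_user_state helpers; none of those definitions appear in this diff, so every name outside the try block is an assumption, not the author's code.

import json

from fastapi import APIRouter, HTTPException

from .llm import llm_client                          # assumed LLM client wrapper
from .memory import get_user_state, save_user_state  # assumed Redis-backed helpers

router = APIRouter()


@router.post("/chat")
async def chat(user_id: str, message: str):
    # NOTE: with plain str parameters FastAPI reads user_id and message as query
    # parameters, not from the JSON body that app.py posts; a Pydantic request
    # model would be needed for the handler to accept that body as-is.
    # Load prior conversation from Redis (assumed layout: {"conversation": "<json list>"}).
    state = get_user_state(user_id) or {}
    conversation_history = json.loads(state.get("conversation", "[]"))
    conversation_history.append({"role": "user", "content": message})

    # Generate AI response
    try:
        full_response = ""
        response_stream = llm_client.generate(prompt=message, stream=True)

        # Collect streamed response
        for chunk in response_stream:
            full_response += chunk

        # Save updated conversation
        conversation_history.append({"role": "assistant", "content": full_response})
        save_user_state(user_id, {"conversation": json.dumps(conversation_history)})

        return {"response": full_response}

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"LLM generation failed: {e}")

The save_user_state call is what the commit title refers to as Redis memory. A possible shape for those helpers, assuming the redis package with one hash per user (all names and the key layout here are assumptions, not code from this commit):

import redis

# Assumed connection settings; decode_responses=True returns str values instead of bytes.
redis_client = redis.Redis(host="localhost", port=6379, decode_responses=True)


def save_user_state(user_id: str, state: dict) -> None:
    # Write each field of the state dict into a per-user Redis hash.
    redis_client.hset(f"user:{user_id}:state", mapping=state)


def get_user_state(user_id: str) -> dict:
    # Read the hash back as a plain dict; empty dict if the user has no state yet.
    return redis_client.hgetall(f"user:{user_id}:state")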
app.py

@@ -51,7 +51,7 @@ else:
         json={"user_id": user, "message": user_input}
     )
     if response.status_code == 200:
-        ai_response = response.text
+        ai_response = response.text
         st.markdown(f"**AI Coach:** {ai_response}")
     else:
         st.error("Failed to get response from AI Coach.")
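Since the updated endpoint returns a JSON object of the form {"response": ...}, the Streamlit side would typically parse that body rather than show the raw text (which would include the JSON envelope). A hedged sketch of how the call site might read it; the URL, the stand-in values for user and user_input, and the key name are assumptions that merely mirror the diffs above:

import requests
import streamlit as st

# Stand-ins for whatever the surrounding Streamlit form provides (not shown in this diff).
user = "demo-user"
user_input = "Hello, coach!"

# Hypothetical endpoint URL; payload mirrors the diff above.
response = requests.post(
    "http://localhost:8000/chat",
    json={"user_id": user, "message": user_input},
)
if response.status_code == 200:
    # The endpoint returns {"response": full_response}, so parse the JSON body.
    ai_response = response.json().get("response", "")
    st.markdown(f"**AI Coach:** {ai_response}")
else:
    st.error("Failed to get response from AI Coach.")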