Update app.py
Browse files
app.py
CHANGED
|
@@ -122,85 +122,38 @@ def serve_frontend():
|
|
| 122 |
except Exception:
|
| 123 |
return "<h3>frontend.html not found in static/ — please add your frontend.html there.</h3>", 404
|
| 124 |
|
| 125 |
-
@app.route("/chat", methods=["POST"])
|
| 126 |
def chat():
|
| 127 |
data = request.get_json(force=True)
|
| 128 |
-
|
| 129 |
-
|
| 130 |
|
| 131 |
-
|
| 132 |
-
|
| 133 |
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
}
|
| 139 |
-
|
| 140 |
-
llm_messages = [{"role": "system", "content": PROGRAMMING_ASSISTANT_PROMPT}]
|
| 141 |
-
|
| 142 |
-
last_user_message = ""
|
| 143 |
-
for msg in chat_history:
|
| 144 |
-
role = msg.get("role")
|
| 145 |
-
content = msg.get("content")
|
| 146 |
-
if role in ["user", "assistant"] and content:
|
| 147 |
-
llm_messages.append({"role": role, "content": content})
|
| 148 |
-
if role == "user":
|
| 149 |
-
last_user_message = content
|
| 150 |
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
| 154 |
-
state["language"] = detected_lang
|
| 155 |
|
| 156 |
-
|
| 157 |
-
|
| 158 |
-
llm_messages[-1]["content"] = f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]"
|
| 159 |
-
elif last_user_message:
|
| 160 |
-
llm_messages.append({"role": "user", "content": f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]"})
|
| 161 |
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
llm_response = llm.invoke(llm_messages)
|
| 165 |
-
raw_response = llm_response.content if hasattr(llm_response, "content") else str(llm_response)
|
| 166 |
-
logger.info(f"Raw LLM response: {raw_response}")
|
| 167 |
-
parsed_result = extract_json_from_llm_response(raw_response)
|
| 168 |
-
except Exception as e:
|
| 169 |
-
logger.exception("LLM invocation failed")
|
| 170 |
-
error_detail = str(e)
|
| 171 |
-
if 'decommissioned' in error_detail:
|
| 172 |
-
error_detail = "LLM Model Error: The model is likely decommissioned. Please check the 'LLM_MODEL' environment variable or the default model in app.py."
|
| 173 |
-
return jsonify({"error": "LLM invocation failed", "detail": error_detail}), 500
|
| 174 |
-
|
| 175 |
-
if parsed_result.get("assistant_reply") == LLM_PARSE_ERROR_MESSAGE:
|
| 176 |
-
return jsonify({
|
| 177 |
-
"assistant_reply": LLM_PARSE_ERROR_MESSAGE,
|
| 178 |
-
"updated_state": state,
|
| 179 |
-
"suggested_tags": [],
|
| 180 |
-
})
|
| 181 |
-
|
| 182 |
-
updated_state_from_llm = parsed_result.get("state_updates", {})
|
| 183 |
-
if 'conversationSummary' in updated_state_from_llm:
|
| 184 |
-
state["conversationSummary"] = updated_state_from_llm["conversationSummary"]
|
| 185 |
-
if 'language' in updated_state_from_llm and updated_state_from_llm['language'].strip():
|
| 186 |
-
state["language"] = updated_state_from_llm["language"]
|
| 187 |
-
|
| 188 |
-
assistant_reply = parsed_result.get("assistant_reply")
|
| 189 |
-
code_snippet = parsed_result.get("code_snippet")
|
| 190 |
-
|
| 191 |
-
final_reply_content = assistant_reply
|
| 192 |
-
if code_snippet and code_snippet.strip():
|
| 193 |
-
if final_reply_content.strip():
|
| 194 |
-
final_reply_content += "\n\n"
|
| 195 |
-
final_reply_content += code_snippet
|
| 196 |
-
|
| 197 |
-
if not final_reply_content.strip():
|
| 198 |
-
final_reply_content = "I'm here to help with your code! What programming language are you using?"
|
| 199 |
|
|
|
|
| 200 |
return jsonify({
|
| 201 |
-
"assistant_reply":
|
| 202 |
-
"updated_state":
|
| 203 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 204 |
})
|
| 205 |
|
| 206 |
@app.route("/tag_reply", methods=["POST"])
|
|
|
|
| 122 |
except Exception:
|
| 123 |
return "<h3>frontend.html not found in static/ — please add your frontend.html there.</h3>", 404
|
| 124 |
|
|
|
|
| 125 |
@app.route("/chat", methods=["POST"])  # restored: the diff removed the old decorator without re-adding it, leaving /chat unregistered
def chat():
    """Handle one chat turn.

    Reads the client-supplied chat history and assistant state from the JSON
    body, builds a system prompt from the current language and conversation
    summary, invokes the LLM, and returns the plain-text reply together with
    the updated state and history.

    Request JSON:
        chat_history (list): prior messages as {"role", "content"} dicts.
        assistant_state (dict): {"conversationSummary", "language",
            "taggedReplies"} — all optional.

    Returns:
        JSON with "assistant_reply", "updated_state", "chat_history";
        500 with {"error", "detail"} if the LLM call fails.
    """
    # force=True parses even without a JSON content-type; it can still yield
    # None for an empty body, so fall back to an empty dict.
    data = request.get_json(force=True) or {}
    chat_history = data.get("chat_history", [])
    assistant_state = data.get("assistant_state", {})

    conversation_summary = assistant_state.get("conversationSummary", "")
    language = assistant_state.get("language", "Python")

    # Build prompt with system + conversation summary + chat history
    system_prompt = (
        f"You are a helpful programming assistant. Current language: {language}. "
        f"Conversation summary: {conversation_summary}"
    )
    messages = [{"role": "system", "content": system_prompt}]
    messages.extend(chat_history)

    # Call LLM, get plain text response. The previous revision guarded this
    # call and returned a structured 500; keep that behavior instead of
    # letting the exception surface as an unhandled server error.
    try:
        llm_response = llm.invoke(messages)
    except Exception as e:
        logger.exception("LLM invocation failed")
        return jsonify({"error": "LLM invocation failed", "detail": str(e)}), 500

    # Some LLM clients return a message object with .content, others a str.
    assistant_reply = llm_response.content if hasattr(llm_response, "content") else str(llm_response)

    # Append assistant reply to chat history
    chat_history.append({"role": "assistant", "content": assistant_reply})

    # Optionally update conversation summary (e.g., call summarization chain)
    # NOTE(review): update_summary is defined elsewhere in this project —
    # presumably it summarizes the full history; confirm it handles an
    # empty/short history.
    conversation_summary = update_summary(chat_history)

    # Return plain text reply and updated state
    return jsonify({
        "assistant_reply": assistant_reply,
        "updated_state": {
            "conversationSummary": conversation_summary,
            "language": language,
            "taggedReplies": assistant_state.get("taggedReplies", []),
        },
        "chat_history": chat_history,
    })
|
| 158 |
|
| 159 |
@app.route("/tag_reply", methods=["POST"])
|