File size: 11,248 Bytes
ceaa691 0c538c0 ceaa691 42e73f2 ceaa691 2d33bc7 0c538c0 5c0eb1b 42e73f2 ceaa691 2d33bc7 42e73f2 ceaa691 42e73f2 0c538c0 42e73f2 f85b1a5 42e73f2 6f648c9 f85b1a5 42e73f2 ceaa691 f85b1a5 42e73f2 ceaa691 2d33bc7 47c5ad7 0c538c0 42e73f2 f85b1a5 0c538c0 42e73f2 f85b1a5 42e73f2 f85b1a5 ceaa691 42e73f2 f85b1a5 0c538c0 2d33bc7 42e73f2 f85b1a5 42e73f2 0c538c0 f85b1a5 42e73f2 0c538c0 f85b1a5 42e73f2 0c538c0 f85b1a5 42e73f2 ffd1ea4 ceaa691 42e73f2 0c538c0 42e73f2 2d33bc7 f85b1a5 2d33bc7 0c538c0 2d33bc7 f85b1a5 ceaa691 42e73f2 ceaa691 0c538c0 42e73f2 f85b1a5 ceaa691 2d33bc7 42e73f2 5c0eb1b ceaa691 42e73f2 b2ae2a5 2d33bc7 0c538c0 2d33bc7 ffd1ea4 2d33bc7 0c538c0 2d33bc7 ceaa691 0c538c0 2d33bc7 f85b1a5 947aef7 42e73f2 5c0eb1b ceaa691 42e73f2 0c538c0 42e73f2 0c538c0 2d33bc7 f85b1a5 42e73f2 f85b1a5 5c0eb1b ffd1ea4 42e73f2 04342e7 2d33bc7 5b29aec 2d33bc7 5b29aec 2d33bc7 5b29aec 2d33bc7 5b29aec 2d33bc7 ceaa691 10400ea 0c538c0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 |
#!/usr/bin/env python3
import os
import json
import logging
import re
from typing import Dict, Any, List, Optional
from pathlib import Path
from flask import Flask, request, jsonify
from flask_cors import CORS
from dotenv import load_dotenv
from werkzeug.utils import secure_filename
from langchain_groq import ChatGroq
from typing_extensions import TypedDict
# --- Type Definitions for State Management ---

# One assistant reply the user has saved under one or more tags.
TaggedReply = TypedDict("TaggedReply", {"reply": str, "tags": List[str]})

# Conversational state round-tripped between frontend and backend.
# lastUserMessage is derived per request from chat_history, never stored here.
AssistantState = TypedDict(
    "AssistantState",
    {
        "conversationSummary": str,
        "language": str,
        "taggedReplies": List[TaggedReply],
    },
)
# --- Logging ---
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")
logger = logging.getLogger("code-assistant")

# --- Load environment ---
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
if not GROQ_API_KEY:
    # Fail fast: every /chat request needs the Groq key, so refusing to start
    # is clearer than failing on the first LLM call.
    logger.error("GROQ_API_KEY not set in environment")
    # For deployment, consider raising an exception instead of exiting:
    # raise ValueError("GROQ_API_KEY not set in environment")
    exit(1)

# --- Flask app setup ---
# Frontend assets (frontend.html) are served from ./static next to this file.
BASE_DIR = Path(__file__).resolve().parent
static_folder = BASE_DIR / "static"
app = Flask(__name__, static_folder=str(static_folder), static_url_path="/static")
CORS(app)  # NOTE(review): default CORS allows all origins — confirm intended for production

# --- LLM setup ---
# Model is overridable via the LLM_MODEL env var.
llm = ChatGroq(
    model=os.getenv("LLM_MODEL", "meta-llama/llama-4-scout-17b-16e-instruct"),
    temperature=0.1,  # Set a lower, deterministic temperature
    max_tokens=2048,  # Ensure max_tokens is set to avoid truncation
    api_key=GROQ_API_KEY,
)
# System prompt for every /chat call. It instructs the model to answer as a
# programming assistant and to emit exactly one fenced JSON object, which is
# parsed downstream by extract_json_from_llm_response.
PROGRAMMING_ASSISTANT_PROMPT = """
You are an expert programming assistant. Your role is to provide code suggestions, fix bugs, explain programming concepts, and offer contextual help based on the user's query and preferred programming language.
**CONTEXT HANDLING RULES (Follow these strictly):**
- **Conversation Summary:** At the end of every response, you MUST provide an updated, concise `conversationSummary` based on the entire chat history provided. This summary helps you maintain context.
- **Language Adaptation:** Adjust your suggestions, code, and explanations to the programming language specified in the 'language' field of the 'AssistantState'.
STRICT OUTPUT FORMAT (JSON ONLY):
Return a single JSON object with the following keys. **The JSON object MUST be enclosed in a single ```json block.**
- assistant_reply: string // A natural language reply to the user (short and helpful). Do NOT include code blocks here.
- code_snippet: string // If suggesting code, provide it here in a markdown code block (e.g., ```python\\nprint('Hello')\\n```). If no code is required, use an empty string: "".
- state_updates: object // updates to the internal state, must include: language, conversationSummary
- suggested_tags: array of strings // a list of 1-3 relevant tags for the assistant_reply
Rules:
- ALWAYS include all four top-level keys: `assistant_reply`, `code_snippet`, `state_updates`, and `suggested_tags`.
- ALWAYS include `assistant_reply` as a non-empty string.
- Do NOT produce any text outside the JSON block.
"""
def extract_json_from_llm_response(raw_response: str) -> dict:
    """Parse the LLM's fenced-JSON reply into a validated dict.

    Tolerates common LLM formatting slips: a missing ```json fence, prose
    surrounding the JSON object, and trailing commas.  Always returns a dict
    containing the four keys the /chat route consumes (assistant_reply,
    code_snippet, state_updates, suggested_tags) with values coerced to the
    expected types, falling back to a safe default on any parse failure.
    """
    log = logging.getLogger("code-assistant")
    default = {
        "assistant_reply": "I'm sorry, I couldn't process the response correctly. Could you please rephrase?",
        "code_snippet": "",
        "state_updates": {"conversationSummary": "", "language": "Python"},
        "suggested_tags": [],
    }
    if not raw_response or not isinstance(raw_response, str):
        return default

    # Prefer the content of the first ```json fence; fall back to raw text.
    m = re.search(r"```json\s*([\s\S]*?)\s*```", raw_response)
    json_string = m.group(1).strip() if m else raw_response

    # Trim to the outermost {...} so prose around the object doesn't break parsing.
    first = json_string.find('{')
    last = json_string.rfind('}')
    candidate = json_string[first:last + 1] if first != -1 and last != -1 and first < last else json_string

    # Strip trailing commas (",}" / ",]"), a frequent LLM JSON error.
    candidate = re.sub(r',\s*(?=[}\]])', '', candidate)
    try:
        parsed = json.loads(candidate)
    except (ValueError, TypeError) as e:  # json.loads raises ValueError subclasses
        log.warning("Failed to parse JSON from LLM output: %s. Candidate: %s", e, candidate)
        return default

    if not isinstance(parsed, dict) or "assistant_reply" not in parsed:
        log.warning("Parsed JSON missing 'assistant_reply' or invalid format. Returning default.")
        return default

    # Coerce every field to the type the /chat route expects; the model may
    # emit null or the wrong JSON type for any of them (a null reply used to
    # crash at .strip() below).
    reply = parsed.get("assistant_reply")
    if not isinstance(reply, str):
        reply = "" if reply is None else str(reply)
    parsed["assistant_reply"] = reply
    snippet = parsed.get("code_snippet")
    parsed["code_snippet"] = snippet if isinstance(snippet, str) else ""
    if not isinstance(parsed.get("state_updates"), dict):
        parsed["state_updates"] = {}
    parsed["state_updates"].setdefault("conversationSummary", "")
    parsed["state_updates"].setdefault("language", "Python")
    if not isinstance(parsed.get("suggested_tags"), list):
        parsed["suggested_tags"] = []

    # Never return an empty reply string to the frontend.
    if not parsed["assistant_reply"].strip():
        parsed["assistant_reply"] = "I need a clearer instruction to provide a reply."
    return parsed
def detect_language_from_text(text: str) -> Optional[str]:
    """Return the canonical name of a programming language mentioned in *text*.

    Looks for phrases like "in python", "using c++", "for go" and returns the
    language's display name (e.g. "JavaScript", "C++", "PHP"), or None when no
    known language is mentioned.
    """
    if not text:
        return None
    # Map lowercase tokens to canonical display names; .capitalize() would
    # produce "Javascript"/"Php"/"Typescript".  "javascript" is listed before
    # "java" so the longer alternative wins in the regex alternation.
    display_names = {
        "python": "Python", "javascript": "JavaScript", "java": "Java",
        "c++": "C++", "c#": "C#", "go": "Go", "ruby": "Ruby",
        "php": "PHP", "typescript": "TypeScript", "swift": "Swift",
    }
    # re.escape is required: "c++" and "c#" contain regex metacharacters
    # (unescaped, "c++" is a quantifier and "in c++" matched only "c").
    alternation = '|'.join(re.escape(token) for token in display_names)
    # A lookahead ends the match instead of \b, because \b can never succeed
    # between a non-word character ("+", "#") and following whitespace.
    pattern = r'\b(?:in|using|for)\s+(' + alternation + r')(?=\W|$)'
    match = re.search(pattern, text.lower())
    return display_names[match.group(1)] if match else None
# --- Flask routes ---
@app.route("/", methods=["GET"])
def serve_frontend():
    """Serve the single-page frontend, or a 404 hint when it is missing."""
    missing_hint = "<h3>frontend.html not found in static/ — please add your frontend.html there.</h3>"
    try:
        return app.send_static_file("frontend.html")
    except Exception:  # send_static_file raises when the file is absent
        return missing_hint, 404
@app.route("/chat", methods=["POST"])
def chat():
    """Run one turn of the programming-assistant conversation.

    Expects JSON: {"chat_history": [{"role": ..., "content": ...}, ...],
    "assistant_state": {...}}.  Sends the full history plus a context hint to
    the LLM, parses its structured reply, and returns the combined reply text,
    the updated assistant state, and suggested tags.
    """
    data = request.get_json(force=True)
    if not isinstance(data, dict):
        return jsonify({"error": "invalid request body"}), 400
    chat_history: List[Dict[str, str]] = data.get("chat_history") or []
    assistant_state: AssistantState = data.get("assistant_state") or {}

    # Normalize incoming state so every key exists with a sane default.
    state: AssistantState = {
        "conversationSummary": assistant_state.get("conversationSummary", ""),
        "language": assistant_state.get("language", "Python"),
        "taggedReplies": assistant_state.get("taggedReplies", []),
    }

    # 1. Prepare LLM messages from the full history; track the latest user turn.
    llm_messages = [{"role": "system", "content": PROGRAMMING_ASSISTANT_PROMPT}]
    last_user_message = ""
    for msg in chat_history:
        role = msg.get("role")
        content = msg.get("content")
        if role in ("user", "assistant") and content:
            llm_messages.append({"role": role, "content": content})
            if role == "user":
                last_user_message = content

    # 2. Language detection & state update.  Guard against a non-string
    # language arriving from the client, which would crash .lower().
    detected_lang = detect_language_from_text(last_user_message)
    current_lang = state["language"] if isinstance(state["language"], str) else "Python"
    if detected_lang and detected_lang.lower() != current_lang.lower():
        logger.info("Detected new language: %s", detected_lang)
        state["language"] = detected_lang

    # 3. Inject the context hint and state into the LAST user message so the
    # model sees the current language and summary next to the newest request.
    context_hint = f"Current Language: {state['language']}. Conversation Summary so far: {state['conversationSummary']}"
    hinted_message = f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]"
    if llm_messages and llm_messages[-1]["role"] == "user":
        llm_messages[-1]["content"] = hinted_message
    elif last_user_message:
        llm_messages.append({"role": "user", "content": hinted_message})

    try:
        logger.info("Invoking LLM with full history and prepared prompt...")
        llm_response = llm.invoke(llm_messages)
        raw_response = llm_response.content if hasattr(llm_response, "content") else str(llm_response)
        logger.info("Raw LLM response: %s", raw_response)  # lazy %-args, not f-string
        parsed_result = extract_json_from_llm_response(raw_response)
    except Exception as e:
        logger.exception("LLM invocation failed")
        error_detail = str(e)
        if 'decommissioned' in error_detail:
            error_detail = "LLM Model Error: The model is likely decommissioned. Please check the 'LLM_MODEL' environment variable or the default model in app.py."
        return jsonify({"error": "LLM invocation failed", "detail": error_detail}), 500

    # 4. Merge state updates proposed by the LLM.  Only accept string values:
    # a null/object language here would break .lower() on the next request.
    updated_state_from_llm = parsed_result.get("state_updates", {})
    if isinstance(updated_state_from_llm, dict):
        if isinstance(updated_state_from_llm.get("conversationSummary"), str):
            state["conversationSummary"] = updated_state_from_llm["conversationSummary"]
        if isinstance(updated_state_from_llm.get("language"), str):
            state["language"] = updated_state_from_llm["language"]

    # 5. Combine reply and code snippet into one string, since the frontend
    # expects the code inside assistant_reply.  Coerce defensively: the LLM
    # may emit null or a non-string for either field.
    assistant_reply = parsed_result.get("assistant_reply")
    code_snippet = parsed_result.get("code_snippet")
    final_reply_content = assistant_reply if isinstance(assistant_reply, str) else ""
    if isinstance(code_snippet, str) and code_snippet.strip():
        if final_reply_content.strip():
            final_reply_content += "\n\n"  # clean separation between prose and code
        final_reply_content += code_snippet
    if not final_reply_content.strip():
        final_reply_content = "I'm here to help with your code! What programming language are you using?"

    response_payload = {
        "assistant_reply": final_reply_content,  # combined reply + code
        "updated_state": state,
        "suggested_tags": parsed_result.get("suggested_tags", []),
    }
    return jsonify(response_payload)
@app.route("/tag_reply", methods=["POST"])
def tag_reply():
    """Save an assistant reply under user-chosen tags in the assistant state.

    Expects JSON: {"reply": str, "tags": [str, ...], "assistant_state": {...}}.
    Returns the updated state; persisting it is the client's responsibility.
    """
    data = request.get_json(force=True)
    if not isinstance(data, dict):
        return jsonify({"error": "invalid request body"}), 400
    reply_content = data.get("reply")
    tags = data.get("tags")
    assistant_state: AssistantState = data.get("assistant_state") or {}
    if not reply_content or not tags:
        return jsonify({"error": "Missing 'reply' or 'tags' in request"}), 400
    # Require a real sequence: iterating a bare string here would silently
    # split it into one tag per character.
    if not isinstance(tags, (list, tuple)):
        return jsonify({"error": "'tags' must be a list of strings"}), 400
    tags = [str(t).strip() for t in tags if str(t).strip()]
    if not tags:
        return jsonify({"error": "Tags list cannot be empty"}), 400

    # Normalize incoming state so every key exists with a sane default.
    state: AssistantState = {
        "conversationSummary": assistant_state.get("conversationSummary", ""),
        "language": assistant_state.get("language", "Python"),
        "taggedReplies": assistant_state.get("taggedReplies", []),
    }
    new_tagged_reply: TaggedReply = {
        "reply": reply_content,
        "tags": tags,
    }
    state["taggedReplies"].append(new_tagged_reply)
    logger.info("Reply tagged with: %s", tags)
    return jsonify({
        "message": "Reply saved and tagged successfully.",
        "updated_state": state,
    }), 200
@app.route("/ping", methods=["GET"])
def ping():
    """Lightweight health-check endpoint."""
    return jsonify(status="ok")
if __name__ == "__main__":
    # Port is configurable via the PORT env var; defaults to 7860.
    port = int(os.getenv("PORT", 7860))
    # NOTE(review): debug=True enables the Werkzeug reloader and interactive
    # debugger — confirm this is disabled for production deployments.
    app.run(host="0.0.0.0", port=port, debug=True)