|
|
|
|
|
import os |
|
|
import json |
|
|
import logging |
|
|
import re |
|
|
from typing import Dict, Any, List, Optional |
|
|
from pathlib import Path |
|
|
from flask import Flask, request, jsonify |
|
|
from flask_cors import CORS |
|
|
from dotenv import load_dotenv |
|
|
from werkzeug.utils import secure_filename |
|
|
from langchain_groq import ChatGroq |
|
|
from typing_extensions import TypedDict |
|
|
|
|
|
|
|
|
# Functional TypedDict form: one saved assistant reply plus its user labels.
TaggedReply = TypedDict(
    "TaggedReply",
    {
        "reply": str,        # the assistant reply text that was saved
        "tags": List[str],   # user-chosen labels for later retrieval
    },
)
|
|
|
|
|
# Functional TypedDict form: the per-conversation state round-tripped
# between the frontend and the /chat and /tag_reply endpoints.
AssistantState = TypedDict(
    "AssistantState",
    {
        "conversationSummary": str,          # rolling summary maintained by the LLM
        "language": str,                     # current programming language, e.g. "Python"
        "taggedReplies": List[TaggedReply],  # replies the user has saved with tags
    },
)
|
|
|
|
|
|
|
|
|
|
|
# Root logging config: timestamped, level-tagged messages at INFO and above.
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s")

# Module-wide logger used by every route and helper in this file.
logger = logging.getLogger("code-assistant")
|
|
|
|
|
|
|
|
# Load variables from a local .env file (no-op when absent), then require
# the Groq API key — the app cannot serve LLM requests without it.
load_dotenv()

GROQ_API_KEY = os.getenv("GROQ_API_KEY")

if not GROQ_API_KEY:
    logger.error("GROQ_API_KEY not set in environment")
    # Raise SystemExit directly rather than calling the bare exit() helper,
    # which is injected by the `site` module and is not guaranteed to exist
    # (e.g. under `python -S` or some frozen/embedded interpreters).
    raise SystemExit(1)
|
|
|
|
|
|
|
|
# Resolve paths relative to this file so the app works from any working dir.
BASE_DIR = Path(__file__).resolve().parent

# frontend.html is expected inside this directory (see serve_frontend below).
static_folder = BASE_DIR / "static"

app = Flask(__name__, static_folder=str(static_folder), static_url_path="/static")

# Allow cross-origin requests — the frontend may be served from another origin.
CORS(app)
|
|
|
|
|
|
|
|
# Shared ChatGroq client used by the /chat route. The model id is
# overridable via the LLM_MODEL env var.
llm = ChatGroq(
    model=os.getenv("LLM_MODEL", "meta-llama/llama-4-scout-17b-16e-instruct"),
    temperature=0.1,  # low temperature keeps code suggestions close to deterministic
    max_tokens=2048,  # cap response length
    api_key=GROQ_API_KEY,
)
|
|
|
|
|
# System prompt sent as the first message on every /chat call. It pins the
# model to a strict four-key JSON output contract that
# extract_json_from_llm_response() parses; keep the two in sync.
PROGRAMMING_ASSISTANT_PROMPT = """
You are an expert programming assistant. Your role is to provide code suggestions, fix bugs, explain programming concepts, and offer contextual help based on the user's query and preferred programming language.

**CONTEXT HANDLING RULES (Follow these strictly):**
- **Conversation Summary:** At the end of every response, you MUST provide an updated, concise `conversationSummary` based on the entire chat history provided. This summary helps you maintain context.
- **Language Adaptation:** Adjust your suggestions, code, and explanations to the programming language specified in the 'language' field of the 'AssistantState'.

STRICT OUTPUT FORMAT (JSON ONLY):
Return a single JSON object with the following keys. **The JSON object MUST be enclosed in a single ```json block.**
- assistant_reply: string // A natural language reply to the user (short and helpful). Do NOT include code blocks here.
- code_snippet: string // If suggesting code, provide it here in a markdown code block (e.g., ```python\\nprint('Hello')\\n```). If no code is required, use an empty string: "".
- state_updates: object // updates to the internal state, must include: language, conversationSummary
- suggested_tags: array of strings // a list of 1-3 relevant tags for the assistant_reply

Rules:
- ALWAYS include all four top-level keys: `assistant_reply`, `code_snippet`, `state_updates`, and `suggested_tags`.
- ALWAYS include `assistant_reply` as a non-empty string.
- Do NOT produce any text outside the JSON block.
"""
|
|
|
|
|
def extract_json_from_llm_response(raw_response: str) -> dict:
    """Extract and validate the structured JSON payload from a raw LLM reply.

    The model is instructed (see PROGRAMMING_ASSISTANT_PROMPT) to answer with
    a single ```json fenced block containing the keys ``assistant_reply``,
    ``code_snippet``, ``state_updates`` and ``suggested_tags``. This function
    tolerates common deviations — missing fence, prose around the object,
    trailing commas, missing optional keys, wrongly-typed values — and always
    returns a dict with all four keys present, falling back to a safe default
    when parsing fails.
    """
    default = {
        "assistant_reply": "I'm sorry, I couldn't process the response correctly. Could you please rephrase?",
        "code_snippet": "",
        "state_updates": {"conversationSummary": "", "language": "Python"},
        "suggested_tags": [],
    }

    if not raw_response or not isinstance(raw_response, str):
        return default

    # Prefer the fenced ```json block; fall back to the whole response.
    m = re.search(r"```json\s*([\s\S]*?)\s*```", raw_response)
    json_string = m.group(1).strip() if m else raw_response

    # Trim any prose around the outermost {...} object.
    first = json_string.find('{')
    last = json_string.rfind('}')
    candidate = json_string[first:last + 1] if first != -1 and last != -1 and first < last else json_string

    # Strip trailing commas before } or ] — a frequent LLM JSON mistake.
    candidate = re.sub(r',\s*(?=[}\]])', '', candidate)

    try:
        parsed = json.loads(candidate)
    except ValueError as e:  # json.JSONDecodeError is a ValueError subclass
        logger.warning("Failed to parse JSON from LLM output: %s. Candidate: %s", e, candidate)
        return default

    # The payload must be an object whose assistant_reply is a string.
    # Anything else (a bare list, a null reply, ...) is treated as a parse
    # failure so the .strip() calls below cannot raise AttributeError.
    if not isinstance(parsed, dict) or not isinstance(parsed.get("assistant_reply"), str):
        logger.warning("Parsed JSON missing 'assistant_reply' or invalid format. Returning default.")
        return default

    parsed.setdefault("code_snippet", "")
    # Guard against the model emitting state_updates as a string or null,
    # which would break the setdefault calls below.
    if not isinstance(parsed.get("state_updates"), dict):
        parsed["state_updates"] = {}
    parsed["state_updates"].setdefault("conversationSummary", "")
    parsed["state_updates"].setdefault("language", "Python")
    parsed.setdefault("suggested_tags", [])

    # Never hand an empty reply to the frontend.
    if not parsed["assistant_reply"].strip():
        parsed["assistant_reply"] = "I need a clearer instruction to provide a reply."

    return parsed
|
|
|
|
|
def detect_language_from_text(text: str) -> Optional[str]:
    """Best-effort detection of a programming language mentioned in *text*.

    Looks for phrases such as "in python", "using c++", "for java".
    Returns the capitalized language name (e.g. "Python", "C++"), or None
    when no known language is mentioned.
    """
    if not text:
        return None
    lower = text.lower()
    known_languages = ["python", "javascript", "java", "c++", "c#", "go", "ruby", "php", "typescript", "swift"]

    # re.escape is required: "c++" and "c#" contain regex metacharacters —
    # unescaped, "c++" is a "multiple repeat" pattern error at call time.
    # A trailing \b cannot follow "+" or "#" (non-word characters), so use a
    # lookahead for a non-word character or end-of-string instead.
    alternation = '|'.join(re.escape(lang) for lang in known_languages)
    lang_match = re.search(r'\b(in|using|for)\s+(' + alternation + r')(?=\W|$)', lower)
    if lang_match:
        return lang_match.group(2).capitalize()
    return None
|
|
|
|
|
|
|
|
@app.route("/", methods=["GET"])
def serve_frontend():
    """Serve the single-page frontend from static/frontend.html.

    Returns a human-readable 404 page when the file is missing so a fresh
    checkout fails with instructions rather than a stack trace.
    """
    try:
        return app.send_static_file("frontend.html")
    except Exception:
        # send_static_file raises (NotFound) when the file is absent.
        return "<h3>frontend.html not found in static/ — please add your frontend.html there.</h3>", 404
|
|
|
|
|
@app.route("/chat", methods=["POST"])
def chat():
    """Main chat endpoint.

    Expects JSON of the form:
        {"chat_history": [{"role": ..., "content": ...}, ...],
         "assistant_state": {conversationSummary, language, taggedReplies}}

    Replays the history to the LLM with the system prompt, parses the
    model's structured JSON answer, merges its state updates, and returns
    {"assistant_reply", "updated_state", "suggested_tags"}.
    """
    data = request.get_json(force=True)
    if not isinstance(data, dict):
        return jsonify({"error": "invalid request body"}), 400

    chat_history: List[Dict[str, str]] = data.get("chat_history") or []
    assistant_state: AssistantState = data.get("assistant_state") or {}

    # Rebuild the state with defaults instead of trusting the client blob.
    state: AssistantState = {
        "conversationSummary": assistant_state.get("conversationSummary", ""),
        "language": assistant_state.get("language", "Python"),
        "taggedReplies": assistant_state.get("taggedReplies", []),
    }

    # System prompt first; the full visible history follows.
    llm_messages = [{"role": "system", "content": PROGRAMMING_ASSISTANT_PROMPT}]

    last_user_message = ""

    # Replay history, keeping only well-formed user/assistant turns and
    # remembering the most recent user message for language detection.
    for msg in chat_history:
        role = msg.get("role")
        content = msg.get("content")
        if role in ["user", "assistant"] and content:
            llm_messages.append({"role": role, "content": content})
            if role == "user":
                last_user_message = content

    # If the latest user message names a different language, switch to it.
    detected_lang = detect_language_from_text(last_user_message)
    if detected_lang and detected_lang.lower() != state["language"].lower():
        logger.info("Detected new language: %s", detected_lang)
        state["language"] = detected_lang

    context_hint = f"Current Language: {state['language']}. Conversation Summary so far: {state['conversationSummary']}"

    # Fold the context hint into the final user turn (or append one if the
    # history ended with an assistant turn).
    if llm_messages and llm_messages[-1]["role"] == "user":
        llm_messages[-1]["content"] = f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]"
    elif last_user_message:
        llm_messages.append({"role": "user", "content": f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]"})

    try:
        logger.info("Invoking LLM with full history and prepared prompt...")
        llm_response = llm.invoke(llm_messages)
        # langchain chat models return a message object with .content;
        # fall back to str() defensively.
        raw_response = llm_response.content if hasattr(llm_response, "content") else str(llm_response)

        logger.info(f"Raw LLM response: {raw_response}")
        parsed_result = extract_json_from_llm_response(raw_response)

    except Exception as e:
        logger.exception("LLM invocation failed")
        error_detail = str(e)
        # Groq reports retired models with 'decommissioned' in the message.
        if 'decommissioned' in error_detail:
            error_detail = "LLM Model Error: The model is likely decommissioned. Please check the 'LLM_MODEL' environment variable or the default model in app.py."
        return jsonify({"error": "LLM invocation failed", "detail": error_detail}), 500

    # Merge only the keys the model actually returned.
    updated_state_from_llm = parsed_result.get("state_updates", {})

    if 'conversationSummary' in updated_state_from_llm:
        state["conversationSummary"] = updated_state_from_llm["conversationSummary"]
    if 'language' in updated_state_from_llm:
        state["language"] = updated_state_from_llm["language"]

    assistant_reply = parsed_result.get("assistant_reply")
    code_snippet = parsed_result.get("code_snippet")

    # Concatenate the prose reply and (optional) code block for display.
    final_reply_content = assistant_reply
    if code_snippet and code_snippet.strip():
        if final_reply_content.strip():
            final_reply_content += "\n\n"
        final_reply_content += code_snippet

    if not final_reply_content.strip():
        final_reply_content = "I'm here to help with your code! What programming language are you using?"

    response_payload = {
        "assistant_reply": final_reply_content,
        "updated_state": state,
        "suggested_tags": parsed_result.get("suggested_tags", []),
    }

    return jsonify(response_payload)
|
|
|
|
|
@app.route("/tag_reply", methods=["POST"])
def tag_reply():
    """Attach user-chosen tags to an assistant reply and store it in state.

    Expects JSON: {"reply": str, "tags": [str, ...], "assistant_state": {...}}.
    Returns the updated assistant state with the new tagged reply appended.
    """
    data = request.get_json(force=True)
    if not isinstance(data, dict):
        return jsonify({"error": "invalid request body"}), 400

    reply_content = data.get("reply")
    tags = data.get("tags")
    assistant_state: AssistantState = data.get("assistant_state") or {}

    if not reply_content or not tags:
        return jsonify({"error": "Missing 'reply' or 'tags' in request"}), 400

    # Reject a bare string: iterating it below would silently explode
    # "python" into the tags ['p', 'y', 't', 'h', 'o', 'n'].
    if not isinstance(tags, (list, tuple)):
        return jsonify({"error": "'tags' must be a list of strings"}), 400

    # Normalize: stringify, strip whitespace, drop empties.
    tags = [str(t).strip() for t in tags if str(t).strip()]
    if not tags:
        return jsonify({"error": "Tags list cannot be empty"}), 400

    # Rebuild the state with defaults instead of trusting the client blob.
    state: AssistantState = {
        "conversationSummary": assistant_state.get("conversationSummary", ""),
        "language": assistant_state.get("language", "Python"),
        "taggedReplies": assistant_state.get("taggedReplies", []),
    }

    new_tagged_reply: TaggedReply = {
        "reply": reply_content,
        "tags": tags,
    }

    state["taggedReplies"].append(new_tagged_reply)

    logger.info("Reply tagged with: %s", tags)

    return jsonify({
        "message": "Reply saved and tagged successfully.",
        "updated_state": state,
    }), 200
|
|
|
|
|
@app.route("/ping", methods=["GET"])
def ping():
    """Liveness probe: always reports a healthy status."""
    payload = {"status": "ok"}
    return jsonify(payload)
|
|
|
|
|
if __name__ == "__main__":
    # Listen on every interface; the port is configurable via the PORT
    # env var (defaults to 7860, the Hugging Face Spaces convention).
    listen_port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=listen_port, debug=True)