#!/usr/bin/env python3 import os import json import logging import re from typing import Dict, Any, List, Optional from pathlib import Path from flask import Flask, request, jsonify from flask_cors import CORS from dotenv import load_dotenv from werkzeug.utils import secure_filename from langchain_groq import ChatGroq from typing_extensions import TypedDict # --- Type Definitions for State Management --- class TaggedReply(TypedDict): reply: str tags: List[str] class AssistantState(TypedDict): conversationSummary: str language: str taggedReplies: List[TaggedReply] # Note: lastUserMessage is calculated on request, not stored in state # --- Logging --- logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s") logger = logging.getLogger("code-assistant") # --- Load environment --- load_dotenv() GROQ_API_KEY = os.getenv("GROQ_API_KEY") if not GROQ_API_KEY: logger.error("GROQ_API_KEY not set in environment") # For deployment, consider raising an exception instead of exiting: # raise ValueError("GROQ_API_KEY not set in environment") exit(1) # --- Flask app setup (MOVED HERE) --- BASE_DIR = Path(__file__).resolve().parent static_folder = BASE_DIR / "static" # The 'app' object MUST be defined before its first use, e.g., in @app.route app = Flask(__name__, static_folder=str(static_folder), static_url_path="/static") CORS(app) # --- LLM setup --- # Using a model that's good for coding tasks llm = ChatGroq( model=os.getenv("LLM_MODEL", "meta-llama/llama-4-scout-17b-16e-instruct"), # Use the supported model temperature=0, # max_tokens=2048, api_key=GROQ_API_KEY, ) PROGRAMMING_ASSISTANT_PROMPT = """ You are an expert programming assistant. Your role is to provide code suggestions, fix bugs, explain programming concepts, and offer contextual help based on the user's query and preferred programming language. **CONTEXT HANDLING RULES (Follow these strictly):** - **Conversation Summary:** At the end of every response, you MUST provide an updated, concise `conversationSummary` based on the entire chat history provided. This summary helps you maintain context. - **Language Adaptation:** Adjust your suggestions, code, and explanations to the programming language specified in the 'language' field of the 'AssistantState'. STRICT OUTPUT FORMAT (JSON ONLY): Return a single JSON object with the following keys: - assistant_reply: string // a natural language reply TO THE USER, INCLUDING ANY REQUESTED CODE BLOCK(S). - state_updates: object // updates to the internal state, must include: language, conversationSummary - suggested_tags: array of strings // a list of 1-3 relevant tags for the assistant_reply Rules: - ALWAYS include `assistant_reply` as a non-empty string. - If the user is asking for code, the code MUST be enclosed in appropriate markdown code blocks (e.g., ```python\n...\n```) and placed within the `assistant_reply` string. - Do NOT produce any text outside the JSON object. - Be concise in the non-code parts of `assistant_reply`. """ def extract_json_from_llm_response(raw_response: str) -> dict: default = { "assistant_reply": "I'm sorry — I couldn't understand that. Could you please rephrase?", "state_updates": {"conversationSummary": "", "language": "Python"}, "suggested_tags": [], } # Simplified JSON extraction logic if not raw_response or not isinstance(raw_response, str): return default m = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", raw_response) json_string = m.group(1).strip() if m else raw_response first = json_string.find('{') last = json_string.rfind('}') candidate = json_string[first:last+1] if first != -1 and last != -1 and first < last else json_string candidate = re.sub(r',\s*(?=[}\]])', '', candidate) try: parsed = json.loads(candidate) except Exception as e: logger.warning("Failed to parse JSON from LLM output: %s. Candidate: %s", e, candidate) return default if isinstance(parsed, dict) and "assistant_reply" in parsed and parsed["assistant_reply"].strip(): parsed.setdefault("state_updates", {}) parsed["state_updates"].setdefault("conversationSummary", "") parsed["state_updates"].setdefault("language", "Python") parsed.setdefault("suggested_tags", []) return parsed else: logger.warning("Parsed JSON missing 'assistant_reply' or invalid format. Returning default.") return default def detect_language_from_text(text: str) -> Optional[str]: """Simple check for common programming languages.""" if not text: return None lower = text.lower() known_languages = ["python", "javascript", "java", "c++", "c#", "go", "ruby", "php", "typescript", "swift"] lang_match = re.search(r'\b(in|using|for)\s+(' + '|'.join(known_languages) + r')\b', lower) if lang_match: return lang_match.group(2).capitalize() return None # --- Flask routes --- @app.route("/", methods=["GET"]) # <-- 'app' is now defined! def serve_frontend(): try: return app.send_static_file("frontend.html") except Exception: return "

frontend.html not found in static/ — please add your frontend.html there.

", 404 @app.route("/chat", methods=["POST"]) def chat(): data = request.get_json(force=True) if not isinstance(data, dict): return jsonify({"error": "invalid request body"}), 400 # chat_history now receives the full conversation history from the corrected frontend chat_history: List[Dict[str, str]] = data.get("chat_history") or [] assistant_state: AssistantState = data.get("assistant_state") or {} # Initialize/Clean up state state: AssistantState = { "conversationSummary": assistant_state.get("conversationSummary", ""), "language": assistant_state.get("language", "Python"), "taggedReplies": assistant_state.get("taggedReplies", []), } # 1. Prepare LLM Messages from Full History llm_messages = [{"role": "system", "content": PROGRAMMING_ASSISTANT_PROMPT}] last_user_message = "" for msg in chat_history: role = msg.get("role") content = msg.get("content") if role in ["user", "assistant"] and content: llm_messages.append({"role": role, "content": content}) if role == "user": last_user_message = content # 2. Language Detection & State Update detected_lang = detect_language_from_text(last_user_message) if detected_lang and detected_lang.lower() != state["language"].lower(): logger.info("Detected new language: %s", detected_lang) state["language"] = detected_lang # 3. Inject Contextual Hint and State into the LAST user message # This ensures the LLM has immediate access to the *summarized* history and current language. context_hint = f"Current Language: {state['language']}. Conversation Summary so far: {state['conversationSummary']}" # Update the content of the last message in llm_messages if llm_messages and llm_messages[-1]["role"] == "user": # Overwrite the last user message to include the context hint llm_messages[-1]["content"] = f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]" elif last_user_message: # Should not happen with the corrected frontend, but handles fresh start gracefully llm_messages.append({"role": "user", "content": f"USER MESSAGE: {last_user_message}\n\n[CONTEXT HINT: {context_hint}]"}) try: logger.info("Invoking LLM with full history and prepared prompt...") llm_response = llm.invoke(llm_messages) raw_response = llm_response.content if hasattr(llm_response, "content") else str(llm_response) print("llm_response",llm_response) logger.info(f"Raw LLM response: {raw_response}") parsed_result = extract_json_from_llm_response(raw_response) except Exception as e: logger.exception("LLM invocation failed") # CRITICAL FIX: The Groq model might still be the problem if environment is inconsistent. error_detail = str(e) if 'decommissioned' in error_detail: error_detail = "LLM Model Error: The model is likely decommissioned. Please check the 'LLM_MODEL' environment variable or the default model in app.py." return jsonify({"error": "LLM invocation failed", "detail": error_detail}), 500 # 4. State Update from LLM updated_state_from_llm = parsed_result.get("state_updates", {}) # CRUCIAL: Update state with the NEW summary generated by the LLM if 'conversationSummary' in updated_state_from_llm: state["conversationSummary"] = updated_state_from_llm["conversationSummary"] if 'language' in updated_state_from_llm: state["language"] = updated_state_from_llm["language"] assistant_reply = parsed_result.get("assistant_reply") if not assistant_reply or not isinstance(assistant_reply, str) or not assistant_reply.strip(): assistant_reply = "I'm here to help with your code! What programming language are you using?" # 5. Final Response Payload response_payload = { "assistant_reply": assistant_reply, "updated_state": state, "suggested_tags": parsed_result.get("suggested_tags", []), } return jsonify(response_payload) @app.route("/tag_reply", methods=["POST"]) def tag_reply(): data = request.get_json(force=True) if not isinstance(data, dict): return jsonify({"error": "invalid request body"}), 400 reply_content = data.get("reply") tags = data.get("tags") assistant_state: AssistantState = data.get("assistant_state") or {} if not reply_content or not tags: return jsonify({"error": "Missing 'reply' or 'tags' in request"}), 400 tags = [str(t).strip() for t in tags if str(t).strip()] if not tags: return jsonify({"error": "Tags list cannot be empty"}), 400 state: AssistantState = { "conversationSummary": assistant_state.get("conversationSummary", ""), "language": assistant_state.get("language", "Python"), "taggedReplies": assistant_state.get("taggedReplies", []), } new_tagged_reply: TaggedReply = { "reply": reply_content, "tags": tags, } state["taggedReplies"].append(new_tagged_reply) logger.info("Reply tagged with: %s", tags) return jsonify({ "message": "Reply saved and tagged successfully.", "updated_state": state, }), 200 @app.route("/ping", methods=["GET"]) def ping(): return jsonify({"status": "ok"}) if __name__ == "__main__": port = int(os.getenv("PORT", 7860)) app.run(host="0.0.0.0", port=port, debug=True)