Update app.py
app.py CHANGED
@@ -33,6 +33,9 @@ static_folder = BASE_DIR / "static"
 app = Flask(__name__, static_folder=str(static_folder), static_url_path="/static")
 CORS(app)
 
+# Ensure the reports directory exists
+os.makedirs(REPORTS_ROOT, exist_ok=True)
+
 # --- LLM setup ---
 llm = ChatGroq(
     model=os.getenv("LLM_MODEL", "meta-llama/llama-4-scout-17b-16e-instruct"),
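
The new os.makedirs call relies on a per-patient folder convention under REPORTS_ROOT. A minimal sketch of that layout, assuming an illustrative root path (the real REPORTS_ROOT is configured earlier in app.py):

from pathlib import Path

# Illustrative only: the actual REPORTS_ROOT is defined earlier in app.py.
REPORTS_ROOT = Path("reports")

# One folder per patient, named "p_<patient_id>", matching the f"p_{patient_id}"
# convention used by the /upload_report and /chat handlers below.
example_folder = REPORTS_ROOT / "p_123"
print(example_folder)  # -> reports/p_123 (on POSIX)
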
@@ -55,21 +58,22 @@ def clean_notes_with_bloatectomy(text: str, style: str = "remov") -> str:
 
 # --- Agent prompt instructions ---
 PATIENT_ASSISTANT_PROMPT = """
-You are a patient assistant helping to analyze medical records and reports. Your
+You are a patient assistant helping to analyze medical records and reports. Your primary task is to get the patient ID (PID) from the user at the start of the conversation.
+
+Once you have the PID, you will be provided with a summary of the patient's medical reports. Use this information, along with the conversation history, to provide a comprehensive response.
 
 Your tasks include:
+- **First, ask for the patient ID.** Do not proceed with any other task until you have the PID.
 - Analyzing medical records and reports to detect anomalies, redundant tests, or misleading treatments.
 - Suggesting preventive care based on the overall patient health history.
-- Optimizing healthcare costs by comparing past visits and treatments
-- Offering personalized lifestyle recommendations
+- Optimizing healthcare costs by comparing past visits and treatments.
+- Offering personalized lifestyle recommendations.
 - Generating a natural, helpful reply to the user.
 
-You will be provided with the last user message, the conversation history, and a summary of the patient's medical reports. Use this information to give a tailored and informative response.
-
 STRICT OUTPUT FORMAT (JSON ONLY):
 Return a single JSON object with the following keys:
 - assistant_reply: string // a natural language reply to the user (short, helpful, always present)
-- patientDetails: object // keys may include name, problem, city, contact (update if user shared info)
+- patientDetails: object // keys may include name, problem, pid (patient ID), city, contact (update if user shared info)
 - conversationSummary: string (optional) // short summary of conversation + relevant patient docs
 
 Rules:
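
For reference, a hypothetical reply that satisfies the STRICT OUTPUT FORMAT described in the updated prompt; the values are invented and only the key names come from the prompt:

import json

# Invented example of a conforming LLM reply.
raw_reply = """{
  "assistant_reply": "Thanks! I found the reports for PID 123 and will summarize them.",
  "patientDetails": {"name": "Jane Doe", "pid": "123", "problem": "chest pain", "city": "Pune"},
  "conversationSummary": "User shared PID 123; reports show two recent ECGs."
}"""

parsed = json.loads(raw_reply)
assert "assistant_reply" in parsed and isinstance(parsed["patientDetails"], dict)
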
@@ -129,10 +133,30 @@ def extract_json_from_llm_response(raw_response: str) -> dict:
 def serve_frontend():
     """Serves the frontend HTML file."""
     try:
-        return app.send_static_file("frontend2.html")
+        return app.send_static_file("frontend_p.html")
     except Exception:
         return "<h3>frontend2.html not found in static/ — please add your frontend2.html there.</h3>", 404
 
+@app.route("/upload_report", methods=["POST"])
+def upload_report():
+    """Handles the upload of a new PDF report for a specific patient."""
+    if 'report' not in request.files:
+        return jsonify({"error": "No file part in the request"}), 400
+
+    file = request.files['report']
+    patient_id = request.form.get("patient_id")
+
+    if file.filename == '' or not patient_id:
+        return jsonify({"error": "No selected file or patient ID"}), 400
+
+    if file:
+        filename = secure_filename(file.filename)
+        patient_folder = REPORTS_ROOT / f"p_{patient_id}"
+        os.makedirs(patient_folder, exist_ok=True)
+        file_path = patient_folder / filename
+        file.save(file_path)
+        return jsonify({"message": f"File '{filename}' uploaded successfully for patient ID '{patient_id}'."}), 200
+
 @app.route("/chat", methods=["POST"])
 def chat():
     """Handles the chat conversation with the assistant."""
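
A minimal client-side sketch for exercising the new /upload_report route, assuming the app runs locally on port 5000 and the requests package is installed; the file path and patient ID are placeholders:

import requests

# Placeholders: adjust the path and patient ID to match your setup.
with open("sample_report.pdf", "rb") as fh:
    resp = requests.post(
        "http://localhost:5000/upload_report",
        files={"report": fh},          # field name must match request.files['report']
        data={"patient_id": "123"},    # field name must match request.form.get("patient_id")
    )
print(resp.status_code, resp.json())
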
@@ -140,36 +164,9 @@
     if not isinstance(data, dict):
         return jsonify({"error": "invalid request body"}), 400
 
-    patient_id = data.get("patient_id")
-    if not patient_id:
-        return jsonify({"error": "patient_id required"}), 400
-
     chat_history = data.get("chat_history") or []
     patient_state = data.get("patient_state") or {}
-
-    # --- Read and parse patient reports ---
-    patient_folder = REPORTS_ROOT / f"p_{patient_id}"
-    combined_text_parts = []
-    if patient_folder.exists() and patient_folder.is_dir():
-        for fname in sorted(os.listdir(patient_folder)):
-            file_path = patient_folder / fname
-            page_text = ""
-            if partition_pdf is not None and str(file_path).lower().endswith('.pdf'):
-                try:
-                    elements = partition_pdf(filename=str(file_path))
-                    page_text = "\n".join([el.text for el in elements if hasattr(el, 'text') and el.text])
-                except Exception:
-                    logger.exception("Failed to parse PDF %s", file_path)
-            else:
-                try:
-                    page_text = file_path.read_text(encoding='utf-8', errors='ignore')
-                except Exception:
-                    page_text = ""
-
-            if page_text:
-                cleaned = clean_notes_with_bloatectomy(page_text, style="remov")
-                if cleaned:
-                    combined_text_parts.append(cleaned)
+    patient_id = patient_state.get("patientDetails", {}).get("pid")
 
     # --- Prepare the state for the LLM ---
     state = patient_state.copy()
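
Since the handler now reads the PID from patient_state rather than a top-level patient_id field, a request that reaches the report-loading path would look roughly like the sketch below (hypothetical values; requests package and a local server assumed):

import requests

payload = {
    "chat_history": [{"role": "user", "content": "Can you review my latest reports?"}],
    "patient_state": {"patientDetails": {"pid": "123"}},  # read via patient_state["patientDetails"]["pid"]
}
resp = requests.post("http://localhost:5000/chat", json=payload)
print(resp.json())
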
@@ -180,6 +177,49 @@
         if msg.get("role") == "user" and msg.get("content"):
             state["lastUserMessage"] = msg["content"]
             break
+
+    combined_text_parts = []
+    # If a PID is not yet known, prompt the agent to ask for it.
+    if not patient_id:
+        # A simple prompt to get the agent to ask for the PID.
+        user_prompt = "Hello. I need to get the patient's ID to proceed."
+
+        # Check if the user's last message contains a possible number for the PID
+        last_message = state.get("lastUserMessage", "")
+        # A very basic check to see if the user provided a number
+        if re.search(r'\d+', last_message):
+            inferred_pid = re.search(r'(\d+)', last_message).group(1)
+            state["patientDetails"] = {"pid": inferred_pid}
+            patient_id = inferred_pid
+            # Now that we have a PID, let the agent know to process the reports.
+            user_prompt = f"The user provided a patient ID: {inferred_pid}. Please access their reports and respond."
+        else:
+            # If no PID is found, the agent should ask for it.
+            user_prompt = "The patient has not provided a patient ID. Please ask them to provide it to proceed."
+
+    # If a PID is known, load the patient reports.
+    if patient_id:
+        patient_folder = REPORTS_ROOT / f"p_{patient_id}"
+        if patient_folder.exists() and patient_folder.is_dir():
+            for fname in sorted(os.listdir(patient_folder)):
+                file_path = patient_folder / fname
+                page_text = ""
+                if partition_pdf is not None and str(file_path).lower().endswith('.pdf'):
+                    try:
+                        elements = partition_pdf(filename=str(file_path))
+                        page_text = "\n".join([el.text for el in elements if hasattr(el, 'text') and el.text])
+                    except Exception:
+                        logger.exception("Failed to parse PDF %s", file_path)
+                else:
+                    try:
+                        page_text = file_path.read_text(encoding='utf-8', errors='ignore')
+                    except Exception:
+                        page_text = ""
+
+                if page_text:
+                    cleaned = clean_notes_with_bloatectomy(page_text, style="remov")
+                    if cleaned:
+                        combined_text_parts.append(cleaned)
 
     # Update the conversation summary with the parsed documents
     base_summary = state.get("conversationSummary", "") or ""
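
The PID inference above takes the first run of digits in the last user message, so any earlier number wins. A quick standalone illustration of that behaviour:

import re

# Shows which digits the inference picks up from a user message.
for message in ["My patient ID is 1023", "I'm 45 years old, PID 1023"]:
    match = re.search(r'(\d+)', message)
    print(message, "->", match.group(1) if match else None)
# -> 1023 for the first message, but 45 for the second
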
@@ -238,5 +278,5 @@ def ping():
     return jsonify({"status": "ok"})
 
 if __name__ == "__main__":
-    port = int(os.getenv("PORT",
-    app.run(host="0.0.0.0", port=port, debug=True)
+    port = int(os.getenv("PORT", 5000))
+    app.run(host="0.0.0.0", port=port, debug=True)