KeenWoo committed
Commit d39220f · verified · 1 Parent(s): aaaf1b6

Create agent.py

Files changed (1)
  1. alz_companion/agent.py +455 -0
alz_companion/agent.py ADDED
@@ -0,0 +1,455 @@
from __future__ import annotations
import os
import json
import base64
import time
import tempfile
import re

from typing import List, Dict, Any, Optional

# OpenAI for LLM (optional)
try:
    from openai import OpenAI
except Exception:  # pragma: no cover
    OpenAI = None  # type: ignore

# LangChain & RAG
from langchain.schema import Document
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings

# TTS
try:
    from gtts import gTTS
except Exception:  # pragma: no cover
    gTTS = None  # type: ignore


from .prompts import (
    SYSTEM_TEMPLATE, ANSWER_TEMPLATE_CALM, ANSWER_TEMPLATE_ADQ,
    SAFETY_GUARDRAILS, RISK_FOOTER, render_emotion_guidelines,
    # Decomposed NLU prompts
    NLU_ROUTER_PROMPT, SPECIALIST_CLASSIFIER_PROMPT,
    EMOTIONAL_SUPPORT_EXAMPLES, PRACTICAL_PLANNING_EXAMPLES,
    # Other templates
    ROUTER_PROMPT,
    ANSWER_TEMPLATE_FACTUAL,
    ANSWER_TEMPLATE_GENERAL_KNOWLEDGE,
    ANSWER_TEMPLATE_GENERAL,
    QUERY_EXPANSION_PROMPT
)

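# Configuration note (summarizing the environment variables this module reads):
#   OPENAI_API_KEY    - enables the OpenAI-backed paths; without it the module
#                       falls back to offline placeholder responses.
#   OPENAI_MODEL      - chat model name used by call_llm (defaults to "gpt-4o-mini").
#   EMBEDDINGS_MODEL  - sentence-transformers model for the FAISS embeddings
#                       (defaults to "sentence-transformers/all-MiniLM-L6-v2").
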
# -----------------------------
# Multimodal Processing Functions
# -----------------------------

def _openai_client() -> Optional[OpenAI]:
    api_key = os.getenv("OPENAI_API_KEY", "").strip()
    return OpenAI(api_key=api_key) if api_key and OpenAI else None


def describe_image(image_path: str) -> str:
    """Uses a vision model to describe an image for context."""
    client = _openai_client()
    if not client:
        return "(Image description failed: OpenAI API key not configured.)"

    try:
        # Determine the MIME type from the file extension so the data URL is correct.
        extension = os.path.splitext(image_path)[1].lower()
        if extension == ".png":
            mime_type = "image/png"
        elif extension in [".jpg", ".jpeg"]:
            mime_type = "image/jpeg"
        elif extension == ".gif":
            mime_type = "image/gif"
        elif extension == ".webp":
            mime_type = "image/webp"
        else:
            # Fall back to JPEG for unrecognized extensions.
            mime_type = "image/jpeg"

        with open(image_path, "rb") as image_file:
            base64_image = base64.b64encode(image_file.read()).decode('utf-8')

        response = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "Describe this image in a concise, factual way for a memory journal. Focus on people, places, and key objects. For example: 'A photo of John and Mary smiling on a bench at the park.'"},
                        {
                            "type": "image_url",
                            # Use the dynamically determined MIME type.
                            "image_url": {"url": f"data:{mime_type};base64,{base64_image}"}
                        }
                    ],
                }
            ],
            max_tokens=100,
        )
        return response.choices[0].message.content or "No description available."
    except Exception as e:
        return f"[Image description error: {e}]"

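# Usage sketch (illustrative; the file path below is hypothetical):
#
#     caption = describe_image("uploads/grandma_birthday.jpg")
#     # -> e.g. "A photo of an older woman blowing out candles at a kitchen table."
#     # The caption can then be stored as a journal entry and indexed with
#     # build_or_load_vectorstore() so factual questions can retrieve it later.
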
# -----------------------------
# NLU Classification Function
# -----------------------------

def detect_tags_from_query(query: str, behavior_options: list, emotion_options: list, topic_options: list, context_options: list, settings: Optional[dict] = None) -> Dict[str, Any]:
    """Uses a two-step NLU process: Route -> Select Examples -> Classify."""

    # --- STEP 1: Route the query to determine the primary goal ---
    router_prompt = NLU_ROUTER_PROMPT.format(query=query)
    router_messages = [{"role": "user", "content": router_prompt}]
    primary_goal = call_llm(router_messages, temperature=0.0).strip().lower()

    if "practical" in primary_goal:
        selected_examples = PRACTICAL_PLANNING_EXAMPLES
        goal_for_prompt = "Practical Planning"
    else:
        selected_examples = EMOTIONAL_SUPPORT_EXAMPLES
        goal_for_prompt = "Emotional Support"

    if settings and settings.get("debug_mode"):
        print(f"\n--- NLU Router ---\nGoal: {goal_for_prompt}\n------------------\n")

    # --- STEP 2: Use the specialist classifier with the selected examples ---
    behavior_str = ", ".join(f'"{opt}"' for opt in behavior_options if opt != "None")
    emotion_str = ", ".join(f'"{opt}"' for opt in emotion_options if opt != "None")
    topic_str = ", ".join(f'"{opt}"' for opt in topic_options if opt != "None")
    context_str = ", ".join(f'"{opt}"' for opt in context_options if opt != "None")

    prompt = SPECIALIST_CLASSIFIER_PROMPT.format(
        primary_goal=goal_for_prompt,
        examples=selected_examples,
        behavior_options=behavior_str,
        emotion_options=emotion_str,
        topic_options=topic_str,
        context_options=context_str,
        query=query
    )

    messages = [{"role": "system", "content": "You are a helpful NLU classification assistant. Follow the instructions precisely."}, {"role": "user", "content": prompt}]
    response_str = call_llm(messages, temperature=0.1)

    if settings and settings.get("debug_mode"):
        print(f"\n--- NLU Specialist Full Response ---\n{response_str}\n----------------------------------\n")

    result_dict = {
        "detected_behaviors": [], "detected_emotion": "None",
        "detected_topic": "None", "detected_contexts": []
    }

    try:
        # Robust parsing: extract the first JSON object from the raw LLM response,
        # then keep only values that appear in the allowed option lists.
        start_brace = response_str.find('{')
        end_brace = response_str.rfind('}')

        if start_brace != -1 and end_brace != -1 and end_brace > start_brace:
            json_str = response_str[start_brace : end_brace + 1]
            result = json.loads(json_str)

            behaviors = result.get("detected_behaviors")
            result_dict["detected_behaviors"] = [b for b in behaviors if b in behavior_options] if behaviors else []

            emotion = result.get("detected_emotion")
            result_dict["detected_emotion"] = emotion if emotion in emotion_options else "None"

            topic = result.get("detected_topic")
            result_dict["detected_topic"] = topic if topic in topic_options else "None"

            contexts = result.get("detected_contexts")
            result_dict["detected_contexts"] = [c for c in contexts if c in context_options] if contexts else []

        return result_dict
    except (json.JSONDecodeError, AttributeError) as e:
        print(f"ERROR parsing CoT JSON: {e}")
        return result_dict

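# Usage sketch (illustrative; the option lists below are hypothetical stand-ins
# for the app's configured tag vocabularies):
#
#     tags = detect_tags_from_query(
#         "Dad keeps trying to leave the house at night and gets very upset.",
#         behavior_options=["exit_seeking", "agitation", "None"],
#         emotion_options=["anxious", "angry", "None"],
#         topic_options=["safety", "sleep", "None"],
#         context_options=["evening", "home", "None"],
#     )
#     # Expected shape:
#     # {"detected_behaviors": [...], "detected_emotion": "...",
#     #  "detected_topic": "...", "detected_contexts": [...]}
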
# -----------------------------
# Embeddings & VectorStore
# -----------------------------

def _default_embeddings():
    """Lightweight, widely available model."""
    model_name = os.getenv("EMBEDDINGS_MODEL", "sentence-transformers/all-MiniLM-L6-v2")
    return HuggingFaceEmbeddings(model_name=model_name)

def build_or_load_vectorstore(docs: List[Document], index_path: str, is_personal: bool = False) -> FAISS:
    os.makedirs(os.path.dirname(index_path), exist_ok=True)
    if os.path.isdir(index_path) and os.path.exists(os.path.join(index_path, "index.faiss")):
        try:
            return FAISS.load_local(index_path, _default_embeddings(), allow_dangerous_deserialization=True)
        except Exception:
            pass

    if is_personal and not docs:
        docs = [Document(page_content="(This is the start of the personal memory journal.)", metadata={"source": "placeholder"})]

    vs = FAISS.from_documents(docs, _default_embeddings())
    vs.save_local(index_path)
    return vs

def texts_from_jsonl(path: str) -> List[Document]:
    out: List[Document] = []
    try:
        with open(path, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                line = line.strip()
                if not line: continue
                obj = json.loads(line)
                txt = obj.get("text") or ""
                if not isinstance(txt, str) or not txt.strip(): continue

                # Carry behavior, emotion, topic, and context tags into the metadata
                # so they can be used as retrieval filters later.
                md = {"source": os.path.basename(path), "chunk": i}
                for k in ("behaviors", "emotion", "topic_tags", "context_tags"):
                    if k in obj and obj[k]:  # Ensure the key exists and is not empty
                        md[k] = obj[k]
                out.append(Document(page_content=txt, metadata=md))

    except Exception:
        return []
    return out

def bootstrap_vectorstore(sample_paths: List[str] | None = None, index_path: str = "data/faiss_index") -> FAISS:
    docs: List[Document] = []
    for p in (sample_paths or []):
        try:
            if p.lower().endswith(".jsonl"):
                docs.extend(texts_from_jsonl(p))
            else:
                with open(p, "r", encoding="utf-8", errors="ignore") as fh:
                    docs.append(Document(page_content=fh.read(), metadata={"source": os.path.basename(p)}))
        except Exception:
            continue
    if not docs:
        docs = [Document(page_content="(empty index)", metadata={"source": "placeholder"})]
    return build_or_load_vectorstore(docs, index_path=index_path)

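# Usage sketch (illustrative; the paths below are hypothetical, following the
# defaults above):
#
#     vs_general = bootstrap_vectorstore(
#         ["data/caregiving_guides.jsonl"], index_path="data/faiss_index"
#     )
#     vs_personal = build_or_load_vectorstore(
#         [], index_path="data/personal_index", is_personal=True
#     )  # seeds an empty personal index with a placeholder journal entry
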
# -----------------------------
# LLM Call
# -----------------------------
# call_llm accepts an optional `stop` argument so that callers (for example the
# NLU helpers above) can pass stop sequences to the chat completion request.
def call_llm(messages: List[Dict[str, str]], temperature: float = 0.6, stop: Optional[List[str]] = None) -> str:
    """Call OpenAI Chat Completions if available; else return a fallback."""
    client = _openai_client()
    model = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
    if not client:
        return "(Offline Mode: OpenAI API key not configured.)"
    try:
        # Build the keyword arguments so the optional 'stop' parameter is only
        # sent when it is actually provided.
        api_args = {
            "model": model,
            "messages": messages,
            "temperature": float(temperature if temperature is not None else 0.6)
        }
        if stop:
            api_args["stop"] = stop

        resp = client.chat.completions.create(**api_args)
        return (resp.choices[0].message.content or "").strip()
    except Exception as e:
        return f"[LLM API Error: {e}]"


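# Usage sketch (illustrative; the stop list below is an arbitrary example):
#
#     reply = call_llm(
#         [{"role": "user", "content": "Reply with OK and nothing else."}],
#         temperature=0.0,
#         stop=["\n"],
#     )
#     # Returns "(Offline Mode: ...)" when OPENAI_API_KEY is not configured.
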
# -----------------------------
# Prompting & RAG Chain
# -----------------------------

def _format_sources(docs: List[Document]) -> List[str]:
    return list(set(d.metadata.get("source", "unknown") for d in docs))

# make_rag_chain routes each query to one of four paths (general knowledge,
# factual question with query expansion, general conversation, or the default
# caregiving-scenario logic) and retrieves from both the general and the
# personal vector stores.

def make_rag_chain(
    vs_general: FAISS,
    vs_personal: FAISS,
    *,
    role: str = "patient",
    temperature: float = 0.6,
    language: str = "English",
    patient_name: str = "the patient",
    caregiver_name: str = "the caregiver",
    tone: str = "warm",
):
    """Returns a callable that performs the complete, intelligent RAG process."""

    def _format_docs(docs: List[Document], default_msg: str) -> str:
        if not docs: return default_msg
        unique_docs = {doc.page_content: doc for doc in docs}.values()
        return "\n".join([f"- {d.page_content.strip()}" for d in unique_docs])

    def _answer_fn(query: str, chat_history: List[Dict[str, str]], scenario_tag: Optional[str] = None, emotion_tag: Optional[str] = None, topic_tag: Optional[str] = None, context_tags: Optional[List[str]] = None) -> Dict[str, Any]:

        router_messages = [{"role": "user", "content": ROUTER_PROMPT.format(query=query)}]
        query_type = call_llm(router_messages, temperature=0.0).strip().lower()
        print(f"Query classified as: {query_type}")

        system_message = SYSTEM_TEMPLATE.format(tone=tone, language=language, patient_name=patient_name or "the patient", caregiver_name=caregiver_name or "the caregiver", guardrails=SAFETY_GUARDRAILS)
        messages = [{"role": "system", "content": system_message}]
        messages.extend(chat_history)

        # Path 1: general knowledge questions are answered without retrieval.
        if "general_knowledge_question" in query_type:
            user_prompt = ANSWER_TEMPLATE_GENERAL_KNOWLEDGE.format(question=query, language=language)
            messages.append({"role": "user", "content": user_prompt})
            answer = call_llm(messages, temperature=temperature)
            return {"answer": answer, "sources": ["General Knowledge"]}

        # Path 2: factual questions use query expansion plus retrieval from both stores.
        elif "factual_question" in query_type:
            print(f"Performing query expansion for: '{query}'")
            expansion_prompt = QUERY_EXPANSION_PROMPT.format(question=query)
            expansion_response = call_llm([{"role": "user", "content": expansion_prompt}], temperature=0.1)

            try:
                clean_response = expansion_response.strip().replace("```json", "").replace("```", "")
                expanded_queries = json.loads(clean_response)
                search_queries = [query] + expanded_queries
            except (json.JSONDecodeError, TypeError):
                search_queries = [query]

            print(f"Searching with queries: {search_queries}")
            retriever_personal = vs_personal.as_retriever(search_kwargs={"k": 2})
            retriever_general = vs_general.as_retriever(search_kwargs={"k": 2})

            all_docs = []
            for q in search_queries:
                all_docs.extend(retriever_personal.invoke(q))
                all_docs.extend(retriever_general.invoke(q))

            context = _format_docs(all_docs, "(No relevant information found in the memory journal.)")

            user_prompt = ANSWER_TEMPLATE_FACTUAL.format(context=context, question=query, language=language)
            messages.append({"role": "user", "content": user_prompt})
            answer = call_llm(messages, temperature=temperature)
            return {"answer": answer, "sources": _format_sources(all_docs)}

        # Path 3: small talk and other general conversation.
        elif "general_conversation" in query_type:
            user_prompt = ANSWER_TEMPLATE_GENERAL.format(question=query, language=language)
            messages.append({"role": "user", "content": user_prompt})
            answer = call_llm(messages, temperature=temperature)
            return {"answer": answer, "sources": []}

        # Path 4 (default): caregiving scenarios with tag-based retrieval filters.
        else:
            search_filter = {}
            if scenario_tag and scenario_tag != "None":
                search_filter["behaviors"] = scenario_tag.lower()
            if emotion_tag and emotion_tag != "None":
                search_filter["emotion"] = emotion_tag.lower()
            # Topic and context tags also narrow the filtered search.
            if topic_tag and topic_tag != "None":
                search_filter["topic_tags"] = topic_tag.lower()
            if context_tags:
                search_filter["context_tags"] = {"in": [tag.lower() for tag in context_tags]}

            # --- Robust search strategy ---
            # 1. Start with a general, unfiltered search to always get text-based matches.
            retriever_personal = vs_personal.as_retriever(search_kwargs={"k": 3})
            retriever_general = vs_general.as_retriever(search_kwargs={"k": 3})

            personal_docs = retriever_personal.invoke(query)
            general_docs = retriever_general.invoke(query)

            # 2. If filters exist, perform a second, more specific search and add the results.
            if search_filter:
                print(f"Performing additional search with filter: {search_filter}")
                personal_docs.extend(vs_personal.similarity_search(query, k=3, filter=search_filter))
                general_docs.extend(vs_general.similarity_search(query, k=3, filter=search_filter))

            # 3. Combine and de-duplicate the results to get the best of both searches.
            all_personal_docs = list({doc.page_content: doc for doc in personal_docs}.values())
            all_general_docs = list({doc.page_content: doc for doc in general_docs}.values())

            # 4. Build the context strings from the combined results.
            personal_context = _format_docs(all_personal_docs, "(No relevant personal memories found.)")
            general_context = _format_docs(all_general_docs, "(No general guidance found.)")

            first_emotion = None
            all_docs_care = all_personal_docs + all_general_docs

            # Use the first emotion tag found in the retrieved documents, if any.
            for doc in all_docs_care:
                if "emotion" in doc.metadata and doc.metadata["emotion"]:
                    emotion_data = doc.metadata["emotion"]
                    if isinstance(emotion_data, list): first_emotion = emotion_data[0]
                    else: first_emotion = emotion_data
                    if first_emotion: break

            emotions_context = render_emotion_guidelines(first_emotion or emotion_tag)
            is_tagged_scenario = (scenario_tag and scenario_tag != "None") or (emotion_tag and emotion_tag != "None") or (first_emotion is not None)
            template = ANSWER_TEMPLATE_ADQ if is_tagged_scenario else ANSWER_TEMPLATE_CALM

            if template == ANSWER_TEMPLATE_ADQ:
                user_prompt = template.format(general_context=general_context, personal_context=personal_context, question=query, scenario_tag=scenario_tag, emotions_context=emotions_context, role=role, language=language)
            else:
                combined_context = f"General Guidance:\n{general_context}\n\nPersonal Memories:\n{personal_context}"
                user_prompt = template.format(context=combined_context, question=query, language=language)

            messages.append({"role": "user", "content": user_prompt})
            answer = call_llm(messages, temperature=temperature)

            high_risk_scenarios = ["exit_seeking", "wandering", "elopement"]
            if scenario_tag and scenario_tag.lower() in high_risk_scenarios:
                answer += f"\n\n---\n{RISK_FOOTER}"

            return {"answer": answer, "sources": _format_sources(all_docs_care)}

    return _answer_fn


# answer_query forwards the optional tags (scenario, emotion, topic, context)
# from the UI layer to the RAG chain.
def answer_query(chain, question: str, **kwargs) -> Dict[str, Any]:
    if not callable(chain): return {"answer": "[Error: RAG chain is not callable]", "sources": []}
    chat_history = kwargs.get("chat_history", [])
    scenario_tag = kwargs.get("scenario_tag")
    emotion_tag = kwargs.get("emotion_tag")
    topic_tag = kwargs.get("topic_tag")
    context_tags = kwargs.get("context_tags")
    try:
        return chain(question, chat_history=chat_history, scenario_tag=scenario_tag, emotion_tag=emotion_tag, topic_tag=topic_tag, context_tags=context_tags)
    except Exception as e:
        print(f"ERROR in answer_query: {e}")
        return {"answer": f"[Error executing chain: {e}]", "sources": []}

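# End-to-end usage sketch (illustrative; variable names and tag values are
# hypothetical, and the vector stores are assumed to come from the helpers above):
#
#     chain = make_rag_chain(vs_general, vs_personal, role="caregiver",
#                            language="English", patient_name="Mary",
#                            caregiver_name="John", tone="warm")
#     result = answer_query(chain, "Mom keeps asking to go home after dinner.",
#                           chat_history=[], scenario_tag="exit_seeking",
#                           emotion_tag="anxious")
#     print(result["answer"], result["sources"])
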
# -----------------------------
# TTS & Transcription
# -----------------------------
def synthesize_tts(text: str, lang: str = "en"):
    if not text or gTTS is None: return None
    try:
        fd, path = tempfile.mkstemp(suffix=".mp3")
        os.close(fd)
        tts = gTTS(text=text, lang=(lang or "en"))
        tts.save(path)
        return path
    except Exception:
        return None

def transcribe_audio(filepath: str, lang: str = "en"):
    client = _openai_client()
    if not client:
        return "[Transcription failed: API key not configured]"
    api_args = {"model": "whisper-1"}
    if lang and lang != "auto":
        api_args["language"] = lang
    try:
        with open(filepath, "rb") as audio_file:
            transcription = client.audio.transcriptions.create(file=audio_file, **api_args)
        return transcription.text
    except Exception as e:
        # Return an error string rather than raising, matching the module's other fallbacks.
        return f"[Transcription error: {e}]"
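

# Minimal local smoke test (illustrative sketch, not required by the app).
# Because of the relative import of .prompts, run it as a module, e.g.
# `python -m alz_companion.agent`. It is safe without OPENAI_API_KEY, in which
# case the offline fallbacks are exercised.
if __name__ == "__main__":
    # Without an API key, call_llm returns its offline placeholder string.
    print(call_llm([{"role": "user", "content": "Hello"}]))
    # synthesize_tts returns a path to an MP3 file, or None if gTTS is
    # unavailable or the request fails.
    print(synthesize_tts("Hello, this is a test.", lang="en"))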