KeenWoo committed on
Commit d10576d · verified · 1 Parent(s): 14f1ea0

Create app.py

Files changed (1)
  1. app.py +391 -0
app.py ADDED
@@ -0,0 +1,391 @@
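"""Gradio app for an Alzheimer's / dementia caregiving companion.

Answers questions through a RAG chain over theme-specific knowledge bases plus a
personal memory index, auto-detects behaviour/emotion context, and supports voice
input (transcription) and spoken responses (TTS). If the alz_companion package
cannot be imported, the app falls back to a UI-only demo mode.
"""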
import os
import json
import shutil
import gradio as gr
from datetime import datetime
from typing import List, Dict, Any, Optional

# --- Agent Imports & Safe Fallbacks ---
try:
    from alz_companion.agent import (
        bootstrap_vectorstore, make_rag_chain, answer_query, synthesize_tts,
        transcribe_audio, detect_tags_from_query, describe_image, build_or_load_vectorstore,
        _default_embeddings
    )
    from alz_companion.prompts import BEHAVIOUR_TAGS, EMOTION_STYLES
    from langchain.schema import Document
    from langchain_community.vectorstores import FAISS
    AGENT_OK = True
except Exception as e:
    AGENT_OK = False
    # Define all fallback functions and classes
    def bootstrap_vectorstore(sample_paths=None, index_path="data/"): return object()
    def build_or_load_vectorstore(docs, index_path, is_personal=False): return object()
    def make_rag_chain(vs_general, vs_personal, **kwargs): return lambda q, **k: {"answer": f"(Demo) You asked: {q}", "sources": []}
    def answer_query(chain, q, **kwargs): return chain(q, **kwargs)
    def synthesize_tts(text: str, lang: str = "en"): return None
    def transcribe_audio(filepath: str, lang: str = "en"): return "This is a transcribed message."
    def detect_tags_from_query(query: str, behavior_options: list, emotion_options: list): return {"detected_behavior": "None", "detected_emotion": "None"}
    def describe_image(image_path: str): return "This is a description of an image."
    def _default_embeddings(): return None  # stub so demo mode doesn't raise NameError
    class Document:
        def __init__(self, page_content, metadata):
            self.page_content = page_content
            self.metadata = metadata
    class FAISS:
        def __init__(self):
            self.docstore = type('obj', (object,), {'_dict': {}})()
    BEHAVIOUR_TAGS = {"None": []}
    EMOTION_STYLES = {"None": {}}
    print(f"WARNING: Could not import from alz_companion ({e}). Running in UI-only demo mode.")

# --- Centralized Configuration ---
CONFIG = {
    "themes": ["All", "The Father", "Still Alice", "Away from Her", "General Caregiving"],
    "roles": ["patient", "caregiver"],
    "behavior_tags": ["None"] + list(BEHAVIOUR_TAGS.keys()),
    "emotion_tags": ["None"] + list(EMOTION_STYLES.keys()),
    "languages": {"English": "en", "Chinese": "zh", "Malay": "ms", "French": "fr", "Spanish": "es"},
    "tones": ["warm", "neutral", "formal", "playful"]
}

# --- File Management & Vector Store Logic ---
INDEX_BASE = os.getenv('INDEX_BASE', 'data')
UPLOADS_BASE = os.path.join(INDEX_BASE, "uploads")
PERSONAL_INDEX_PATH = os.path.join(INDEX_BASE, "personal_faiss_index")
os.makedirs(UPLOADS_BASE, exist_ok=True)
THEME_PATHS = {t: os.path.join(INDEX_BASE, f"faiss_index_{t.replace(' ', '').lower()}") for t in CONFIG["themes"]}
vectorstores = {}
personal_vectorstore = None

def canonical_theme(tk: str) -> str: return tk if tk in CONFIG["themes"] else "All"
def theme_upload_dir(theme: str) -> str:
    p = os.path.join(UPLOADS_BASE, f"theme_{canonical_theme(theme).replace(' ', '').lower()}")
    os.makedirs(p, exist_ok=True)
    return p
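# Each theme folder keeps a manifest.json of the form {"files": {"<filename>": <enabled bool>}},
# recording which uploaded files are included when that theme's index is rebuilt.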
def load_manifest(theme: str) -> Dict[str, Any]:
    p = os.path.join(theme_upload_dir(theme), "manifest.json")
    if os.path.exists(p):
        try:
            with open(p, "r", encoding="utf-8") as f: return json.load(f)
        except Exception: pass
    return {"files": {}}
def save_manifest(theme: str, man: Dict[str, Any]):
    with open(os.path.join(theme_upload_dir(theme), "manifest.json"), "w", encoding="utf-8") as f: json.dump(man, f, indent=2)
def list_theme_files(theme: str) -> List[tuple[str, bool]]:
    man = load_manifest(theme)
    base = theme_upload_dir(theme)
    found = [(n, bool(e)) for n, e in man.get("files", {}).items() if os.path.exists(os.path.join(base, n))]
    existing = {n for n, e in found}
    for name in sorted(os.listdir(base)):
        if name not in existing and os.path.isfile(os.path.join(base, name)): found.append((name, False))
    man["files"] = dict(found)
    save_manifest(theme, man)
    return found
def copy_into_theme(theme: str, src_path: str) -> str:
    fname = os.path.basename(src_path)
    dest = os.path.join(theme_upload_dir(theme), fname)
    shutil.copy2(src_path, dest)
    return dest
def seed_files_into_theme(theme: str):
    SEED_FILES = [
        ("sample_data/caregiving_tips.txt", True),
        ("sample_data/the_father_segments_tagged_with_emotion_hybrid.jsonl", True),
        ("sample_data/still_alice_segments_tagged_with_emotion_hybrid.jsonl", True),
        ("sample_data/away_from_her_segments_tagged_with_emotion_hybrid.jsonl", True)
    ]
    man, changed = load_manifest(theme), False
    for path, enable in SEED_FILES:
        if not os.path.exists(path): continue
        fname = os.path.basename(path)
        if not os.path.exists(os.path.join(theme_upload_dir(theme), fname)):
            copy_into_theme(theme, path)
            man["files"][fname] = bool(enable)
            changed = True
    if changed: save_manifest(theme, man)

def ensure_index(theme='All'):
    theme = canonical_theme(theme)
    if theme in vectorstores: return vectorstores[theme]
    upload_dir = theme_upload_dir(theme)
    enabled_files = [os.path.join(upload_dir, n) for n, enabled in list_theme_files(theme) if enabled]
    index_path = THEME_PATHS.get(theme)
    vectorstores[theme] = bootstrap_vectorstore(sample_paths=enabled_files, index_path=index_path)
    return vectorstores[theme]

# --- Gradio Callbacks ---
def collect_settings(*args):
    keys = ["role", "patient_name", "caregiver_name", "tone", "language", "tts_lang", "temperature", "behaviour_tag", "emotion_tag", "active_theme", "tts_on", "debug_mode"]
    return dict(zip(keys, args))

def add_personal_knowledge(text_input, file_input, image_input):
    global personal_vectorstore
    if not any([text_input, file_input, image_input]):
        return "Please provide text, a file, or an image to add."
    content_text, content_source = "", ""
    if text_input and text_input.strip():
        content_text, content_source = text_input.strip(), "Text Input"
    elif file_input:
        content_text, content_source = transcribe_audio(file_input.name), os.path.basename(file_input.name)
    elif image_input:
        # gr.Image(type="filepath") passes a path string, so use it directly
        content_text, content_source = describe_image(image_input), "Image Input"
    if not content_text:
        return "Could not extract any text content to add."
    print("Auto-tagging personal memory...")
    behavior_options = CONFIG.get("behavior_tags", [])
    emotion_options = CONFIG.get("emotion_tags", [])
    detected_tags = detect_tags_from_query(content_text, behavior_options=behavior_options, emotion_options=emotion_options)
    detected_behavior = detected_tags.get("detected_behavior")
    detected_emotion = detected_tags.get("detected_emotion")
    print(f" ...Detected Behavior: {detected_behavior}, Emotion: {detected_emotion}")
    metadata = {"source": content_source}
    if detected_behavior and detected_behavior != "None":
        metadata["behaviors"] = [detected_behavior.lower()]
    if detected_emotion and detected_emotion != "None":
        metadata["emotion"] = detected_emotion.lower()
    doc_to_add = Document(page_content=content_text, metadata=metadata)
    if personal_vectorstore is None:
        personal_vectorstore = build_or_load_vectorstore([doc_to_add], PERSONAL_INDEX_PATH, is_personal=True)
    else:
        personal_vectorstore.add_documents([doc_to_add])
        personal_vectorstore.save_local(PERSONAL_INDEX_PATH)
    return f"Successfully added memory with tags (Behavior: {detected_behavior}, Emotion: {detected_emotion})"

def save_chat_to_memory(chat_history):
    global personal_vectorstore
    if not chat_history:
        return "Nothing to save."
    formatted_chat = []
    for message in chat_history:
        role = "User" if message["role"] == "user" else "Assistant"
        content = message["content"].strip()
        if content.startswith("*(Auto-detected context:"):
            continue
        formatted_chat.append(f"{role}: {content}")
    conversation_text = "\n".join(formatted_chat)
    if not conversation_text:
        return "No conversation content to save."
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    doc_to_add = Document(page_content=conversation_text, metadata={"source": f"Conversation saved on {timestamp}"})
    if personal_vectorstore is None:
        personal_vectorstore = build_or_load_vectorstore([doc_to_add], PERSONAL_INDEX_PATH, is_personal=True)
    else:
        personal_vectorstore.add_documents([doc_to_add])
        personal_vectorstore.save_local(PERSONAL_INDEX_PATH)
    print("Saved conversation to long-term memory.")
    return f"Conversation from {timestamp} saved successfully to long-term memory!"

def list_personal_memories():
    global personal_vectorstore
    if personal_vectorstore is None or not hasattr(personal_vectorstore.docstore, '_dict') or not personal_vectorstore.docstore._dict:
        return gr.update(value=[["No memories to display", ""]]), gr.update(choices=["No memories to select"], value=None)
    docs = list(personal_vectorstore.docstore._dict.values())
    dataframe_data = [[doc.metadata.get('source', 'Unknown'), doc.page_content] for doc in docs]
    dropdown_choices = [doc.page_content for doc in docs]
    return gr.update(value=dataframe_data), gr.update(choices=dropdown_choices)

def delete_personal_memory(memory_to_delete):
    global personal_vectorstore
    if personal_vectorstore is None or not memory_to_delete:
        return "Knowledge base is empty or no memory selected."
    all_docs = list(personal_vectorstore.docstore._dict.values())
    docs_to_keep = [doc for doc in all_docs if doc.page_content != memory_to_delete]
    if len(all_docs) == len(docs_to_keep):
        return "Error: Could not find the selected memory to delete."
    print(f"Deleting memory. {len(docs_to_keep)} memories remaining.")
    if not docs_to_keep:
        if os.path.isdir(PERSONAL_INDEX_PATH):
            shutil.rmtree(PERSONAL_INDEX_PATH)
        personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
    else:
        # Rebuild and save the index
        new_vs = FAISS.from_documents(docs_to_keep, _default_embeddings())
        new_vs.save_local(PERSONAL_INDEX_PATH)
        personal_vectorstore = new_vs
    return "Successfully deleted memory. The list will now refresh."

def chat_fn(user_text, audio_file, settings, chat_history):
    """Handle one chat turn: transcribe audio if needed, tag context, run the RAG chain, and optionally synthesize speech."""
    global personal_vectorstore
    question = (user_text or "").strip()
    if audio_file and not question:
        try:
            voice_lang_name = settings.get("tts_lang", "English")
            voice_lang_code = CONFIG["languages"].get(voice_lang_name, "en")
            question = transcribe_audio(audio_file, lang=voice_lang_code)
        except Exception as e:
            err_msg = f"Audio Error: {e}" if settings.get("debug_mode") else "Sorry, I couldn't understand the audio."
            chat_history.append({"role": "assistant", "content": err_msg})
            return "", None, chat_history
    if not question:
        return "", None, chat_history
    chat_history.append({"role": "user", "content": question})
    manual_behavior_tag = settings.get("behaviour_tag")
    manual_emotion_tag = settings.get("emotion_tag")
    if manual_behavior_tag not in [None, "None"] or manual_emotion_tag not in [None, "None"]:
        scenario_tag, emotion_tag = manual_behavior_tag, manual_emotion_tag
    else:
        behavior_options = CONFIG.get("behavior_tags", [])
        emotion_options = CONFIG.get("emotion_tags", [])
        detected_tags = detect_tags_from_query(question, behavior_options=behavior_options, emotion_options=emotion_options)
        scenario_tag, emotion_tag = detected_tags.get("detected_behavior"), detected_tags.get("detected_emotion")
        if (scenario_tag and scenario_tag != "None") or (emotion_tag and emotion_tag != "None"):
            detected_msg = f"*(Auto-detected context: Behavior=`{scenario_tag}`, Emotion=`{emotion_tag}`)*"
            chat_history.append({"role": "assistant", "content": detected_msg})
    active_theme = settings.get("active_theme", "All")
    vs_general = ensure_index(active_theme)
    if personal_vectorstore is None:
        personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
    rag_chain_settings = {"role": settings.get("role"), "temperature": settings.get("temperature"), "language": settings.get("language"), "patient_name": settings.get("patient_name"), "caregiver_name": settings.get("caregiver_name"), "tone": settings.get("tone")}
    chain = make_rag_chain(vs_general, personal_vectorstore, **rag_chain_settings)
    if scenario_tag == "None": scenario_tag = None
    if emotion_tag == "None": emotion_tag = None
    simple_history = chat_history[:-1]
    response = answer_query(chain, question, chat_history=simple_history, scenario_tag=scenario_tag, emotion_tag=emotion_tag)
    answer = response.get("answer", "[No answer found]")
    chat_history.append({"role": "assistant", "content": answer})
    audio_out = None
    if settings.get("tts_on") and answer:
        tts_lang_code = CONFIG["languages"].get(settings.get("tts_lang"), "en")
        audio_out = synthesize_tts(answer, lang=tts_lang_code)
    return "", gr.update(value=audio_out, visible=bool(audio_out)), chat_history

def upload_knowledge(files, current_theme):
    if not files: return "No files were selected to upload."
    added = 0
    for f in files:
        try:
            copy_into_theme(current_theme, f.name); added += 1
        except Exception as e: print(f"Error uploading file {f.name}: {e}")
    if added > 0 and current_theme in vectorstores: del vectorstores[current_theme]
    return f"Uploaded {added} file(s). Refreshing file list..."
def save_file_selection(current_theme, enabled_files):
    man = load_manifest(current_theme)
    for fname in man['files']: man['files'][fname] = fname in enabled_files
    save_manifest(current_theme, man)
    if current_theme in vectorstores: del vectorstores[current_theme]
    return f"Settings saved. Index for theme '{current_theme}' will rebuild on the next query."
def refresh_file_list_ui(current_theme):
    files = list_theme_files(current_theme)
    enabled = [f for f, en in files if en]
    msg = f"Found {len(files)} file(s). {len(enabled)} enabled."
    return gr.update(choices=[f for f, _ in files], value=enabled), msg
def auto_setup_on_load(current_theme):
    theme_dir = theme_upload_dir(current_theme)
    if not os.listdir(theme_dir):
        print("First-time setup: Auto-seeding sample data...")
        seed_files_into_theme(current_theme)
    all_settings = collect_settings("patient", "", "", "warm", "English", "English", 0.7, "None", "None", "All", True, False)
    files_ui, status_msg = refresh_file_list_ui(current_theme)
    return all_settings, files_ui, status_msg

# --- UI Definition ---
CSS = ".gradio-container { font-size: 14px; } #chatbot { min-height: 250px; } #audio_out audio { max-height: 40px; } #audio_in audio { max-height: 40px; padding: 0; }"

with gr.Blocks(theme=gr.themes.Soft(), css=CSS) as demo:
    settings_state = gr.State({})

    with gr.Tab("Chat"):
        user_text = gr.Textbox(show_label=False, placeholder="Type your message here...")
        audio_in = gr.Audio(sources=["microphone"], type="filepath", label="Voice Input", elem_id="audio_in")
        with gr.Row():
            submit_btn = gr.Button("Send", variant="primary")
            save_btn = gr.Button("Save to Memory")
            clear_btn = gr.Button("Clear")
        chat_status = gr.Markdown()
        audio_out = gr.Audio(label="Response Audio", autoplay=True, visible=True, elem_id="audio_out")
        chatbot = gr.Chatbot(elem_id="chatbot", label="Conversation", type="messages")

    with gr.Tab("Personalize"):
        with gr.Accordion("Add to Personal Knowledge Base", open=True):
            gr.Markdown("Add personal notes, memories, or descriptions of people and places. You can also upload audio/video notes or images.")
            with gr.Row():
                with gr.Column(scale=2):
                    personal_text = gr.Textbox(lines=5, label="Text Input", placeholder="e.g., 'My father's name is John. He loves listening to Frank Sinatra music.'")
                with gr.Column(scale=1):
                    personal_file = gr.File(label="Upload Audio/Video File")
                    personal_image = gr.Image(type="filepath", label="Upload Image")
            with gr.Row():
                personal_add_btn = gr.Button("Add Knowledge to Memory", variant="primary")
            personal_status = gr.Markdown()
        with gr.Accordion("Manage Personal Knowledge", open=False):
            personal_memory_display = gr.DataFrame(headers=["Source", "Content"], label="Saved Personal Memories", interactive=False, row_count=(5, "dynamic"))
            with gr.Row():
                personal_refresh_btn = gr.Button("Refresh Memories")
            with gr.Row():
                personal_delete_selector = gr.Dropdown(label="Select a memory to delete", scale=3, interactive=True)
                personal_delete_btn = gr.Button("Delete Selected Memory", variant="stop", scale=1)
            personal_delete_status = gr.Markdown()

    with gr.Tab("Settings"):
        with gr.Group():
            gr.Markdown("## Conversation & Persona Settings")
            with gr.Row():
                role = gr.Radio(CONFIG["roles"], value="caregiver", label="Your Role")
                temperature = gr.Slider(0.0, 1.2, value=0.7, step=0.1, label="Creativity")
                tone = gr.Dropdown(CONFIG["tones"], value="warm", label="Response Tone")
            with gr.Row():
                patient_name = gr.Textbox(label="Patient's Name", placeholder="e.g., 'Dad' or 'John'")
                caregiver_name = gr.Textbox(label="Caregiver's Name", placeholder="e.g., 'me' or 'Jane'")
                behaviour_tag = gr.Dropdown(CONFIG["behavior_tags"], value="None", label="Behaviour Filter (Manual Override)")
                emotion_tag = gr.Dropdown(CONFIG["emotion_tags"], value="None", label="Emotion Filter (Manual Override)")
        with gr.Accordion("Language, Voice & Debugging", open=False):
            language = gr.Dropdown(list(CONFIG["languages"].keys()), value="English", label="Response Language")
            tts_lang = gr.Dropdown(list(CONFIG["languages"].keys()), value="English", label="Voice Language")
            tts_on = gr.Checkbox(True, label="Enable Voice Response (TTS)")
            debug_mode = gr.Checkbox(False, label="Show Debug Info")
        gr.Markdown("--- \n ## General Knowledge Base Management")
        active_theme = gr.Radio(CONFIG["themes"], value="All", label="Active Knowledge Theme")
        with gr.Row():
            with gr.Column(scale=1):
                files_in = gr.File(file_count="multiple", file_types=[".jsonl", ".txt"], label="Upload Knowledge Files")
                upload_btn = gr.Button("Upload to Theme", variant="secondary")
                seed_btn = gr.Button("Import Sample Data", variant="secondary")
            with gr.Column(scale=2):
                mgmt_status = gr.Markdown()
                files_box = gr.CheckboxGroup(choices=[], label="Enable Files for the Selected Theme")
                with gr.Row():
                    save_files_btn = gr.Button("Save Selection", variant="primary")
                    refresh_btn = gr.Button("Refresh List")

    # --- Event Wiring ---
    all_settings_components = [role, patient_name, caregiver_name, tone, language, tts_lang, temperature, behaviour_tag, emotion_tag, active_theme, tts_on, debug_mode]
    for component in all_settings_components:
        component.change(fn=collect_settings, inputs=all_settings_components, outputs=settings_state)

    submit_btn.click(fn=chat_fn, inputs=[user_text, audio_in, settings_state, chatbot], outputs=[user_text, audio_out, chatbot])
    save_btn.click(fn=save_chat_to_memory, inputs=[chatbot], outputs=[chat_status])
    clear_btn.click(lambda: ("", None, [], None, ""), outputs=[user_text, audio_out, chatbot, audio_in, chat_status])

    personal_add_btn.click(fn=add_personal_knowledge, inputs=[personal_text, personal_file, personal_image], outputs=[personal_status]).then(lambda: (None, None, None), outputs=[personal_text, personal_file, personal_image])
    personal_refresh_btn.click(fn=list_personal_memories, inputs=None, outputs=[personal_memory_display, personal_delete_selector])
    personal_delete_btn.click(fn=delete_personal_memory, inputs=[personal_delete_selector], outputs=[personal_delete_status]).then(fn=list_personal_memories, inputs=None, outputs=[personal_memory_display, personal_delete_selector])

    upload_btn.click(upload_knowledge, inputs=[files_in, active_theme], outputs=[mgmt_status]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
    save_files_btn.click(save_file_selection, inputs=[active_theme, files_box], outputs=[mgmt_status])
    seed_btn.click(seed_files_into_theme, inputs=[active_theme]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
    refresh_btn.click(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
    active_theme.change(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
    demo.load(auto_setup_on_load, inputs=[active_theme], outputs=[settings_state, files_box, mgmt_status])

# --- Startup Logic ---
def pre_load_indexes():
    global personal_vectorstore
    print("Pre-loading all knowledge base indexes at startup...")
    for theme in CONFIG["themes"]:
        print(f" - Loading general index for theme: '{theme}'")
        try:
            ensure_index(theme)
            print(f" ...'{theme}' theme loaded successfully.")
        except Exception as e:
            print(f" ...Error loading theme '{theme}': {e}")
    print(" - Loading personal knowledge index...")
    try:
        personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
        print(" ...Personal knowledge loaded successfully.")
    except Exception as e:
        print(f" ...Error loading personal knowledge: {e}")
    print("All indexes loaded. Application is ready.")

if __name__ == "__main__":
    pre_load_indexes()
    demo.queue().launch(debug=True)