KeenWoo committed
Commit 14f1ea0 · verified · 1 Parent(s): 7c6b903

Delete app.py

Files changed (1)
  1. app.py +0 -398
app.py DELETED
@@ -1,398 +0,0 @@
- import os
- import json
- import shutil
- import gradio as gr
- from datetime import datetime
- from typing import List, Dict, Any, Optional
-
- # --- Agent Imports & Safe Fallbacks ---
- try:
-     from alz_companion.agent import (
-         bootstrap_vectorstore, make_rag_chain, answer_query, synthesize_tts,
-         transcribe_audio, detect_tags_from_query, describe_image, build_or_load_vectorstore,
-         _default_embeddings
-     )
-     from alz_companion.prompts import BEHAVIOUR_TAGS, EMOTION_STYLES
-     from langchain.schema import Document
-     from langchain_community.vectorstores import FAISS
-     AGENT_OK = True
- except Exception as e:
-     AGENT_OK = False
-     # Define all fallback functions and classes
-     def bootstrap_vectorstore(sample_paths=None, index_path="data/"): return object()
-     def build_or_load_vectorstore(docs, index_path, is_personal=False): return object()
-     def make_rag_chain(vs_general, vs_personal, **kwargs): return lambda q, **k: {"answer": f"(Demo) You asked: {q}", "sources": []}
-     def answer_query(chain, q, **kwargs): return chain(q, **kwargs)
-     def synthesize_tts(text: str, lang: str = "en"): return None
-     def transcribe_audio(filepath: str, lang: str = "en"): return "This is a transcribed message."
-     def detect_tags_from_query(query: str, behavior_options: list, emotion_options: list): return {"detected_behavior": "None", "detected_emotion": "None"}
-     def describe_image(image_path: str): return "This is a description of an image."
-     class Document:
-         def __init__(self, page_content, metadata):
-             self.page_content = page_content
-             self.metadata = metadata
-     class FAISS:
-         def __init__(self):
-             self.docstore = type('obj', (object,), {'_dict': {}})()
-     BEHAVIOUR_TAGS = {"None": []}
-     EMOTION_STYLES = {"None": {}}
-     print(f"WARNING: Could not import from alz_companion ({e}). Running in UI-only demo mode.")
-
- # --- Centralized Configuration ---
- CONFIG = {
-     "themes": ["All", "The Father", "Still Alice", "Away from Her", "General Caregiving"],
-     "roles": ["patient", "caregiver"],
-     "behavior_tags": ["None"] + list(BEHAVIOUR_TAGS.keys()),
-     "emotion_tags": ["None"] + list(EMOTION_STYLES.keys()),
-     "languages": {"English": "en", "Chinese": "zh", "Malay": "ms", "French": "fr", "Spanish": "es"},
-     "tones": ["warm", "neutral", "formal", "playful"]
- }
-
- # --- File Management & Vector Store Logic ---
- INDEX_BASE = os.getenv('INDEX_BASE', 'data')
- UPLOADS_BASE = os.path.join(INDEX_BASE, "uploads")
- PERSONAL_INDEX_PATH = os.path.join(INDEX_BASE, "personal_faiss_index")
- os.makedirs(UPLOADS_BASE, exist_ok=True)
- THEME_PATHS = {t: os.path.join(INDEX_BASE, f"faiss_index_{t.replace(' ', '').lower()}") for t in CONFIG["themes"]}
- vectorstores = {}
- personal_vectorstore = None
-
- def canonical_theme(tk: str) -> str: return tk if tk in CONFIG["themes"] else "All"
- def theme_upload_dir(theme: str) -> str:
-     p = os.path.join(UPLOADS_BASE, f"theme_{canonical_theme(theme).replace(' ', '').lower()}")
-     os.makedirs(p, exist_ok=True)
-     return p
- def load_manifest(theme: str) -> Dict[str, Any]:
-     p = os.path.join(theme_upload_dir(theme), "manifest.json")
-     if os.path.exists(p):
-         try:
-             with open(p, "r", encoding="utf-8") as f: return json.load(f)
-         except Exception: pass
-     return {"files": {}}
- def save_manifest(theme: str, man: Dict[str, Any]):
-     with open(os.path.join(theme_upload_dir(theme), "manifest.json"), "w", encoding="utf-8") as f: json.dump(man, f, indent=2)
- def list_theme_files(theme: str) -> List[tuple[str, bool]]:
-     man = load_manifest(theme)
-     base = theme_upload_dir(theme)
-     found = [(n, bool(e)) for n, e in man.get("files", {}).items() if os.path.exists(os.path.join(base, n))]
-     existing = {n for n, e in found}
-     for name in sorted(os.listdir(base)):
-         if name not in existing and os.path.isfile(os.path.join(base, name)): found.append((name, False))
-     man["files"] = dict(found)
-     save_manifest(theme, man)
-     return found
- def copy_into_theme(theme: str, src_path: str) -> str:
-     fname = os.path.basename(src_path)
-     dest = os.path.join(theme_upload_dir(theme), fname)
-     shutil.copy2(src_path, dest)
-     return dest
- def seed_files_into_theme(theme: str):
-     SEED_FILES = [
-         ("sample_data/caregiving_tips.txt", True),
-         ("sample_data/the_father_segments_tagged_with_emotion_hybrid.jsonl", True),
-         ("sample_data/still_alice_segments_tagged_with_emotion_hybrid.jsonl", True),
-         ("sample_data/away_from_her_segments_tagged_with_emotion_hybrid.jsonl", True)
-     ]
-     man, changed = load_manifest(theme), False
-     for path, enable in SEED_FILES:
-         if not os.path.exists(path): continue
-         fname = os.path.basename(path)
-         if not os.path.exists(os.path.join(theme_upload_dir(theme), fname)):
-             copy_into_theme(theme, path)
-             man["files"][fname] = bool(enable)
-             changed = True
-     if changed: save_manifest(theme, man)
-
- def ensure_index(theme='All'):
-     theme = canonical_theme(theme)
-     if theme in vectorstores: return vectorstores[theme]
-     upload_dir = theme_upload_dir(theme)
-     enabled_files = [os.path.join(upload_dir, n) for n, enabled in list_theme_files(theme) if enabled]
-     index_path = THEME_PATHS.get(theme)
-     vectorstores[theme] = bootstrap_vectorstore(sample_paths=enabled_files, index_path=index_path)
-     return vectorstores[theme]
-
- # --- Gradio Callbacks ---
- def collect_settings(*args):
-     keys = ["role", "patient_name", "caregiver_name", "tone", "language", "tts_lang", "temperature", "behaviour_tag", "emotion_tag", "active_theme", "tts_on", "debug_mode"]
-     return dict(zip(keys, args))
-
- def add_personal_knowledge(text_input, file_input, image_input):
-     global personal_vectorstore
-     if not any([text_input, file_input, image_input]):
-         return "Please provide text, a file, or an image to add."
-     content_text, content_source = "", ""
-     if text_input and text_input.strip():
-         content_text, content_source = text_input.strip(), "Text Input"
-     elif file_input:
-         content_text, content_source = transcribe_audio(file_input.name), os.path.basename(file_input.name)
-     elif image_input:
-         content_text, content_source = describe_image(image_input.name), "Image Input"
-     if not content_text:
-         return "Could not extract any text content to add."
-     print("Auto-tagging personal memory...")
-     behavior_options = CONFIG.get("behavior_tags", [])
-     emotion_options = CONFIG.get("emotion_tags", [])
-     detected_tags = detect_tags_from_query(content_text, behavior_options=behavior_options, emotion_options=emotion_options)
-     detected_behavior = detected_tags.get("detected_behavior")
-     detected_emotion = detected_tags.get("detected_emotion")
-     print(f" ...Detected Behavior: {detected_behavior}, Emotion: {detected_emotion}")
-     metadata = {"source": content_source}
-     if detected_behavior and detected_behavior != "None":
-         metadata["behaviors"] = [detected_behavior.lower()]
-     if detected_emotion and detected_emotion != "None":
-         metadata["emotion"] = detected_emotion.lower()
-     doc_to_add = Document(page_content=content_text, metadata=metadata)
-     if personal_vectorstore is None:
-         personal_vectorstore = build_or_load_vectorstore([doc_to_add], PERSONAL_INDEX_PATH, is_personal=True)
-     else:
-         personal_vectorstore.add_documents([doc_to_add])
-     personal_vectorstore.save_local(PERSONAL_INDEX_PATH)
-     return f"Successfully added memory with tags (Behavior: {detected_behavior}, Emotion: {detected_emotion})"
-
- def save_chat_to_memory(chat_history):
-     global personal_vectorstore
-     if not chat_history:
-         return "Nothing to save."
-     formatted_chat = []
-     for message in chat_history:
-         role = "User" if message["role"] == "user" else "Assistant"
-         content = message["content"].strip()
-         if content.startswith("*(Auto-detected context:"):
-             continue
-         formatted_chat.append(f"{role}: {content}")
-     conversation_text = "\n".join(formatted_chat)
-     if not conversation_text:
-         return "No conversation content to save."
-     timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-     doc_to_add = Document(page_content=conversation_text, metadata={"source": f"Conversation saved on {timestamp}"})
-     if personal_vectorstore is None:
-         personal_vectorstore = build_or_load_vectorstore([doc_to_add], PERSONAL_INDEX_PATH, is_personal=True)
-     else:
-         personal_vectorstore.add_documents([doc_to_add])
-     personal_vectorstore.save_local(PERSONAL_INDEX_PATH)
-     print(f"Saved conversation to long-term memory.")
-     return f"Conversation from {timestamp} saved successfully to long-term memory!"
-
- def list_personal_memories():
-     global personal_vectorstore
-     if personal_vectorstore is None or not hasattr(personal_vectorstore.docstore, '_dict') or not personal_vectorstore.docstore._dict:
-         return gr.update(value=[["No memories to display", ""]]), gr.update(choices=["No memories to select"], value=None)
-     docs = list(personal_vectorstore.docstore._dict.values())
-     dataframe_data = [[doc.metadata.get('source', 'Unknown'), doc.page_content] for doc in docs]
-     dropdown_choices = [doc.page_content for doc in docs]
-     return gr.update(value=dataframe_data), gr.update(choices=dropdown_choices)
-
- def delete_personal_memory(memory_to_delete):
-     global personal_vectorstore
-     if personal_vectorstore is None or not memory_to_delete:
-         return "Knowledge base is empty or no memory selected."
-     all_docs = list(personal_vectorstore.docstore._dict.values())
-     docs_to_keep = [doc for doc in all_docs if doc.page_content != memory_to_delete]
-     if len(all_docs) == len(docs_to_keep):
-         return "Error: Could not find the selected memory to delete."
-     print(f"Deleting memory. {len(docs_to_keep)} memories remaining.")
-     if not docs_to_keep:
-         if os.path.isdir(PERSONAL_INDEX_PATH):
-             shutil.rmtree(PERSONAL_INDEX_PATH)
-         personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
-     else:
-         # Rebuild and save the index
-         new_vs = FAISS.from_documents(docs_to_keep, _default_embeddings())
-         new_vs.save_local(PERSONAL_INDEX_PATH)
-         personal_vectorstore = new_vs
-     return "Successfully deleted memory. The list will now refresh."
-
- def chat_fn(user_text, audio_file, settings, chat_history):
-     global personal_vectorstore
-     question = (user_text or "").strip()
-     if audio_file and not question:
-         try:
-             voice_lang_name = settings.get("tts_lang", "English")
-             voice_lang_code = CONFIG["languages"].get(voice_lang_name, "en")
-             question = transcribe_audio(audio_file, lang=voice_lang_code)
-         except Exception as e:
-             err_msg = f"Audio Error: {e}" if settings.get("debug_mode") else "Sorry, I couldn't understand the audio."
-             chat_history.append({"role": "assistant", "content": err_msg})
-             return "", None, chat_history
-     if not question:
-         return "", None, chat_history
-     chat_history.append({"role": "user", "content": question})
-     manual_behavior_tag = settings.get("behaviour_tag")
-     manual_emotion_tag = settings.get("emotion_tag")
-     if manual_behavior_tag not in [None, "None"] or manual_emotion_tag not in [None, "None"]:
-         scenario_tag, emotion_tag = manual_behavior_tag, manual_emotion_tag
-     else:
-         behavior_options = CONFIG.get("behavior_tags", [])
-         emotion_options = CONFIG.get("emotion_tags", [])
-         detected_tags = detect_tags_from_query(question, behavior_options=behavior_options, emotion_options=emotion_options)
-         scenario_tag, emotion_tag = detected_tags.get("detected_behavior"), detected_tags.get("detected_emotion")
-         if (scenario_tag and scenario_tag != "None") or (emotion_tag and emotion_tag != "None"):
-             detected_msg = f"*(Auto-detected context: Behavior=`{scenario_tag}`, Emotion=`{emotion_tag}`)*"
-             chat_history.append({"role": "assistant", "content": detected_msg})
-     active_theme = settings.get("active_theme", "All")
-     vs_general = ensure_index(active_theme)
-     if personal_vectorstore is None:
-         personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
-     rag_chain_settings = {"role": settings.get("role"), "temperature": settings.get("temperature"), "language": settings.get("language"), "patient_name": settings.get("patient_name"), "caregiver_name": settings.get("caregiver_name"), "tone": settings.get("tone"),}
-     chain = make_rag_chain(vs_general, personal_vectorstore, **rag_chain_settings)
-     if scenario_tag == "None": scenario_tag = None
-     if emotion_tag == "None": emotion_tag = None
-     simple_history = chat_history[:-1]
-     response = answer_query(chain, question, chat_history=simple_history, scenario_tag=scenario_tag, emotion_tag=emotion_tag)
-     answer = response.get("answer", "[No answer found]")
-     chat_history.append({"role": "assistant", "content": answer})
-     audio_out = None
-     if settings.get("tts_on") and answer:
-         tts_lang_code = CONFIG["languages"].get(settings.get("tts_lang"), "en")
-         audio_out = synthesize_tts(answer, lang=tts_lang_code)
-     from gradio import update
-     return "", (update(value=audio_out, visible=bool(audio_out))), chat_history
-
- def upload_knowledge(files, current_theme):
-     if not files: return "No files were selected to upload."
-     added = 0
-     for f in files:
-         try:
-             copy_into_theme(current_theme, f.name); added += 1
-         except Exception as e: print(f"Error uploading file {f.name}: {e}")
-     if added > 0 and current_theme in vectorstores: del vectorstores[current_theme]
-     return f"Uploaded {added} file(s). Refreshing file list..."
- def save_file_selection(current_theme, enabled_files):
-     man = load_manifest(current_theme)
-     for fname in man['files']: man['files'][fname] = fname in enabled_files
-     save_manifest(current_theme, man)
-     if current_theme in vectorstores: del vectorstores[current_theme]
-     return f"Settings saved. Index for theme '{current_theme}' will rebuild on the next query."
- def refresh_file_list_ui(current_theme):
-     files = list_theme_files(current_theme)
-     enabled = [f for f, en in files if en]
-     msg = f"Found {len(files)} file(s). {len(enabled)} enabled."
-     return gr.update(choices=[f for f, _ in files], value=enabled), msg
- def auto_setup_on_load(current_theme):
-     theme_dir = theme_upload_dir(current_theme)
-     if not os.listdir(theme_dir):
-         print("First-time setup: Auto-seeding sample data...")
-         seed_files_into_theme(current_theme)
-     all_settings = collect_settings("patient", "", "", "warm", "English", "English", 0.7, "None", "None", "All", True, False)
-     files_ui, status_msg = refresh_file_list_ui(current_theme)
-     return all_settings, files_ui, status_msg
-
- # --- UI Definition ---
- CSS = ".gradio-container { font-size: 14px; } #chatbot { min-height: 250px; } #audio_out audio { max-height: 40px; } #audio_in audio { max-height: 40px; padding: 0; }"
-
- with gr.Blocks(theme=gr.themes.Soft(), css=CSS) as demo:
-     settings_state = gr.State({})
-
-     with gr.Tab("Chat"):
-         user_text = gr.Textbox(show_label=False, placeholder="Type your message here...")
-         audio_in = gr.Audio(sources=["microphone"], type="filepath", label="Voice Input", elem_id="audio_in")
-         with gr.Row():
-             submit_btn = gr.Button("Send", variant="primary")
-             save_btn = gr.Button("Save to Memory")
-             clear_btn = gr.Button("Clear")
-         chat_status = gr.Markdown()
-         audio_out = gr.Audio(label="Response Audio", autoplay=True, visible=True, elem_id="audio_out")
-         chatbot = gr.Chatbot(elem_id="chatbot", label="Conversation", type="messages")
-
-     with gr.Tab("Personalize"):
-         with gr.Accordion("Add to Personal Knowledge Base", open=True):
-             gr.Markdown("Add personal notes, memories, or descriptions of people and places. You can also upload audio/video notes or images.")
-             with gr.Row():
-                 with gr.Column(scale=2):
-                     personal_text = gr.Textbox(lines=5, label="Text Input", placeholder="e.g., 'My father's name is John. He loves listening to Frank Sinatra music.'")
-                 with gr.Column(scale=1):
-                     personal_file = gr.File(label="Upload Audio/Video File")
-                     personal_image = gr.Image(type="filepath", label="Upload Image")
-             with gr.Row():
-                 personal_add_btn = gr.Button("Add Knowledge to Memory", variant="primary")
-             personal_status = gr.Markdown()
-         with gr.Accordion("Manage Personal Knowledge", open=False):
-             personal_memory_display = gr.DataFrame(headers=["Source", "Content"], label="Saved Personal Memories", interactive=False, row_count=(5, "dynamic"))
-             with gr.Row():
-                 personal_refresh_btn = gr.Button("Refresh Memories")
-             with gr.Row():
-                 personal_delete_selector = gr.Dropdown(label="Select a memory to delete", scale=3, interactive=True)
-                 personal_delete_btn = gr.Button("Delete Selected Memory", variant="stop", scale=1)
-             personal_delete_status = gr.Markdown()
-
-     with gr.Tab("Settings"):
-         with gr.Group():
-             gr.Markdown("## Conversation & Persona Settings")
-             with gr.Row():
-                 role = gr.Radio(CONFIG["roles"], value="caregiver", label="Your Role")
-                 temperature = gr.Slider(0.0, 1.2, value=0.7, step=0.1, label="Creativity")
-                 tone = gr.Dropdown(CONFIG["tones"], value="warm", label="Response Tone")
-             with gr.Row():
-                 patient_name = gr.Textbox(label="Patient's Name", placeholder="e.g., 'Dad' or 'John'")
-                 caregiver_name = gr.Textbox(label="Caregiver's Name", placeholder="e.g., 'me' or 'Jane'")
-                 behaviour_tag = gr.Dropdown(CONFIG["behavior_tags"], value="None", label="Behaviour Filter (Manual Override)")
-                 emotion_tag = gr.Dropdown(CONFIG["emotion_tags"], value="None", label="Emotion Filter (Manual Override)")
-             with gr.Accordion("Language, Voice & Debugging", open=False):
-                 language = gr.Dropdown(list(CONFIG["languages"].keys()), value="English", label="Response Language")
-                 tts_lang = gr.Dropdown(list(CONFIG["languages"].keys()), value="English", label="Voice Language")
-                 tts_on = gr.Checkbox(True, label="Enable Voice Response (TTS)")
-                 debug_mode = gr.Checkbox(False, label="Show Debug Info")
-         gr.Markdown("--- \n ## General Knowledge Base Management")
-         active_theme = gr.Radio(CONFIG["themes"], value="All", label="Active Knowledge Theme")
-         with gr.Row():
-             with gr.Column(scale=1):
-                 files_in = gr.File(file_count="multiple", file_types=[".jsonl", ".txt"], label="Upload Knowledge Files")
-                 upload_btn = gr.Button("Upload to Theme", variant="secondary")
-                 seed_btn = gr.Button("Import Sample Data", variant="secondary")
-             with gr.Column(scale=2):
-                 mgmt_status = gr.Markdown()
-                 files_box = gr.CheckboxGroup(choices=[], label="Enable Files for the Selected Theme")
-                 with gr.Row():
-                     save_files_btn = gr.Button("Save Selection", variant="primary")
-                     refresh_btn = gr.Button("Refresh List")
-
-     # --- Event Wiring ---
-     all_settings_components = [role, patient_name, caregiver_name, tone, language, tts_lang, temperature, behaviour_tag, emotion_tag, active_theme, tts_on, debug_mode]
-     for component in all_settings_components:
-         component.change(fn=collect_settings, inputs=all_settings_components, outputs=settings_state)
-
-     submit_btn.click(fn=chat_fn, inputs=[user_text, audio_in, settings_state, chatbot], outputs=[user_text, audio_out, chatbot])
-     save_btn.click(fn=save_chat_to_memory, inputs=[chatbot], outputs=[chat_status])
-     clear_btn.click(lambda: (None, None, [], None, "", ""), outputs=[user_text, audio_out, chatbot, audio_in, user_text, chat_status])
-
-     personal_add_btn.click(fn=add_personal_knowledge, inputs=[personal_text, personal_file, personal_image], outputs=[personal_status]).then(lambda: (None, None, None), outputs=[personal_text, personal_file, personal_image])
-     personal_refresh_btn.click(fn=list_personal_memories, inputs=None, outputs=[personal_memory_display, personal_delete_selector])
-     personal_delete_btn.click(fn=delete_personal_memory, inputs=[personal_delete_selector], outputs=[personal_delete_status]).then(fn=list_personal_memories, inputs=None, outputs=[personal_memory_display, personal_delete_selector])
-
-     upload_btn.click(upload_knowledge, inputs=[files_in, active_theme], outputs=[mgmt_status]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
-     save_files_btn.click(save_file_selection, inputs=[active_theme, files_box], outputs=[mgmt_status])
-     seed_btn.click(seed_files_into_theme, inputs=[active_theme]).then(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
-     refresh_btn.click(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
-     active_theme.change(refresh_file_list_ui, inputs=[active_theme], outputs=[files_box, mgmt_status])
-     demo.load(auto_setup_on_load, inputs=[active_theme], outputs=[settings_state, files_box, mgmt_status])
-
- # --- Startup Logic ---
- def pre_load_indexes():
-     global personal_vectorstore
-     print("Pre-loading all knowledge base indexes at startup...")
-     for theme in CONFIG["themes"]:
-         print(f" - Loading general index for theme: '{theme}'")
-         try:
-             ensure_index(theme)
-             print(f" ...'{theme}' theme loaded successfully.")
-         except Exception as e:
-             print(f" ...Error loading theme '{theme}': {e}")
-     print(" - Loading personal knowledge index...")
-     try:
-         personal_vectorstore = build_or_load_vectorstore([], PERSONAL_INDEX_PATH, is_personal=True)
-         print(" ...Personal knowledge loaded successfully.")
-     except Exception as e:
-         print(f" ...Error loading personal knowledge: {e}")
-     print("All indexes loaded. Application is ready.")
-
- if __name__ == "__main__":
-     pre_load_indexes()
-     demo.queue().launch(debug=True)