Spaces:
Sleeping
Sleeping
Commit
·
2f2f8a0
1
Parent(s):
f6f656f
Wish I could remember why I made this change
Browse files
main.py
CHANGED
|
@@ -86,7 +86,7 @@ You are a query analysis agent. Transform the user's query into a precise search
|
|
| 86 |
2. If status keywords (ongoing, completed, upcoming, etc.) are present, pick the matching table.
|
| 87 |
3. If no status keyword, set filter_table to null.
|
| 88 |
4. Return JSON: {{"search_query": "...", "filter_table": "table_name or null"}}
|
| 89 |
-
"""
|
| 90 |
|
| 91 |
ANSWER_SYSTEM_PROMPT = """
|
| 92 |
You are an expert AI assistant for a premier real estate developer.
|
|
@@ -154,16 +154,23 @@ def generate_elevenlabs_sync(text: str, voice: str) -> bytes:
|
|
| 154 |
|
| 155 |
# --- UPDATED formulate_search_plan with logging ---
|
| 156 |
async def formulate_search_plan(user_query: str) -> dict:
|
| 157 |
-
logging.info(f"Formulating search plan for query: {user_query}")
|
| 158 |
for attempt in range(3):
|
| 159 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 160 |
response = await run_in_threadpool(
|
| 161 |
client_openai.chat.completions.create,
|
| 162 |
model=PLANNER_MODEL,
|
| 163 |
-
messages=[{"role": "user", "content":
|
| 164 |
response_format={"type": "json_object"},
|
| 165 |
temperature=0.0
|
| 166 |
)
|
|
|
|
| 167 |
# Log the raw response BEFORE trying to parse
|
| 168 |
raw_response_content = response.choices[0].message.content
|
| 169 |
logging.info(f"Raw Planner LLM response content: {raw_response_content}")
|
|
|
|
| 86 |
2. If status keywords (ongoing, completed, upcoming, etc.) are present, pick the matching table.
|
| 87 |
3. If no status keyword, set filter_table to null.
|
| 88 |
4. Return JSON: {{"search_query": "...", "filter_table": "table_name or null"}}
|
| 89 |
+
"""
|
| 90 |
|
| 91 |
ANSWER_SYSTEM_PROMPT = """
|
| 92 |
You are an expert AI assistant for a premier real estate developer.
|
|
|
|
| 154 |
|
| 155 |
# --- UPDATED formulate_search_plan with logging ---
|
| 156 |
async def formulate_search_plan(user_query: str) -> dict:
|
| 157 |
+
logging.info(f"Formulating search plan for query: {user_query}")
|
| 158 |
for attempt in range(3):
|
| 159 |
try:
|
| 160 |
+
# --- FIX: Format the prompt here with BOTH variables ---
|
| 161 |
+
formatted_prompt = QUERY_FORMULATION_PROMPT.format(
|
| 162 |
+
table_descriptions=TABLE_DESCRIPTIONS,
|
| 163 |
+
user_query=user_query
|
| 164 |
+
)
|
| 165 |
+
|
| 166 |
response = await run_in_threadpool(
|
| 167 |
client_openai.chat.completions.create,
|
| 168 |
model=PLANNER_MODEL,
|
| 169 |
+
messages=[{"role": "user", "content": formatted_prompt}], # Use the fully formatted prompt
|
| 170 |
response_format={"type": "json_object"},
|
| 171 |
temperature=0.0
|
| 172 |
)
|
| 173 |
+
# ... rest of the function ...
|
| 174 |
# Log the raw response BEFORE trying to parse
|
| 175 |
raw_response_content = response.choices[0].message.content
|
| 176 |
logging.info(f"Raw Planner LLM response content: {raw_response_content}")
|