Spaces:
Running
Running
Update main.py
Browse files
main.py
CHANGED
|
@@ -623,7 +623,56 @@ async def followup_agent(query: FollowupQueryModel, background_tasks: Background
|
|
| 623 |
|
| 624 |
return StreamingResponse(process_response(), media_type="text/event-stream")
|
| 625 |
|
| 626 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 627 |
## Digiyatra
|
| 628 |
|
| 629 |
@app.post("/digiyatra-followup")
|
|
|
|
| 623 |
|
| 624 |
return StreamingResponse(process_response(), media_type="text/event-stream")
|
| 625 |
|
| 626 |
+
|
| 627 |
+
|
| 628 |
+
@app.post("/v4/followup-agent")
async def followup_agent_v4(query: FollowupQueryModel, background_tasks: BackgroundTasks, api_key: str = Depends(verify_api_key)):
    """
    Followup agent endpoint that provides helpful responses or generates clarifying questions based on user queries.
    Requires API Key authentication via X-API-Key header.

    Streams two sections to the client, each terminated by a "--END_SECTION--" line:
      1. the raw LLM output wrapped in <followup-response> ... </followup-response>
      2. a JSON payload {"clarification": ...} wrapped in <followup-json> ... </followup-json>
    """
    logger.info(f"Received followup agent query: {query.query}")

    # Start a new conversation seeded with the system prompt for unseen ids.
    if query.conversation_id not in conversations:
        conversations[query.conversation_id] = [
            {"role": "system", "content": FOLLOWUP_AGENT_PROMPT}
        ]

    conversations[query.conversation_id].append({"role": "user", "content": query.query})
    last_activity[query.conversation_id] = time.time()

    # Limit tokens in the conversation history.
    # NOTE(review): no truncation is actually applied here — this is the full
    # history; confirm whether a token-limiting helper was meant to be called.
    limited_conversation = conversations[query.conversation_id]

    async def process_response():
        # Section 1: stream the raw model output between matching tags.
        # FIX: stray '+' characters (diff-merge artifacts) removed from the
        # section markers so opening and closing tags are consistent and the
        # client can parse section boundaries reliably.
        yield "<followup-response>\n"
        full_response = ""
        # NOTE(review): chat_with_llama_stream is iterated synchronously inside
        # an async generator — it blocks the event loop while each chunk is
        # produced; consider an async client or run_in_executor.
        for content in chat_with_llama_stream(limited_conversation, model=query.model_id):
            full_response += content
            yield content
        yield "</followup-response>\n"
        yield "--END_SECTION--\n"

        logger.info(f"LLM RAW response for query: {query.query}: {full_response}")

        # Section 2: the parsed clarification as JSON. Only the interaction
        # part is sent; the response text and tool calls are unused here.
        _response_content, interact, _tools = parse_followup_and_tools(full_response)
        result = {
            "clarification": interact
        }

        yield "<followup-json>\n"
        yield json.dumps(result) + "\n"
        yield "</followup-json>\n"
        yield "--END_SECTION--\n"

        # Persist the assistant turn; the DB write runs off the request path.
        conversations[query.conversation_id].append({"role": "assistant", "content": full_response})
        background_tasks.add_task(update_db, query.user_id, query.conversation_id, query.query, full_response)
        logger.info(f"Completed followup agent response for query: {query.query}, send result: {result}")

    return StreamingResponse(process_response(), media_type="text/event-stream")
|
| 675 |
+
|
| 676 |
## Digiyatra
|
| 677 |
|
| 678 |
@app.post("/digiyatra-followup")
|