Spaces:
Running
Running
Eurico149
committed on
Commit
·
16fb597
1
Parent(s):
b0df00c
feat: using a larger model on rag agent
Browse files- agents/BookRetriverAgent.py +6 -4
- app.py +3 -5
agents/BookRetriverAgent.py
CHANGED
|
@@ -1,4 +1,7 @@
|
|
| 1 |
import os
|
|
|
|
|
|
|
|
|
|
| 2 |
from tools import get_retrieve_book_context_tool
|
| 3 |
from langchain.agents import create_agent
|
| 4 |
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
|
|
@@ -13,7 +16,7 @@ def get_book_retriver_agent():
|
|
| 13 |
|
| 14 |
def generate_agent(vector_store):
|
| 15 |
hf_model = HuggingFaceEndpoint(
|
| 16 |
-
repo_id="Qwen/Qwen3-
|
| 17 |
task="text-generation",
|
| 18 |
provider="auto",
|
| 19 |
huggingfacehub_api_token=os.getenv("HF_TOKEN")
|
|
@@ -24,13 +27,12 @@ def generate_agent(vector_store):
|
|
| 24 |
prompt = """
|
| 25 |
You are a knowledge retriever agent, you must always provide context related answers, using the tools provided.
|
| 26 |
You must always use this tool for reliable information to answer any query.
|
| 27 |
-
|
| 28 |
-
Dont use the same tool more than once.
|
| 29 |
"""
|
| 30 |
return create_agent(
|
| 31 |
model=llm,
|
| 32 |
tools=tools,
|
| 33 |
-
system_prompt=prompt
|
| 34 |
)
|
| 35 |
|
| 36 |
def initiate_vector_store():
|
|
|
|
| 1 |
import os
|
| 2 |
+
|
| 3 |
+
from langgraph.checkpoint.memory import InMemorySaver
|
| 4 |
+
|
| 5 |
from tools import get_retrieve_book_context_tool
|
| 6 |
from langchain.agents import create_agent
|
| 7 |
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
|
|
|
|
| 16 |
|
| 17 |
def generate_agent(vector_store):
|
| 18 |
hf_model = HuggingFaceEndpoint(
|
| 19 |
+
repo_id="Qwen/Qwen3-30B-A3B-Instruct-2507",
|
| 20 |
task="text-generation",
|
| 21 |
provider="auto",
|
| 22 |
huggingfacehub_api_token=os.getenv("HF_TOKEN")
|
|
|
|
| 27 |
prompt = """
|
| 28 |
You are a knowledge retriever agent, you must always provide context related answers, using the tools provided.
|
| 29 |
You must always use this tool for reliable information to answer any query.
|
| 30 |
+
Dont try to elaborate your answers, always prioritize data coming from your tools.
|
|
|
|
| 31 |
"""
|
| 32 |
return create_agent(
|
| 33 |
model=llm,
|
| 34 |
tools=tools,
|
| 35 |
+
system_prompt=prompt,
|
| 36 |
)
|
| 37 |
|
| 38 |
def initiate_vector_store():
|
app.py
CHANGED
|
@@ -43,11 +43,9 @@ class GradioAgent:
|
|
| 43 |
]
|
| 44 |
|
| 45 |
prompt = (
|
| 46 |
-
"You are a
|
| 47 |
-
"
|
| 48 |
-
"
|
| 49 |
-
"Keep the technical accuracy of your responses, but present them in a natural, conversational way. "
|
| 50 |
-
"Your main goal is to generate user-friendly and informative answers based on the data you gather."
|
| 51 |
)
|
| 52 |
|
| 53 |
return create_agent(
|
|
|
|
| 43 |
]
|
| 44 |
|
| 45 |
prompt = (
|
| 46 |
+
"You are a helpful and usefull coordinator agent, you have access to a collection of tools and"
|
| 47 |
+
" agents to help you with reliable data to your query's. "
|
| 48 |
+
"One of your main objectives is to generate user friendly answers based on the information you have."
|
|
|
|
|
|
|
| 49 |
)
|
| 50 |
|
| 51 |
return create_agent(
|