# NOTE(review): removed non-Python residue scraped from the file viewer
# (status lines, commit hashes, and a line-number gutter) that made the
# module syntactically invalid.
import os
from langgraph.checkpoint.memory import InMemorySaver
from tools import get_retrieve_book_context_tool
from langchain.agents import create_agent
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_postgres import PGVector
def get_book_retriver_agent():
    """Build and return the book-retrieval agent backed by the PGVector store.

    Returns:
        The agent created by :func:`generate_agent`, wired to the vector
        store from :func:`initiate_vector_store`.
    """
    # NOTE(review): the public name keeps the original "retriver" spelling
    # so existing callers are not broken; consider a deprecation alias later.
    return generate_agent(initiate_vector_store())
def generate_agent(vector_store):
    """Create a tool-calling agent whose single tool retrieves book context.

    Args:
        vector_store: Vector store handed to the retrieval-tool factory.

    Returns:
        The agent produced by ``create_agent``.
    """
    # Remote Qwen endpoint; the HF token is read from the environment.
    endpoint = HuggingFaceEndpoint(
        repo_id="Qwen/Qwen3-30B-A3B-Instruct-2507",
        task="text-generation",
        provider="auto",
        huggingfacehub_api_token=os.getenv("HF_TOKEN"),
    )
    chat_model = ChatHuggingFace(llm=endpoint)

    system_prompt = """
You are a knowledge retriever agent, you must always provide context related answers, using the tools provided.
You must always use this tool for reliable information to answer any query.
Dont try to elaborate your answers, always prioritize data coming from your tools.
"""

    return create_agent(
        model=chat_model,
        tools=[get_retrieve_book_context_tool(vector_store)],
        system_prompt=system_prompt,
    )
def initiate_vector_store():
    """Connect to the PGVector "books" collection using MPNet embeddings.

    Returns:
        PGVector: Vector store bound to the ``books`` collection.

    Raises:
        RuntimeError: If the ``POSTGRE_CONNECTION`` environment variable is
            unset or empty — fail fast with a clear message instead of
            passing ``None`` into the database driver.
    """
    connection = os.getenv("POSTGRE_CONNECTION")
    if not connection:
        raise RuntimeError(
            "POSTGRE_CONNECTION environment variable is not set; "
            "cannot connect to the PGVector database."
        )
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2"
    )
    return PGVector(
        connection=connection,
        collection_name="books",
        embeddings=embeddings,
    )