from pydantic import BaseModel
from langchain_community.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.agents import Tool, AgentExecutor, create_json_chat_agent, create_react_agent
from langchain_core.messages import AIMessage, HumanMessage
from personal_models import PersonalModels
from personal_templates import PersonalTemplate
import uuid

#------------------------------- Mixtral
def create_model_instances(model_name, embedding_model, embedding_dir, agent_type):
    templates = PersonalTemplate()
    models = PersonalModels()

    # Chat model used by the JSON chat agent
    chat_model = models.get_chat_model(
        model=model_name,
        temperature=0.1,
        max_tokens=1000)

    # RAG model used by the retrieval chain and the ReAct agent
    llm = models.get_rag_model(
        model=model_name,
        temperature=0.1,
        max_tokens=500,
        top_k=20)

    embeddings = models.get_embedding_model(model=embedding_model)
    # Note: recent langchain-community releases may also require allow_dangerous_deserialization=True here
    vectorstore = FAISS.load_local(embedding_dir, embeddings)

    qa_tool = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
        verbose=True,
        chain_type_kwargs={'prompt': templates.retriever_prompt}
    )
    tools = [
        Tool(
            name='tax-law-search',
            func=qa_tool.run,
            description='useful when you want to answer questions about Brazilian tax law and legal entities',
            verbose=True
        )
    ]

    print('Using agent type:', agent_type)
    if agent_type == 'JSON_CHAT_MODEL':
        agent = create_json_chat_agent(chat_model, tools, templates.chatbot_tributario_prompt)
    else:
        agent = create_react_agent(llm, tools, templates.chatbot_tributario_prompt_react)

    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        handle_parsing_errors="Check your output and make sure it conforms! Do not output an action and a final answer at the same time.",
        return_intermediate_steps=True,
        verbose=True
    )
    return agent_executor
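
# Sketch of a direct call to the executor returned above; the model name, embedding
# model and index directory are hypothetical placeholders, not values from this repo:
#
#     executor = create_model_instances(
#         model_name='mixtral-8x7b',          # hypothetical model id
#         embedding_model='my-embeddings',    # hypothetical embedding model id
#         embedding_dir='faiss_index/',       # hypothetical local FAISS index path
#         agent_type='JSON_CHAT_MODEL')
#     result = executor.invoke({"input": "How is ISS levied on services?", "chat_history": []})
#     print(result['output'])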

#-------------------------------- Modified class
class Message(BaseModel):
    user_id: str
    text: str
    model: str
    embedding_model: str
    embedding_dir: str
    agent_type: str

class ChatHandler:
    # Per-user chat history; a class attribute, so it is shared across all handler instances
    historico_por_usuario = {}

    def __init__(self):
        pass

    def generate_id(self):
        new_id = str(uuid.uuid4())
        self.historico_por_usuario[new_id] = []
        return new_id

    def clear_history(self, user_id):
        if user_id not in self.historico_por_usuario:
            return "Usuário não encontrado"  # user not found
        self.historico_por_usuario[user_id].clear()
        return "Histórico limpo com sucesso"  # history cleared successfully

    def post_message(self, message: dict):
        user_id = message['user_id']
        if user_id not in self.historico_por_usuario:
            return "Usuário não encontrado"  # user not found
        model_name = message['model']
        agent_executor = create_model_instances(
            model_name, message['embedding_model'], message['embedding_dir'], message['agent_type'])
        try:
            print('message: ', message['text'])
            # Only the last 10 messages are sent as conversational context
            chat_history = self.historico_por_usuario[user_id][-10:]
            response = agent_executor.invoke(
                {
                    "input": message['text'],
                    "chat_history": chat_history
                }
            )
            print(response['intermediate_steps'])
            self.historico_por_usuario[user_id].append(HumanMessage(content=message['text']))
            self.historico_por_usuario[user_id].append(AIMessage(content=response['output']))
        except Exception:
            # Execution errors are not saved to the history; the user is asked to try again
            response = {'output': 'Ocorreu um erro de execução do Agente. O erro não será salvo no histórico, tente novamente.',
                        'intermediate_steps': []}
        return response['output'], response['intermediate_steps']
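
# Minimal usage sketch. Assumption: the model id, embedding model id and FAISS index
# directory below are hypothetical placeholders that PersonalModels/PersonalTemplate
# would need to resolve in your environment.
if __name__ == '__main__':
    handler = ChatHandler()
    user_id = handler.generate_id()
    output, steps = handler.post_message({
        'user_id': user_id,
        'text': 'What is the difference between Simples Nacional and Lucro Presumido?',
        'model': 'mixtral-8x7b',             # hypothetical model id
        'embedding_model': 'my-embeddings',  # hypothetical embedding model id
        'embedding_dir': 'faiss_index/',     # hypothetical local FAISS index directory
        'agent_type': 'JSON_CHAT_MODEL',
    })
    print(output)
    handler.clear_history(user_id)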