VIRTUS committed
Commit d45fd05 · 1 Parent(s): ef5c5ef

refactor: clean up the general structure and change the agent's memory handling

Files changed (1)
  1. app.py  +42 -49
app.py CHANGED
@@ -1,64 +1,57 @@
+import os
 import gradio as gr
 from langchain.agents import create_agent
 from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
-from langchain_core.messages import HumanMessage, SystemMessage
+from langgraph.checkpoint.memory import InMemorySaver


-def respond(
-    message,
-    history: list,
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    hf_model = HuggingFaceEndpoint(
-        repo_id="meta-llama/Llama-3.1-8B-Instruct",
-        task="text-generation",
-        provider="auto",
-        max_new_tokens=max_tokens,
-        temperature=temperature,
-        top_p=top_p,
-        huggingfacehub_api_token=hf_token.token
-    )
-
-    llm = ChatHuggingFace(llm=hf_model)
-
-    agent = create_agent(
-        tools=[],
-        model=llm,
-        system_prompt=system_message
-    )
-
-    m = HumanMessage(message)
-    saida = agent.invoke(m)['messages'][0].content
-
-    yield saida
-
-
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly and helpful Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
+class GradioAgent:
+
+    def __init__(self):
+        self.agent = self.__create_agent()
+
+    def inicialize(self):
+        chatbot = gr.ChatInterface(
+            self._respond,
+            type="messages"
+        )
+
+        with gr.Blocks() as demo:
+            with gr.Sidebar():
+                gr.LoginButton()
+            chatbot.render()
+
+        demo.launch()
+
+    def __create_agent(self):
+        hf_model = HuggingFaceEndpoint(
+            repo_id="meta-llama/Llama-3.1-8B-Instruct",
+            task="text-generation",
+            provider="auto",
+            huggingfacehub_api_token=os.getenv("HF_TOKEN")
+        )
+        llm = ChatHuggingFace(llm=hf_model)
+
+        return create_agent(
+            tools=[],
+            model=llm,
+            checkpointer=InMemorySaver()
+        )
+
+    def _respond(
+        self,
+        message,
+        history
+    ):
+        result = self.agent.invoke(
+            {"messages": [{"role": "user", "content": message}]},
+            {"configurable": {"thread_id": "1"}},
+        )
+        output = result['messages'][-1].content
+
+        yield output


 if __name__ == "__main__":
-    demo.launch()
+    gradio = GradioAgent()
+    gradio.inicialize()
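
For reference, the memory change comes down to the InMemorySaver checkpointer passed to create_agent: every invoke call that carries the same thread_id in its config reads back and appends to the same stored message history. Below is a minimal standalone sketch of that behaviour, mirroring __create_agent() above; it assumes HF_TOKEN is set in the environment, and the prompts and the name "Ana" are illustrative only.

import os

from langchain.agents import create_agent
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace
from langgraph.checkpoint.memory import InMemorySaver

# Same model setup as __create_agent(); requires HF_TOKEN in the environment.
hf_model = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-3.1-8B-Instruct",
    task="text-generation",
    provider="auto",
    huggingfacehub_api_token=os.getenv("HF_TOKEN"),
)
agent = create_agent(
    tools=[],
    model=ChatHuggingFace(llm=hf_model),
    checkpointer=InMemorySaver(),
)

# Both calls use thread_id "1", so the second turn sees the first turn's messages.
config = {"configurable": {"thread_id": "1"}}
agent.invoke({"messages": [{"role": "user", "content": "My name is Ana."}]}, config)
result = agent.invoke({"messages": [{"role": "user", "content": "What is my name?"}]}, config)
print(result["messages"][-1].content)  # the reply can now refer back to "Ana"

Note that InMemorySaver keeps this state in process memory only, so it is lost on restart, and because _respond hard-codes thread_id "1", every visitor to the Space currently shares the same conversation thread.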