Commit · 7f94c68
1 Parent(s): 5172194
without asr

Files changed:
- app.py (+35, -0)
- requirements.txt (+9, -0)
app.py
ADDED

import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.tools.tavily_search import TavilySearchResults
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from dotenv import load_dotenv

# Load API keys from a local .env file (or the Space's environment).
load_dotenv()

@st.cache_resource
def get_model():
    # Cache the chat model across Streamlit reruns.
    model = ChatOpenAI(model="gpt-4o", temperature=0, base_url="https://models.inference.ai.azure.com")
    return model

# ReAct agent with Tavily web search as its only tool; MemorySaver keeps
# per-thread conversation state between turns.
memory = MemorySaver()
search = TavilySearchResults(max_results=2)
tools = [search]
model = get_model()
app = create_react_agent(model, tools, checkpointer=memory)

config = {"configurable": {"thread_id": "111"}}

if query := st.chat_input("Ask anything"):
    msg = [HumanMessage(query)]

    def gen():
        # Stream (message chunk, metadata) pairs and yield only chunks that
        # come from the model (AIMessage), skipping tool outputs.
        for chunk, metadata in app.stream({"messages": msg}, config=config, stream_mode="messages"):
            if isinstance(chunk, AIMessage):
                yield chunk.content

    st.write_stream(gen)
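
Since app.py calls load_dotenv(), the credentials it depends on have to be present as environment variables: ChatOpenAI reads OPENAI_API_KEY by default, and TavilySearchResults reads TAVILY_API_KEY. A minimal .env sketch for running locally (placeholder values, not part of this commit; on a Hugging Face Space the same variable names would typically be set as Space secrets instead):

# .env -- illustrative placeholders only
OPENAI_API_KEY=token-accepted-by-models.inference.ai.azure.com
TAVILY_API_KEY=your-tavily-api-key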
requirements.txt
ADDED

langchain_community
langchain-openai
langchain
langgraph
langchainhub
streamlit
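
Note: app.py also imports dotenv and instantiates the Tavily search tool, but neither dependency appears in this requirements.txt. A hedged addition, assuming python-dotenv and tavily-python are the packages those imports resolve to at runtime:

python-dotenv
tavily-python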