SuhasBansode committed
Commit f8fcc25 · 1 Parent(s): fcd3456

Update app.py

Files changed (1)
  1. app.py +86 -3
app.py CHANGED
@@ -1,4 +1,87 @@
- import streamlit as st

- x = st.slider('Select a value')
- st.write(x, 'squared is', x * x)
+ !pip install -qq langchain wget llama-index cohere llama-cpp-python
+
+ import wget
+
+ def bar_custom(current, total, width=80):
+     print("Downloading %d%% [%d / %d] bytes" % (current / total * 100, current, total))
+
+ model_url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q2_K.gguf"
+ wget.download(model_url, bar=bar_custom)
+
+ !pip -q install streamlit
+
+ %%writefile app.py
+ import streamlit as st
+ from llama_index import (
+     SimpleDirectoryReader,
+     VectorStoreIndex,
+     ServiceContext,
+ )
+ from llama_index.llms import LlamaCPP
+ from llama_index.llms.llama_utils import (
+     messages_to_prompt,
+     completion_to_prompt,
+ )
+ from langchain.schema import (SystemMessage, HumanMessage, AIMessage)
+
+ def init_page() -> None:
+     st.set_page_config(
+         page_title="Personal Chatbot"
+     )
+     st.header("Personal Chatbot")
+     st.sidebar.title("Options")
+
+ def select_llm() -> LlamaCPP:
+     return LlamaCPP(
+         model_path="/content/llama-2-7b-chat.Q2_K.gguf",
+         temperature=0.1,
+         max_new_tokens=500,
+         context_window=3900,
+         generate_kwargs={},
+         model_kwargs={"n_gpu_layers": 1},
+         messages_to_prompt=messages_to_prompt,
+         completion_to_prompt=completion_to_prompt,
+         verbose=True,
+     )
+
+ def init_messages() -> None:
+     clear_button = st.sidebar.button("Clear Conversation", key="clear")
+     if clear_button or "messages" not in st.session_state:
+         st.session_state.messages = [
+             SystemMessage(
+                 content="You are a helpful AI assistant. Reply your answer in markdown format."
+             )
+         ]
+
+ def get_answer(llm, prompt) -> str:
+     response = llm.complete(prompt)
+     return response.text
+
+ def main() -> None:
+     init_page()
+     llm = select_llm()
+     init_messages()
+
+     if user_input := st.chat_input("Input your question!"):
+         st.session_state.messages.append(HumanMessage(content=user_input))
+         with st.spinner("Bot is typing ..."):
+             answer = get_answer(llm, user_input)
+             print(answer)
+         st.session_state.messages.append(AIMessage(content=answer))
+
+
+     messages = st.session_state.get("messages", [])
+     for message in messages:
+         if isinstance(message, AIMessage):
+             with st.chat_message("assistant"):
+                 st.markdown(message.content)
+         elif isinstance(message, HumanMessage):
+             with st.chat_message("user"):
+                 st.markdown(message.content)
+
+ if __name__ == "__main__":
+     main()
+
+ !streamlit run app.py & npx localtunnel --port 8501
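
As committed, get_answer() forwards only the latest user message to llm.complete(), so earlier turns stored in st.session_state.messages never reach the model. A minimal history-aware sketch, assuming the ChatMessage class and LlamaCPP.chat() method from the same llama-index releases this code targets (get_answer_with_history and role_map are hypothetical names, not part of the commit):

from llama_index.llms import ChatMessage

def get_answer_with_history(llm, lc_messages) -> str:
    # Map the langchain messages kept in session state onto llama-index
    # chat messages; langchain's .type is "system", "human", or "ai".
    role_map = {"system": "system", "human": "user", "ai": "assistant"}
    chat_messages = [
        ChatMessage(role=role_map.get(m.type, "user"), content=m.content)
        for m in lc_messages
    ]
    # LlamaCPP.chat() renders the messages through messages_to_prompt and
    # returns a ChatResponse; .message.content holds the reply text.
    response = llm.chat(chat_messages)
    return response.message.content

The call site in main() would then pass st.session_state.messages instead of user_input.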
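
The messages_to_prompt and completion_to_prompt helpers passed into LlamaCPP above render conversations in the Llama-2 chat template ([INST] ... [/INST] with a <<SYS>> block). An illustrative snippet, assuming the same llama-index version, to inspect the prompt the model actually receives:

from llama_index.llms import ChatMessage
from llama_index.llms.llama_utils import messages_to_prompt

# Build and print the raw Llama-2 style prompt for a two-message chat.
print(messages_to_prompt([
    ChatMessage(role="system", content="You are a helpful AI assistant."),
    ChatMessage(role="user", content="What is llama.cpp?"),
]))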