Update app.py
app.py
CHANGED
@@ -1,7 +1,6 @@
 """ TypeGPT
 @author: NiansuhAI
 @email: niansuhtech@gmail.com
-
 """
 import numpy as np
 import streamlit as st
@@ -13,6 +12,8 @@ load_dotenv()
 
 
 
+
+
 # initialize the client
 client = OpenAI(
     base_url="https://api-inference.huggingface.co/v1",
@@ -21,9 +22,32 @@ client = OpenAI(
 
 # Create supported models
 model_links = {
-    "
-    "
-    "
+    "Meta-Llama-3.1-70B-Instruct": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "Meta-Llama-3.1-405B-Instruct-FP8": "meta-llama/Meta-Llama-3.1-405B-Instruct-FP8",
+    "Meta-Llama-3.1-405B-Instruct": "meta-llama/Meta-Llama-3.1-405B-Instruct",
+    "Meta-Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+    "Meta-Llama-3-70B-Instruct": "meta-llama/Meta-Llama-3-70B-Instruct",
+    "Meta-Llama-3-8B-Instruct": "meta-llama/Meta-Llama-3-8B-Instruct",
+    "C4ai-command-r-plus": "CohereForAI/c4ai-command-r-plus",
+    "Aya-23-35B": "CohereForAI/aya-23-35B",
+    "Zephyr-orpo-141b-A35b-v0.1": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+    "Mixtral-8x7B-Instruct-v0.1": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+    "Codestral-22B-v0.1": "mistralai/Codestral-22B-v0.1",
+    "Nous-Hermes-2-Mixtral-8x7B-DPO": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+    "Yi-1.5-34B-Chat": "01-ai/Yi-1.5-34B-Chat",
+    "Gemma-2-27b-it": "google/gemma-2-27b-it",
+    "Meta-Llama-2-70B-Chat-HF": "meta-llama/Llama-2-70b-chat-hf",
+    "Meta-Llama-2-7B-Chat-HF": "meta-llama/Llama-2-7b-chat-hf",
+    "Meta-Llama-2-13B-Chat-HF": "meta-llama/Llama-2-13b-chat-hf",
+    "Mistral-7B-Instruct-v0.1": "mistralai/Mistral-7B-Instruct-v0.1",
+    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
+    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
+    "Gemma-1.1-7b-it": "google/gemma-1.1-7b-it",
+    "Gemma-1.1-2b-it": "google/gemma-1.1-2b-it",
+    "Zephyr-7B-Beta": "HuggingFaceH4/zephyr-7b-beta",
+    "Zephyr-7B-Alpha": "HuggingFaceH4/zephyr-7b-alpha",
+    "Phi-3-mini-128k-instruct": "microsoft/Phi-3-mini-128k-instruct",
+    "Phi-3-mini-4k-instruct": "microsoft/Phi-3-mini-4k-instruct",
 }
 
 #Random dog images for error message
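The new model_links table maps a display name to a Hugging Face repo id; the request itself goes through the OpenAI-compatible Inference endpoint configured in client above. Below is a minimal stand-alone sketch of that lookup and call, assuming the standard openai>=1.x client; the HF_TOKEN variable name, the prompt, and max_tokens are illustrative assumptions, not values taken from this diff.

import os
from openai import OpenAI

# Same OpenAI-compatible endpoint as in the diff; the api_key source is an assumption.
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get("HF_TOKEN"),
)

repo_id = "mistralai/Mistral-7B-Instruct-v0.2"  # any value from model_links
reply = client.chat.completions.create(
    model=repo_id,
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    temperature=0.5,   # same range as the sidebar slider below
    max_tokens=128,    # assumed limit
)
print(reply.choices[0].message.content)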
@@ -58,21 +82,22 @@ def reset_conversation():
 models =[key for key in model_links.keys()]
 
 # Create the sidebar with the dropdown for model selection
-selected_model = st.sidebar.selectbox("
+selected_model = st.sidebar.selectbox("Select Model", models)
 
 # Create a temperature slider
-temp_values = st.sidebar.slider('
-
-st.sidebar.markdown("**Для оптимального результата рекомендуем выбирать температуру в диапазоне от 0,5 до 0,7**.")
-st.sidebar.markdown("Этот диапазон обеспечивает хороший баланс между креативностью и связностью.")
+temp_values = st.sidebar.slider('Select a temperature value', 0.0, 1.0, (0.5))
+
 
 #Add reset button to clear conversation
-st.sidebar.button('
+st.sidebar.button('Reset Chat', on_click=reset_conversation) #Reset button
 
 
 # Create model description
-st.sidebar.
-st.sidebar.markdown("
+st.sidebar.write(f"You're now chatting with **{selected_model}**")
+st.sidebar.markdown("*Generated content may be inaccurate or false.*")
+st.sidebar.markdown("\n[TypeGPT](https://typegpt.net).")
+
+
 
 
 if "prev_option" not in st.session_state:
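The Reset Chat button above is wired to reset_conversation, whose definition sits outside the changed lines (it only appears in the hunk header). A plausible sketch, assuming it simply clears the Streamlit session history:

def reset_conversation():
    # Assumed body: drop the chat history so the next rerun starts from an empty conversation.
    st.session_state.conversation = []
    st.session_state.messages = []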
@@ -90,8 +115,8 @@ if st.session_state.prev_option != selected_model:
 repo_id = model_links[selected_model]
 
 
-st.subheader(f'
-# st.title(f'
+st.subheader(f'TypeGPT.net - {selected_model}')
+# st.title(f'ChatBot Using {selected_model}')
 
 # Set a default model
 if selected_model not in st.session_state:
@@ -110,7 +135,7 @@ for message in st.session_state.messages:
 
 
 # Accept user input
-if prompt := st.chat_input(f"
+if prompt := st.chat_input(f"Hi I'm {selected_model}, ask me a question"):
     # Display user message in chat message container
     with st.chat_message("user"):
         st.markdown(prompt)
@@ -136,13 +161,21 @@ if prompt := st.chat_input(f"Привет. Я GPT-ChatBot, задай здесь
 
     except Exception as e:
         # st.empty()
-        response = "
-        \n
-        \n
+        response = "😵💫 Looks like someone unplugged something!\
+                    \n Either the model space is being updated or something is down.\
+                    \n\
+                    \n Try again later. \
+                    \n\
+                    \n Here's a random pic of a 🐶:"
         st.write(response)
         random_dog_pick = 'https://random.dog/'+ random_dog[np.random.randint(len(random_dog))]
         st.image(random_dog_pick)
         st.write("This was the error message:")
         st.write(e)
 
-
+
+
+
+
+
+    st.session_state.messages.append({"role": "assistant", "content": response})
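The except branch above pairs with a try block that is not part of this hunk. A sketch of how that block presumably streams the completion for the selected model and temperature (the with-block placement, message construction, and max_tokens are assumptions):

# Inside the `if prompt := st.chat_input(...):` block, using names defined earlier in app.py.
with st.chat_message("assistant"):
    try:
        stream = client.chat.completions.create(
            model=model_links[selected_model],
            messages=[{"role": m["role"], "content": m["content"]}
                      for m in st.session_state.messages],
            temperature=temp_values,
            stream=True,
            max_tokens=3000,  # assumed limit, not taken from the diff
        )
        # st.write_stream renders tokens as they arrive and returns the full text.
        response = st.write_stream(stream)
    except Exception as e:
        ...  # the fallback shown in the hunk above (dog picture + error details)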