import streamlit as st
from huggingface_hub import InferenceClient

# Only needed for the commented-out HugChat-based path further below:
# from hugchat import hugchat
# from hugchat.login import Login

# Client for the hosted Inference API serving the zephyr-7b-beta chat model
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
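# If the anonymous endpoint rate-limits you, InferenceClient also accepts a
# Hugging Face access token (placeholder value below, not a real token):
#   client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token="hf_...")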
# App title
st.set_page_config(page_title="🤗💬 HugChat")

# Hugging Face Credentials
with st.sidebar:
    st.title('🤗💬 HugChat')
    if ('EMAIL' in st.secrets) and ('PASS' in st.secrets):
        st.success('HuggingFace Login credentials already provided!', icon='✅')
        hf_email = st.secrets['EMAIL']
        hf_pass = st.secrets['PASS']
    else:
        hf_email = st.text_input('Enter E-mail:', type='password')
        hf_pass = st.text_input('Enter password:', type='password')
        if not (hf_email and hf_pass):
            st.warning('Please enter your credentials!', icon='⚠️')
        else:
            st.success('Proceed to entering your prompt message!', icon='👉')
    st.markdown('📖 Learn how to build this app in this [blog](https://blog.streamlit.io/how-to-build-an-llm-powered-chatbot-with-streamlit/)!')
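# A minimal .streamlit/secrets.toml matching the st.secrets lookups above
# could look like this (placeholder values, not real credentials):
#
#   EMAIL = "you@example.com"
#   PASS = "your-password"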
# Store LLM-generated responses across reruns
if "messages" not in st.session_state:
    st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])
# Function for generating the LLM response as a token stream.
# The hosted InferenceClient needs no HugChat email/password, so the function
# only takes the conversation history.
def generate_response(messages):
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        stream=True,
        temperature=0.7,
        top_p=0.9,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some stream chunks carry no text content
            yield token
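# A quick way to smoke-test the streaming helper outside the app (assumes
# network access to the hosted model; the prompt text is illustrative):
#   for token in generate_response([{"role": "user", "content": "Hello!"}]):
#       print(token, end="", flush=True)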
# Alternative: HugChat-based response generation (uses the sidebar credentials):
# def generate_response(prompt_input, email, passwd):
#     # Hugging Face Login
#     cookie_path_dir = "./cookies/"
#     sign = Login(email, passwd)
#     cookies = sign.login(cookie_dir_path=cookie_path_dir, save_cookies=True)
#     # Create ChatBot
#     chatbot = hugchat.ChatBot(cookies=cookies.get_dict())
#     return chatbot.chat(prompt_input)
# # Function for generating LLM response based on "HuggingFaceH4/zephyr-7b-beta"
# def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
#     messages = [{"role": "system", "content": system_message}]
#     for val in history:
#         if val[0]:
#             messages.append({"role": "user", "content": val[0]})
#         if val[1]:
#             messages.append({"role": "assistant", "content": val[1]})
#     messages.append({"role": "user", "content": message})
#     response = ""
#     for message in client.chat_completion(
#         messages,
#         max_tokens=max_tokens,
#         stream=True,
#         temperature=temperature,
#         top_p=top_p,
#     ):
#         token = message.choices[0].delta.content
#         response += token
#         yield response
# User-provided prompt
if prompt := st.chat_input(disabled=not (hf_email and hf_pass)):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.write(prompt)

# Generate a new response if the last message is not from the assistant
if st.session_state.messages[-1]["role"] != "assistant":
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            # st.write_stream renders the tokens as they arrive and returns
            # the full concatenated response
            response = st.write_stream(generate_response(st.session_state.messages))
        message = {"role": "assistant", "content": response}
        st.session_state.messages.append(message)
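# To run locally (assuming this file is saved as app.py):
#   streamlit run app.py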