File size: 1,540 Bytes
b3c91c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
import os
from huggingface_hub import InferenceClient
import streamlit as st

# Access your Hugging Face API token from the environment variable.
api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if api_token is None:
    st.error("Hugging Face API token is not set.")
else:
    st.title("Tanya Gizi!")

    # Initialize chat history if not already present.
    # Each entry is a {'role': ..., 'content': ...} dict, which is exactly
    # the shape chat_completion() expects, so the history can be sent as-is.
    if 'messages' not in st.session_state:
        st.session_state.messages = []

    # Replay the chat history (Streamlit reruns the whole script per event).
    for message in st.session_state.messages:
        st.chat_message(message['role']).markdown(message['content'])

    # Input area for the user.
    prompt = st.chat_input('Masukan pertanyaanmu di sini!')

    # Process user input.
    if prompt:
        st.chat_message('user').markdown(prompt)
        st.session_state.messages.append({'role': 'user', 'content': prompt})

        # Client construction is cheap (no connection is opened until a call
        # is made), so building it per-turn is fine.
        client = InferenceClient(
            model="mistralai/Mistral-Large-Instruct-2407",
            token=api_token
        )

        try:
            # BUG FIX: send the FULL conversation history, not just the last
            # prompt, so the model keeps context across turns. The history
            # already ends with the user message appended above.
            response = client.chat_completion(
                messages=st.session_state.messages,
                max_tokens=100,
                stream=False
            )
            response_text = response['choices'][0]['message']['content']
        except Exception as exc:
            # Surface API/network failures in the UI instead of crashing the
            # script mid-render; the failed turn stays in history so the user
            # can see what was asked.
            st.error(f"Gagal mendapatkan respons: {exc}")
        else:
            # Display and store the assistant's response.
            st.chat_message('assistant').markdown(response_text)
            st.session_state.messages.append({'role': 'assistant', 'content': response_text})