Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ import base64
 # Load environment variables
 load_dotenv()

-icons = {"assistant": "👽", "user": "
+icons = {"assistant": "👽", "user": "man-kddi.png"}

 # Configure the Llama index settings
 Settings.llm = HuggingFaceInferenceAPI(

@@ -100,7 +100,7 @@ user_prompt = st.chat_input("Ask me anything about the content of the PDF:")

 if user_prompt and uploaded_file:
     st.session_state.messages.append({'role': 'user', "content": user_prompt})
-    with st.chat_message("user", avatar="
+    with st.chat_message("user", avatar="man-kddi.png"):
         st.write(user_prompt)

     # Trigger assistant's response retrieval and update UI
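For context, the two changed lines work together: st.chat_message accepts either an emoji or an image file path for its avatar, so this commit points the user side at the bundled man-kddi.png image while the assistant keeps the 👽 emoji. The sketch below shows that pattern in a minimal Streamlit chat loop; it assumes man-kddi.png sits next to app.py, and the history-replay loop is illustrative only, since the rest of the file is not shown in this diff.

import streamlit as st

# Avatars keyed by role: the assistant uses an emoji, the user a local image file
# (assumed to live alongside app.py, as in the commit above).
icons = {"assistant": "👽", "user": "man-kddi.png"}

# Keep the conversation across reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

user_prompt = st.chat_input("Ask me anything about the content of the PDF:")

if user_prompt:
    # Record the new user turn, then replay the whole history below.
    st.session_state.messages.append({"role": "user", "content": user_prompt})

# Render each stored message with the avatar that matches its role.
for message in st.session_state.messages:
    with st.chat_message(message["role"], avatar=icons.get(message["role"])):
        st.write(message["content"])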