Update app.py
Browse files
app.py
CHANGED
|
@@ -4,11 +4,12 @@ import io
|
|
| 4 |
import base64
|
| 5 |
import uuid
|
| 6 |
import pixeltable as pxt
|
| 7 |
-
from pixeltable.iterators import DocumentSplitter
|
| 8 |
import numpy as np
|
|
|
|
| 9 |
from pixeltable.functions.huggingface import sentence_transformer
|
| 10 |
from pixeltable.functions import openai
|
| 11 |
from gradio.themes import Monochrome
|
|
|
|
| 12 |
|
| 13 |
import os
|
| 14 |
import getpass
|
|
@@ -34,7 +35,18 @@ def create_prompt(top_k_list: list[dict], question: str) -> str:
|
|
| 34 |
QUESTION:
|
| 35 |
{question}'''
|
| 36 |
|
| 37 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
# Initialize Pixeltable
|
| 39 |
pxt.drop_dir('chatbot_demo', force=True)
|
| 40 |
pxt.create_dir('chatbot_demo')
|
|
@@ -103,7 +115,11 @@ def process_files(pdf_files, chunk_limit, chunk_separator):
|
|
| 103 |
# Extract the answer text from the API response
|
| 104 |
t['gpt4omini'] = t.response.choices[0].message.content
|
| 105 |
|
| 106 |
-
return "Files processed successfully
|
|
|
|
|
|
|
|
|
|
|
|
|
| 107 |
|
| 108 |
def get_answer(msg):
|
| 109 |
|
|
@@ -117,8 +133,8 @@ def get_answer(msg):
|
|
| 117 |
|
| 118 |
return answer
|
| 119 |
|
| 120 |
-
def respond(message, chat_history):
    """Process one chat turn.

    Looks up an answer for `message`, records the (question, answer) pair in
    `chat_history`, and returns an empty string so the Gradio textbox is
    cleared alongside the updated history.
    """
    answer = get_answer(message)
    chat_history.append((message, answer))
    # First return value clears the input textbox in the Gradio UI.
    return "", chat_history
|
| 124 |
|
|
@@ -160,6 +176,8 @@ with gr.Blocks(theme=gr.themes.Base()) as demo:
|
|
| 160 |
- The LLM formulates a response based on the provided context and the user's question.
|
| 161 |
""")
|
| 162 |
|
|
|
|
|
|
|
| 163 |
with gr.Row():
|
| 164 |
with gr.Column(scale=1):
|
| 165 |
pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")
|
|
@@ -177,8 +195,8 @@ with gr.Blocks(theme=gr.themes.Base()) as demo:
|
|
| 177 |
msg = gr.Textbox(label="Your Question", placeholder="Ask a question about the uploaded documents")
|
| 178 |
submit = gr.Button("Submit")
|
| 179 |
|
| 180 |
-
process_button.click(process_files, inputs=[pdf_files, chunk_limit, chunk_separator], outputs=[process_output])
|
| 181 |
-
submit.click(respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
|
| 182 |
|
| 183 |
if __name__ == "__main__":
|
| 184 |
demo.launch()
|
|
|
|
| 4 |
import base64
|
| 5 |
import uuid
|
| 6 |
import pixeltable as pxt
|
|
|
|
| 7 |
import numpy as np
|
| 8 |
+
from pixeltable.iterators import DocumentSplitter
|
| 9 |
from pixeltable.functions.huggingface import sentence_transformer
|
| 10 |
from pixeltable.functions import openai
|
| 11 |
from gradio.themes import Monochrome
|
| 12 |
+
from huggingface_hub import HfApi, HfFolder
|
| 13 |
|
| 14 |
import os
|
| 15 |
import getpass
|
|
|
|
| 35 |
QUESTION:
|
| 36 |
{question}'''
|
| 37 |
|
| 38 |
+
def validate_token(token):
    """Return True if `token` is a usable Hugging Face access token.

    Calls the Hub `whoami` endpoint with the supplied token; any failure
    (invalid/expired token, network error, etc.) is treated as "not valid"
    rather than raised to the caller.
    """
    try:
        # whoami raises for bad credentials; a truthy payload means success.
        return HfApi().whoami(token=token) is not None
    except Exception:
        return False
|
| 45 |
+
|
| 46 |
+
def process_files(token, pdf_files, chunk_limit, chunk_separator):
|
| 47 |
+
if not validate_token(token):
|
| 48 |
+
return "Invalid token. Please enter a valid Hugging Face token."
|
| 49 |
+
|
| 50 |
# Initialize Pixeltable
|
| 51 |
pxt.drop_dir('chatbot_demo', force=True)
|
| 52 |
pxt.create_dir('chatbot_demo')
|
|
|
|
| 115 |
# Extract the answer text from the API response
|
| 116 |
t['gpt4omini'] = t.response.choices[0].message.content
|
| 117 |
|
| 118 |
+
return "Files processed successfully. You can start the discussion."
|
| 119 |
+
|
| 120 |
+
def get_answer(token, msg):
|
| 121 |
+
if not validate_token(token):
|
| 122 |
+
return "Invalid token. Please enter a valid Hugging Face token."
|
| 123 |
|
| 124 |
def get_answer(msg):
|
| 125 |
|
|
|
|
| 133 |
|
| 134 |
return answer
|
| 135 |
|
| 136 |
+
def respond(token, message, chat_history):
    """Process one chat turn, gated on a Hugging Face token.

    Delegates to `get_answer` (which validates `token`), appends the
    (question, answer) pair to `chat_history`, and returns an empty string
    so the Gradio textbox is cleared alongside the updated history.
    """
    answer = get_answer(token, message)
    chat_history.append((message, answer))
    # First return value clears the input textbox in the Gradio UI.
    return "", chat_history
|
| 140 |
|
|
|
|
| 176 |
- The LLM formulates a response based on the provided context and the user's question.
|
| 177 |
""")
|
| 178 |
|
| 179 |
+
user_token = gr.Textbox(label="Enter your Hugging Face Token", type="password")
|
| 180 |
+
|
| 181 |
with gr.Row():
|
| 182 |
with gr.Column(scale=1):
|
| 183 |
pdf_files = gr.File(label="Upload PDF Documents", file_count="multiple")
|
|
|
|
| 195 |
msg = gr.Textbox(label="Your Question", placeholder="Ask a question about the uploaded documents")
|
| 196 |
submit = gr.Button("Submit")
|
| 197 |
|
| 198 |
+
process_button.click(process_files, inputs=[user_token,pdf_files, chunk_limit, chunk_separator], outputs=[process_output])
|
| 199 |
+
submit.click(respond, inputs=[user_token, msg, chatbot], outputs=[msg, chatbot])
|
| 200 |
|
| 201 |
if __name__ == "__main__":
|
| 202 |
demo.launch()
|