Spaces: Runtime error
Update app.py
app.py CHANGED

@@ -51,55 +51,55 @@ with demo:
     #chat_model_selection = chat_model_dropdown.value
     chat_model_selection = 'Intel/neural-chat-7b-v1-1'
 
-    def call_api_and_stream_response(query, chat_model):
-        """
-        Call the API endpoint and yield characters as they are received.
-        This function simulates streaming by yielding characters one by one.
-        """
-        url = inference_endpoint_url
-        params = {"query": query, "selected_model": chat_model}
-        with requests.get(url, json=params, stream=True) as r:  # Use params for query parameters
-            for chunk in r.iter_content(chunk_size=1):
-                if chunk:
-                    yield chunk.decode()
-
-    def get_response(query, history):
-        """
-        Wrapper function to call the streaming API and compile the response.
-        """
-        response = ''
-        for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
-            if char == '<':  # This seems to be your stopping condition; adjust as needed.
-                break
-            response += char
-        yield [(f"🤖 Response from LLM: {chat_model_selection}", response)]  # Correct format for Gradio Chatbot
-
-    with gr.Blocks() as chat_interface:
-        chatbot = gr.Chatbot()
-        msg = gr.Textbox()
-        submit = gr.Button("Submit")
-        clear = gr.Button("Clear")
-
-        def user(user_message, history):
-            return "", history + [[user_message, None]]
-
-        def clear_chat(*args):
-            return []  # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
-
-        submit.click(
-            fn=get_response,
-            inputs=[msg, chatbot],
-            outputs=chatbot
-        )
-
-        clear.click(
-            fn=clear_chat,
-            inputs=None,
-            outputs=chatbot
-        )
-
-    chat_interface.queue()
-    chat_interface.launch()
+    #def call_api_and_stream_response(query, chat_model):
+    #    """
+    #    Call the API endpoint and yield characters as they are received.
+    #    This function simulates streaming by yielding characters one by one.
+    #    """
+    #    url = inference_endpoint_url
+    #    params = {"query": query, "selected_model": chat_model}
+    #    with requests.get(url, json=params, stream=True) as r:  # Use params for query parameters
+    #        for chunk in r.iter_content(chunk_size=1):
+    #            if chunk:
+    #                yield chunk.decode()
+    #
+    #def get_response(query, history):
+    #    """
+    #    Wrapper function to call the streaming API and compile the response.
+    #    """
+    #    response = ''
+    #    for char in call_api_and_stream_response(query, chat_model=chat_model_selection):
+    #        if char == '<':  # This seems to be your stopping condition; adjust as needed.
+    #            break
+    #        response += char
+    #    yield [(f"🤖 Response from LLM: {chat_model_selection}", response)]  # Correct format for Gradio Chatbot
+    #
+    #with gr.Blocks() as chat_interface:
+    #    chatbot = gr.Chatbot()
+    #    msg = gr.Textbox()
+    #    submit = gr.Button("Submit")
+    #    clear = gr.Button("Clear")
+    #
+    #    def user(user_message, history):
+    #        return "", history + [[user_message, None]]
+    #
+    #    def clear_chat(*args):
+    #        return []  # Returning an empty list to signify clearing the chat, adjust as per Gradio's capabilities
+    #
+    #    submit.click(
+    #        fn=get_response,
+    #        inputs=[msg, chatbot],
+    #        outputs=chatbot
+    #    )
+    #
+    #    clear.click(
+    #        fn=clear_chat,
+    #        inputs=None,
+    #        outputs=chatbot
+    #    )
+    #
+    #chat_interface.queue()
+    #chat_interface.launch()
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏆 LLM Leadeboard", elem_id="llm-benchmark-table", id=0):
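
For context, the code disabled by this commit streamed the model's answer from an HTTP inference endpoint one byte at a time. Below is a minimal standalone sketch of that pattern; the endpoint URL is a placeholder (in app.py the real value comes from inference_endpoint_url, defined earlier in the file), and the incremental decoder is an addition here, since calling .decode() on single bytes, as the original code does, can fail on multi-byte UTF-8 characters:

import codecs
import requests

# Placeholder URL; app.py reads this from inference_endpoint_url instead.
INFERENCE_ENDPOINT_URL = "http://localhost:8000/generate"

def stream_response(query, model):
    """Yield response text as it arrives from the inference endpoint."""
    payload = {"query": query, "selected_model": model}
    # An incremental decoder buffers partial multi-byte sequences so that
    # one-byte chunks are never decoded mid-character.
    decoder = codecs.getincrementaldecoder("utf-8")()
    with requests.get(INFERENCE_ENDPOINT_URL, json=payload, stream=True) as r:
        r.raise_for_status()
        for chunk in r.iter_content(chunk_size=1):
            if chunk:
                text = decoder.decode(chunk)
                if text:
                    yield text

if __name__ == "__main__":
    # Accumulate until the ad-hoc '<' stop marker the original code checks for.
    response = ""
    for piece in stream_response("What is an LLM?", "Intel/neural-chat-7b-v1-1"):
        if piece == "<":
            break
        response += piece
    print(response)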
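
The commented-out get_response handler yields chat history as a list of (user_message, bot_message) tuples, the classic Gradio Chatbot format. A reduced sketch of the same Blocks wiring, assuming a tuple-style gr.Chatbot (Gradio 3.x/4.x) and an echo reply standing in for the real endpoint call:

import gradio as gr

def respond(user_message, history):
    # history is a list of (user, bot) tuples; append the new exchange.
    # A real handler would stream text from the inference endpoint here.
    history = history + [(user_message, f"echo: {user_message}")]
    return "", history  # clear the textbox and update the chatbot

with gr.Blocks() as chat_interface:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    submit = gr.Button("Submit")
    clear = gr.Button("Clear")

    submit.click(fn=respond, inputs=[msg, chatbot], outputs=[msg, chatbot])
    clear.click(fn=lambda: [], inputs=None, outputs=chatbot)

if __name__ == "__main__":
    chat_interface.queue()
    chat_interface.launch()

Note that app.py built this interface inside the top-level with demo: block and then called chat_interface.launch() alongside the demo app's own launch; that nesting is a plausible source of the Space's runtime error and a likely reason the block was commented out rather than fixed in place.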