Spaces: Runtime error

Commit · 9111154
Parent(s): 0431f29

display results

Browse files:
- app.py  +17 -8
- awesome_chat.py  +4 -4
app.py CHANGED

@@ -95,8 +95,8 @@ class Client:
 
     def bot(self, messages):
         if len(self.OPENAI_KEY) == 0 or not self.OPENAI_KEY.startswith("sk-") or len(self.HUGGINGFACE_TOKEN) == 0 or not self.HUGGINGFACE_TOKEN.startswith("hf_"):
-            return messages
-        message = chat_huggingface(self.all_messages, self.OPENAI_KEY, self.HUGGINGFACE_TOKEN)
+            return messages, {}
+        message, results = chat_huggingface(self.all_messages, self.OPENAI_KEY, self.HUGGINGFACE_TOKEN)
         urls, image_urls, audio_urls, video_urls = self.extract_medias(message)
         self.add_message(message, "assistant")
         messages[-1][1] = message
@@ -118,9 +118,12 @@ class Client:
                 messages = messages + [((None, (f"public/{video_url}",)))]
             # else:
             #     messages = messages + [((None, (f"{video_url}",)))]
-        return messages
-
-with gr.Blocks() as demo:
+        # replace int key to string key
+        results = {str(k): v for k, v in results.items()}
+        return messages, results
+
+css = ".json {height: 527px; overflow: scroll;} .json-holder {height: 527px; overflow: scroll;}"
+with gr.Blocks(css=css) as demo:
     state = gr.State(value={"client": Client()})
     gr.Markdown("<h1><center>HuggingGPT</center></h1>")
     gr.Markdown("<p align='center'><img src='https://i.ibb.co/qNH3Jym/logo.png' height='25' width='95'></p>")
@@ -147,8 +150,14 @@ with gr.Blocks() as demo:
            ).style(container=False)
            with gr.Column(scale=0.15, min_width=0):
                btn3 = gr.Button("Submit").style(full_height=True)
+
+
+    with gr.Row().style():
+        with gr.Column(scale=0.6):
+            chatbot = gr.Chatbot([], elem_id="chatbot").style(height=500)
+        with gr.Column(scale=0.4):
+            results = gr.JSON(elem_classes="json")
 
-    chatbot = gr.Chatbot([], elem_id="chatbot").style(height=500)
 
     with gr.Row().style():
         with gr.Column(scale=0.85):
@@ -173,10 +182,10 @@ with gr.Blocks() as demo:
         return state["client"].bot(chatbot)
 
     openai_api_key.submit(set_key, [state, openai_api_key], [openai_api_key])
-    txt.submit(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], chatbot)
+    txt.submit(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
     hugging_face_token.submit(set_token, [state, hugging_face_token], [hugging_face_token])
     btn1.click(set_key, [state, openai_api_key], [openai_api_key])
-    btn2.click(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], chatbot)
+    btn2.click(add_text, [state, chatbot, txt], [chatbot, txt]).then(bot, [state, chatbot], [chatbot, results])
     btn3.click(set_token, [state, hugging_face_token], [hugging_face_token])
 
     gr.Examples(
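Note: the rewired handlers above rely on Gradio mapping a handler's two return values onto the two listed output components in order. A minimal standalone sketch of that pattern, with illustrative stand-in functions (the names below are not taken from this commit):

import gradio as gr

def add_text(history, text):
    # Append the user turn with a pending reply and clear the textbox.
    return history + [[text, None]], ""

def bot(history):
    # Stand-in for Client.bot after this commit: fill in the pending reply
    # and also return a dict that will feed the JSON panel.
    history[-1][1] = "done"
    results = {"0": {"task": "example", "inference result": "..."}}
    return history, results  # two values -> mapped onto [chatbot, results]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot([], elem_id="chatbot")
    results = gr.JSON()
    txt = gr.Textbox()
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(bot, [chatbot], [chatbot, results])

# demo.launch()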
awesome_chat.py CHANGED

@@ -839,21 +839,21 @@ def chat_huggingface(messages, openaikey = None, huggingfacetoken = None, return
     logger.info(task_str)
 
     if "error" in task_str:
-        return
+        return str(task_str), {}
     else:
         task_str = task_str.strip()
 
     if task_str == "[]":  # using LLM response for empty task
         record_case(success=False, **{"input": input, "task": [], "reason": "task parsing fail: empty", "op": "chitchat"})
         response = chitchat(messages, openaikey)
-        return {
+        return response, {}
     try:
         tasks = json.loads(task_str)
     except Exception as e:
         logger.debug(e)
         response = chitchat(messages, openaikey)
         record_case(success=False, **{"input": input, "task": task_str, "reason": "task parsing fail", "op":"chitchat"})
-        return {
+        return response, {}
 
 
     tasks = unfold(tasks)
@@ -908,4 +908,4 @@ def chat_huggingface(messages, openaikey = None, huggingfacetoken = None, return
     answer = {"message": response}
     record_case(success=True, **{"input": input, "task": task_str, "results": results, "response": response, "during": during, "op":"response"})
     logger.info(f"response: {response}")
-    return
+    return response, results
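For reference, after this commit every exit path of chat_huggingface returns a (response, results) pair instead of a bare value. The shape of results is not visible in this diff, so the dict in the sketch below is a hypothetical placeholder keyed by task id, and the variable names are illustrative:

# Hypothetical caller, mirroring Client.bot in app.py after this commit.
message, results = chat_huggingface(all_messages, openai_key, hf_token)

# The gr.JSON panel renders a dict; the app.py change converts int task ids
# to string keys before handing results to the component.
results = {str(k): v for k, v in results.items()}

print(message)            # final assistant reply shown in the chatbot
print(results.get("0"))   # per-task intermediate output, if present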