Commit d78f59f
Parent(s): 285adea
removed disfunctional share buttons
app.py CHANGED

@@ -109,12 +109,14 @@ def instantiate_chatbots(sel1, sel2):
     chatbot1 = gr.Chatbot(
         type="messages",
         show_label=False,
+        show_share_button=False,
         avatar_images=("img/usr.png", bot_icon_select(model_name1)),
     )
     model_name2 = model_presets[sel2]
     chatbot2 = gr.Chatbot(
         type="messages",
         show_label=False,
+        show_share_button=False,
         avatar_images=("img/usr.png", bot_icon_select(model_name2)),
     )
     return chatbot1, chatbot2

@@ -163,6 +165,7 @@ with gr.Blocks(fill_width=True, title="Keras demo") as demo:
         show_label=False,
         show_download_button=False,
         show_fullscreen_button=False,
+        show_share_button=False,
         interactive=False,
         scale=0.01,
         container=False,

@@ -171,8 +174,8 @@ with gr.Blocks(fill_width=True, title="Keras demo") as demo:
         "<H2> Battle of the Keras chatbots on TPU</H2>"
         + "All the models are loaded into the TPU memory. "
         + "You can call any of them and compare their answers. <br/>"
-        + "The entire chat history is fed to the models at every submission."
-        + "This
+        + "The entire chat history is fed to the models at every submission. "
+        + "This demo is runnig on a Google TPU v5e 2x4 (8 cores).",
     )
     with gr.Row():
         sel1, sel2 = instantiate_select_boxes(0, 1, model_labels_list)

@@ -180,9 +183,8 @@ with gr.Blocks(fill_width=True, title="Keras demo") as demo:
     with gr.Row():
         chatbot1, chatbot2 = instantiate_chatbots(sel1.value, sel2.value)

-    msg = gr.Textbox(
-
-    )
+    msg = gr.Textbox(label="Your message:", submit_btn=True)
+
     with gr.Row():
         gr.ClearButton([msg, chatbot1, chatbot2])
     with gr.Accordion("Additional settings", open=False):
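For context, the fix relies on Gradio's `show_share_button` flag: `gr.Chatbot`, `gr.Image`, and several other components accept it and otherwise render a share icon, which did nothing useful in this app. Below is a minimal, self-contained sketch of the pattern, assuming a recent Gradio release that supports `show_share_button` and `submit_btn`; the `respond` handler and component values are illustrative stand-ins, not code from this repository.

# Sketch: hiding the share button on a Gradio Chatbot (illustrative only).
import gradio as gr

def respond(message, history):
    # Trivial stand-in for the real model call in app.py.
    history = history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": "You said: " + message},
    ]
    return "", history

with gr.Blocks(title="Share-button sketch") as demo:
    chatbot = gr.Chatbot(
        type="messages",
        show_label=False,
        show_share_button=False,  # hides the share icon, as in this commit
    )
    msg = gr.Textbox(label="Your message:", submit_btn=True)
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()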
models.py CHANGED

@@ -44,6 +44,7 @@ def log_applied_layout_map(model):
     else: # works for Llama, Mistral, Vicuna
         transformer_decoder_block_name = "transformer_layer_1"

+    print("Model class:", type(model).__name__)
     # See how layer sharding was applied
     embedding_layer = model.backbone.get_layer("token_embedding")
     print(embedding_layer)