Commit 40912b5
Parent(s): d78f59f

three additional 1B to 3B params models

- app.py +13 -2
- img/llama2.png +0 -0
- img/meta.png +0 -0
- models.py +18 -10
app.py CHANGED

@@ -1,5 +1,16 @@
 import os
 
+# Questions for Gradio
+# - Chat share button is enabled by default but throws an error when clicked.
+# - How to add local images in HTML? (https://github.com/gradio-app/gradio/issues/884)
+# - How to allow Chatbot to fill the vertical space? (https://github.com/gradio-app/gradio/issues/4001)
+# TODO:
+# - Add the 1MB models, keras/gemma_1.1_instruct_7b_en
+# - Add retry button, for each model individually
+# - Add ability to route a message to a single model only.
+# - log_applied_layout_map: make it work for Llama3CausalLM and LlamaCausalLM (vicuna)
+# - display context length
+
 os.environ["KERAS_BACKEND"] = "jax"
 
 import gradio as gr

@@ -95,7 +106,7 @@ def bot_icon_select(model_name):
     if "gemma" in model_name:
         return "img/gemma.png"
     elif "llama" in model_name:
-        return "img/
+        return "img/meta.png"
     elif "vicuna" in model_name:
         return "img/vicuna.png"
     elif "mistral" in model_name:
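
One plausible consumer of bot_icon_select, not shown in this diff, is the bot-side avatar of each model's chat window; a minimal hedged sketch using Gradio's avatar_images parameter (the wiring and the chosen icon path are assumptions):

```python
import gradio as gr

# Hypothetical wiring: the path returned by bot_icon_select for a given model
# is handed to the Chatbot component as the bot-side avatar image.
bot_icon = "img/meta.png"  # e.g. bot_icon_select("meta-llama/Llama-3.2-1B-Instruct")

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(avatar_images=(None, bot_icon))

# demo.launch()
```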

@@ -167,7 +178,7 @@ with gr.Blocks(fill_width=True, title="Keras demo") as demo:
         show_fullscreen_button=False,
         show_share_button=False,
         interactive=False,
-        scale=0
+        scale=0,
         container=False,
     )
     gr.HTML(
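
The keyword arguments in this hunk look like the options of a small, non-interactive icon image shown next to each model's output; a standalone sketch of that kind of component (the gr.Image/gr.Row layout around it is an assumption, only the keyword arguments come from the diff):

```python
import gradio as gr

with gr.Blocks(fill_width=True, title="Keras demo") as demo:
    with gr.Row():
        # scale=0 stops the image from expanding to fill the row;
        # container=False removes the surrounding frame and label.
        gr.Image(
            "img/meta.png",
            show_fullscreen_button=False,
            show_share_button=False,
            interactive=False,
            scale=0,
            container=False,
        )
        gr.HTML("<b>meta-llama/Llama-3.2-1B-Instruct</b>")

# demo.launch()
```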

img/llama2.png ADDED
img/meta.png ADDED

models.py CHANGED

@@ -2,11 +2,17 @@ import keras
 import keras_hub
 
 model_presets = [
+    # 8B params models
     "hf://google/gemma-2-instruct-9b-keras",
     "hf://meta-llama/Llama-3.1-8B-Instruct",
     "hf://google/codegemma-7b-it-keras",
     "hf://keras/mistral_instruct_7b_en",
     "hf://keras/vicuna_1.5_7b_en",
+    # "keras/gemma_1.1_instruct_7b_en", # won't fit?
+    # 1-3B params models
+    "hf://meta-llama/Llama-3.2-1B-Instruct",
+    "hf://google/gemma-2b-it-keras",
+    "hf://meta-llama/Llama-3.2-3B-Instruct",
 ]
 
 model_labels = map(lambda s: s.removeprefix("hf://"), model_presets)
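
Any entry in model_presets can be loaded the way the comments removed at the bottom of this file did it, with keras_hub.models.CausalLM.from_preset; a minimal sketch (dtype taken from those comments, everything else assumed; the meta-llama repos are gated, so a Hugging Face token may be required):

```python
import os

os.environ["KERAS_BACKEND"] = "jax"  # same backend as app.py

import keras_hub

# Load one of the newly added 1B-class presets in half precision.
model = keras_hub.models.CausalLM.from_preset(
    "hf://meta-llama/Llama-3.2-1B-Instruct", dtype="bfloat16"
)
model.summary()
# model.generate("What is JAX?", max_length=64)  # hypothetical quick check
```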

@@ -33,18 +39,27 @@ def get_default_layout_map(preset_name, device_mesh):
         or "mistral" in preset_name
         or "vicuna" in preset_name
     ):
-
+        layout_map = keras_hub.models.Llama3Backbone.get_layout_map(device_mesh)
+        # This line is missing for some Llama models (TODO: fix this in keras_hub)
+        layout_map["token_embedding/reverse_embeddings"] = ("batch", "model")
+        return layout_map
     elif "gemma" in preset_name:
         return keras_hub.models.GemmaBackbone.get_layout_map(device_mesh)
 
 
 def log_applied_layout_map(model):
+    print("Model class:", type(model).__name__)
+
     if "Gemma" in type(model).__name__:
         transformer_decoder_block_name = "decoder_block_1"
-
+    elif "Llama" in type(model).__name__:  # works for Llama (Vicuna) and Llama3
+        transformer_decoder_block_name = "transformer_layer_1"
+    elif "Mistral" in type(model).__name__:
         transformer_decoder_block_name = "transformer_layer_1"
+    else:
+        print("Unknown architecture. Cannot display the applied layout.")
+        return
 
-    print("Model class:", type(model).__name__)
     # See how layer sharding was applied
     embedding_layer = model.backbone.get_layer("token_embedding")
     print(embedding_layer)
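
The layout map returned by get_default_layout_map is meant to be handed to a Keras distribution before the model is instantiated; a hedged sketch of that consumption (the mesh shape and the exact ModelParallel signature, which varies across Keras 3.x releases, are assumptions):

```python
import keras
import keras_hub

from models import get_default_layout_map  # the function patched above

preset = "hf://meta-llama/Llama-3.2-1B-Instruct"

# One "batch" axis of size 1 and one "model" axis spanning all local devices.
devices = keras.distribution.list_devices()
device_mesh = keras.distribution.DeviceMesh(
    shape=(1, len(devices)), axis_names=("batch", "model"), devices=devices
)

layout_map = get_default_layout_map(preset, device_mesh)

# Recent Keras 3 releases read the device mesh from the layout map itself.
distribution = keras.distribution.ModelParallel(
    layout_map=layout_map, batch_dim_name="batch"
)
keras.distribution.set_distribution(distribution)

# Weights created from here on are sharded according to layout_map.
model = keras_hub.models.CausalLM.from_preset(preset, dtype="bfloat16")
```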

@@ -96,10 +111,3 @@ def load_model(preset):
 
     log_applied_layout_map(model)
     return model
-
-
-# Some small models too
-# model1 = keras_hub.models.CausalLM.from_preset("hf://meta-llama/Llama-3.2-1B-Instruct", dtype="bfloat16")
-# model2 = keras_hub.models.CausalLM.from_preset("hf://google/gemma-2b-it-keras", dtype="bfloat16")
-# model3 = keras_hub.models.CausalLM.from_preset("hf://meta-llama/Llama-3.2-3B-Instruct", dtype="bfloat16")
-# keras/gemma_1.1_instruct_7b_en
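
Beyond the layer reprs that log_applied_layout_map prints, the placement actually applied to each weight can be read directly off the backing arrays; a hedged sketch, assuming the JAX backend (as in app.py) and that models.py is importable from the working directory:

```python
import os

os.environ["KERAS_BACKEND"] = "jax"

from models import load_model, model_presets  # definitions from this file

model = load_model(model_presets[0])

# With the JAX backend, each Keras variable wraps a jax.Array whose
# .sharding describes how it was laid out across the device mesh.
embedding = model.backbone.get_layer("token_embedding")
for variable in embedding.weights:
    print(variable.path, variable.value.shape, variable.value.sharding)
```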