feat(config): add gemma 3 to prod config (#1753)
* feat(prod): add gemma 3 to prod config
* feat: add multimodal mode to gemma 3
* docs: update Gemma 3 model description to highlight multimodal support
- chart/env/prod.yaml +29 -1
chart/env/prod.yaml
CHANGED
@@ -262,6 +262,28 @@ envVars:
       }
     ]
   },
+  {
+    "name": "google/gemma-3-27b-it",
+    "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/google-logo.png",
+    "multimodal": true,
+    "description": "Google's latest open model with great multilingual performance, supports image inputs natively.",
+    "websiteUrl": "https://blog.google/technology/developers/gemma-3/",
+    "endpoints": [
+      {
+        "type": "openai",
+        "baseURL": "https://wp0d3hn6s3k8jk22.us-east-1.aws.endpoints.huggingface.cloud/v1",
+        "multimodal": {
+          "image": {
+            "maxSizeInMB": 10,
+            "maxWidth": 560,
+            "maxHeight": 560,
+            "supportedMimeTypes": ["image/jpeg"],
+            "preferredMimeType": "image/jpeg"
+          }
+        }
+      }
+    ]
+  },
   {
     "name": "meta-llama/Llama-3.2-11B-Vision-Instruct",
     "logoUrl": "https://huggingface.co/datasets/huggingchat/models-logo/resolve/main/meta-logo.png",
@@ -382,7 +404,13 @@ envVars:
       "stop": ["<|eot_id|>", "<|im_end|>"],
      "temperature": 0.1,
       "max_new_tokens": 256
-    }
+    },
+    "endpoints": [
+      {
+        "type": "openai",
+        "baseURL": "https://internal.api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct/v1"
+      }
+    ]
   }
 ]
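For context on how the new entry is meant to be consumed: the Gemma 3 endpoint is declared with "type": "openai", so any OpenAI-compatible client pointed at the baseURL above should be able to reach it, sending a JPEG inline as a data URL within the configured limits (10 MB, 560x560, image/jpeg only). The sketch below uses the openai npm client from Node; the HF_TOKEN environment variable, the photo.jpg file, and passing the model name through are illustrative assumptions, not part of this change.

import OpenAI from "openai";
import { readFileSync } from "node:fs";

// Point a standard OpenAI-compatible client at the dedicated endpoint from prod.yaml.
// HF_TOKEN is an assumed env var holding a Hugging Face access token.
const client = new OpenAI({
  baseURL: "https://wp0d3hn6s3k8jk22.us-east-1.aws.endpoints.huggingface.cloud/v1",
  apiKey: process.env.HF_TOKEN ?? "",
});

// The config only advertises image/jpeg up to 10 MB and 560x560, so the image
// is assumed to already be resized/encoded to fit those limits.
const imageB64 = readFileSync("photo.jpg").toString("base64");

const completion = await client.chat.completions.create({
  model: "google/gemma-3-27b-it",
  max_tokens: 256,
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Describe this image in one sentence." },
        { type: "image_url", image_url: { url: `data:image/jpeg;base64,${imageB64}` } },
      ],
    },
  ],
});

console.log(completion.choices[0]?.message?.content);

The size and MIME constraints mirror the multimodal.image block added above; chat-ui presumably enforces them on upload, so a standalone client like this has to respect them on its own.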