```
Traceback (most recent call last):
  File "/tmp/MiniMaxAI_MiniMax-M2_0gDsHlo.py", line 17, in <module>
    pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model MiniMaxAI/MiniMax-M2 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>,). See the original errors:

while loading with AutoModelForCausalLM, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
        ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 54, in validate_environment
    raise ValueError(
        ...<2 lines>...
    )
ValueError: FP8 quantized models is only supported on GPUs with compute capability >= 8.9 (e.g 4090/H100), actual = `8.6`

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
        ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 54, in validate_environment
    raise ValueError(
        ...<2 lines>...
    )
ValueError: FP8 quantized models is only supported on GPUs with compute capability >= 8.9 (e.g 4090/H100), actual = `8.6`
```
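Both load attempts (the requested dtype and the fp32 fallback) die in the same place: the FP8 quantizer's environment check, because the checkpoint ships FP8 weights and the GPU reports compute capability 8.6. A minimal sketch of a pre-flight check, assuming a CUDA build of torch and taking the >= 8.9 requirement from the error message above (the `REQUIRED` constant and exit messages here are illustrative, not part of the transformers API):

```python
import torch
from transformers import pipeline

# FP8 checkpoints need compute capability >= 8.9 (Ada/Hopper,
# e.g. RTX 4090 or H100); the traceback above came from an 8.6 device.
REQUIRED = (8, 9)  # assumed threshold, taken from the error text

if not torch.cuda.is_available():
    raise SystemExit("A CUDA GPU is required to load this FP8 checkpoint.")

major, minor = torch.cuda.get_device_capability()  # (major, minor) ints
if (major, minor) < REQUIRED:
    raise SystemExit(
        f"Compute capability {major}.{minor} < 8.9: "
        "MiniMaxAI/MiniMax-M2 ships FP8 weights and will fail to load here."
    )

# Only reached on FP8-capable hardware.
pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
```

Failing fast like this avoids downloading the weights before hitting the same `validate_environment` error. On hardware below 8.9 the fix lies outside the script: run on an Ada/Hopper-class GPU, or load a non-FP8 copy of the weights if one is published.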