CODE:

```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)
```

ERROR: 
Traceback (most recent call last):
  File "/tmp/MiniMaxAI_MiniMax-M2_0AY8pTg.py", line 17, in <module>
    pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model MiniMaxAI/MiniMax-M2 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>,). See the original errors:

while loading with AutoModelForCausalLM, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
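
The root cause appears twice because `infer_framework_load_model` retries the load with fp32 kwargs after the first failure, and both attempts hit the same check: MiniMax-M2 ships fine-grained FP8-quantized weights, and the FP8 quantizer's `validate_environment` (in `quantizer_finegrained_fp8.py`) raises whenever no GPU or XPU is visible. Changing the dtype therefore does not help; an accelerator is required. Below is a minimal sketch of the usual workaround, assuming the script runs on a host with at least one CUDA GPU with enough memory for the checkpoint; `device_map="auto"` is standard transformers/accelerate usage, but nothing in this log confirms the model fits on any particular hardware.

```python
# A minimal sketch, assuming a CUDA-capable host: the fine-grained FP8
# quantizer only validates successfully when a GPU or XPU is visible,
# so the pipeline must be placed on an accelerator.
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="MiniMaxAI/MiniMax-M2",
    device_map="auto",  # let accelerate place/shard the FP8 weights across available GPUs
)
messages = [
    {"role": "user", "content": "Who are you?"},
]
print(pipe(messages))
```

On a CPU-only host, this code path offers no way past the check: `get_hf_quantizer` runs before any weights are read, so the `RuntimeError` fires regardless of dtype or device settings.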