ariG23498 (HF Staff) committed on
Commit c2f810b · verified · 1 Parent(s): e1cec9e

Upload MiniMaxAI_MiniMax-M2_0.txt with huggingface_hub

Files changed (1)
  1. MiniMaxAI_MiniMax-M2_0.txt +94 -0
MiniMaxAI_MiniMax-M2_0.txt ADDED
@@ -0,0 +1,94 @@
+ Traceback (most recent call last):
+ File "/tmp/MiniMaxAI_MiniMax-M2_0gDsHlo.py", line 17, in <module>
+ pipe = pipeline("text-generation", model="MiniMaxAI/MiniMax-M2")
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
+ framework, model = infer_framework_load_model(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ adapter_path if adapter_path is not None else model,
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ ...<5 lines>...
+ **model_kwargs,
+ ^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
+ raise ValueError(
+ f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
+ )
+ ValueError: Could not load model MiniMaxAI/MiniMax-M2 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>,). See the original errors:
+
+ while loading with AutoModelForCausalLM, an error is thrown:
+ Traceback (most recent call last):
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
+ model = model_class.from_pretrained(model, **kwargs)
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
+ return model_class.from_pretrained(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+ return func(*args, **kwargs)
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+ hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+ ~~~~~~~~~~~~~~~~^
+ config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+ hf_quantizer.validate_environment(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ dtype=dtype,
+ ^^^^^^^^^^^^
+ ...<3 lines>...
+ weights_only=weights_only,
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 54, in validate_environment
+ raise ValueError(
+ ...<2 lines>...
+ )
+ ValueError: FP8 quantized models is only supported on GPUs with compute capability >= 8.9 (e.g 4090/H100), actual = `8.6`
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
+ model = model_class.from_pretrained(model, **fp32_kwargs)
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
+ return model_class.from_pretrained(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+ return func(*args, **kwargs)
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+ hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+ ~~~~~~~~~~~~~~~~^
+ config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+ hf_quantizer.validate_environment(
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+ dtype=dtype,
+ ^^^^^^^^^^^^
+ ...<3 lines>...
+ weights_only=weights_only,
+ ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ )
+ ^
+ File "/tmp/.cache/uv/environments-v2/d3eea229ed2fb556/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 54, in validate_environment
+ raise ValueError(
+ ...<2 lines>...
+ )
+ ValueError: FP8 quantized models is only supported on GPUs with compute capability >= 8.9 (e.g 4090/H100), actual = `8.6`
+
+
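Note on the failure: MiniMaxAI/MiniMax-M2 ships FP8 (fine-grained) quantized weights, and the transformers FP8 quantizer refuses to load them on GPUs with CUDA compute capability below 8.9 (Ada/Hopper class, e.g. RTX 4090 or H100); the runner here reports 8.6 (Ampere class). Both attempts in the log, the default load and the fp32 retry, die in the same validate_environment check, because the quantizer is selected from the checkpoint's quantization_config regardless of the requested dtype. Below is a minimal pre-flight sketch, assuming PyTorch is installed and a CUDA device is visible; the 8.9 threshold is copied from the error message above, not read from transformers:

    import torch

    # Threshold taken from the transformers error message above (assumption:
    # it stays accurate for the installed version); Ada/Hopper and newer.
    FP8_MIN_CAPABILITY = (8, 9)

    if torch.cuda.is_available():
        cap = torch.cuda.get_device_capability()  # e.g. (8, 6) on Ampere
        if cap >= FP8_MIN_CAPABILITY:
            print(f"compute capability {cap[0]}.{cap[1]}: FP8 checkpoints should load")
        else:
            print(f"compute capability {cap[0]}.{cap[1]}: expect the ValueError above")
    else:
        print("no CUDA device visible; loading will fail for other reasons first")

Running the same pipeline snippet on hardware with compute capability >= 8.9, or pointing it at a non-FP8 copy of the weights, would avoid this error.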