File size: 2,569 Bytes
917f25e dd2102a 917f25e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 |
```CODE:
# Load MiniMaxAI/MiniMax-M2 and run a single chat-style generation.
#
# NOTE: this checkpoint is stored with fine-grained FP8 quantization, and
# transformers' FP8 quantizer refuses to run on CPU
# ("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.").
# Loading therefore requires an accelerator; `device_map="auto"` places the
# weights on the available GPU/XPU instead of the CPU default, and
# `dtype="auto"` keeps the checkpoint's stored dtype rather than upcasting
# everything to float32.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2")
model = AutoModelForCausalLM.from_pretrained(
    "MiniMaxAI/MiniMax-M2",
    device_map="auto",   # required: FP8 weights must live on GPU/XPU
    dtype="auto",        # respect the checkpoint's quantized dtype
)

messages = [
    {"role": "user", "content": "Who are you?"},
]

# Build the model-ready prompt tensors (chat template + generation prompt)
# and move them to whatever device the weights were dispatched to.
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
# Decode only the newly generated tokens, skipping the echoed prompt.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
```
ERROR:
Traceback (most recent call last):
File "/tmp/MiniMaxAI_MiniMax-M2_1H18ifN.py", line 20, in <module>
model = AutoModelForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2")
File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
return model_class.from_pretrained(
~~~~~~~~~~~~~~~~~~~~~~~~~~~^
pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
return func(*args, **kwargs)
File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
hf_quantizer, config, dtype, device_map = get_hf_quantizer(
~~~~~~~~~~~~~~~~^
config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
hf_quantizer.validate_environment(
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
dtype=dtype,
^^^^^^^^^^^^
...<3 lines>...
weights_only=weights_only,
^^^^^^^^^^^^^^^^^^^^^^^^^^
)
^
File "/tmp/.cache/uv/environments-v2/e4552f1c3ca9047f/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
|