ariG23498 (HF Staff) committed
Commit 4aa8e89 · verified · 1 Parent(s): e3866bb

Upload Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt with huggingface_hub

Qwen_Qwen3-VL-32B-Instruct-FP8_1.txt ADDED
@@ -0,0 +1,63 @@
+ ```CODE:
+ # Load model directly
+ from transformers import AutoProcessor, AutoModelForVision2Seq
+
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
+ model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
+ messages = [
+     {
+         "role": "user",
+         "content": [
+             {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
+             {"type": "text", "text": "What animal is on the candy?"}
+         ]
+     },
+ ]
+ inputs = processor.apply_chat_template(
+     messages,
+     add_generation_prompt=True,
+     tokenize=True,
+     return_dict=True,
+     return_tensors="pt",
+ ).to(model.device)
+
+ outputs = model.generate(**inputs, max_new_tokens=40)
+ print(processor.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
+ ```
+
+ ERROR:
+ Traceback (most recent call last):
+   File "/tmp/Qwen_Qwen3-VL-32B-Instruct-FP8_1H3mGsX.py", line 18, in <module>
+     model = AutoModelForVision2Seq.from_pretrained("Qwen/Qwen3-VL-32B-Instruct-FP8")
+   File "/tmp/.cache/uv/environments-v2/3a120645d4f704f5/lib/python3.13/site-packages/transformers/models/auto/modeling_auto.py", line 2289, in from_pretrained
+     return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+            ~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   File "/tmp/.cache/uv/environments-v2/3a120645d4f704f5/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
+     return model_class.from_pretrained(
+            ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/3a120645d4f704f5/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+     return func(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/3a120645d4f704f5/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+     hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+                                               ~~~~~~~~~~~~~~~~^
+         config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/3a120645d4f704f5/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+     hf_quantizer.validate_environment(
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         dtype=dtype,
+         ^^^^^^^^^^^^
+         ...<3 lines>...
+         weights_only=weights_only,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/3a120645d4f704f5/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
+     raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
+ RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
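
The failure is environmental rather than a bug in the snippet: this checkpoint ships fine-grained FP8 weights, and the transformers FP8 quantizer refuses to load them unless a GPU or XPU is present. A minimal guard is sketched below, assuming a CUDA- or XPU-capable runtime is the intended target; the fallback to the unquantized `Qwen/Qwen3-VL-32B-Instruct` repository is an assumption for illustration (not confirmed by this log), and `device_map="auto"` requires `accelerate` to be installed.

```python
# Sketch: only load the FP8 checkpoint when an accelerator exists.
# ASSUMPTION: "Qwen/Qwen3-VL-32B-Instruct" is an acceptable unquantized fallback.
import torch
from transformers import AutoProcessor, AutoModelForVision2Seq

FP8_REPO = "Qwen/Qwen3-VL-32B-Instruct-FP8"
FALLBACK_REPO = "Qwen/Qwen3-VL-32B-Instruct"  # hypothetical fallback

has_accelerator = torch.cuda.is_available() or (
    hasattr(torch, "xpu") and torch.xpu.is_available()
)

if has_accelerator:
    # FP8 weights need a GPU/XPU; let accelerate place the layers.
    repo = FP8_REPO
    model = AutoModelForVision2Seq.from_pretrained(repo, device_map="auto")
else:
    # CPU-only environment: use the full-precision checkpoint instead.
    repo = FALLBACK_REPO
    model = AutoModelForVision2Seq.from_pretrained(repo, torch_dtype=torch.bfloat16)

processor = AutoProcessor.from_pretrained(repo)
```

The guard mirrors the check in `quantizer_finegrained_fp8.py` (GPU or XPU), so the load either succeeds on the quantized weights or degrades to bf16, which roughly doubles weight memory relative to FP8.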