ariG23498 (HF Staff) committed
Commit 85bcc26 · verified · Parent: 8354c65

Upload Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt with huggingface_hub

Files changed (1)
  1. Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt +163 -0
Qwen_Qwen3-VL-32B-Instruct-FP8_0.txt ADDED
@@ -0,0 +1,163 @@
```CODE:
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-32B-Instruct-FP8")
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"}
        ]
    },
]
pipe(text=messages)
```

ERROR:
Traceback (most recent call last):
  File "/tmp/Qwen_Qwen3-VL-32B-Instruct-FP8_0pkcurZ.py", line 17, in <module>
    pipe = pipeline("image-text-to-text", model="Qwen/Qwen3-VL-32B-Instruct-FP8")
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model Qwen/Qwen3-VL-32B-Instruct-FP8 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>, <class 'transformers.models.qwen3_vl.modeling_qwen3_vl.Qwen3VLForConditionalGeneration'>). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 604, in from_pretrained
    return model_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.

while loading with Qwen3VLForConditionalGeneration, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
    return func(*args, **kwargs)
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
    hf_quantizer, config, dtype, device_map = get_hf_quantizer(
                                              ~~~~~~~~~~~~~~~~^
        config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
    hf_quantizer.validate_environment(
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        dtype=dtype,
        ^^^^^^^^^^^^
    ...<3 lines>...
        weights_only=weights_only,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/49adf985b5e89d74/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
    raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
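Every load path above fails for the same environmental reason: the checkpoint ships a fine-grained FP8 quantization config, and `validate_environment` raises when neither a CUDA GPU nor an Intel XPU is present (it fires again on the pipeline's fp32 retry, since the retry still goes through the same quantizer check). A minimal sketch of a guarded load follows; it assumes PyTorch is installed alongside `transformers` and `accelerate`, and the fallback to the unquantized Qwen/Qwen3-VL-32B-Instruct checkpoint on accelerator-less machines is this sketch's own choice, not something the log prescribes.

```python
# A guarded variant of the snippet above: probe for an accelerator first,
# because the FP8 quantizer refuses to initialize without a GPU or XPU.
import torch
from transformers import pipeline

has_accelerator = torch.cuda.is_available() or (
    hasattr(torch, "xpu") and torch.xpu.is_available()
)

# Assumption: falling back to the unquantized checkpoint is acceptable
# when no accelerator exists (it needs substantial host RAM at 32B scale).
model_id = (
    "Qwen/Qwen3-VL-32B-Instruct-FP8"
    if has_accelerator
    else "Qwen/Qwen3-VL-32B-Instruct"
)

pipe = pipeline(
    "image-text-to-text",
    model=model_id,
    device_map="auto",  # needs `accelerate`; places weights on the accelerator when present
)

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/p-blog/candy.JPG"},
            {"type": "text", "text": "What animal is on the candy?"},
        ],
    },
]
print(pipe(text=messages))
```

On a machine with a GPU or XPU this takes the FP8 path as intended; the CPU branch sidesteps the quantizer check entirely but loads the unquantized weights, so it is realistic only for smoke tests on hosts with very large memory.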