ariG23498 (HF Staff) committed
Commit 52fe397 · verified · 1 parent: 7ee56a6

Upload deepseek-ai_DeepSeek-R1_0.txt with huggingface_hub

Files changed (1):
  1. deepseek-ai_DeepSeek-R1_0.txt +157 -0
deepseek-ai_DeepSeek-R1_0.txt ADDED
@@ -0,0 +1,157 @@
+ ```python
+ # Use a pipeline as a high-level helper
+ from transformers import pipeline
+
+ pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1", trust_remote_code=True)
+ messages = [
+     {"role": "user", "content": "Who are you?"},
+ ]
+ pipe(messages)
+ ```
+
+ ERROR:
+ Traceback (most recent call last):
+   File "/tmp/deepseek-ai_DeepSeek-R1_0Vrg3Zv.py", line 17, in <module>
+     pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1", trust_remote_code=True)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
+     framework, model = infer_framework_load_model(
+                        ~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         adapter_path if adapter_path is not None else model,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     ...<5 lines>...
+         **model_kwargs,
+         ^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
+     raise ValueError(
+         f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
+     )
+ ValueError: Could not load model deepseek-ai/DeepSeek-R1 with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForCausalLM'>, <class 'transformers.models.deepseek_v3.modeling_deepseek_v3.DeepseekV3ForCausalLM'>). See the original errors:
+
+ while loading with AutoModelForCausalLM, an error is thrown:
+ Traceback (most recent call last):
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
+     model = model_class.from_pretrained(model, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 597, in from_pretrained
+     return model_class.from_pretrained(
+            ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+     return func(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+     hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+                                               ~~~~~~~~~~~~~~~~^
+         config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+     hf_quantizer.validate_environment(
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         dtype=dtype,
+         ^^^^^^^^^^^^
+     ...<3 lines>...
+         weights_only=weights_only,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
+     raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
+ RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
+     model = model_class.from_pretrained(model, **fp32_kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 597, in from_pretrained
+     return model_class.from_pretrained(
+            ~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+     return func(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+     hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+                                               ~~~~~~~~~~~~~~~~^
+         config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+     hf_quantizer.validate_environment(
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         dtype=dtype,
+         ^^^^^^^^^^^^
+     ...<3 lines>...
+         weights_only=weights_only,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
+     raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
+ RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
+
+ while loading with DeepseekV3ForCausalLM, an error is thrown:
+ Traceback (most recent call last):
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
+     model = model_class.from_pretrained(model, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+     return func(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+     hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+                                               ~~~~~~~~~~~~~~~~^
+         config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+     hf_quantizer.validate_environment(
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         dtype=dtype,
+         ^^^^^^^^^^^^
+     ...<3 lines>...
+         weights_only=weights_only,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
+     raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
+ RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
+     model = model_class.from_pretrained(model, **fp32_kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 277, in _wrapper
+     return func(*args, **kwargs)
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/modeling_utils.py", line 4881, in from_pretrained
+     hf_quantizer, config, dtype, device_map = get_hf_quantizer(
+                                               ~~~~~~~~~~~~~~~~^
+         config, quantization_config, dtype, from_tf, from_flax, device_map, weights_only, user_agent
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/auto.py", line 319, in get_hf_quantizer
+     hf_quantizer.validate_environment(
+     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
+         dtype=dtype,
+         ^^^^^^^^^^^^
+     ...<3 lines>...
+         weights_only=weights_only,
+         ^^^^^^^^^^^^^^^^^^^^^^^^^^
+     )
+     ^
+   File "/tmp/.cache/uv/environments-v2/b79da90666dc4235/lib/python3.13/site-packages/transformers/quantizers/quantizer_finegrained_fp8.py", line 48, in validate_environment
+     raise RuntimeError("No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.")
+ RuntimeError: No GPU or XPU found. A GPU or XPU is needed for FP8 quantization.
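
Note: all four load attempts (AutoModelForCausalLM and DeepseekV3ForCausalLM, each tried with the default kwargs and retried with fp32 kwargs) fail for the same underlying reason: the DeepSeek-R1 checkpoint ships FP8-quantized weights, and transformers' fine-grained FP8 quantizer refuses to initialize unless a CUDA GPU or Intel XPU is visible, so the pipeline cannot be built on this CPU-only runner. Below is a minimal sketch of a pre-flight check for that failure mode; the `has_accelerator` helper name is ours, while `torch.cuda.is_available()`, `torch.xpu.is_available()`, and the pipeline's `device_map="auto"` argument are standard PyTorch/transformers APIs.

```python
# Sketch: fail fast with a clear message instead of the long traceback above.
import torch
from transformers import pipeline

def has_accelerator() -> bool:
    """Return True if a CUDA GPU or Intel XPU is visible to PyTorch."""
    if torch.cuda.is_available():
        return True
    # torch.xpu only exists in newer PyTorch builds, hence the guard.
    return hasattr(torch, "xpu") and torch.xpu.is_available()

if not has_accelerator():
    raise SystemExit(
        "deepseek-ai/DeepSeek-R1 ships FP8-quantized weights; "
        "transformers requires a CUDA GPU or Intel XPU to load them."
    )

# On a suitable machine, device_map="auto" shards the (very large)
# model across all visible accelerators.
pipe = pipeline(
    "text-generation",
    model="deepseek-ai/DeepSeek-R1",
    trust_remote_code=True,
    device_map="auto",
)
print(pipe([{"role": "user", "content": "Who are you?"}]))
```

Even with an accelerator present, the full DeepSeek-R1 checkpoint is hundreds of gigabytes of FP8 weights, so multi-GPU sharding via `device_map="auto"` (or a much smaller distilled variant such as deepseek-ai/DeepSeek-R1-Distill-Qwen-7B) is the practical route for this snippet.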