Upload cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt with huggingface_hub
cerebras_GLM-4.6-REAP-218B-A32B-FP8_0.txt
CHANGED
@@ -11,7 +11,7 @@ pipe(messages)
 
 ERROR:
 Traceback (most recent call last):
-  File "/tmp/cerebras_GLM-4.6-REAP-218B-A32B-
+  File "/tmp/cerebras_GLM-4.6-REAP-218B-A32B-FP8_0zgHH88.py", line 19, in <module>
     pipe = pipeline("text-generation", model="cerebras/GLM-4.6-REAP-218B-A32B-FP8")
   File "/tmp/.cache/uv/environments-v2/1514a401d470c3b6/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1027, in pipeline
     framework, model = infer_framework_load_model(
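
For context, a minimal sketch of the kind of script the traceback points to, reconstructed from the hunk header and the call shown at line 19 of the temporary file. Only the pipeline() call and the trailing pipe(messages) appear in the diff itself; the import and the messages payload below are assumptions.

# Minimal reproduction sketch (assumed surroundings; only the pipeline()
# call and pipe(messages) are taken from the diff above).
from transformers import pipeline

# Placeholder chat-style input; the actual messages are not in the log.
messages = [{"role": "user", "content": "Hello"}]

# This is the call that raises inside infer_framework_load_model().
pipe = pipeline("text-generation", model="cerebras/GLM-4.6-REAP-218B-A32B-FP8")
pipe(messages)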