Upload vandijklab_C2S-Scale-Gemma-2-27B_0.txt with huggingface_hub
vandijklab_C2S-Scale-Gemma-2-27B_0.txt
ADDED
CODE:

```
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="vandijklab/C2S-Scale-Gemma-2-27B")
```
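For reference, the `pipeline(...)` call above resolves the tokenizer via `AutoTokenizer.from_pretrained` before loading the model weights, and that tokenizer step is where the traceback below originates. A minimal explicit-load sketch, assuming the checkpoint follows the standard Gemma-2 causal-LM layout (the auto classes below are the usual `transformers` entry points, not something this report confirms):

```
# Rough explicit equivalent of the pipeline call above. A sketch assuming a
# standard Gemma-2 causal-LM checkpoint; not taken from this report.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "vandijklab/C2S-Scale-Gemma-2-27B"

# This is the step that raises in the traceback below.
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Loading 27B parameters needs substantial memory; device_map="auto"
# (which requires the accelerate package) shards the weights across
# whatever devices are available.
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
```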
ERROR:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1640, in extract_vocab_merges_from_model
    from tiktoken.load import load_tiktoken_bpe
ModuleNotFoundError: No module named 'tiktoken'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1783, in convert_slow_tokenizer
    ).converted()
    ~~~~~~~~~^^
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1677, in converted
    tokenizer = self.tokenizer()
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1670, in tokenizer
    vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab_file)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1642, in extract_vocab_merges_from_model
    raise ValueError(
        "`tiktoken` is required to read a `tiktoken` file. Install it with `pip install tiktoken`."
    )
ValueError: `tiktoken` is required to read a `tiktoken` file. Install it with `pip install tiktoken`.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2343, in _from_pretrained
    tokenizer = cls(*init_inputs, **init_kwargs)
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/models/gemma/tokenization_gemma_fast.py", line 100, in __init__
    super().__init__(
    ~~~~~~~~~~~~~~~~^
        vocab_file=vocab_file,
        ^^^^^^^^^^^^^^^^^^^^^^
    ...<8 lines>...
        **kwargs,
        ^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/tokenization_utils_fast.py", line 139, in __init__
    fast_tokenizer = convert_slow_tokenizer(self, from_tiktoken=True)
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/convert_slow_tokenizer.py", line 1785, in convert_slow_tokenizer
    raise ValueError(
    ...<3 lines>...
    )
ValueError: Converting from SentencePiece and Tiktoken failed, if a converter for SentencePiece is available, provide a model path with a SentencePiece tokenizer.model file.Currently available slow->fast converters: ['AlbertTokenizer', 'BartTokenizer', 'BarthezTokenizer', 'BertTokenizer', 'BigBirdTokenizer', 'BlenderbotTokenizer', 'CamembertTokenizer', 'CLIPTokenizer', 'CodeGenTokenizer', 'ConvBertTokenizer', 'DebertaTokenizer', 'DebertaV2Tokenizer', 'DistilBertTokenizer', 'DPRReaderTokenizer', 'DPRQuestionEncoderTokenizer', 'DPRContextEncoderTokenizer', 'ElectraTokenizer', 'FNetTokenizer', 'FunnelTokenizer', 'GPT2Tokenizer', 'HerbertTokenizer', 'LayoutLMTokenizer', 'LayoutLMv2Tokenizer', 'LayoutLMv3Tokenizer', 'LayoutXLMTokenizer', 'LongformerTokenizer', 'LEDTokenizer', 'LxmertTokenizer', 'MarkupLMTokenizer', 'MBartTokenizer', 'MBart50Tokenizer', 'MPNetTokenizer', 'MobileBertTokenizer', 'MvpTokenizer', 'NllbTokenizer', 'OpenAIGPTTokenizer', 'PegasusTokenizer', 'Qwen2Tokenizer', 'RealmTokenizer', 'ReformerTokenizer', 'RemBertTokenizer', 'RetriBertTokenizer', 'RobertaTokenizer', 'RoFormerTokenizer', 'SeamlessM4TTokenizer', 'SqueezeBertTokenizer', 'T5Tokenizer', 'UdopTokenizer', 'WhisperTokenizer', 'XLMRobertaTokenizer', 'XLNetTokenizer', 'SplinterTokenizer', 'XGLMTokenizer', 'LlamaTokenizer', 'CodeLlamaTokenizer', 'GemmaTokenizer', 'Phi3Tokenizer']

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/vandijklab_C2S-Scale-Gemma-2-27B_08O9wMA.py", line 19, in <module>
    pipe = pipeline("text-generation", model="vandijklab/C2S-Scale-Gemma-2-27B")
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1078, in pipeline
    raise e
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1073, in pipeline
    tokenizer = AutoTokenizer.from_pretrained(
        tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
    )
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/models/auto/tokenization_auto.py", line 1140, in from_pretrained
    return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2097, in from_pretrained
    return cls._from_pretrained(
    ~~~~~~~~~~~~~~~~~~~~^
        resolved_vocab_files,
        ^^^^^^^^^^^^^^^^^^^^^
    ...<9 lines>...
        **kwargs,
        ^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 2344, in _from_pretrained
    except import_protobuf_decode_error():
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~^^
  File "/tmp/.cache/uv/environments-v2/787d286a2a85a9de/lib/python3.13/site-packages/transformers/tokenization_utils_base.py", line 87, in import_protobuf_decode_error
    raise ImportError(PROTOBUF_IMPORT_ERROR.format(error_message))
ImportError:
requires the protobuf library but it was not found in your environment. Check out the instructions on the
installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
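The exception chain points at missing optional dependencies in the execution environment rather than at the model itself: the fast-tokenizer converter first tries to read a tiktoken-format vocabulary (which needs `tiktoken`), then falls back to the SentencePiece path (which needs `protobuf`, and typically `sentencepiece`). A minimal retry sketch under that assumption; the package list is inferred from the error messages above, not a documented requirement of this checkpoint:

```
# Retry sketch: install the tokenizer dependencies named in the traceback,
# then build the pipeline again. The package set (tiktoken, sentencepiece,
# protobuf) is an assumption drawn from the error messages, not a verified
# requirement of this model.
#
#   pip install tiktoken sentencepiece protobuf
#
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="vandijklab/C2S-Scale-Gemma-2-27B",
    device_map="auto",  # 27B parameters; shard weights across available devices
)
print(pipe("Example prompt", max_new_tokens=32)[0]["generated_text"])
```

If the fast-tokenizer conversion still fails after installing those packages, forcing the slow tokenizer with `use_fast=False` (a parameter visible in the pipeline frame above) is another avenue, though neither path is verified here.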