{
  "added_tokens_decoder": {},
  "clean_up_tokenization_spaces": false,
  "extra_special_tokens": {},
  "model_max_length": 1000000000000000019884624838656,
  "tokenizer_class": "PreTrainedTokenizerFast"
}
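For reference, a minimal sketch of how a config like this is typically consumed, assuming it is saved as `tokenizer_config.json` in a local directory alongside the tokenizer's `tokenizer.json`; the directory path below is hypothetical:

```python
from transformers import AutoTokenizer

# AutoTokenizer reads the "tokenizer_class" field from tokenizer_config.json
# and instantiates a PreTrainedTokenizerFast accordingly.
tokenizer = AutoTokenizer.from_pretrained("./my-tokenizer")  # hypothetical local path

# The huge model_max_length (int(1e30)) is the transformers sentinel for
# "no limit configured": nothing is truncated unless max_length is passed.
print(tokenizer.model_max_length)
```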