{
  "added_tokens_decoder": {},
  "clean_up_tokenization_spaces": true,
  "merges_file": "./openwebmath_tokenizer/merges.txt",
  "model_max_length": 1000000000000000019884624838656,
  "tokenizer_class": "PreTrainedTokenizerFast",
  "vocab_file": "./openwebmath_tokenizer/vocab.json"
}
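
For context, a minimal sketch of how a tokenizer described by a config like this might be loaded, assuming `./openwebmath_tokenizer` contains this `tokenizer_config.json` alongside the serialized tokenizer files the named class expects (the directory path is taken from the config above; the sample input string is illustrative):

```python
from transformers import AutoTokenizer

# AutoTokenizer reads tokenizer_config.json from the directory and
# instantiates the class named in "tokenizer_class"
# (here, PreTrainedTokenizerFast).
tokenizer = AutoTokenizer.from_pretrained("./openwebmath_tokenizer")

# Encode a sample string and inspect the resulting token ids.
ids = tokenizer("The integral of x^2 is x^3/3.")["input_ids"]
print(ids)
print(tokenizer.convert_ids_to_tokens(ids))
```

Note that `model_max_length` here is the sentinel value HuggingFace uses when no maximum length was set at save time (roughly 1e30), so callers should pass an explicit `max_length` when truncation matters.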