NLP_tokeniser2 / tokenizer_config.json
{
  "added_tokens_decoder": {},
  "clean_up_tokenization_spaces": false,
  "cls_token": "[CLS]",
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "tokenizer_class": "PreTrainedTokenizerFast",
  "unk_token": "[UNK]"
}
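
This file is read by the Hugging Face transformers library when the tokenizer is loaded: "tokenizer_class" tells it to instantiate PreTrainedTokenizerFast, the token fields register the [CLS]/[SEP]/[PAD]/[MASK]/[UNK] special tokens, and "model_max_length" is the library's "no limit configured" sentinel (int(1e30), which prints as the large integer above). A minimal loading sketch, assuming the repo id omidgh/NLP_tokeniser2 taken from this page and that a tokenizer.json with the actual vocabulary sits alongside this config:

from transformers import AutoTokenizer

# AutoTokenizer reads tokenizer_config.json to pick the class
# (PreTrainedTokenizerFast) and the special tokens defined above.
tok = AutoTokenizer.from_pretrained("omidgh/NLP_tokeniser2")

# The special tokens from this config are exposed as attributes.
print(tok.cls_token, tok.sep_token, tok.pad_token)  # [CLS] [SEP] [PAD]

# Encode and decode a sample string; clean_up_tokenization_spaces=False
# means decoded text is left exactly as the detokenizer produces it.
ids = tok("hello world")["input_ids"]
print(tok.decode(ids))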