chron-am-roberta/regular_roberta_from_scratch/1_train_tokenizer.py
Jakub Pokrywka 00fec94240 init
2021-11-06 15:41:54 +01:00

19 lines
372 B
Python

from pathlib import Path
from tokenizers import ByteLevelBPETokenizer


def main() -> None:
    """Train a RoBERTa-style byte-level BPE tokenizer on ./train_in.csv.

    Writes vocab.json and merges.txt into ./tokenizer_model/.
    """
    training_files = ["./train_in.csv"]

    # Initialize a tokenizer
    tokenizer = ByteLevelBPETokenizer()

    # Customize training: 50265 matches the RoBERTa-base vocabulary size,
    # and these five special tokens are RoBERTa's standard set.
    tokenizer.train(
        files=training_files,
        vocab_size=50265,
        min_frequency=2,
        special_tokens=[
            "<s>",
            "<pad>",
            "</s>",
            "<unk>",
            "<mask>",
        ],
    )

    # save_model() requires the target directory to exist already,
    # so create it first (idempotent if it is already there).
    out_dir = Path("./tokenizer_model")
    out_dir.mkdir(parents=True, exist_ok=True)
    tokenizer.save_model(str(out_dir))


if __name__ == "__main__":
    main()