```bash
TOTAL_UPDATES=12500000     # Total number of training steps
WARMUP_UPDATES=1000        # Warmup the learning rate over this many updates
PEAK_LR=0.0001             # Peak learning rate, adjust as needed
TOKENS_PER_SAMPLE=512      # Max sequence length
MAX_POSITIONS=512          # Num. positional embeddings (usually same as above)
MAX_SENTENCES=1            # Number of sequences per batch (batch size)
UPDATE_FREQ=128            # Increase the effective batch size 128x via gradient accumulation

DATA_DIR=data-bin/wikitext-103
ulimit -n 4096             # Raise the open-file limit for the data loader workers

fairseq-train --fp16 $DATA_DIR \
    --task masked_lm --criterion masked_lm \
    --arch roberta_large --sample-break-mode complete --tokens-per-sample $TOKENS_PER_SAMPLE \
    --optimizer adam --adam-betas '(0.9,0.98)' --adam-eps 1e-6 --clip-norm 0.0 \
    --lr-scheduler polynomial_decay --lr $PEAK_LR --warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_UPDATES \
    --dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
    --batch-size $MAX_SENTENCES --update-freq $UPDATE_FREQ \
    --max-update $TOTAL_UPDATES --log-format simple --log-interval 1 \
    --restore-file roberta.large/model.pt
```
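Once training has produced a checkpoint, it can be loaded through fairseq's `RobertaModel` hub interface for a quick masked-LM sanity check. A minimal sketch, assuming fairseq's default `checkpoints/` output directory and the `checkpoint_best.pt` file it keeps there (adjust both to your actual run):

```python
from fairseq.models.roberta import RobertaModel

# Load the checkpoint written by fairseq-train. The directory 'checkpoints'
# and the file 'checkpoint_best.pt' are fairseq defaults and assumptions here;
# point them at your own save directory if you changed --save-dir.
roberta = RobertaModel.from_pretrained(
    'checkpoints',
    checkpoint_file='checkpoint_best.pt',
    data_name_or_path='data-bin/wikitext-103',
)
roberta.eval()  # disable dropout for deterministic predictions

# Quick sanity check: ask the model to fill in a masked token (top-3 candidates).
print(roberta.fill_mask('The capital of France is <mask>.', topk=3))
```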