petite-difference-challenge.../4-finetune.sh

#!/usr/bin/env bash
set -e
set -x
TOTAL_NUM_UPDATES=600_000 # Total number of training steps == 10 epochs (1 epoch = 60_000 updates)
WARMUP_UPDATES=24_000 # Warm up the learning rate over this many updates
PEAK_LR=0.0001 # Peak learning rate, adjust as needed
HEAD_NAME='he_she' # Custom name for the classification head.
TOKENS_PER_SAMPLE=256 # Max sequence length
NUM_CLASSES=2 # Number of classes for the classification task.
MAX_SENTENCES=50 # Batch size (sentences per GPU).
UPDATE_FREQ=1 # Gradient accumulation steps; multiplies the effective batch size.
MODEL_PATH='checkpoints/lm_roberta_small/checkpoint_best.pt'
DATA_DIR=data-bin/classifier-spm-bpe
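# Optional sanity checks (a sketch, not part of the original pipeline): fail early
# if the pretrained LM checkpoint is missing, and print the effective batch size per
# optimizer step. NUM_GPUS is an assumption here -- fairseq additionally scales the
# effective batch size by the number of GPUs it actually uses.
if [ ! -f "$MODEL_PATH" ]; then
    echo "Pretrained checkpoint not found: $MODEL_PATH" >&2
    exit 1
fi
NUM_GPUS=${NUM_GPUS:-1}
echo "Effective batch size: $((MAX_SENTENCES * UPDATE_FREQ * NUM_GPUS)) sentences per update"
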
fairseq-train $DATA_DIR \
--restore-file "$MODEL_PATH" \
--fp16 --max-sentences $MAX_SENTENCES --max-positions $TOKENS_PER_SAMPLE --update-freq $UPDATE_FREQ \
--max-tokens 32768 --save-dir checkpoints/lm_roberta_small_finetune \
--task sentence_prediction \
--reset-optimizer --reset-dataloader --reset-meters \
--required-batch-size-multiple 1 \
--init-token 0 --separator-token 2 \
--arch roberta \
--criterion sentence_prediction \
--num-classes $NUM_CLASSES \
--classification-head-name "$HEAD_NAME" \
--dropout 0.1 --attention-dropout 0.1 --encoder-layers 8 --encoder-embed-dim 512 --encoder-ffn-embed-dim 2048 --encoder-attention-heads 8 \
--weight-decay 0.1 --clip-norm 0.0 \
--optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-06 \
--lr-scheduler polynomial_decay --lr $PEAK_LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
--max-epoch 10 --log-format tqdm --log-interval 1 --save-interval-updates 15000 --keep-interval-updates 5 --skip-invalid-size-inputs-valid-test \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--find-unused-parameters
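
# After training, the best checkpoint (selected by validation accuracy) can be
# sanity-checked with fairseq-validate. This is a hedged sketch rather than part
# of the original pipeline; the checkpoint path and the exact flag combination
# are assumptions that mirror the training setup above.
# fairseq-validate $DATA_DIR \
#     --task sentence_prediction --criterion sentence_prediction \
#     --classification-head-name "$HEAD_NAME" \
#     --num-classes $NUM_CLASSES --init-token 0 --separator-token 2 \
#     --max-positions $TOKENS_PER_SAMPLE --max-sentences $MAX_SENTENCES \
#     --path checkpoints/lm_roberta_small_finetune/checkpoint_best.pt \
#     --valid-subset valid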