Fine-tuning GPT2ForSequenceClassification for binary sentiment classification on SST-2

!pip install -q datasets transformers
from datasets import load_dataset
import torch
from transformers import AutoTokenizer, GPT2ForSequenceClassification, Trainer, TrainingArguments, GPT2Tokenizer,GPT2Config
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from transformers.integrations import TensorBoardCallback
def load_and_process_dataset():
    # SST-2: binary sentiment, label 0 = negative, 1 = positive
    dataset = load_dataset("sst2")
    # remove_columns returns a new DatasetDict; it is not in-place
    dataset = dataset.remove_columns('idx')
    # The official SST-2 test split is unlabeled, so reuse the original validation split as the test set
    del dataset['test']
    dataset['test'] = dataset['validation']
    del dataset['validation']
    # Carve a 1600-example validation set out of the training data
    split_dataset = dataset['train'].train_test_split(test_size=1600)
    dataset['train'] = split_dataset['train']
    dataset['validation'] = split_dataset['test']
    return dataset
def compute_metrics(pred):
    # pred.predictions holds logits of shape (n_examples, 2); argmax gives the predicted class
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }
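As a quick sanity check, compute_metrics can be exercised on made-up logits before training (illustrative only; EvalPrediction is the container the Trainer passes in):

import numpy as np
from transformers import EvalPrediction

# Two examples: logits favour class 1 then class 0, and the labels agree,
# so every metric should come out as 1.0
dummy = EvalPrediction(predictions=np.array([[0.1, 0.9], [0.8, 0.2]]),
                       label_ids=np.array([1, 0]))
print(compute_metrics(dummy))
# {'accuracy': 1.0, 'f1': 1.0, 'precision': 1.0, 'recall': 1.0}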
dataset = load_and_process_dataset()
dataset
Downloading and preparing dataset sst2/default to /root/.cache/huggingface/datasets/sst2/default/2.0.0/9896208a8d85db057ac50c72282bcb8fe755accc671a57dd8059d4e130961ed5...
Dataset sst2 downloaded and prepared to /root/.cache/huggingface/datasets/sst2/default/2.0.0/9896208a8d85db057ac50c72282bcb8fe755accc671a57dd8059d4e130961ed5. Subsequent calls will reuse this data.
DatasetDict({
    train: Dataset({
        features: ['idx', 'sentence', 'label'],
        num_rows: 65749
    })
    test: Dataset({
        features: ['idx', 'sentence', 'label'],
        num_rows: 872
    })
    validation: Dataset({
        features: ['idx', 'sentence', 'label'],
        num_rows: 1600
    })
})
train = dataset['train']
validation = dataset['validation']
test = dataset['test']
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token  # GPT-2 has no pad token; reuse EOS for padding

# from_pretrained is a classmethod, so constructing a model from a fresh GPT2Config() first
# (GPT2ForSequenceClassification(configuration).from_pretrained(...)) discards the instance; load directly
model = GPT2ForSequenceClassification.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id
Some weights of GPT2ForSequenceClassification were not initialized from the model checkpoint at gpt2 and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
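For illustration, this is roughly what the tokenizer now produces on a short made-up sentence, with EOS doubling as the pad token:

enc = tokenizer("a gripping , funny film", padding="max_length", max_length=8, truncation=True)
print(enc["input_ids"])       # BPE ids, right-padded with tokenizer.eos_token_id (50256)
print(enc["attention_mask"])  # 1 over real tokens, 0 over padding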
def tokenization(batched_text):
    # Pad each batch to its longest sentence and truncate to the model maximum (1024 tokens for GPT-2)
    return tokenizer(batched_text['sentence'], padding=True, truncation=True)


# batch_size = len(split) tokenizes each split as a single batch, so padding is uniform within a split
train_data = train.map(tokenization, batched=True, batch_size=len(train))
val_data = validation.map(tokenization, batched=True, batch_size=len(validation))
test_data = test.map(tokenization, batched=True, batch_size=len(test))
# Expose the tensors the model actually consumes; without 'attention_mask' the padded
# positions would be attended to
train_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
val_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
test_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
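A quick look at one formatted example (illustrative; the exact padded length depends on the longest sentence in the split):

print(train_data[0]['input_ids'].shape)      # padded to the longest sentence in the split
print(train_data[0]['attention_mask'][:10])  # 1s over real tokens, 0s over padding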
training_args = TrainingArguments(
    output_dir='./results',
    num_train_epochs=4,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=16,   # effective train batch size: 4 * 16 = 64
    per_device_eval_batch_size=8,
    evaluation_strategy="epoch",
    disable_tqdm=False,
    load_best_model_at_end=False,
    warmup_steps=500,
    weight_decay=0.01,
    logging_steps=8,
    fp16=True,                        # mixed precision; requires a CUDA GPU
    logging_dir='./logs',
    dataloader_num_workers=2,
    run_name='gpt2-classification',
    optim="adamw_torch"
)
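For reference, the 4108 optimization steps reported in the training log below follow from these arguments (a sketch of the Trainer's step arithmetic, assuming its usual floor division of dataloader batches by the accumulation steps):

import math
batches_per_epoch = math.ceil(65749 / 4)   # 16438 dataloader batches at per-device batch size 4
steps_per_epoch = batches_per_epoch // 16  # // gradient_accumulation_steps = 1027
print(steps_per_epoch * 4)                 # 1027 * 4 = 4108, matching the training log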
trainer = Trainer(
    model=model,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_data,
    eval_dataset=val_data,
    callbacks=[TensorBoardCallback]  # redundant: Trainer adds a TensorBoardCallback itself, hence the warning below
)
You are adding a <class 'transformers.integrations.TensorBoardCallback'> to the callbacks of this Trainer, but there is already one. The current list of callbacks is:
DefaultFlowCallback
TensorBoardCallback
Using cuda_amp half precision backend
trainer.train()
The following columns in the training set don't have a corresponding argument in `GPT2ForSequenceClassification.forward` and have been ignored: sentence, idx. If sentence, idx are not expected by `GPT2ForSequenceClassification.forward`,  you can safely ignore this message.
***** Running training *****
  Num examples = 65749
  Num Epochs = 4
  Instantaneous batch size per device = 4
  Total train batch size (w. parallel, distributed & accumulation) = 64
  Gradient Accumulation steps = 16
  Total optimization steps = 4108
  Number of trainable parameters = 124441344
[4108/4108 54:59, Epoch 3/4]
Epoch  Training Loss  Validation Loss  Accuracy  F1        Precision  Recall
0      0.299400       0.205000         0.921250  0.930921  0.926856   0.935022
1      0.194200       0.166365         0.936875  0.944106  0.948832   0.939427
2      0.110300       0.193420         0.938125  0.945092  0.951955   0.938326
3      0.094900       0.197988         0.941875  0.948189  0.959414   0.937225

Saving model checkpoint to ./results/checkpoint-500
Configuration saved in ./results/checkpoint-500/config.json
Model weights saved in ./results/checkpoint-500/pytorch_model.bin
...
Saving model checkpoint to ./results/checkpoint-4000
Configuration saved in ./results/checkpoint-4000/config.json
Model weights saved in ./results/checkpoint-4000/pytorch_model.bin
***** Running Evaluation *****
  Num examples = 1600
  Batch size = 8


Training completed. Do not forget to share your model on huggingface.co/models =)


TrainOutput(global_step=4108, training_loss=0.19953461595336675, metrics={'train_runtime': 3303.857, 'train_samples_per_second': 79.603, 'train_steps_per_second': 1.243, 'total_flos': 8723522156544000.0, 'train_loss': 0.19953461595336675, 'epoch': 4.0})
trainer.evaluate()
***** Running Evaluation *****
  Num examples = 1600
  Batch size = 8
[200/200 00:03]
{'eval_loss': 0.19798797369003296,
 'eval_accuracy': 0.941875,
 'eval_f1': 0.9481894150417827,
 'eval_precision': 0.9594137542277339,
 'eval_recall': 0.9372246696035242,
 'eval_runtime': 4.1144,
 'eval_samples_per_second': 388.88,
 'eval_steps_per_second': 48.61,
 'epoch': 4.0}
trainer.evaluate(test_data)
***** Running Evaluation *****
  Num examples = 872
  Batch size = 8
{'eval_loss': 0.32073774933815,
 'eval_accuracy': 0.9174311926605505,
 'eval_f1': 0.9191011235955056,
 'eval_precision': 0.9170403587443946,
 'eval_recall': 0.9211711711711712,
 'eval_runtime': 2.7023,
 'eval_samples_per_second': 322.685,
 'eval_steps_per_second': 40.336,
 'epoch': 4.0}
!tensorboard dev upload --logdir logs --name GPT2ForSequenceClassification

***** TensorBoard Uploader *****

This will upload your TensorBoard logs to https://tensorboard.dev/ from
the following directory:

logs

This TensorBoard will be visible to everyone. Do not upload sensitive
data.

Your use of this service is subject to Google's Terms of Service
<https://policies.google.com/terms> and Privacy Policy
<https://policies.google.com/privacy>, and TensorBoard.dev's Terms of Service
<https://tensorboard.dev/policy/terms/>.

This notice will not be shown again while you are logged into the uploader.
To log out, run `tensorboard dev auth revoke`.

Continue? (yes/NO) YES

To sign in with the TensorBoard uploader:

1. On your computer or phone, visit:

   https://www.google.com/device

2. Sign in with your Google account, then enter:

   LLX-MQT-RDG


Upload started and will continue reading any new data as it's added to the logdir.

To stop uploading, press Ctrl-C.

New experiment created. View your TensorBoard at: https://tensorboard.dev/experiment/rdKko5l3RKaJoSogfLorWg/

[2023-02-13T17:21:34] Started scanning logdir.
[2023-02-13T17:21:36] Total uploaded: 3198 scalars, 10 tensors (7.6 kB), 0 binary objects


Interrupted. View your TensorBoard at https://tensorboard.dev/experiment/rdKko5l3RKaJoSogfLorWg/
Traceback (most recent call last):
  File "/usr/local/bin/tensorboard", line 8, in <module>
    sys.exit(run_main())
  File "/usr/local/lib/python3.8/dist-packages/tensorboard/main.py", line 46, in run_main
    app.run(tensorboard.main, flags_parser=tensorboard.configure)
  File "/usr/local/lib/python3.8/dist-packages/absl/app.py", line 308, in run
    _run_main(main, args)
  File "/usr/local/lib/python3.8/dist-packages/absl/app.py", line 254, in _run_main
    sys.exit(main(argv))
  File "/usr/local/lib/python3.8/dist-packages/tensorboard/program.py", line 276, in main
    return runner(self.flags) or 0
  File "/usr/local/lib/python3.8/dist-packages/tensorboard/uploader/uploader_subcommand.py", line 691, in run
    return _run(flags, self._experiment_url_callback)
  File "/usr/local/lib/python3.8/dist-packages/tensorboard/uploader/uploader_subcommand.py", line 124, in _run
    intent.execute(server_info, channel)
  File "/usr/local/lib/python3.8/dist-packages/tensorboard/uploader/uploader_subcommand.py", line 507, in execute
    sys.stdout.write(end_message + "\n")
KeyboardInterrupt
^C
model.save_pretrained("./model")
Configuration saved in ./model/config.json
Model weights saved in ./model/pytorch_model.bin
!zip model -r model
  adding: model/ (stored 0%)
  adding: model/config.json (deflated 53%)
  adding: model/pytorch_model.bin (deflated 9%)
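To reuse the saved checkpoint later, it can be reloaded for inference roughly like this (a sketch; the example sentence is made up, and the tokenizer must be re-created from "gpt2" because only the model was saved):

from transformers import GPT2Tokenizer, GPT2ForSequenceClassification
import torch

clf = GPT2ForSequenceClassification.from_pretrained("./model")  # pad_token_id was stored in config.json
tok = GPT2Tokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token

inputs = tok("a gripping , funny film", return_tensors="pt")
with torch.no_grad():
    logits = clf(**inputs).logits
print(logits.argmax(-1).item())  # 1 = positive, 0 = negative in SST-2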