uczenie/GPT2.ipynb


!pip install -q datasets transformers
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 330.3/330.3 kB 3.7 MB/s eta 0:00:00
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.8/3.8 MB 22.4 MB/s eta 0:00:00
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 8.2/8.2 MB 66.2 MB/s eta 0:00:00
     ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 3.8/3.8 MB 52.7 MB/s eta 0:00:00
from datasets import load_dataset
import torch
from transformers import AutoTokenizer, GPT2ForSequenceClassification, Trainer, TrainingArguments, GPT2Tokenizer, GPT2Config
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
def load_and_process_dataset():
    dataset = load_dataset("sst2")
    # remove_columns returns a new DatasetDict, so the result must be assigned back
    dataset = dataset.remove_columns('idx')
    # the official SST-2 test split is unlabeled, so reuse the validation split as the test set
    del dataset['test']
    dataset['test'] = dataset['validation']
    del dataset['validation']
    # carve a fresh 1600-example validation split out of the training data
    split_dataset = dataset['train'].train_test_split(test_size=1600)
    dataset['train'] = split_dataset['train']
    dataset['validation'] = split_dataset['test']
    return dataset
def compute_metrics(pred):
    # pred carries the raw logits and the ground-truth label ids for a whole evaluation pass
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='binary')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }
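As a quick sanity check, compute_metrics can be called directly on a hand-built EvalPrediction; a minimal sketch with made-up toy logits and labels:

import numpy as np
from transformers import EvalPrediction

# three toy examples: the model gets the first two right and the last one wrong
toy_pred = EvalPrediction(predictions=np.array([[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]),
                          label_ids=np.array([0, 1, 1]))
compute_metrics(toy_pred)   # {'accuracy': 0.667, 'f1': 0.667, 'precision': 1.0, 'recall': 0.5}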
dataset = load_and_process_dataset()
dataset
/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_token.py:88: UserWarning: 
The secret `HF_TOKEN` does not exist in your Colab secrets.
To authenticate with the Hugging Face Hub, create a token in your settings tab (https://huggingface.co/settings/tokens), set it as secret in your Google Colab and restart your session.
You will be able to reuse this secret in all of your notebooks.
Please note that authentication is recommended but still optional to access public models or datasets.
  warnings.warn(
DatasetDict({
    train: Dataset({
        features: ['idx', 'sentence', 'label'],
        num_rows: 65749
    })
    test: Dataset({
        features: ['idx', 'sentence', 'label'],
        num_rows: 872
    })
    validation: Dataset({
        features: ['idx', 'sentence', 'label'],
        num_rows: 1600
    })
})
train = dataset['train']
validation = dataset['validation']
test = dataset['test']
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# GPT-2 has no padding token by default, so reuse the EOS token for padding
tokenizer.pad_token = tokenizer.eos_token

# from_pretrained is a classmethod: instantiating GPT2ForSequenceClassification(GPT2Config()) first
# would only create a throwaway randomly initialized model, so load the pretrained weights directly
model = GPT2ForSequenceClassification.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id
Some weights of GPT2ForSequenceClassification were not initialized from the model checkpoint at gpt2 and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
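The warning only concerns the classification head: GPT2ForSequenceClassification puts a linear score layer on top of the pretrained transformer, and that layer is the part that starts from random weights. A quick optional check:

# the head is a bias-free Linear mapping the 768-dim hidden state to 2 labels;
# everything below it comes from the pretrained "gpt2" checkpoint
print(model.score)               # Linear(in_features=768, out_features=2, bias=False)
print(model.config.num_labels)   # 2, matching the binary SST-2 labels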
def tokenization(batched_text):
    # padding=True pads to the longest sequence in the batch; since map is called with
    # batch_size equal to the whole split, every example in a split ends up the same length
    return tokenizer(batched_text['sentence'], padding=True, truncation=True)


train_data = train.map(tokenization, batched=True, batch_size=len(train))
val_data = validation.map(tokenization, batched=True, batch_size=len(validation))
test_data = test.map(tokenization, batched=True, batch_size=len(test))
Map:   0%|          | 0/65749 [00:00<?, ? examples/s]
Map:   0%|          | 0/1600 [00:00<?, ? examples/s]
# include attention_mask so the padded positions are masked out; the raw 'sentence' text
# is not needed by the model and is dropped by the Trainer anyway
train_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
val_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
test_data.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
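Because each split was tokenized as one big batch, all of its examples share the same padded length; an optional shape check:

# both tensors have the length of the longest sentence in the train split
print(train_data[0]['input_ids'].shape)
print(train_data[0]['attention_mask'].shape)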
!pip install 'transformers[torch]>=4.34,<4.35'
Collecting transformers[torch]<4.35,>=4.34
  Using cached transformers-4.34.1-py3-none-any.whl (7.7 MB)
Requirement already satisfied: filelock in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (3.13.1)
Requirement already satisfied: huggingface-hub<1.0,>=0.16.4 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (0.20.2)
Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (1.23.5)
Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (23.2)
Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (6.0.1)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (2023.6.3)
Requirement already satisfied: requests in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (2.31.0)
Collecting tokenizers<0.15,>=0.14 (from transformers[torch]<4.35,>=4.34)
  Using cached tokenizers-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.8 MB)
Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (0.4.1)
Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (4.66.1)
Requirement already satisfied: torch!=1.12.0,>=1.10 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (2.1.0+cu121)
Requirement already satisfied: accelerate>=0.20.3 in /usr/local/lib/python3.10/dist-packages (from transformers[torch]<4.35,>=4.34) (0.26.0)
Requirement already satisfied: psutil in /usr/local/lib/python3.10/dist-packages (from accelerate>=0.20.3->transformers[torch]<4.35,>=4.34) (5.9.5)
Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers[torch]<4.35,>=4.34) (2023.6.0)
Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.10/dist-packages (from huggingface-hub<1.0,>=0.16.4->transformers[torch]<4.35,>=4.34) (4.5.0)
Collecting huggingface-hub<1.0,>=0.16.4 (from transformers[torch]<4.35,>=4.34)
  Using cached huggingface_hub-0.17.3-py3-none-any.whl (295 kB)
Requirement already satisfied: sympy in /usr/local/lib/python3.10/dist-packages (from torch!=1.12.0,>=1.10->transformers[torch]<4.35,>=4.34) (1.12)
Requirement already satisfied: networkx in /usr/local/lib/python3.10/dist-packages (from torch!=1.12.0,>=1.10->transformers[torch]<4.35,>=4.34) (3.2.1)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.10/dist-packages (from torch!=1.12.0,>=1.10->transformers[torch]<4.35,>=4.34) (3.1.2)
Requirement already satisfied: triton==2.1.0 in /usr/local/lib/python3.10/dist-packages (from torch!=1.12.0,>=1.10->transformers[torch]<4.35,>=4.34) (2.1.0)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[torch]<4.35,>=4.34) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[torch]<4.35,>=4.34) (3.6)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[torch]<4.35,>=4.34) (2.0.7)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests->transformers[torch]<4.35,>=4.34) (2023.11.17)
Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.10/dist-packages (from jinja2->torch!=1.12.0,>=1.10->transformers[torch]<4.35,>=4.34) (2.1.3)
Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.10/dist-packages (from sympy->torch!=1.12.0,>=1.10->transformers[torch]<4.35,>=4.34) (1.3.0)
Installing collected packages: huggingface-hub, tokenizers, transformers
  Attempting uninstall: huggingface-hub
    Found existing installation: huggingface-hub 0.20.2
    Uninstalling huggingface-hub-0.20.2:
      Successfully uninstalled huggingface-hub-0.20.2
  Attempting uninstall: tokenizers
    Found existing installation: tokenizers 0.15.0
    Uninstalling tokenizers-0.15.0:
      Successfully uninstalled tokenizers-0.15.0
  Attempting uninstall: transformers
    Found existing installation: transformers 4.36.2
    Uninstalling transformers-4.36.2:
      Successfully uninstalled transformers-4.36.2
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
datasets 2.16.1 requires huggingface-hub>=0.19.4, but you have huggingface-hub 0.17.3 which is incompatible.
Successfully installed huggingface-hub-0.17.3 tokenizers-0.14.1 transformers-4.34.1
training_args = TrainingArguments(
    output_dir = './results',
    num_train_epochs=4,
    per_device_train_batch_size = 4,
    gradient_accumulation_steps = 16,
    per_device_eval_batch_size= 8,
    evaluation_strategy = "epoch",
    disable_tqdm = False,
    load_best_model_at_end=False,
    warmup_steps=500,
    weight_decay=0.01,
    logging_steps = 8,
    fp16 = True,
    logging_dir='./logs',
    dataloader_num_workers = 2,
    run_name = 'gpt2-classification',
    optim="adamw_torch"
)
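With per_device_train_batch_size=4 and gradient_accumulation_steps=16, the effective batch size is 64. That is also where the 4108 optimizer steps in the training log further down come from; a rough back-of-the-envelope check, assuming a single GPU and the Trainer's usual step counting:

import math

effective_batch = 4 * 16                              # 64 examples per optimizer step
batches_per_epoch = math.ceil(len(train_data) / 4)    # 16438 dataloader batches per epoch
update_steps_per_epoch = batches_per_epoch // 16      # 1027 optimizer steps per epoch
print(update_steps_per_epoch * 4)                     # 4108 steps over 4 epochs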
trainer = Trainer(
    model=model,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=train_data,
    eval_dataset=val_data,
)
trainer.train()
We strongly recommend passing in an `attention_mask` since your input_ids may be padded. See https://huggingface.co/docs/transformers/troubleshooting#incorrect-output-when-padding-tokens-arent-masked.
You may ignore this warning if your `pad_token_id` (50256) is identical to the `bos_token_id` (50256), `eos_token_id` (50256), or the `sep_token_id` (None), and your input is not padded.
[4108/4108 57:21, Epoch 3/4]
Epoch Training Loss Validation Loss Accuracy F1 Precision Recall
0 0.253700 0.217945 0.908750 0.915995 0.924506 0.907640
1 0.161900 0.177921 0.939375 0.944476 0.948276 0.940707
2 0.105800 0.186941 0.942500 0.946943 0.957993 0.936146
3 0.086800 0.199287 0.941250 0.946712 0.941375 0.952109

TrainOutput(global_step=4108, training_loss=0.19071476609095162, metrics={'train_runtime': 3447.6287, 'train_samples_per_second': 76.283, 'train_steps_per_second': 1.192, 'total_flos': 8721133740933120.0, 'train_loss': 0.19071476609095162, 'epoch': 4.0})
trainer.evaluate()
[200/200 00:04]
{'eval_loss': 0.1992868036031723,
 'eval_accuracy': 0.94125,
 'eval_f1': 0.9467120181405895,
 'eval_precision': 0.9413754227733935,
 'eval_recall': 0.9521094640820981,
 'eval_runtime': 4.8171,
 'eval_samples_per_second': 332.147,
 'eval_steps_per_second': 41.518,
 'epoch': 4.0}
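trainer.evaluate() above only scores the 1600-example validation split carved out of the training data; the original SST-2 validation set (stored under the 'test' key here) has not been scored yet. A minimal sketch for evaluating it with the same metrics:

# run the fine-tuned model over the held-out split; compute_metrics is applied automatically,
# with the resulting keys prefixed "test_" instead of "eval_"
test_metrics = trainer.predict(test_data).metrics
print(test_metrics)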