Initial commit
commit e1b933bf4d
5  .gitignore  vendored  Normal file
@@ -0,0 +1,5 @@
data
out/t5
out/gpt2
out/roberta
.cache_training
BIN  __pycache__/gpt2.cpython-310.pyc  Normal file
Binary file not shown.
BIN  __pycache__/gpt2.cpython-39.pyc  Normal file
Binary file not shown.
BIN  __pycache__/roberta.cpython-310.pyc  Normal file
Binary file not shown.
BIN  __pycache__/roberta.cpython-39.pyc  Normal file
Binary file not shown.
BIN  __pycache__/t5.cpython-310.pyc  Normal file
Binary file not shown.
BIN  __pycache__/t5.cpython-39.pyc  Normal file
Binary file not shown.
10  bart.py  Normal file
@@ -0,0 +1,10 @@
from transformers import BartConfig, BartForSequenceClassification, BartModel
from torch import nn


class BartForClassification(BartForSequenceClassification):
    def __init__(self, config: BartConfig):
        super().__init__(config)
        self.config = config
        self.bart = BartForSequenceClassification(config)
        # Replace the output projection of the classification head with a 4-class layer
        self.bart.classification_head.out_proj = nn.Linear(768, 4)
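A minimal usage sketch for the class above (illustrative only; the facebook/bart-base checkpoint and the 4-label setup are assumptions, not part of this commit):

# Illustrative sketch, not part of the commit.
from transformers import BartConfig
from bart import BartForClassification

config = BartConfig.from_pretrained("facebook/bart-base", num_labels=4)  # assumed checkpoint
model = BartForClassification(config)
print(model.bart.classification_head.out_proj)  # Linear(in_features=768, out_features=4, bias=True)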
154  gpt2.py  Normal file
@@ -0,0 +1,154 @@
import torch
from torch import nn
from transformers import GPT2PreTrainedModel, GPT2Model
from transformers.modeling_outputs import SequenceClassifierOutputWithPast


class GPT2ForSequenceClassification(GPT2PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Model parallel
        self.model_parallel = False
        self.device_map = None

        # Initialize weights and apply final processing
        self.post_init()


class GPT2ClassificationHeadCustom(nn.Module):
    def __init__(self, config):
        super().__init__()
        hidden_size = config.n_embd
        self.dense_1_input = nn.Linear(hidden_size, 2 * hidden_size)
        self.dense_1_hidden = nn.Linear(hidden_size, 2 * hidden_size)
        self.dense_2 = nn.Linear(4 * hidden_size, 4 * hidden_size)
        self.dense_3 = nn.Linear(4 * hidden_size, 4 * hidden_size)
        self.dense_4 = nn.Linear(4 * hidden_size, hidden_size)
        self.dropout = nn.Dropout(config.resid_pdrop)
        self.out_proj = nn.Linear(hidden_size, config.num_labels, bias=False)

    def forward(self, x, **kwargs):
        if 'hidden_states' in kwargs and kwargs['hidden_states'] is not None:
            # Get hidden states from last layer
            hidden = kwargs['hidden_states'][-1]
        else:
            hidden = torch.zeros(x.size(), dtype=x.dtype, device=x.device)

        x = self.dense_1_input(x)
        x = torch.relu(x)
        x = self.dropout(x)

        hidden = self.dense_1_hidden(hidden)
        hidden = torch.relu(hidden)
        hidden = self.dropout(hidden)

        x = torch.cat((x, hidden), dim=2)
        x = self.dense_2(x)
        x = torch.relu(x)
        x = self.dense_3(x)
        x = torch.relu(x)
        x = self.dense_4(x)
        x = torch.relu(x)
        x = self.dropout(x)

        x = self.out_proj(x)
        return x


class GPT2ForSequenceClassificationCustom(GPT2ForSequenceClassification):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = GPT2Model(config)

        self.score = GPT2ClassificationHeadCustom(config)

        self.init_weights()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    def forward(
        self,
        input_ids=None,
        past_key_values=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        if return_dict:
            logits = self.score(hidden_states, hidden_states=transformer_outputs.hidden_states)
        else:
            raise NotImplementedError('Not implemented for using non-dictionary object')

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        assert (
            self.config.pad_token_id is not None or batch_size == 1
        ), "Cannot handle batch sizes > 1 if no padding token is defined."
        if self.config.pad_token_id is None:
            sequence_lengths = -1
        else:
            if input_ids is not None:
                sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1
            else:
                sequence_lengths = -1

        pooled_logits = logits[range(batch_size), sequence_lengths]

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = nn.MSELoss()
                loss = loss_fct(pooled_logits.view(-1), labels.to(self.dtype).view(-1))
            else:
                loss_fct = nn.CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
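A rough usage sketch for the custom GPT-2 classifier above (illustrative only; the gpt2 checkpoint, the 4-label setup, and the example token ids are assumptions, not part of this commit):

# Illustrative sketch, not part of the commit.
import torch
from transformers import GPT2Config
from gpt2 import GPT2ForSequenceClassificationCustom

config = GPT2Config.from_pretrained("gpt2", num_labels=4)
config.pad_token_id = config.eos_token_id  # GPT-2 has no pad token by default
model = GPT2ForSequenceClassificationCustom(config)

input_ids = torch.tensor([[464, 2159, 318, 1588]])  # arbitrary token ids
# output_hidden_states=True lets the custom head combine the last hidden layer with its input
outputs = model(input_ids=input_ids, output_hidden_states=True, return_dict=True)
print(outputs.logits.shape)  # torch.Size([1, 4])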
53  out/gpt2_results/README.md  Normal file
@@ -0,0 +1,53 @@
---
tags:
- generated_from_trainer
model-index:
- name: gpt2_results
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# gpt2_results

This model is a fine-tuned version of [out/gpt2](https://huggingface.co/out/gpt2) on an unknown dataset.
It achieves the following results on the evaluation set:
- eval_loss: 0.3020
- eval_accuracy: 0.9195
- eval_runtime: 24.1139
- eval_samples_per_second: 82.94
- eval_steps_per_second: 10.367
- step: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 2500

### Framework versions

- Transformers 4.26.1
- Pytorch 1.13.1+cu117
- Datasets 2.9.0
- Tokenizers 0.13.2
8  out/gpt2_results/all_results.json  Normal file
@@ -0,0 +1,8 @@
{
    "eval_accuracy": 0.9194999933242798,
    "eval_loss": 0.3020096719264984,
    "eval_runtime": 24.1139,
    "eval_samples": 2000,
    "eval_samples_per_second": 82.94,
    "eval_steps_per_second": 10.367
}
8  out/gpt2_results/eval_results.json  Normal file
@@ -0,0 +1,8 @@
{
    "eval_accuracy": 0.9194999933242798,
    "eval_loss": 0.3020096719264984,
    "eval_runtime": 24.1139,
    "eval_samples": 2000,
    "eval_samples_per_second": 82.94,
    "eval_steps_per_second": 10.367
}
3801  out/gpt2_results/predict_results_None.txt  Normal file
File diff suppressed because it is too large
53  out/roberta_results/README.md  Normal file
@@ -0,0 +1,53 @@
---
tags:
- generated_from_trainer
model-index:
- name: roberta_results
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# roberta_results

This model is a fine-tuned version of [out/roberta](https://huggingface.co/out/roberta) on an unknown dataset.
It achieves the following results on the evaluation set:
- eval_loss: 0.2960
- eval_accuracy: 0.9230
- eval_runtime: 17.8166
- eval_samples_per_second: 112.255
- eval_steps_per_second: 14.032
- step: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 2500

### Framework versions

- Transformers 4.26.1
- Pytorch 1.13.1+cu117
- Datasets 2.9.0
- Tokenizers 0.13.2
8  out/roberta_results/all_results.json  Normal file
@@ -0,0 +1,8 @@
{
    "eval_accuracy": 0.9229999780654907,
    "eval_loss": 0.29598742723464966,
    "eval_runtime": 17.8166,
    "eval_samples": 2000,
    "eval_samples_per_second": 112.255,
    "eval_steps_per_second": 14.032
}
8  out/roberta_results/eval_results.json  Normal file
@@ -0,0 +1,8 @@
{
    "eval_accuracy": 0.9229999780654907,
    "eval_loss": 0.29598742723464966,
    "eval_runtime": 17.8166,
    "eval_samples": 2000,
    "eval_samples_per_second": 112.255,
    "eval_steps_per_second": 14.032
}
3801  out/roberta_results/predict_results_None.txt  Normal file
File diff suppressed because it is too large
53  out/t5_results/README.md  Normal file
@@ -0,0 +1,53 @@
---
tags:
- generated_from_trainer
model-index:
- name: t5_results
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# t5_results

This model is a fine-tuned version of [out/t5](https://huggingface.co/out/t5) on an unknown dataset.
It achieves the following results on the evaluation set:
- eval_loss: 1.2139
- eval_accuracy: 0.4675
- eval_runtime: 40.5651
- eval_samples_per_second: 49.303
- eval_steps_per_second: 6.163
- step: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- training_steps: 2500

### Framework versions

- Transformers 4.26.1
- Pytorch 1.13.1+cu117
- Datasets 2.9.0
- Tokenizers 0.13.2
8  out/t5_results/all_results.json  Normal file
@@ -0,0 +1,8 @@
{
    "eval_accuracy": 0.4675000011920929,
    "eval_loss": 1.213880181312561,
    "eval_runtime": 40.5651,
    "eval_samples": 2000,
    "eval_samples_per_second": 49.303,
    "eval_steps_per_second": 6.163
}
8  out/t5_results/eval_results.json  Normal file
@@ -0,0 +1,8 @@
{
    "eval_accuracy": 0.4675000011920929,
    "eval_loss": 1.213880181312561,
    "eval_runtime": 40.5651,
    "eval_samples": 2000,
    "eval_samples_per_second": 49.303,
    "eval_steps_per_second": 6.163
}
3801  out/t5_results/predict_results_None.txt  Normal file
File diff suppressed because it is too large
114  preparer_ag_nenws.py  Normal file
@@ -0,0 +1,114 @@
#!/usr/bin/env python3

import json
import logging
from pathlib import Path
from typing import List, Dict

from datasets import load_dataset

logger = logging.getLogger(__name__)

MAP_LABEL_TRANSLATION = {
    0: 'world',
    1: 'sport',
    2: 'business',
    3: 'scitech'
}


def save_as_translations(original_save_path: Path, data_to_save: List[Dict]) -> None:
    file_name = 's2s-' + original_save_path.name
    file_path = original_save_path.parent / file_name

    print(f'Saving into: {file_path}')
    with open(file_path, 'wt') as f_write:
        for data_line in data_to_save:
            label = data_line['label']
            new_label = MAP_LABEL_TRANSLATION[label]
            data_line['label'] = new_label
            data_line_str = json.dumps(data_line)
            f_write.write(f'{data_line_str}\n')


def main() -> None:
    loaded_data = load_dataset('ag_news')
    logger.info(f'Loaded dataset ag_news: {loaded_data}')

    save_path = Path('data/')
    save_train_path = save_path / 'train.json'
    save_valid_path = save_path / 'valid.json'
    save_test_path = save_path / 'test.json'
    if not save_path.exists():
        save_path.mkdir()

    # Read train and validation data
    data_train, data_valid, data_test = [], [], []
    for source_data, dataset, max_size in [
        (loaded_data['train'], data_train, None),
        (loaded_data['test'], data_valid, None)
    ]:
        for i, data in enumerate(source_data):
            if max_size is not None and i >= max_size:
                break
            data_line = {
                'label': int(data['label']),
                'text': data['text'],
            }
            dataset.append(data_line)
    logger.info(f'Train: {len(data_train):6d}')

    # Split validation set by class for validation and test splitting
    world, sport, business, scitech = [], [], [], []

    for data in data_valid:
        label = data['label']
        if label == 0:
            world.append(data)
        elif label == 1:
            sport.append(data)
        elif label == 2:
            business.append(data)
        elif label == 3:
            scitech.append(data)

    logger.info(f'World:    {len(world):6d}')
    logger.info(f'Sport:    {len(sport):6d}')
    logger.info(f'Business: {len(business):6d}')
    logger.info(f'Scitech:  {len(scitech):6d}')

    print(f'World:    {len(world):6d}')
    print(f'Sport:    {len(sport):6d}')
    print(f'Business: {len(business):6d}')
    print(f'Scitech:  {len(scitech):6d}')

    # Split each class in half for validation and test
    size_half_world = int(len(world) / 2)
    size_half_sport = int(len(sport) / 2)
    size_half_business = int(len(business) / 2)
    size_half_scitech = int(len(scitech) / 2)

    data_valid = world[:size_half_world] + sport[:size_half_sport] + business[:size_half_business] + scitech[:size_half_scitech]
    data_test = world[size_half_world:] + sport[size_half_sport:] + business[size_half_business:] + scitech[size_half_scitech:]

    logger.info(f'Valid: {len(data_valid):6d}')
    logger.info(f'Test : {len(data_test):6d}')

    # Save files
    for file_path, data_to_save in [
        (save_train_path, data_train),
        (save_valid_path, data_valid),
        (save_test_path, data_test)
    ]:
        print(f'Saving into: {file_path}')
        with open(file_path, 'wt') as f_write:
            for data_line in data_to_save:
                data_line_str = json.dumps(data_line)
                f_write.write(f'{data_line_str}\n')

        save_as_translations(file_path, data_to_save)


if __name__ == '__main__':
    main()
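For reference, a sketch of the two line formats this script writes (the example text is made up; real lines come from ag_news):

# Illustrative sketch, not part of the commit.
import json

# train.json / valid.json / test.json keep integer labels:
print(json.dumps({'label': 2, 'text': 'Example business headline ...'}))
# the 's2s-' copies map labels through MAP_LABEL_TRANSLATION:
print(json.dumps({'label': 'business', 'text': 'Example business headline ...'}))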
7335  projektV2.ipynb  Normal file
File diff suppressed because it is too large
54  roberta.py  Normal file
@@ -0,0 +1,54 @@
from typing import Optional, Union, Tuple

import torch
from torch import nn
from torch.nn import MSELoss, CrossEntropyLoss, BCEWithLogitsLoss
from transformers import RobertaForSequenceClassification, RobertaModel
from transformers.modeling_outputs import SequenceClassifierOutput


# Simple version #

class RobertaClassificationHeadCustomSimple(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        self.dense_1 = nn.Linear(hidden_size, 4 * hidden_size)
        self.dense_2 = nn.Linear(4 * hidden_size, hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(hidden_size, config.num_labels)
        self.activation = nn.GELU()

    def forward(self, features, **kwargs):
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])

        x = self.dense_1(x)
        x = self.activation(x)
        x = self.dropout(x)

        x = self.dense_2(x)
        x = self.activation(x)
        x = self.dropout(x)

        x = self.out_proj(x)
        return x


class RobertaForSequenceClassificationCustomSimple(RobertaForSequenceClassification):
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.roberta = RobertaModel(config, add_pooling_layer=False)
        self.classifier = RobertaClassificationHeadCustomSimple(config)

        # Initialize weights and apply final processing
        self.post_init()
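A usage sketch for the simple custom head above (illustrative only; roberta-base, the 4-label setup, and the sample sentence are assumptions, not part of this commit):

# Illustrative sketch, not part of the commit.
from transformers import AutoConfig, AutoTokenizer
from roberta import RobertaForSequenceClassificationCustomSimple

config = AutoConfig.from_pretrained("roberta-base", num_labels=4)
model = RobertaForSequenceClassificationCustomSimple.from_pretrained("roberta-base", config=config)
tokenizer = AutoTokenizer.from_pretrained("roberta-base")

batch = tokenizer(["Stocks rallied after the quarterly report."], return_tensors="pt")
print(model(**batch).logits.shape)  # torch.Size([1, 4]); the head itself is randomly initialized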
685  run_glue.py  Normal file
@@ -0,0 +1,685 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.

import logging
import os
import random
import sys
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
from datasets import load_dataset

import evaluate
import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    PretrainedConfig,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

from roberta import RobertaForSequenceClassificationCustomSimple
from gpt2 import GPT2ForSequenceClassificationCustom
from t5 import T5ForClassification
from transformers import BartForSequenceClassification

MODEL_NAME_TO_CLASS = {
    'roberta_simple': RobertaForSequenceClassificationCustomSimple,
    'gpt2_hidden': GPT2ForSequenceClassificationCustom,
    't5_custom': T5ForClassification,
    'bart_base': BartForSequenceClassification,
}

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.23.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

task_to_keys = {
    "cola": ("sentence", None),
    "mnli": ("premise", "hypothesis"),
    "mrpc": ("sentence1", "sentence2"),
    "qnli": ("question", "sentence"),
    "qqp": ("question1", "question2"),
    "rte": ("sentence1", "sentence2"),
    "sst2": ("sentence", None),
    "stsb": ("sentence1", "sentence2"),
    "wnli": ("sentence1", "sentence2"),
}

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class
    into argparse arguments to be able to specify them on
    the command line.
    """

    task_name: Optional[str] = field(
        default=None,
        metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
    )
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.task_name is not None:
            self.task_name = self.task_name.lower()
            if self.task_name not in task_to_keys.keys():
                raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
        elif self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
    custom_model: str = field(
        default=None,
        metadata={
            "help": "Use custom implementation from available list",
            "choices": list(MODEL_NAME_TO_CLASS.keys()),
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_glue", model_args, data_args)

    if 'bart' in model_args.model_name_or_path:
        model_args.ignore_mismatched_sizes = True

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
    # label if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.task_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            "glue",
            data_args.task_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    elif data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset(
                "csv",
                data_files=data_files,
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset(
                "json",
                data_files=data_files,
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    if data_args.task_name is not None:
        is_regression = data_args.task_name == "stsb"
        if not is_regression:
            label_list = raw_datasets["train"].features["label"].names
            num_labels = len(label_list)
        else:
            num_labels = 1
    else:
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
        is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            # A useful fast method:
            # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
            label_list = raw_datasets["train"].unique("label")
            label_list.sort()  # Let's sort it for determinism
            num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    custom_model = model_args.custom_model

    if custom_model is not None:
        # Check model and implementation is the same
        if 'roberta' in custom_model and 'roberta' not in model_args.model_name_or_path:
            raise RuntimeError('Model and custom implementation should be the same type: RoBERTa')
        elif 'gpt2' in custom_model and 'gpt2' not in model_args.model_name_or_path:
            raise RuntimeError('Model and custom implementation should be the same type: GPT-2')

        # Set custom configuration in model configuration
        config.use_hidden_states = 'hidden' in custom_model
        logger.info(f'Using hidden states in model: {config.use_hidden_states}')

        print(f'-------------------------------------------------------- Using hidden: {config.use_hidden_states}')

        # Get class to initialize model
        model_cls = MODEL_NAME_TO_CLASS[custom_model]
    else:
        model_cls = AutoModelForSequenceClassification
    logger.info(f'Using implementation from class: {model_cls.__name__}')
    model = model_cls.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    print(model)

    if 'gpt2' in tokenizer.name_or_path and tokenizer.pad_token is None:
        logger.info(f'Set PAD token to EOS: {tokenizer.eos_token}')
        tokenizer._pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

    # Preprocessing the raw_datasets
    if data_args.task_name is not None:
        sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
    else:
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
            sentence1_key, sentence2_key = "sentence1", "sentence2"
        else:
            if len(non_label_column_names) >= 2:
                sentence1_key, sentence2_key = non_label_column_names[:2]
            else:
                sentence1_key, sentence2_key = non_label_column_names[0], None

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = None
    if (
        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
        and data_args.task_name is not None
        and not is_regression
    ):
        # Some have all caps in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
                "\nIgnoring the model labels as a result."
            )
    elif data_args.task_name is None and not is_regression:
        label_to_id = {v: i for i, v in enumerate(label_list)}

    if label_to_id is not None:
        model.config.label2id = label_to_id
        model.config.id2label = {id: label for label, id in config.label2id.items()}
    elif data_args.task_name is not None and not is_regression:
        model.config.label2id = {l: i for i, l in enumerate(label_list)}
        model.config.id2label = {id: label for label, id in config.label2id.items()}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize the texts
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)

        # Map labels to IDs (not necessary for GLUE tasks)
        if label_to_id is not None and "label" in examples:
            result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            label_to_indexes = defaultdict(list)
            for index, eval_sample in enumerate(eval_dataset):
                label_to_indexes[eval_sample['label']].append(index)
            max_samples_per_label = int(max_eval_samples / len(label_to_indexes))
            eval_sample_indexes = []
            for label, indexes in label_to_indexes.items():
                eval_sample_indexes.extend(indexes[:max_samples_per_label])
                logger.info(f"Set {max_samples_per_label} samples for {label}-class")
            eval_sample_indexes.sort()
            eval_dataset = eval_dataset.select(eval_sample_indexes)

    if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"]
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # Get the metric function
    if data_args.task_name is not None:
        metric = evaluate.load("glue", data_args.task_name)
    else:
        metric = evaluate.load("accuracy")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
        if data_args.task_name is not None:
            result = metric.compute(predictions=preds, references=p.label_ids)
            if len(result) > 1:
                result["combined_score"] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if
    # we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    ignore_keys_for_eval = ['hidden_states', 'attentions', 'past_key_values']
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint, ignore_keys_for_eval=ignore_keys_for_eval)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        eval_datasets = [eval_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            valid_mm_dataset = raw_datasets["validation_mismatched"]
            if data_args.max_eval_samples is not None:
                max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples)
                valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples))
            eval_datasets.append(valid_mm_dataset)
            combined = {}

        for eval_dataset, task in zip(eval_datasets, tasks):
            metrics = trainer.evaluate(eval_dataset=eval_dataset, ignore_keys=ignore_keys_for_eval)

            max_eval_samples = (
                data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
            )
            metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

            if task == "mnli-mm":
                metrics = {k + "_mm": v for k, v in metrics.items()}
            if task is not None and "mnli" in task:
                combined.update(metrics)

            trainer.log_metrics("eval", metrics)
            trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Loop to handle MNLI double evaluation (matched, mis-matched)
        tasks = [data_args.task_name]
        predict_datasets = [predict_dataset]
        if data_args.task_name == "mnli":
            tasks.append("mnli-mm")
            predict_datasets.append(raw_datasets["test_mismatched"])

        for predict_dataset, task in zip(predict_datasets, tasks):
            # Removing the `label` columns because it contains -1 and Trainer won't like that.
            predict_dataset = predict_dataset.remove_columns("label")
            predictions = trainer.predict(predict_dataset, metric_key_prefix="predict", ignore_keys=ignore_keys_for_eval).predictions
            predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)

            output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt")
            if trainer.is_world_process_zero():
                with open(output_predict_file, "w") as writer:
                    logger.info(f"***** Predict results {task} *****")
                    writer.write("index\tprediction\n")
                    for index, item in enumerate(predictions):
                        if is_regression:
                            writer.write(f"{index}\t{item:3.3f}\n")
                        else:
                            item = label_list[item]
                            writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if data_args.task_name is not None:
        kwargs["language"] = "en"
        kwargs["dataset_tags"] = "glue"
        kwargs["dataset_args"] = data_args.task_name
        kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
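One way this script might be launched for the custom GPT-2 variant, sketched in Python; the paths, checkpoint, and hyperparameters are assumptions pieced together from the rest of this commit (data/*.json from preparer_ag_nenws.py, the hyperparameters from the generated model cards):

# Illustrative sketch, not part of the commit.
import subprocess

subprocess.run([
    "python", "run_glue.py",
    "--model_name_or_path", "gpt2",
    "--custom_model", "gpt2_hidden",
    "--train_file", "data/train.json",
    "--validation_file", "data/valid.json",
    "--test_file", "data/test.json",
    "--do_train", "--do_eval", "--do_predict",
    "--max_seq_length", "128",
    "--per_device_train_batch_size", "8",
    "--learning_rate", "2e-5",
    "--max_steps", "2500",
    "--output_dir", "out/gpt2",
], check=True)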
125  t5.py  Normal file
@@ -0,0 +1,125 @@
import torch
import copy
from torch import nn
from transformers import T5PreTrainedModel, T5Config
from transformers.models.t5.modeling_t5 import T5Stack
from transformers.modeling_outputs import SequenceClassifierOutput


class T5ClassificationHead(nn.Module):
    def __init__(self, config: T5Config):
        super().__init__()

        self.dense_in = nn.Linear(config.d_model, 768)
        self.dense = nn.Linear(768, 768)
        self.dense_out = nn.Linear(768, config.num_labels)
        self.dropout = nn.Dropout(0.1)

    def forward(self, features, **kwargs):
        x = features[:, 0, :]
        x = self.dropout(x)
        x = self.dense_in(x)
        x = torch.relu(x)
        x = self.dropout(x)
        x = self.dense(x)
        x = torch.relu(x)
        x = self.dropout(x)
        x = self.dense_out(x)

        return x


class T5ForClassification(T5PreTrainedModel):
    def __init__(self, config: T5Config):
        super().__init__(config)
        self.model_dim = config.d_model

        self.shared = nn.Embedding(config.vocab_size, config.d_model)

        encoder_config = copy.deepcopy(config)
        encoder_config.is_decoder = False
        encoder_config.use_cache = False
        encoder_config.is_encoder_decoder = False
        self.encoder = T5Stack(encoder_config, self.shared)

        decoder_config = copy.deepcopy(config)
        decoder_config.is_decoder = True
        decoder_config.is_encoder_decoder = False
        decoder_config.num_layers = config.num_decoder_layers
        self.decoder = T5Stack(decoder_config, self.shared)

        # Freeze the attention sub-layers of the encoder and decoder blocks
        modules_to_freeze = [self.encoder.block[i].layer[0] for i in range(len(self.encoder.block))]
        modules_to_freeze.extend([self.decoder.block[i].layer[0] for i in range(len(self.decoder.block))])
        modules_to_freeze.extend([self.decoder.block[i].layer[1] for i in range(len(self.decoder.block))])

        for module in modules_to_freeze:
            for param in module.parameters():
                param.requires_grad = False

        self.lm_head = T5ClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

        # Model parallel
        self.model_parallel = False
        self.device_map = None

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        head_mask=None,
        cross_attn_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.encoder(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        outputs = self.decoder(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            cross_attn_head_mask=cross_attn_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.lm_head(outputs[0])

        loss = None
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
        )
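A rough instantiation sketch for the classifier above (illustrative only; t5-small, the 4-label setup, and the token ids are assumptions, and the weights here are randomly initialized rather than loaded from a checkpoint):

# Illustrative sketch, not part of the commit.
import torch
from transformers import T5Config
from t5 import T5ForClassification

config = T5Config.from_pretrained("t5-small", num_labels=4)
model = T5ForClassification(config)

input_ids = torch.tensor([[71, 23, 5, 1]])  # arbitrary token ids
out = model(input_ids=input_ids, attention_mask=torch.ones_like(input_ids))
print(out.logits.shape)  # torch.Size([1, 4])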