From fac97a487d06d9a7959f2532eef513a60de77f13 Mon Sep 17 00:00:00 2001
From: kubapok
Date: Fri, 22 Oct 2021 10:35:16 +0200
Subject: [PATCH] roberta_year_as_text_better_finetunning_only_weekday

---
 .../01_create_datasets.py |  34 ++
 .../02_load_dataset.py    |  47 ++
 .../04_predict.py         |  85 +++
 .../config.py             |  18 +
 .../run.sh                |   1 +
 .../run_glue.py           | 578 ++++++++++++++++++
 6 files changed, 763 insertions(+)
 create mode 100644 roberta_year_as_text_better_finetunning_only_weekday/01_create_datasets.py
 create mode 100644 roberta_year_as_text_better_finetunning_only_weekday/02_load_dataset.py
 create mode 100644 roberta_year_as_text_better_finetunning_only_weekday/04_predict.py
 create mode 100644 roberta_year_as_text_better_finetunning_only_weekday/config.py
 create mode 100644 roberta_year_as_text_better_finetunning_only_weekday/run.sh
 create mode 100755 roberta_year_as_text_better_finetunning_only_weekday/run_glue.py

diff --git a/roberta_year_as_text_better_finetunning_only_weekday/01_create_datasets.py b/roberta_year_as_text_better_finetunning_only_weekday/01_create_datasets.py
new file mode 100644
index 0000000..a4db7ed
--- /dev/null
+++ b/roberta_year_as_text_better_finetunning_only_weekday/01_create_datasets.py
@@ -0,0 +1,34 @@
+import datetime
+from config import LABELS_DICT
+
+with open('../test-A/in.tsv', 'r') as f_in, open('./test-A_huggingface_format.csv', 'w') as f_hf:
+    f_hf.write('text\tyear_cont\tdate\tday_of_year\tday_of_month\tmonth\tyear\tweekday\tlabel\n')
+    for line_in in f_in:
+        year_cont, date, text = line_in.rstrip('\n').split('\t')
+        d = datetime.datetime.strptime(date, "%Y%m%d")
+        day_of_year = str(d.timetuple().tm_yday)
+        day_of_month = str(d.day)
+        month = str(d.month)
+        year = str(d.year)
+        weekday = str(d.weekday())
+        text = 'weekday: ' + weekday + ' ' + text
+        # The test set has no gold labels, so a dummy label '0' is written.
+        f_hf.write(text + '\t' + year_cont + '\t' + date + '\t' + day_of_year + '\t' + day_of_month + '\t' + month + '\t' + year + '\t' + weekday + '\t' + '0' + '\n')
+
+
+for dataset in 'train', 'dev-0':
+    with open(f'../{dataset}/in.tsv') as f_in, open(f'../{dataset}/expected.tsv') as f_exp, open(f'./{dataset}_huggingface_format.csv', 'w') as f_hf:
+        f_hf.write('text\tyear_cont\tdate\tday_of_year\tday_of_month\tmonth\tyear\tweekday\tlabel\n')
+        for line_in, line_exp in zip(f_in, f_exp):
+            label = str(LABELS_DICT[line_exp.rstrip('\n')])
+            year_cont, date, text = line_in.rstrip('\n').split('\t')
+            d = datetime.datetime.strptime(date, "%Y%m%d")
+            day_of_year = str(d.timetuple().tm_yday)
+            day_of_month = str(d.day)
+            month = str(d.month)
+            year = str(d.year)
+            weekday = str(d.weekday())
+            text = 'weekday: ' + weekday + ' ' + text
+            f_hf.write(text + '\t' + year_cont + '\t' + date + '\t' + day_of_year + '\t' + day_of_month + '\t' + month + '\t' + year + '\t' + weekday + '\t' + label + '\n')
+
diff --git a/roberta_year_as_text_better_finetunning_only_weekday/02_load_dataset.py b/roberta_year_as_text_better_finetunning_only_weekday/02_load_dataset.py
new file mode 100644
index 0000000..013bad4
--- /dev/null
+++ b/roberta_year_as_text_better_finetunning_only_weekday/02_load_dataset.py
@@ -0,0 +1,47 @@
+import pickle
+from datasets import load_dataset
+from transformers import AutoTokenizer
+from config import MODEL
+
+dataset = load_dataset('csv', sep='\t', data_files={'train': ['./train_huggingface_format.csv'], 'test': ['./dev-0_huggingface_format.csv']})
+test_dataset = load_dataset('csv', sep='\t', data_files='./test-A_huggingface_format.csv')
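+# For reference, every *_huggingface_format.csv row is tab-separated with the
+# columns text, year_cont, date, day_of_year, day_of_month, month, year,
+# weekday, label. A hypothetical row (values illustrative only) looks like:
+#   weekday: 1 Some headline ...\t2015.21\t20150317\t76\t17\t3\t2015\t1\t0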
+
+tokenizer = AutoTokenizer.from_pretrained(MODEL)
+
+def tokenize_function(examples):
+    return tokenizer(examples["text"], padding="max_length", truncation=True)
+
+test_tokenized_datasets = test_dataset.map(tokenize_function, batched=True)
+tokenized_datasets = dataset.map(tokenize_function, batched=True)
+
+train_dataset = tokenized_datasets["train"].shuffle(seed=42)
+eval_dataset_full = tokenized_datasets["test"]
+eval_dataset_small = tokenized_datasets["test"].select(range(2000))
+# load_dataset() with a single data_files path exposes the file as the 'train' split.
+test_dataset = test_tokenized_datasets["train"]
+
+with open('train_dataset.pickle', 'wb') as f_p:
+    pickle.dump(train_dataset, f_p)
+
+with open('eval_dataset_small.pickle', 'wb') as f_p:
+    pickle.dump(eval_dataset_small, f_p)
+
+with open('eval_dataset_full.pickle', 'wb') as f_p:
+    pickle.dump(eval_dataset_full, f_p)
+
+with open('test_dataset.pickle', 'wb') as f_p:
+    pickle.dump(test_dataset, f_p)
diff --git a/roberta_year_as_text_better_finetunning_only_weekday/04_predict.py b/roberta_year_as_text_better_finetunning_only_weekday/04_predict.py
new file mode 100644
index 0000000..788fab2
--- /dev/null
+++ b/roberta_year_as_text_better_finetunning_only_weekday/04_predict.py
@@ -0,0 +1,85 @@
+import pickle
+
+import numpy as np
+from datasets import load_metric
+from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments
+
+from config import LABELS_LIST
+
+with open('train_dataset.pickle', 'rb') as f_p:
+    train_dataset = pickle.load(f_p)
+
+with open('eval_dataset_small.pickle', 'rb') as f_p:
+    eval_dataset_small = pickle.load(f_p)
+
+with open('eval_dataset_full.pickle', 'rb') as f_p:
+    eval_dataset_full = pickle.load(f_p)
+
+with open('test_dataset.pickle', 'rb') as f_p:
+    test_dataset = pickle.load(f_p)
+
+# The model fine-tuned by run.sh / run_glue.py is loaded from the local
+# 'roberta-ireland' output directory.
+model = AutoModelForSequenceClassification.from_pretrained('roberta-ireland').cuda()
+
+training_args = TrainingArguments("roberta-ireland",
+        per_device_train_batch_size=4,
+        per_device_eval_batch_size=4,
+        evaluation_strategy='steps',
+        eval_steps=2_000,
+        save_steps=20_000,
+        num_train_epochs=1,
+        gradient_accumulation_steps=2,
+        learning_rate=1e-6,
+        warmup_steps=4,
+        load_best_model_at_end=True,
+        )
+
+metric = load_metric("accuracy")
+
+def compute_metrics(eval_pred):
+    logits, labels = eval_pred
+    predictions = np.argmax(logits, axis=-1)
+    return metric.compute(predictions=predictions, references=labels)
+
+# The Trainer is used only for batched prediction here; trainer.train() is never called.
+trainer = Trainer(
+    model=model,
+    args=training_args,
+    train_dataset=train_dataset,
+    eval_dataset=eval_dataset_small,
+    compute_metrics=compute_metrics,
+    )
+
+eval_predictions = trainer.predict(eval_dataset_full).predictions.argmax(1)
+
+with open('../dev-0/out.tsv', 'w') as f_out:
+    for pred in eval_predictions:
+        f_out.write(LABELS_LIST[pred] + '\n')
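+
+# trainer.predict(...).predictions is an (n_examples, num_labels) logit array,
+# with one column per label in config.py; argmax(1) picks the class index and
+# LABELS_LIST maps it back to a label string.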
+test_predictions = trainer.predict(test_dataset).predictions.argmax(1)
+
+with open('../test-A/out.tsv', 'w') as f_out:
+    for pred in test_predictions:
+        f_out.write(LABELS_LIST[pred] + '\n')
diff --git a/roberta_year_as_text_better_finetunning_only_weekday/config.py b/roberta_year_as_text_better_finetunning_only_weekday/config.py
new file mode 100644
index 0000000..4b62f72
--- /dev/null
+++ b/roberta_year_as_text_better_finetunning_only_weekday/config.py
@@ -0,0 +1,18 @@
+LABELS_DICT = {'news': 0,
+               'sport': 1,
+               'business': 2,
+               'opinion': 3,
+               'culture': 4,
+               'lifestyle': 5,
+               'removed': 6}
+
+
+LABELS_LIST = ['news',
+               'sport',
+               'business',
+               'opinion',
+               'culture',
+               'lifestyle',
+               'removed']
+
+MODEL = 'roberta-base'
diff --git a/roberta_year_as_text_better_finetunning_only_weekday/run.sh b/roberta_year_as_text_better_finetunning_only_weekday/run.sh
new file mode 100644
index 0000000..6e2b878
--- /dev/null
+++ b/roberta_year_as_text_better_finetunning_only_weekday/run.sh
@@ -0,0 +1 @@
+python run_glue.py --model_name_or_path roberta-base --train_file ./train_huggingface_format.csv --validation_file ./dev-0_huggingface_format.csv --do_train --do_eval --max_seq_length 64 --per_device_train_batch_size 32 --learning_rate 2e-5 --num_train_epochs 3 --save_steps=5000 --eval_steps=5000 --evaluation_strategy steps --output_dir ./roberta-ireland
diff --git a/roberta_year_as_text_better_finetunning_only_weekday/run_glue.py b/roberta_year_as_text_better_finetunning_only_weekday/run_glue.py
new file mode 100755
index 0000000..1c4deba
--- /dev/null
+++ b/roberta_year_as_text_better_finetunning_only_weekday/run_glue.py
@@ -0,0 +1,578 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Finetuning the library models for sequence classification on GLUE."""
+# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
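+# This copy is adapted for the Irish Times task: run.sh points it at
+# tab-separated CSVs, tokenization always uses the 'text' column, and
+# preprocess_function below attaches token-aligned date-feature columns.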
+ +import logging +import os +import random +import sys +from dataclasses import dataclass, field +from typing import Optional + +import datasets +import numpy as np +from datasets import load_dataset, load_metric + +import transformers +from transformers import ( + AutoConfig, + AutoModelForSequenceClassification, + AutoTokenizer, + DataCollatorWithPadding, + EvalPrediction, + HfArgumentParser, + PretrainedConfig, + Trainer, + TrainingArguments, + default_data_collator, + set_seed, +) +from transformers.trainer_utils import get_last_checkpoint +from transformers.utils import check_min_version +from transformers.utils.versions import require_version + + +# Will error if the minimal version of Transformers is not installed. Remove at your own risks. +check_min_version("4.11.0.dev0") + +require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") + +task_to_keys = { + "cola": ("sentence", None), + "mnli": ("premise", "hypothesis"), + "mrpc": ("sentence1", "sentence2"), + "qnli": ("question", "sentence"), + "qqp": ("question1", "question2"), + "rte": ("sentence1", "sentence2"), + "sst2": ("sentence", None), + "stsb": ("sentence1", "sentence2"), + "wnli": ("sentence1", "sentence2"), +} + +logger = logging.getLogger(__name__) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. + + Using `HfArgumentParser` we can turn this class + into argparse arguments to be able to specify them on + the command line. + """ + + task_name: Optional[str] = field( + default=None, + metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, + ) + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + max_seq_length: int = field( + default=128, + metadata={ + "help": "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + }, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} + ) + pad_to_max_length: bool = field( + default=True, + metadata={ + "help": "Whether to pad all samples to `max_seq_length`. " + "If False, will pad the samples dynamically when batching to the maximum length in the batch." + }, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + }, + ) + max_predict_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this " + "value if set." 
+ }, + ) + train_file: Optional[str] = field( + default=None, metadata={"help": "A csv or a json file containing the training data."} + ) + validation_file: Optional[str] = field( + default=None, metadata={"help": "A csv or a json file containing the validation data."} + ) + test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) + + def __post_init__(self): + if self.task_name is not None: + self.task_name = self.task_name.lower() + if self.task_name not in task_to_keys.keys(): + raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) + elif self.dataset_name is not None: + pass + elif self.train_file is None or self.validation_file is None: + raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") + else: + train_extension = self.train_file.split(".")[-1] + assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." + validation_extension = self.validation_file.split(".")[-1] + assert ( + validation_extension == train_extension + ), "`validation_file` should have the same extension (csv or json) as `train_file`." + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. + """ + + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + ) + use_auth_token: bool = field( + default=False, + metadata={ + "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "with private models)." + }, + ) + + +def main(): + # See all possible arguments in src/transformers/training_args.py + # or by passing the --help flag to this script. + # We now keep distinct sets of args, for a cleaner separation of concerns. + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. 
+        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+    else:
+        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+
+    # Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+
+    log_level = training_args.get_process_log_level()
+    logger.setLevel(log_level)
+    datasets.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+    )
+    logger.info(f"Training/evaluation parameters {training_args}")
+
+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
+    # Set seed before initializing model.
+    set_seed(training_args.seed)
+
+    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
+    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
+    #
+    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
+    # sentences in columns called 'sentence1' and 'sentence2' if such columns exist, or the first two columns not
+    # named label if at least two columns are provided.
+    #
+    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
+    # single column. You can easily tweak this behavior (see below).
+    #
+    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
+    # download the dataset.
+    if data_args.task_name is not None:
+        # Downloading and loading a dataset from the hub.
+        raw_datasets = load_dataset("glue", data_args.task_name, cache_dir=model_args.cache_dir)
+    elif data_args.dataset_name is not None:
+        # Downloading and loading a dataset from the hub.
+        raw_datasets = load_dataset(
+            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
+        )
+    else:
+        # Loading a dataset from your local files.
+        # CSV/JSON training and evaluation files are needed.
+        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
+        #data_files = {"train": data_args.train_file, "validation": data_args.validation_file, "test": data_args.test_file}
+
+        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
+        # when you use `do_predict` without specifying a GLUE benchmark task.
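+        # Note: run.sh does not pass --do_predict; predictions for test-A are
+        # produced separately by 04_predict.py.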
+ if training_args.do_predict: + if data_args.test_file is not None: + train_extension = data_args.train_file.split(".")[-1] + test_extension = data_args.test_file.split(".")[-1] + assert ( + test_extension == train_extension + ), "`test_file` should have the same extension (csv or json) as `train_file`." + data_files["test"] = data_args.test_file + else: + raise ValueError("Need either a GLUE task or a test file for `do_predict`.") + + for key in data_files.keys(): + logger.info(f"load a local file for {key}: {data_files[key]}") + + if data_args.train_file.endswith(".csv"): + # Loading a dataset from local csv files + raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir, sep='\t') + else: + # Loading a dataset from local json files + raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) + # See more about loading any type of standard or custom dataset at + # https://huggingface.co/docs/datasets/loading_datasets.html. + + # Labels + if data_args.task_name is not None: + is_regression = data_args.task_name == "stsb" + if not is_regression: + label_list = raw_datasets["train"].features["label"].names + num_labels = len(label_list) + else: + num_labels = 1 + else: + # Trying to have good defaults here, don't hesitate to tweak to your needs. + is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] + if is_regression: + num_labels = 1 + else: + # A useful fast method: + # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique + label_list = raw_datasets["train"].unique("label") + label_list.sort() # Let's sort it for determinism + num_labels = len(label_list) + + # Load pretrained model and tokenizer + # + # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently + # download model & vocab. + config = AutoConfig.from_pretrained( + model_args.config_name if model_args.config_name else model_args.model_name_or_path, + num_labels=num_labels, + finetuning_task=data_args.task_name, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + ) + tokenizer = AutoTokenizer.from_pretrained( + model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, + cache_dir=model_args.cache_dir, + use_fast=model_args.use_fast_tokenizer, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + ) + model = AutoModelForSequenceClassification.from_pretrained( + model_args.model_name_or_path, + from_tf=bool(".ckpt" in model_args.model_name_or_path), + config=config, + cache_dir=model_args.cache_dir, + revision=model_args.model_revision, + use_auth_token=True if model_args.use_auth_token else None, + ) + + # Preprocessing the raw_datasets + if data_args.task_name is not None: + sentence1_key, sentence2_key = task_to_keys[data_args.task_name] + else: + # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. 
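+        # With the CSVs from 01_create_datasets.py the non-label columns are text,
+        # year_cont, date, day_of_year, day_of_month, month, year and weekday, so
+        # 'text' and 'year_cont' get picked as the sentence keys here; the custom
+        # preprocess_function below tokenizes only examples['text'], so the pair
+        # selection is effectively ignored.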
+        non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"]
+        if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
+            sentence1_key, sentence2_key = "sentence1", "sentence2"
+        else:
+            if len(non_label_column_names) >= 2:
+                sentence1_key, sentence2_key = non_label_column_names[:2]
+            else:
+                sentence1_key, sentence2_key = non_label_column_names[0], None
+
+    # Padding strategy
+    if data_args.pad_to_max_length:
+        padding = "max_length"
+    else:
+        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
+        padding = False
+
+    # Some models have set the order of the labels to use, so let's make sure we do use it.
+    label_to_id = None
+    if (
+        model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
+        and data_args.task_name is not None
+        and not is_regression
+    ):
+        # Some have all caps in their config, some don't.
+        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
+        if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
+            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
+        else:
+            logger.warning(
+                "Your model seems to have been trained with labels, but they don't match the dataset: "
+                f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
+                "\nIgnoring the model labels as a result."
+            )
+    elif data_args.task_name is None and not is_regression:
+        label_to_id = {v: i for i, v in enumerate(label_list)}
+
+    if label_to_id is not None:
+        model.config.label2id = label_to_id
+        model.config.id2label = {id: label for label, id in config.label2id.items()}
+    elif data_args.task_name is not None and not is_regression:
+        model.config.label2id = {l: i for i, l in enumerate(label_list)}
+        model.config.id2label = {id: label for label, id in config.label2id.items()}
+
+    if data_args.max_seq_length > tokenizer.model_max_length:
+        logger.warning(
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
+            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
+ ) + max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) + + def preprocess_function(examples): + # Tokenize the texts + args = ( + (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) + ) + result = tokenizer(examples['text'], padding=padding, max_length=max_seq_length, truncation=True) + + # Map labels to IDs (not necessary for GLUE tasks) + if label_to_id is not None and "label" in examples: + result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] + + t = result + + for column in 'date', 'day_of_month', 'day_of_year', 'month', 'year', 'weekday', 'year_cont': + t[column] = [[0] * len(i) for i in t.input_ids] + for i in range(len(t['input_ids'])): + t['year'][i][1] = examples['year'][i] - 1995 + t['month'][i][2] = examples['month'][i] + t['day_of_month'][i][3] = examples['day_of_month'][i] + t['weekday'][i][4] = examples['weekday'][i] + + return t + + with training_args.main_process_first(desc="dataset map pre-processing"): + raw_datasets = raw_datasets.map( + preprocess_function, + batched=True, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on dataset", + ) + if training_args.do_train: + if "train" not in raw_datasets: + raise ValueError("--do_train requires a train dataset") + train_dataset = raw_datasets["train"] + if data_args.max_train_samples is not None: + train_dataset = train_dataset.select(range(data_args.max_train_samples)) + + if training_args.do_eval: + if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] + if data_args.max_eval_samples is not None: + eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) + + if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: + if "test" not in raw_datasets and "test_matched" not in raw_datasets: + raise ValueError("--do_predict requires a test dataset") + predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"] + if data_args.max_predict_samples is not None: + predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) + + # Log a few random samples from the training set: + if training_args.do_train: + for index in random.sample(range(len(train_dataset)), 3): + logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") + + # Get the metric function + if data_args.task_name is not None: + metric = load_metric("glue", data_args.task_name) + else: + metric = load_metric("accuracy") + + # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a + # predictions and label_ids field) and has to return a dictionary string to float. 
+ def compute_metrics(p: EvalPrediction): + preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions + preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) + if data_args.task_name is not None: + result = metric.compute(predictions=preds, references=p.label_ids) + if len(result) > 1: + result["combined_score"] = np.mean(list(result.values())).item() + return result + elif is_regression: + return {"mse": ((preds - p.label_ids) ** 2).mean().item()} + else: + return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} + + # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. + if data_args.pad_to_max_length: + data_collator = default_data_collator + elif training_args.fp16: + data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) + else: + data_collator = None + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + compute_metrics=compute_metrics, + tokenizer=tokenizer, + data_collator=data_collator, + ) + + # Training + if training_args.do_train: + checkpoint = None + if training_args.resume_from_checkpoint is not None: + checkpoint = training_args.resume_from_checkpoint + elif last_checkpoint is not None: + checkpoint = last_checkpoint + train_result = trainer.train(resume_from_checkpoint=checkpoint) + metrics = train_result.metrics + max_train_samples = ( + data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) + ) + metrics["train_samples"] = min(max_train_samples, len(train_dataset)) + + trainer.save_model() # Saves the tokenizer too for easy upload + + trainer.log_metrics("train", metrics) + trainer.save_metrics("train", metrics) + trainer.save_state() + + # Evaluation + if training_args.do_eval: + logger.info("*** Evaluate ***") + + # Loop to handle MNLI double evaluation (matched, mis-matched) + tasks = [data_args.task_name] + eval_datasets = [eval_dataset] + if data_args.task_name == "mnli": + tasks.append("mnli-mm") + eval_datasets.append(raw_datasets["validation_mismatched"]) + + for eval_dataset, task in zip(eval_datasets, tasks): + metrics = trainer.evaluate(eval_dataset=eval_dataset) + + max_eval_samples = ( + data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) + ) + metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) + + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + if training_args.do_predict: + logger.info("*** Predict ***") + + # Loop to handle MNLI double evaluation (matched, mis-matched) + tasks = [data_args.task_name] + predict_datasets = [predict_dataset] + if data_args.task_name == "mnli": + tasks.append("mnli-mm") + predict_datasets.append(raw_datasets["test_mismatched"]) + + for predict_dataset, task in zip(predict_datasets, tasks): + # Removing the `label` columns because it contains -1 and Trainer won't like that. 
+ predict_dataset = predict_dataset.remove_columns("label") + predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions + predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) + + output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") + if trainer.is_world_process_zero(): + with open(output_predict_file, "w") as writer: + logger.info(f"***** Predict results {task} *****") + writer.write("index\tprediction\n") + for index, item in enumerate(predictions): + if is_regression: + writer.write(f"{index}\t{item:3.3f}\n") + else: + item = label_list[item] + writer.write(f"{index}\t{item}\n") + + if training_args.push_to_hub: + kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} + if data_args.task_name is not None: + kwargs["language"] = "en" + kwargs["dataset_tags"] = "glue" + kwargs["dataset_args"] = data_args.task_name + kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" + + trainer.push_to_hub(**kwargs) + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main()
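+
+# Example invocation (see run.sh):
+#   python run_glue.py --model_name_or_path roberta-base \
+#     --train_file ./train_huggingface_format.csv \
+#     --validation_file ./dev-0_huggingface_format.csv \
+#     --do_train --do_eval --max_seq_length 64 --output_dir ./roberta-ireland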