from transformers import BertTokenizer, BertForSequenceClassification, TrainingArguments, Trainer
import random
import torch

# Load the raw challenge data: inputs for train/dev/test, labels for train only.
with open('train/in.tsv') as f:
    data_train_X = f.readlines()
with open('train/expected.tsv') as f:
    data_train_Y = f.readlines()
with open('dev-0/in.tsv') as f:
    data_dev_X = f.readlines()
with open('test-A/in.tsv') as f:
    data_test_X = f.readlines()


class CustomDataset(torch.utils.data.Dataset):
    """Wraps tokenizer encodings (and optional labels) for the Trainer API."""

    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        # Compare against None explicitly so an empty label list is not
        # silently treated the same as "no labels".
        if self.labels is not None:
            item["labels"] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.encodings["input_ids"])


# Subsample 50,000 (text, label) pairs to keep fine-tuning time manageable.
data_train = list(zip(data_train_X, data_train_Y))
data_train = random.sample(data_train, 50000)

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
train_X = tokenizer([text[0] for text in data_train], truncation=True, padding=True)
train_Y = [int(text[1]) for text in data_train]

train_dataset = CustomDataset(train_X, train_Y)

# Fine-tune bert-base-uncased for binary sequence classification with the
# default TrainingArguments, checkpointing into the "model" directory.
model = BertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
training_args = TrainingArguments("model")
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset)
trainer.train()
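
# The script loads dev-0 and test-A inputs but never uses them. Below is a
# minimal sketch (not part of the original script) of how predictions could
# be produced with the same Trainer; the out.tsv output paths follow the
# Gonito-style layout suggested by the in.tsv/expected.tsv files and are an
# assumption, as is the helper name predict_and_save.
import numpy as np


def predict_and_save(texts, out_path):
    # Tokenize the unlabeled inputs and reuse CustomDataset without labels.
    encodings = tokenizer(texts, truncation=True, padding=True)
    dataset = CustomDataset(encodings)
    # Trainer.predict returns logits; take the argmax as the predicted class.
    logits = trainer.predict(dataset).predictions
    labels = np.argmax(logits, axis=1)
    with open(out_path, 'w') as f:
        for label in labels:
            f.write(f"{label}\n")


predict_and_save(data_dev_X, 'dev-0/out.tsv')
predict_and_save(data_test_X, 'test-A/out.tsv')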