Author: wangobango
Date: 2021-06-22 17:16:57 +02:00
Commit: 962ca45b2c (parent 43dbf81d83)
4 changed files with 10487 additions and 29 deletions

dev-0/out.tsv (new file, +5272 lines; diff suppressed because it is too large)


@@ -6,11 +6,11 @@ from torch.utils.data import TensorDataset, DataLoader, RandomSampler
 import torch.nn as nn
 from sklearn.utils.class_weight import compute_class_weight
 import numpy as np
 from model import BERT_Arch
 from sklearn.metrics import classification_report
-from sklearn.metrics import accuracy_score
+from sklearn.metrics import accuracy_score, f1_score
 from transformers import BertTokenizerFast, BertForSequenceClassification
 from transformers import Trainer, TrainingArguments
 import csv
 class Dataset(torch.utils.data.Dataset):
     def __init__(self, encodings, labels):
@@ -25,34 +25,68 @@ class Dataset(torch.utils.data.Dataset):
     def __len__(self):
         return len(self.labels)
+def save_tsv_result(path, data):
+    with open(path, "w") as save:
+        writer = csv.writer(save, delimiter='\t', lineterminator='\n')
+        for value in [str(x) for x in data]:
+            writer.writerow([value])
+def predictions_for_set(inputs, masks):
+    predictions = []
+    with torch.no_grad():
+        batch_size = 60
+        for i in range(0, len(inputs), batch_size):
+            preds = model(inputs[i: i + batch_size].to(device),
+                          masks[i: i + batch_size].to(device))
+            preds = preds.logits.detach().cpu().numpy()
+            preds = np.argmax(preds, axis=1)
+            predictions += preds.tolist()
+    return predictions
 device = torch.device('cuda')
-train_texts = \
-    pd.read_csv('train/in.tsv.xz', compression='xz', sep='\t', header=None, error_bad_lines=False, quoting=3)[0].tolist()[:1000]
-train_labels = pd.read_csv('train/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()[:1000]
-dev_texts = pd.read_csv('dev-0/in.tsv.xz', compression='xz', sep='\t', header=None, quoting=3)[0].tolist()[:1000]
-dev_labels = pd.read_csv('dev-0/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()[:1000]
-model_name = "bert-base-uncased"
+# train_texts = \
+#     pd.read_csv('train/in.tsv.xz', compression='xz', sep='\t',
+#                 header=None, error_bad_lines=False, quoting=3)[0].tolist()
+# train_labels = pd.read_csv(
+#     'train/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()
+dev_texts = pd.read_csv('dev-0/in.tsv.xz', compression='xz',
+                        sep='\t', header=None, quoting=3)[0].tolist()
+dev_labels = pd.read_csv('dev-0/expected.tsv', sep='\t',
+                         header=None, quoting=3)[0].tolist()
+test_texts = pd.read_csv('test-A/in.tsv.xz', compression='xz', sep='\t',
+                         header=None, error_bad_lines=False, quoting=3)[0].tolist()
+model_name = "bert-base-uncased-pretrained"
 model = BertForSequenceClassification.from_pretrained(
-    model_name, num_labels=len(pd.unique(train_labels))).to(device)
+    model_name, num_labels=len(pd.unique(dev_labels))).to(device)
 max_length = 512
 tokenizer = BertTokenizerFast.from_pretrained(model_name, do_lower_case=True)
+# model.load_pretrained(model_path)
+# tokenizer.load_pretrained(model_path)
-train_encodings = tokenizer(train_texts, truncation=True, padding=True, max_length=max_length)
-valid_encodings = tokenizer(dev_texts, truncation=True, padding=True, max_length=max_length)
+# train_encodings = tokenizer(
+#     train_texts, truncation=True, padding=True, max_length=max_length)
+valid_encodings = tokenizer(
+    dev_texts, truncation=True, padding=True, max_length=max_length)
+test_encodings = tokenizer(
+    test_texts, truncation=True, padding=True, max_length=max_length)
-input_ids = torch.tensor(valid_encodings.data['input_ids'])[:100]
-attention_mask = torch.tensor(valid_encodings.data['attention_mask'])[:100]
-with torch.no_grad():
-    preds = model(input_ids.to(device), attention_mask.to(device))
-    preds = preds.logits.detach().cpu().numpy()
-    preds = np.argmax(preds, axis = 1)
-print(preds)
-print(classification_report(dev_labels, preds))
-print(accuracy_score(dev_labels, preds))
+input_ids_val = torch.tensor(valid_encodings.data['input_ids'])
+attention_mask_val = torch.tensor(valid_encodings.data['attention_mask'])
+input_ids_test = torch.tensor(test_encodings.data['input_ids'])
+attention_mask_test = torch.tensor(test_encodings.data['attention_mask'])
+predictions = predictions_for_set(input_ids_val, attention_mask_val)
+print("Predictions for dev set:")
+print(classification_report(dev_labels, predictions))
+print(accuracy_score(dev_labels, predictions))
+print(f1_score(dev_labels, predictions))
+save_tsv_result("dev-0/out.tsv", predictions)
+predictions = predictions_for_set(input_ids_test, attention_mask_test)
+save_tsv_result("test-A/out.tsv", predictions)

main.py (16 lines changed)

@@ -40,10 +40,10 @@ def compute_metrics(pred):
 set_seed(1)
 train_texts = \
-    pd.read_csv('train/in.tsv.xz', compression='xz', sep='\t', header=None, error_bad_lines=False, quoting=3)[0].tolist()
-train_labels = pd.read_csv('train/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()
-dev_texts = pd.read_csv('dev-0/in.tsv.xz', compression='xz', sep='\t', header=None, quoting=3)[0].tolist()
-dev_labels = pd.read_csv('dev-0/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()
+    pd.read_csv('train/in.tsv.xz', compression='xz', sep='\t', header=None, error_bad_lines=False, quoting=3)[0].tolist()[:25000]
+train_labels = pd.read_csv('train/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()[:25000]
+dev_texts = pd.read_csv('dev-0/in.tsv.xz', compression='xz', sep='\t', header=None, quoting=3)[0].tolist()[:1000]
+dev_labels = pd.read_csv('dev-0/expected.tsv', sep='\t', header=None, quoting=3)[0].tolist()[:1000]
 # test_texts = pd.read_table('test-A/in.tsv.xz', compression='xz', sep='\t', header=None, quoting=3)
 model_name = "bert-base-uncased"
@@ -61,10 +61,10 @@ model = BertForSequenceClassification.from_pretrained(
 training_args = TrainingArguments(
     output_dir='./results',          # output directory
-    num_train_epochs=3,              # total number of training epochs
-    per_device_train_batch_size=1,   # batch size per device during training
-    per_device_eval_batch_size=1,    # batch size for evaluation
-    warmup_steps=500,                # number of warmup steps for learning rate scheduler
+    num_train_epochs=1,              # total number of training epochs
+    per_device_train_batch_size=60,  # batch size per device during training
+    per_device_eval_batch_size=60,   # batch size for evaluation
+    warmup_steps=100,                # number of warmup steps for learning rate scheduler
     weight_decay=0.01,               # strength of weight decay
     logging_dir='./logs',            # directory for storing logs
     load_best_model_at_end=True,     # load the best model when finished training (default metric is loss)
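The hunk header above references compute_metrics(pred), the metrics callback handed to the transformers Trainer; its body sits outside the visible diff. A sketch of the conventional shape of such a callback, not the commit's actual code (it assumes accuracy_score and f1_score are imported from sklearn.metrics):

def compute_metrics(pred):
    # pred is a transformers EvalPrediction: raw logits plus gold label ids
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)  # logits -> predicted class ids
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1_score(labels, preds),
    }

With the new settings, 25000 training examples at a batch size of 60 give roughly 417 optimizer steps per epoch, so warmup_steps=100 covers about the first quarter of the single training epoch.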

test-A/out.tsv (new file, +5152 lines; diff suppressed because it is too large)
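Both generated files are too large to render inline, but the added-line counts (5272 for dev-0, 5152 for test-A) allow a quick sanity check of the outputs; a sketch, assuming the files are read back from the repo root:

import pandas as pd

# verify that each out.tsv has the row count reported in the commit stats
for split, n_expected in [("dev-0", 5272), ("test-A", 5152)]:
    out = pd.read_csv(f"{split}/out.tsv", sep="\t", header=None, quoting=3)
    assert len(out) == n_expected, f"{split}/out.tsv: got {len(out)} rows"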