Compare commits

..

11 Commits

Author  SHA1  Message  Date
Jakub Pokrywka  a6bb8042bc  hf roberta large early stopping  2022-07-10 22:44:18 +02:00
Jakub Pokrywka  4be9a5bda2  hf regular roberta base early stopping  2022-07-03 18:41:13 +02:00
Jakub Pokrywka  894f5c166b  hf roberta challam base early stopping  2022-07-01 11:02:23 +02:00
Jakub Pokrywka  c469463bc0  challam roberta base hf  2022-06-29 09:36:02 +02:00
Jakub Pokrywka  22521da951  challam roberta base hf  2022-06-29 09:01:14 +02:00
kubapok  987b64ae50  lstm  2021-09-16 18:38:49 +02:00
kubapok  10f548680a  roberta base with regression layer on top lr=1e-8 4 epochs  2021-09-15 10:37:26 +02:00
kubapok  8a5bc51f44  roberta base with regression layer on top lr=1e-7  2021-09-10 10:17:20 +02:00
kubapok  52225711ae  lstm one epoch  2021-08-30 14:16:16 +02:00
kubapok  7a21dce529  roberta large with regression layer on top  2021-07-10 19:50:44 +02:00
kubapok  08412c6b42  roberta base with regression layer on top  2021-07-10 19:48:49 +02:00
7 changed files with 11629 additions and 19056 deletions

File diff suppressed because it is too large

@@ -1,4 +0,0 @@
-description: tfidf with linear regression
-tags:
-- linear-regression
-- tf-idf

@@ -57,8 +57,8 @@ model.add(Bidirectional(LSTM(LSTM_SIZE, dropout = DROPOUT_LSTM)))
 model.add(Dropout(DROPOUT_REGULAR))
 model.add(Dense(1, activation='linear'))
-model_checkpoint = sorted(os.listdir(checkpoints))[-1]
-model.load_weights('checkpoints/saved-model-000006-0.02.hdf5','rb')
+#model_checkpoint = sorted(os.listdir(checkpoints))[-1]
+model.load_weights('checkpoints/saved-model-000018-0.03.hdf5','rb')
 scaler = pickle.load(open('minmaxscaler.pickle', 'rb'))
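
The new revision pins one checkpoint file and comments out the line that picked the newest one automatically. For reference, a minimal sketch (not from this repo) of keeping the automatic pick, assuming the saved-model-{epoch:06d}-{val_loss:.2f}.hdf5 naming these scripts use; note that load_weights takes no file mode, so the trailing 'rb' above is swallowed by an unrelated optional parameter:

    import os

    checkpoints = 'checkpoints'
    # Epoch numbers are zero-padded and come first in the file name,
    # so a plain lexical sort puts the newest checkpoint last.
    model_checkpoint = sorted(os.listdir(checkpoints))[-1]
    model.load_weights(os.path.join(checkpoints, model_checkpoint))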

@@ -38,22 +38,22 @@ FILE='2'
 train_text = [a.rstrip('\n') for a in open('../train/in.tsv','r')]
 train_year = [float(a.rstrip('\n')) for a in open(f'../train/expected{FILE}.tsv','r')]
-tokenizer = Tokenizer(num_words=vocab_size)
-tokenizer.fit_on_texts(train_text)
-train_text_tokenized = tokenizer.texts_to_sequences(train_text)
 maxlen = 500
-train_text_tokenized = pad_sequences(train_text_tokenized, padding='post', maxlen=maxlen)
-pickle.dump(train_text_tokenized, open('train_text_30k_for_keras.pickle', 'wb'))
-pickle.dump(tokenizer, open('tokenizer.pickle', 'wb'))
+#tokenizer = Tokenizer(num_words=vocab_size)
+#tokenizer.fit_on_texts(train_text)
+#train_text_tokenized = tokenizer.texts_to_sequences(train_text)
+#train_text_tokenized = pad_sequences(train_text_tokenized, padding='post', maxlen=maxlen)
+#pickle.dump(train_text_tokenized, open('train_text_30k_for_keras.pickle', 'wb'))
+#pickle.dump(tokenizer, open('tokenizer.pickle', 'wb'))
 train_text_tokenized= pickle.load(open('train_text_30k_for_keras.pickle', 'rb'))
-eval_text_tokenized = [a.rstrip('\n') for a in open('../dev-0/in.tsv', 'r')]
-eval_text_tokenized = tokenizer.texts_to_sequences(eval_text_tokenized)
-eval_text_tokenized = pad_sequences(eval_text_tokenized, padding='post', maxlen=maxlen)
-pickle.dump(eval_text_tokenized, open('eval_text_30k_for_keras.pickle', 'wb'))
+#eval_text_tokenized = [a.rstrip('\n') for a in open('../dev-0/in.tsv', 'r')]
+#eval_text_tokenized = tokenizer.texts_to_sequences(eval_text_tokenized)
+#eval_text_tokenized = pad_sequences(eval_text_tokenized, padding='post', maxlen=maxlen)
+#pickle.dump(eval_text_tokenized, open('eval_text_30k_for_keras.pickle', 'wb'))
 eval_text_tokenized = pickle.load(open('eval_text_30k_for_keras.pickle', 'rb'))
 eval_year = [float(a.rstrip()) for a in open(f'../dev-0/expected{FILE}.tsv','r')]
@@ -84,5 +84,5 @@ eval_year_scaled = scaler.transform(eval_year)
 filepath = "./" + checkpoints + "/saved-model-{epoch:06d}-{val_loss:.2f}.hdf5"
 checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
 es = EarlyStopping(monitor='val_loss', patience = 70)
-model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='mse', metrics=['mse'])
-history = model.fit(train_text_tokenized, train_year_scaled, batch_size=BATCH_SIZE, epochs=5000, verbose=1, validation_data = (eval_text_tokenized, eval_year_scaled), callbacks = [es, checkpoint])
+model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.000001), loss='mse', metrics=['mse'])
+history = model.fit(train_text_tokenized, train_year_scaled, batch_size=BATCH_SIZE, epochs=100, verbose=1, validation_data = (eval_text_tokenized, eval_year_scaled), callbacks = [es, checkpoint])
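
This script trains on min-max-scaled years (train_year_scaled here, and the minmaxscaler.pickle the prediction script above loads). The fitting of that scaler is outside the hunks shown, so the round trip below is a sketch under that assumption: targets are squeezed into [0, 1] for training, and predictions are mapped back to calendar years afterwards.

    import numpy as np
    from sklearn.preprocessing import MinMaxScaler

    # Fit the scaler on the training targets only, then reuse it everywhere.
    scaler = MinMaxScaler()
    train_year_scaled = scaler.fit_transform(np.array(train_year).reshape(-1, 1))
    eval_year_scaled = scaler.transform(np.array(eval_year).reshape(-1, 1))

    # Dense(1, activation='linear') outputs scaled values; invert to get years.
    pred_years = scaler.inverse_transform(model.predict(eval_text_tokenized)).ravel()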

@@ -65,7 +65,7 @@ class RegressorHead(torch.nn.Module):
 regressor_head = RegressorHead().to(device)
-optimizer = torch.optim.Adam(list(roberta.parameters()) + list(regressor_head.parameters()), lr=1e-6)
+optimizer = torch.optim.Adam(list(roberta.parameters()) + list(regressor_head.parameters()), lr=1e-8)
 criterion = torch.nn.MSELoss(reduction='sum').to(device)
 BATCH_SIZE = 1
@@ -118,7 +118,8 @@ def eval_short():
     loss = 0.0
     loss_clipped = 0.0
     loss_scaled = 0.0
-    for batch, year in tqdm(get_train_batch(dev_in[:1000],dev_year_scaled[:1000])):
+    eval_num = 10000
+    for batch, year in tqdm(get_train_batch(dev_in[:eval_num],dev_year_scaled[:eval_num])):
         x = regressor_head(batch.to(device)).squeeze()
         x_clipped = torch.clamp(x,0.0,1.0)
@@ -130,8 +131,8 @@ def eval_short():
         loss_scaled += criterion_eval(x, year).item()
         loss += criterion_eval(original_x, original_year).item()
         loss_clipped += criterion_eval(original_x_clipped, original_year).item()
-    print('valid loss scaled: ' + str(np.sqrt(loss_scaled/1000)))
-    print('valid loss: ' + str(np.sqrt(loss/1000)))
+    print('valid loss scaled: ' + str(np.sqrt(loss_scaled/eval_num)))
+    print('valid loss: ' + str(np.sqrt(loss/eval_num)))
     print('valid loss clipped: ' + str(np.sqrt(loss_clipped/len(dev_year))))
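
The RegressorHead named in the first hunk is defined outside the lines shown, so its architecture is not visible here; the optimizer fine-tunes it jointly with roberta. A hypothetical sketch of such a head, with the hidden size and the pooled-input shape labelled as guesses:

    import torch

    class RegressorHead(torch.nn.Module):
        # Hypothetical: the real class body is not part of this diff.
        def __init__(self, hidden_size=768):  # 768 for roberta-base, 1024 for large
            super().__init__()
            self.linear = torch.nn.Linear(hidden_size, 1)

        def forward(self, pooled):
            # pooled: (batch, hidden_size) sentence vector -> (batch, 1) year estimate
            return self.linear(pooled)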

File diff suppressed because it is too large

@@ -10,20 +10,18 @@ import random
 import pickle
 import sys
-import lzma
 def tokenizer_space(text):
     return text.split(' ')
 type = sys.argv[1] # 1 or 2
 def run():
     # LOADING DATA
-    train_text = [a.rstrip('\n') for a in lzma.open('../train/in.tsv.xz', 'rt')]
-    dev_text = [a.rstrip('\n') for a in lzma.open('../dev-0/in.tsv.xz', 'rt')]
-    test_text = [a.rstrip('\n') for a in lzma.open('../test-A/in.tsv.xz', 'rt')]
+    train_text = [a.rstrip('\n') for a in open('../train/in.tsv','r')]
+    dev_text = [a.rstrip('\n') for a in open('../dev-0/in.tsv','r')]
+    test_text = [a.rstrip('\n') for a in open('../test-A/in.tsv','r')]
     global lowest
     train_year = [float(a.rstrip('\n')) for a in open(f'../train/expected{type}.tsv','r')]
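
This hunk drops the lzma import and switches from the compressed .tsv.xz inputs to plain TSVs. If both layouts ever need to coexist, one option is a small dispatch helper (hypothetical, not part of the repo):

    import lzma
    import os

    def open_text(path):
        # Prefer the plain file; fall back to the xz-compressed variant.
        if os.path.exists(path):
            return open(path, 'r')
        return lzma.open(path + '.xz', 'rt')

    train_text = [a.rstrip('\n') for a in open_text('../train/in.tsv')]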