Compare commits
11 Commits
Author | SHA1 | Date
---|---|---
 | a6bb8042bc |
 | 4be9a5bda2 |
 | 894f5c166b |
 | c469463bc0 |
 | 22521da951 |
 | 987b64ae50 |
 | 10f548680a |
 | 8a5bc51f44 |
 | 52225711ae |
 | 7a21dce529 |
 | 08412c6b42 |
dev-0/out.tsv (15769 lines)
File diff suppressed because it is too large
@@ -57,8 +57,8 @@ model.add(Bidirectional(LSTM(LSTM_SIZE, dropout = DROPOUT_LSTM)))
 model.add(Dropout(DROPOUT_REGULAR))
 model.add(Dense(1, activation='linear'))
 
-model_checkpoint = sorted(os.listdir(checkpoints))[-1]
-model.load_weights('checkpoints/saved-model-000006-0.02.hdf5','rb')
+#model_checkpoint = sorted(os.listdir(checkpoints))[-1]
+model.load_weights('checkpoints/saved-model-000018-0.03.hdf5','rb')
 
 scaler = pickle.load(open('minmaxscaler.pickle', 'rb'))
 
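The checkpoint swap above sits in what appears to be the prediction script: it loads saved weights plus the pickled `MinMaxScaler`, and the regenerated `dev-0/out.tsv` / `test-A/out.tsv` files in this comparison are its outputs. A minimal sketch of that inference path, assuming the Bidirectional-LSTM `model`, the pickled tokenizer and the scaler produced by the training script shown below; `predict_years` is a hypothetical helper, not code from this repository:

```python
# Hypothetical inference helper (not part of the diff). Assumes `model`,
# 'tokenizer.pickle' and 'minmaxscaler.pickle' come from the training script.
import pickle
from tensorflow.keras.preprocessing.sequence import pad_sequences

def predict_years(model, in_path, out_path, maxlen=500):
    tokenizer = pickle.load(open('tokenizer.pickle', 'rb'))
    scaler = pickle.load(open('minmaxscaler.pickle', 'rb'))
    texts = [a.rstrip('\n') for a in open(in_path, 'r')]
    x = pad_sequences(tokenizer.texts_to_sequences(texts), padding='post', maxlen=maxlen)
    pred_scaled = model.predict(x)                  # regression output in scaled units
    years = scaler.inverse_transform(pred_scaled)   # undo the MinMax scaling of the targets
    with open(out_path, 'w') as out:
        for y in years.flatten():
            out.write(f'{y}\n')

# e.g. predict_years(model, '../dev-0/in.tsv', '../dev-0/out.tsv')
```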
@@ -38,22 +38,22 @@ FILE='2'
 train_text = [a.rstrip('\n') for a in open('../train/in.tsv','r')]
 train_year = [float(a.rstrip('\n')) for a in open(f'../train/expected{FILE}.tsv','r')]
 
 
-tokenizer = Tokenizer(num_words=vocab_size)
-tokenizer.fit_on_texts(train_text)
-train_text_tokenized = tokenizer.texts_to_sequences(train_text)
 maxlen = 500
-train_text_tokenized = pad_sequences(train_text_tokenized, padding='post', maxlen=maxlen)
-pickle.dump(train_text_tokenized, open('train_text_30k_for_keras.pickle', 'wb'))
-pickle.dump(tokenizer, open('tokenizer.pickle', 'wb'))
+#tokenizer = Tokenizer(num_words=vocab_size)
+#tokenizer.fit_on_texts(train_text)
+#train_text_tokenized = tokenizer.texts_to_sequences(train_text)
+#train_text_tokenized = pad_sequences(train_text_tokenized, padding='post', maxlen=maxlen)
+#pickle.dump(train_text_tokenized, open('train_text_30k_for_keras.pickle', 'wb'))
+#pickle.dump(tokenizer, open('tokenizer.pickle', 'wb'))
 train_text_tokenized= pickle.load(open('train_text_30k_for_keras.pickle', 'rb'))
 
 
 
-eval_text_tokenized = [a.rstrip('\n') for a in open('../dev-0/in.tsv', 'r')]
-eval_text_tokenized = tokenizer.texts_to_sequences(eval_text_tokenized)
-eval_text_tokenized = pad_sequences(eval_text_tokenized, padding='post', maxlen=maxlen)
-pickle.dump(eval_text_tokenized, open('eval_text_30k_for_keras.pickle', 'wb'))
+#eval_text_tokenized = [a.rstrip('\n') for a in open('../dev-0/in.tsv', 'r')]
+#eval_text_tokenized = tokenizer.texts_to_sequences(eval_text_tokenized)
+#eval_text_tokenized = pad_sequences(eval_text_tokenized, padding='post', maxlen=maxlen)
+#pickle.dump(eval_text_tokenized, open('eval_text_30k_for_keras.pickle', 'wb'))
 eval_text_tokenized = pickle.load(open('eval_text_30k_for_keras.pickle', 'rb'))
 
 eval_year = [float(a.rstrip()) for a in open(f'../dev-0/expected{FILE}.tsv','r')]
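The hunk above replaces on-the-fly tokenization with loading cached pickles, keeping the old code as comments. The same effect can be expressed as a cache-or-compute helper; this is a sketch only, `load_or_build_sequences` is hypothetical, and `vocab_size=30000` is a guess based on the `30k` in the pickle names:

```python
import os
import pickle
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

def load_or_build_sequences(texts, cache_path, tokenizer_path,
                            vocab_size=30000, maxlen=500):
    """Return padded sequences, rebuilding and caching them only when needed."""
    if os.path.exists(cache_path):
        return pickle.load(open(cache_path, 'rb'))
    tokenizer = Tokenizer(num_words=vocab_size)
    tokenizer.fit_on_texts(texts)
    seqs = pad_sequences(tokenizer.texts_to_sequences(texts),
                         padding='post', maxlen=maxlen)
    pickle.dump(seqs, open(cache_path, 'wb'))
    pickle.dump(tokenizer, open(tokenizer_path, 'wb'))
    return seqs

# Usage mirroring the diff:
train_text = [a.rstrip('\n') for a in open('../train/in.tsv', 'r')]
train_text_tokenized = load_or_build_sequences(
    train_text, 'train_text_30k_for_keras.pickle', 'tokenizer.pickle')
```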
@@ -84,5 +84,5 @@ eval_year_scaled = scaler.transform(eval_year)
 filepath = "./" + checkpoints + "/saved-model-{epoch:06d}-{val_loss:.2f}.hdf5"
 checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
 es = EarlyStopping(monitor='val_loss', patience = 70)
-model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='mse', metrics=['mse'])
-history = model.fit(train_text_tokenized, train_year_scaled, batch_size=BATCH_SIZE, epochs=5000, verbose=1, validation_data = (eval_text_tokenized, eval_year_scaled), callbacks = [es, checkpoint])
+model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.000001), loss='mse', metrics=['mse'])
+history = model.fit(train_text_tokenized, train_year_scaled, batch_size=BATCH_SIZE, epochs=100, verbose=1, validation_data = (eval_text_tokenized, eval_year_scaled), callbacks = [es, checkpoint])
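Both `model.fit` targets are scaled years (`train_year_scaled`, `eval_year_scaled`), and the scaler ends up in `minmaxscaler.pickle` for use at prediction time. The fitting step itself is outside the hunks shown, so the following is only a plausible reconstruction using scikit-learn's `MinMaxScaler`:

```python
import pickle
import numpy as np
from sklearn.preprocessing import MinMaxScaler

train_year = [float(a.rstrip('\n')) for a in open('../train/expected2.tsv', 'r')]
eval_year = [float(a.rstrip()) for a in open('../dev-0/expected2.tsv', 'r')]

# Fit on the training years only, then reuse the same scaler for dev and inference.
scaler = MinMaxScaler()
train_year_scaled = scaler.fit_transform(np.array(train_year).reshape(-1, 1))
eval_year_scaled = scaler.transform(np.array(eval_year).reshape(-1, 1))

pickle.dump(scaler, open('minmaxscaler.pickle', 'wb'))
```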
@@ -65,7 +65,7 @@ class RegressorHead(torch.nn.Module):
 
 regressor_head = RegressorHead().to(device)
 
-optimizer = torch.optim.Adam(list(roberta.parameters()) + list(regressor_head.parameters()), lr=1e-6)
+optimizer = torch.optim.Adam(list(roberta.parameters()) + list(regressor_head.parameters()), lr=1e-8)
 criterion = torch.nn.MSELoss(reduction='sum').to(device)
 
 BATCH_SIZE = 1
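`roberta` here is presumably a pretrained encoder fine-tuned jointly with the regression head; a common way to obtain such a model is via Hugging Face Transformers. A short sketch under that assumption (the checkpoint name and loading code used by this repository are not visible in the diff):

```python
import torch
from transformers import RobertaModel, RobertaTokenizer

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Assumption: a standard pretrained RoBERTa encoder.
roberta = RobertaModel.from_pretrained('roberta-base').to(device)
roberta_tokenizer = RobertaTokenizer.from_pretrained('roberta-base')

# The diff then optimizes encoder and head together with a single learning rate:
# optimizer = torch.optim.Adam(list(roberta.parameters()) + list(regressor_head.parameters()), lr=1e-8)
```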
@@ -118,7 +118,8 @@ def eval_short():
     loss = 0.0
     loss_clipped = 0.0
     loss_scaled = 0.0
-    for batch, year in tqdm(get_train_batch(dev_in[:1000],dev_year_scaled[:1000])):
+    eval_num = 10000
+    for batch, year in tqdm(get_train_batch(dev_in[:eval_num],dev_year_scaled[:eval_num])):
 
         x = regressor_head(batch.to(device)).squeeze()
         x_clipped = torch.clamp(x,0.0,1.0)
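The `original_x`, `original_x_clipped` and `original_year` values used in the next hunk are the predictions and targets mapped back from the scaled range to calendar years; that mapping happens in lines not shown here. A hedged sketch of one way it could look, assuming a fitted scikit-learn `MinMaxScaler` like the one used in the Keras script; `unscale` is a hypothetical helper:

```python
import torch

def unscale(t, scaler):
    """Map a tensor of scaled values back to years using a fitted MinMaxScaler."""
    arr = t.detach().cpu().numpy().reshape(-1, 1)
    return torch.from_numpy(scaler.inverse_transform(arr).flatten())

# Inside the loop above (illustrative only):
# original_x = unscale(x, scaler)
# original_x_clipped = unscale(x_clipped, scaler)
# original_year = unscale(year, scaler)
```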
@@ -130,8 +131,8 @@ def eval_short():
         loss_scaled += criterion_eval(x, year).item()
         loss += criterion_eval(original_x, original_year).item()
         loss_clipped += criterion_eval(original_x_clipped, original_year).item()
-    print('valid loss scaled: ' + str(np.sqrt(loss_scaled/1000)))
-    print('valid loss: ' + str(np.sqrt(loss/1000)))
+    print('valid loss scaled: ' + str(np.sqrt(loss_scaled/eval_num)))
+    print('valid loss: ' + str(np.sqrt(loss/eval_num)))
     print('valid loss clipped: ' + str(np.sqrt(loss_clipped/len(dev_year))))
 
 
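Because `criterion` is built with `MSELoss(reduction='sum')` (and `criterion_eval` is presumably the same), each `.item()` adds a summed squared error, so dividing the running total by the number of evaluated examples and taking the square root yields an RMSE in years; switching the denominator to `eval_num` keeps it in step with the larger evaluation slice. A tiny worked check of that arithmetic:

```python
import numpy as np
import torch

criterion_eval = torch.nn.MSELoss(reduction='sum')   # assumption: same reduction as `criterion`

preds = torch.tensor([1999.0, 2004.0, 2011.0])
targets = torch.tensor([2000.0, 2002.0, 2010.0])

loss = criterion_eval(preds, targets).item()   # 1 + 4 + 1 = 6.0 (summed squared errors)
rmse = np.sqrt(loss / len(targets))            # sqrt(6 / 3) ≈ 1.414 years
print(rmse)
```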
test-A/out.tsv (14863 lines)
File diff suppressed because it is too large