# train-sacred.py — IUM task s478815: sacred-instrumented training script
import sacred
2022-05-07 12:22:38 +02:00
import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import sys
import os
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.observers import MongoObserver
from sklearn.metrics import accuracy_score
from sacred.observers.file_storage import file_storage_option
from sacred.observers.mongo import mongo_db_option
# Sacred experiment: every run is recorded both to the local `experiment/`
# folder and to a MongoDB instance.
ex = Experiment("s478815", save_git_info=False)
# NOTE(review): MongoDB credentials are hard-coded in the URL — consider
# moving them to environment variables or a secrets store.
ex.observers.extend([
    FileStorageObserver('experiment/'),
    MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'),
])
@ex.config
def my_config():
    """Sacred configuration scope: declares the tunable hyper-parameters."""
    # Number of training iterations; injected into my_main() by sacred.
    epochs = 1000
# Model
class Model(nn.Module):
    """Single-feature logistic regression: sigmoid(w * x + b)."""

    def __init__(self):
        super().__init__()
        # One input feature (sqft_living) mapped to one output (scaled price).
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        # Squash the affine output into (0, 1) with a sigmoid.
        return torch.sigmoid(self.linear(x))
# --- Data loading and preparation (runs at import time) ---
data = pd.read_csv('data.csv')
# BUG FIX: dropna() returns a new DataFrame — the result was previously
# discarded, so rows with missing values were never actually removed.
data = data.dropna()

# 90/10 train/test split with a fixed seed for reproducibility.
training_data = data.sample(frac=0.9, random_state=25)
testing_data = data.drop(training_data.index)
print(f"No. of training examples: {training_data.shape[0]}")
print(f"No. of testing examples: {testing_data.shape[0]}")

# Keep only the single feature and the target column.
training_data = training_data[['sqft_living', 'price']]
testing_data = testing_data[['sqft_living', 'price']]

# Crude manual scaling so the sigmoid output range is usable as a target.
training_data[['price']] = training_data[['price']] / 10000000
training_data[['sqft_living']] = training_data[['sqft_living']] / 10000
testing_data[['price']] = testing_data[['price']] / 10000000
testing_data[['sqft_living']] = testing_data[['sqft_living']] / 10000

# Tensory — convert to float32 torch tensors.
# (Removed a stray no-op `torch.from_file` attribute access.)
X_training = torch.from_numpy(training_data[['sqft_living']].to_numpy().astype(np.float32))
X_testing = torch.from_numpy(testing_data[['sqft_living']].to_numpy().astype(np.float32))
y_training = torch.from_numpy(training_data[['price']].to_numpy().astype(np.float32))
y_testing = torch.from_numpy(testing_data[['price']].to_numpy().astype(np.float32))

model = Model()
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
@ex.automain
def my_main(epochs):
    """Train the logistic-regression model and evaluate it on the test split.

    Args:
        epochs: number of training iterations (injected from my_config).
    """
    # --- Trening ---
    # BUG FIX: the loop variable used to be named `epochs`, shadowing the
    # parameter; renamed to `epoch`.
    for epoch in range(epochs):
        y_predicted = model(X_training)
        loss = criterion(y_predicted, y_training)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        if epoch % 100 == 0:
            # Open the log only when actually writing (it used to be reopened
            # on every epoch) and terminate each record with a newline
            # (previously missing, so appended records ran together).
            with open("output.txt", 'a+') as f:
                f.write(f'epoch:{epoch+1},loss = {loss.item():.4f}\n')

    # --- Evaluation on the held-out split ---
    with torch.no_grad():
        y_predicted = model(X_testing)
        y_predicted_cls = y_predicted.round()
        # NOTE(review): rounding predictions to 0/1 and comparing against the
        # scaled price treats this as classification — confirm this "accuracy"
        # is the intended metric.
        acc = y_predicted_cls.eq(y_testing).sum() / float(y_testing.shape[0])
        print(f'{acc:.4f}')

    # Persist the whole model object (torch pickle format, despite .pkl name).
    torch.save(model, "modelS.pkl")


# NOTE(review): @ex.automain already runs the experiment when this file is
# executed as a script; this explicit ex.run() triggers an additional run —
# confirm the duplicate execution is intended.
ex.run()