sacred apply
This commit is contained in:
parent 475a76e552
commit 24cf18df57
Dockerfile
@@ -5,6 +5,7 @@ FROM ubuntu:latest
RUN export PATH="$PATH:/root/.local/bin"
RUN apt update
RUN apt-get update
RUN apt install -y git
RUN apt install -y figlet
RUN export PATH="$PATH:/usr/local/bin/python"
RUN apt install python3-pip -y
@@ -16,6 +17,8 @@ RUN pip3 install pillow
RUN pip3 install scikit-learn
RUN pip3 install matplotlib
RUN pip3 install torchvision
+RUN pip3 install sacred
+RUN pip3 install pymongo

# Args
ARG KAGGLE_USERNAME
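The two new pip installs back the Sacred integration below: sacred provides the Experiment machinery, and pymongo is only exercised if the MongoObserver is enabled (it stays commented out in neutral_network.py, which records runs with a FileStorageObserver instead). A minimal sketch of the Mongo hookup this install prepares for, mirroring the commented-out connection string:

# sketch: the MongoObserver wiring that the pymongo install enables
# (url/db_name mirror the commented-out lines in neutral_network.py)
from sacred import Experiment
from sacred.observers import MongoObserver

ex = Experiment('Simple Experiment')
ex.observers.append(MongoObserver(
    url='mongodb://mongo_user:mongo_password_IUM_2022@localhost:27017',
    db_name='sacred'))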
Jenkinsfile
@@ -32,12 +32,13 @@ pipeline {
        stage('Train model') {
            steps {
                sh "chmod u+x ./neutral_network.py"
-                sh "python3 neutral_network.py -e ${params.EPOCHS} -b ${params.BATCHSIZE}"
+                sh "python3 neutral_network.py with 'epochs=${params.EPOCHS}' 'batch_size=${params.BATCHSIZE}'"
            }
        }
        stage('Archive model') {
            steps {
-                archiveArtifacts artifacts: "model.zip", onlyIfSuccessful: true
+                archiveArtifacts artifacts: 'model.zip', onlyIfSuccessful: true
+                archiveArtifacts artifacts: 'sacred_runs/**', onlyIfSuccessful: true
            }
        }
    }
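The train stage no longer passes argparse flags; it uses Sacred's command-line interface, where each quoted token after with is a config update applied on top of the @ex.config defaults. A minimal sketch of that mechanism (a standalone example, not the project file):

# sketch: how Sacred's "with" overrides reach the main function
from sacred import Experiment

ex = Experiment('demo')

@ex.config
def cfg():
    batch_size = 64  # defaults; CLI "with key=value" tokens override these
    epochs = 5

@ex.automain
def run(batch_size, epochs):
    # python3 demo.py                                  -> 64, 5
    # python3 demo.py with 'epochs=8'                  -> 64, 8
    # python3 demo.py with 'epochs=8' 'batch_size=32'  -> 32, 8
    print(batch_size, epochs)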
neutral_network.py
@@ -5,13 +5,14 @@ import torch
import argparse
from torch import nn
from torch.utils.data import DataLoader, Dataset
+from sacred import Experiment
+from sacred.observers import FileStorageObserver, MongoObserver

default_batch_size = 64
default_epochs = 4

device = "cuda" if torch.cuda.is_available() else "cpu"


class AtpDataset(Dataset):
    def __init__(self, file_name):
        df = pd.read_csv(file_name, usecols=["AvgL", "AvgW"])
@@ -83,12 +84,9 @@ def setup_args():
    args_parser.add_argument("-e", "--epochs", type=int, default=default_epochs)
    return args_parser.parse_args()


def main(batch_size, epochs):
    print(f"Using {device} device")

-    args = setup_args()
-    batch_size = args.batchSize

    plant_test = AtpDataset("atp_test.csv")
    plant_train = AtpDataset("atp_train.csv")
@@ -105,7 +103,6 @@ print(model)

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
-    epochs = args.epochs

    for t in range(epochs):
        print(f"Epoch {t + 1}\n-------------------------------")
@@ -115,3 +112,23 @@ print("Finish!")

    torch.save(model.state_dict(), "./model.zip")
    print("Model saved in ./model.zip file.")

+def setup_experiment():
+    ex = Experiment('Simple Experiment')
+    ex.observers.append(FileStorageObserver('sacred_runs'))
+    # ex.observers.append(MongoObserver(url='mongodb://mongo_user:mongo_password_IUM_2022@localhost:27017',
+    #                                   db_name='sacred'))
+    return ex
+
+
+ex = setup_experiment()
+
+
+@ex.config
+def experiment_config():
+    batch_size = 64
+    epochs = 5
+
+
+@ex.automain
+def run(batch_size, epochs):
+    main(batch_size, epochs)
+    ex.add_artifact('model.zip')
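With @ex.automain, executing the script runs run() with batch_size and epochs injected from experiment_config (or from CLI with-overrides), and the FileStorageObserver writes each run to sacred_runs/<run_id>/, which is what the new archiveArtifacts line in the Jenkinsfile picks up. Runs can also be launched programmatically; a sketch, assuming the ex defined above:

# sketch: starting a run without the CLI; observers and config still apply
r = ex.run(config_updates={'epochs': 2, 'batch_size': 128})
print(r.status, r.config)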
5  sacred_runs/1/config.json  Normal file
@@ -0,0 +1,5 @@
{
  "batch_size": 64,
  "epochs": 5,
  "seed": 171403633
}
1  sacred_runs/1/metrics.json  Normal file
@@ -0,0 +1 @@
{}
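metrics.json comes out empty because the script never calls Sacred's metrics API; only the config, run metadata, and the model.zip artifact are recorded. If per-epoch values were wanted here, the training loop could log them, for example (a sketch, not part of this commit):

# sketch: logging a scalar per epoch so the observer populates metrics.json
for t in range(epochs):
    train(train_dataloader, model, loss_fn, optimizer)
    test_loss = test(test_dataloader, model, loss_fn)
    ex.log_scalar('test_loss', test_loss, t)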
74  sacred_runs/1/run.json  Normal file
@@ -0,0 +1,74 @@
{
  "artifacts": [
    "model.zip"
  ],
  "command": "run",
  "experiment": {
    "base_dir": "/home/wirus/dev/ium_444498",
    "dependencies": [
      "numpy==1.22.3",
      "pandas==1.4.1",
      "sacred==0.8.2",
      "torch==1.11.0"
    ],
    "mainfile": "neutral_network.py",
    "name": "Simple Experiment",
    "repositories": [
      {
        "commit": "b0deed97f63ff54e9f4df87346315b37506fcf3b",
        "dirty": true,
        "url": "https://git.wmi.amu.edu.pl/s444498/ium_444498.git"
      }
    ],
    "sources": [
      [
        "neutral_network.py",
        "_sources/neutral_network_e5d851342f3b663b0ccbb114284aae02.py"
      ]
    ]
  },
  "heartbeat": "2022-05-09T16:01:55.707975",
  "host": {
    "ENV": {},
    "cpu": "Intel(R) Core(TM) i3-2310M CPU @ 2.10GHz",
    "hostname": "wirusowylapek",
    "os": [
      "Linux",
      "Linux-5.10.105-1-MANJARO-x86_64-with-glibc2.35"
    ],
    "python_version": "3.10.2"
  },
  "meta": {
    "command": "run",
    "options": {
      "--beat-interval": null,
      "--capture": null,
      "--comment": null,
      "--debug": false,
      "--enforce_clean": false,
      "--file_storage": null,
      "--force": false,
      "--help": false,
      "--loglevel": null,
      "--mongo_db": null,
      "--name": null,
      "--pdb": false,
      "--print-config": false,
      "--priority": null,
      "--queue": false,
      "--s3": null,
      "--sql": null,
      "--tiny_db": null,
      "--unobserved": false,
      "COMMAND": null,
      "UPDATE": [],
      "help": false,
      "with": false
    }
  },
  "resources": [],
  "result": null,
  "start_time": "2022-05-09T16:01:42.424966",
  "status": "COMPLETED",
  "stop_time": "2022-05-09T16:01:55.705969"
}
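run.json is Sacred's full record of the run: detected package versions, host details, CLI options, and repository state. Note "dirty": true, meaning the working tree had uncommitted changes, so commit b0deed9 alone does not pin what ran; the source snapshot under _sources (shown next) is the code that actually executed. A quick way to inspect the record (a sketch):

# sketch: reading back a run's metadata
import json

with open('sacred_runs/1/run.json') as f:
    run = json.load(f)
print(run['status'], run['result'])
print(run['experiment']['dependencies'])
print('dirty repo:', run['experiment']['repositories'][0]['dirty'])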
135  sacred_runs/_sources/neutral_network_e5d851342f3b663b0ccbb114284aae02.py  Normal file
@@ -0,0 +1,135 @@
from ast import arg
import numpy as np
import pandas as pd
import torch
import argparse
from torch import nn
from torch.utils.data import DataLoader, Dataset
from sacred import Experiment
from sacred.observers import FileStorageObserver, MongoObserver

default_batch_size = 64
default_epochs = 4

device = "cuda" if torch.cuda.is_available() else "cpu"


class AtpDataset(Dataset):
    def __init__(self, file_name):
        df = pd.read_csv(file_name, usecols=["AvgL", "AvgW"])
        df = df.dropna()

        # Loser avg and Winner avg
        x = df.iloc[:, 1].values
        y = df.iloc[:, 0].values

        self.x_train = torch.from_numpy(x)
        self.y_train = torch.from_numpy(y)
        self.x_train.type(torch.LongTensor)

    def __len__(self):
        return len(self.y_train)

    def __getitem__(self, idx):
        return self.x_train[idx].float(), self.y_train[idx].float()


class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.Sequential(
            nn.Linear(1, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 1),
        )

    def forward(self, x):
        x = x.view(x.size(0), -1)
        return self.layers(x)


def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        pred = model(X)
        loss = loss_fn(pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch % 100 == 0:
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")


def test(dataloader, model, loss_fn):
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
    test_loss /= num_batches
    print(f"Avg loss (using {loss_fn}): {test_loss:>8f} \n")
    return test_loss


def setup_args():
    args_parser = argparse.ArgumentParser(prefix_chars="-")
    args_parser.add_argument("-b", "--batchSize", type=int, default=default_batch_size)
    args_parser.add_argument("-e", "--epochs", type=int, default=default_epochs)
    return args_parser.parse_args()


def main(batch_size, epochs):
    print(f"Using {device} device")

    plant_test = AtpDataset("atp_test.csv")
    plant_train = AtpDataset("atp_train.csv")

    train_dataloader = DataLoader(plant_train, batch_size=batch_size)
    test_dataloader = DataLoader(plant_test, batch_size=batch_size)

    for i, (data, labels) in enumerate(train_dataloader):
        print(data.shape, labels.shape)
        print(data, labels)
        break

    model = MLP()
    print(model)

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

    for t in range(epochs):
        print(f"Epoch {t + 1}\n-------------------------------")
        train(train_dataloader, model, loss_fn, optimizer)
        test(test_dataloader, model, loss_fn)
    print("Finish!")

    torch.save(model.state_dict(), "./model.zip")
    print("Model saved in ./model.zip file.")


def setup_experiment():
    ex = Experiment('Simple Experiment')
    ex.observers.append(FileStorageObserver('sacred_runs'))
    # ex.observers.append(MongoObserver(url='mongodb://mongo_user:mongo_password_IUM_2022@localhost:27017',
    #                                   db_name='sacred'))
    return ex


ex = setup_experiment()


@ex.config
def experiment_config():
    batch_size = 64
    epochs = 5


@ex.automain
def run(batch_size, epochs):
    main(batch_size, epochs)
    ex.add_artifact('model.zip')
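A recorded run can be replayed from its stored configuration; Sacred's CLI accepts a JSON config file as a with-argument, and the same updates can be fed to ex.run. A sketch (the import assumes the script's module name; @ex.automain only auto-runs when the file is executed directly):

# sketch: replaying run 1 from its recorded configuration
# CLI form:  python3 neutral_network.py with sacred_runs/1/config.json
import json
from neutral_network import ex  # does not trigger automain on import

with open('sacred_runs/1/config.json') as f:
    ex.run(config_updates=json.load(f))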