sacred apply
This commit is contained in:
parent 475a76e552
commit 24cf18df57
Dockerfile
@@ -5,6 +5,7 @@ FROM ubuntu:latest
 RUN export PATH="$PATH:/root/.local/bin"
 RUN apt update
 RUN apt-get update
+RUN apt install -y git
 RUN apt install -y figlet
 RUN export PATH="$PATH:/usr/local/bin/python"
 RUN apt install python3-pip -y
@@ -16,6 +17,8 @@ RUN pip3 install pillow
 RUN pip3 install scikit-learn
 RUN pip3 install matplotlib
 RUN pip3 install torchvision
+RUN pip3 install sacred
+RUN pip3 install pymongo
 
 # Args
 ARG KAGGLE_USERNAME
@@ -29,4 +32,4 @@ WORKDIR /app
 COPY . .
 
 # Create kaggle catalog for authenticate
 RUN mkdir /.kaggle/ && chmod o+w /.kaggle
Jenkinsfile
@@ -32,12 +32,13 @@ pipeline {
         stage('Train model') {
             steps {
                 sh "chmod u+x ./neutral_network.py"
-                sh "python3 neutral_network.py -e ${params.EPOCHS} -b ${params.BATCHSIZE}"
+                sh "python3 neutral_network.py with 'epochs=${params.EPOCHS}' 'batch_size=${params.BATCHSIZE}'"
             }
         }
         stage('Archive model') {
             steps {
-                archiveArtifacts artifacts: "model.zip", onlyIfSuccessful: true
+                archiveArtifacts artifacts: 'model.zip', onlyIfSuccessful: true
+                archiveArtifacts artifacts: 'sacred_runs/**', onlyIfSuccessful: true
             }
         }
     }
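The train step no longer passes argparse flags: once the script uses @ex.automain, Sacred owns the command line, and `with 'key=value'` pairs override the values declared in the @ex.config function. A minimal sketch (not part of this commit) of the programmatic equivalent, assuming neutral_network.py is importable from the working directory:

    # Hedged sketch: drive the Sacred experiment from Python instead of the CLI.
    # ex.run(config_updates=...) is the programmatic counterpart of
    # `python3 neutral_network.py with 'epochs=5' 'batch_size=64'`.
    from neutral_network import ex

    run = ex.run(config_updates={"epochs": 5, "batch_size": 64})
    print(run.status)            # e.g. "COMPLETED"
    print(run.config["epochs"])  # the overridden value, here 5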
neutral_network.py
@@ -5,13 +5,14 @@ import torch
 import argparse
 from torch import nn
 from torch.utils.data import DataLoader, Dataset
+from sacred import Experiment
+from sacred.observers import FileStorageObserver, MongoObserver
 
 default_batch_size = 64
 default_epochs = 4
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
 
 class AtpDataset(Dataset):
     def __init__(self, file_name):
         df = pd.read_csv(file_name, usecols=["AvgL", "AvgW"])
@@ -83,35 +84,51 @@ def setup_args():
     args_parser.add_argument("-e", "--epochs", type=int, default=default_epochs)
     return args_parser.parse_args()
 
 
-print(f"Using {device} device")
-
-args = setup_args()
-batch_size = args.batchSize
-
-plant_test = AtpDataset("atp_test.csv")
-plant_train = AtpDataset("atp_train.csv")
-
-train_dataloader = DataLoader(plant_train, batch_size=batch_size)
-test_dataloader = DataLoader(plant_test, batch_size=batch_size)
-
-for i, (data, labels) in enumerate(train_dataloader):
-    print(data.shape, labels.shape)
-    print(data, labels)
-    break
-
-model = MLP()
-print(model)
-
-loss_fn = nn.MSELoss()
-optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
-epochs = args.epochs
-
-for t in range(epochs):
-    print(f"Epoch {t + 1}\n-------------------------------")
-    train(train_dataloader, model, loss_fn, optimizer)
-    test(test_dataloader, model, loss_fn)
-print("Finish!")
-
-torch.save(model.state_dict(), "./model.zip")
-print("Model saved in ./model.zip file.")
+def main(batch_size, epochs):
+    print(f"Using {device} device")
+
+    plant_test = AtpDataset("atp_test.csv")
+    plant_train = AtpDataset("atp_train.csv")
+
+    train_dataloader = DataLoader(plant_train, batch_size=batch_size)
+    test_dataloader = DataLoader(plant_test, batch_size=batch_size)
+
+    for i, (data, labels) in enumerate(train_dataloader):
+        print(data.shape, labels.shape)
+        print(data, labels)
+        break
+
+    model = MLP()
+    print(model)
+
+    loss_fn = nn.MSELoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+    for t in range(epochs):
+        print(f"Epoch {t + 1}\n-------------------------------")
+        train(train_dataloader, model, loss_fn, optimizer)
+        test(test_dataloader, model, loss_fn)
+    print("Finish!")
+
+    torch.save(model.state_dict(), "./model.zip")
+    print("Model saved in ./model.zip file.")
+
+
+def setup_experiment():
+    ex = Experiment('Simple Experiment')
+    ex.observers.append(FileStorageObserver('sacred_runs'))
+    # ex.observers.append(MongoObserver(url='mongodb://mongo_user:mongo_password_IUM_2022@localhost:27017',
+    #                                   db_name='sacred'))
+    return ex
+
+
+ex = setup_experiment()
+
+
+@ex.config
+def experiment_config():
+    batch_size = 64
+    epochs = 5
+
+
+@ex.automain
+def run(batch_size, epochs):
+    main(batch_size, epochs)
+    ex.add_artifact('model.zip')
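Note that torch.save(model.state_dict(), "./model.zip") stores only the parameter tensors (the .zip name notwithstanding), and ex.add_artifact('model.zip') then attaches that file to the recorded run. A hedged sketch (not in the commit) of loading the archived weights back, assuming the same MLP class is importable:

    import torch
    from neutral_network import MLP

    model = MLP()                                    # must match the saved architecture
    model.load_state_dict(torch.load("model.zip"))   # "model.zip" holds a state_dict
    model.eval()                                     # switch to inference mode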
5  sacred_runs/1/config.json  Normal file
@@ -0,0 +1,5 @@
+{
+    "batch_size": 64,
+    "epochs": 5,
+    "seed": 171403633
+}
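The "seed" entry is generated by Sacred itself: when the config does not set one explicitly, Sacred draws a random root seed, records it here, and uses it to seed Python's random module and NumPy, which is what makes a recorded run reproducible after the fact.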
1  sacred_runs/1/metrics.json  Normal file
@@ -0,0 +1 @@
+{}
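metrics.json is empty because the script never calls Sacred's metrics API. A hypothetical extension (not in the commit) that would populate it, using the _run object Sacred injects into captured functions, and assuming main() were changed to return the final test loss:

    @ex.automain
    def run(batch_size, epochs, _run):
        test_loss = main(batch_size, epochs)     # assumes main() returns the loss
        _run.log_scalar("test_loss", test_loss)  # recorded into metrics.json
        ex.add_artifact('model.zip')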
74  sacred_runs/1/run.json  Normal file
@@ -0,0 +1,74 @@
+{
+    "artifacts": [
+        "model.zip"
+    ],
+    "command": "run",
+    "experiment": {
+        "base_dir": "/home/wirus/dev/ium_444498",
+        "dependencies": [
+            "numpy==1.22.3",
+            "pandas==1.4.1",
+            "sacred==0.8.2",
+            "torch==1.11.0"
+        ],
+        "mainfile": "neutral_network.py",
+        "name": "Simple Experiment",
+        "repositories": [
+            {
+                "commit": "b0deed97f63ff54e9f4df87346315b37506fcf3b",
+                "dirty": true,
+                "url": "https://git.wmi.amu.edu.pl/s444498/ium_444498.git"
+            }
+        ],
+        "sources": [
+            [
+                "neutral_network.py",
+                "_sources/neutral_network_e5d851342f3b663b0ccbb114284aae02.py"
+            ]
+        ]
+    },
+    "heartbeat": "2022-05-09T16:01:55.707975",
+    "host": {
+        "ENV": {},
+        "cpu": "Intel(R) Core(TM) i3-2310M CPU @ 2.10GHz",
+        "hostname": "wirusowylapek",
+        "os": [
+            "Linux",
+            "Linux-5.10.105-1-MANJARO-x86_64-with-glibc2.35"
+        ],
+        "python_version": "3.10.2"
+    },
+    "meta": {
+        "command": "run",
+        "options": {
+            "--beat-interval": null,
+            "--capture": null,
+            "--comment": null,
+            "--debug": false,
+            "--enforce_clean": false,
+            "--file_storage": null,
+            "--force": false,
+            "--help": false,
+            "--loglevel": null,
+            "--mongo_db": null,
+            "--name": null,
+            "--pdb": false,
+            "--print-config": false,
+            "--priority": null,
+            "--queue": false,
+            "--s3": null,
+            "--sql": null,
+            "--tiny_db": null,
+            "--unobserved": false,
+            "COMMAND": null,
+            "UPDATE": [],
+            "help": false,
+            "with": false
+        }
+    },
+    "resources": [],
+    "result": null,
+    "start_time": "2022-05-09T16:01:42.424966",
+    "status": "COMPLETED",
+    "stop_time": "2022-05-09T16:01:55.705969"
+}
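Each run gets its own numbered directory under sacred_runs/, holding config.json, metrics.json, run.json, the captured stdout, and the _sources/ snapshot referenced above. A hypothetical helper (not part of the repo) to summarize recorded runs:

    import json
    from pathlib import Path

    # Walk the numbered run directories created by FileStorageObserver.
    for run_dir in sorted(Path("sacred_runs").glob("[0-9]*")):
        run = json.loads((run_dir / "run.json").read_text())
        cfg = json.loads((run_dir / "config.json").read_text())
        print(f"run {run_dir.name}: {run['status']} with config {cfg}")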
135  _sources/neutral_network_e5d851342f3b663b0ccbb114284aae02.py  Normal file
@@ -0,0 +1,135 @@
+from ast import arg
+import numpy as np
+import pandas as pd
+import torch
+import argparse
+from torch import nn
+from torch.utils.data import DataLoader, Dataset
+from sacred import Experiment
+from sacred.observers import FileStorageObserver, MongoObserver
+
+default_batch_size = 64
+default_epochs = 4
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+
+class AtpDataset(Dataset):
+    def __init__(self, file_name):
+        df = pd.read_csv(file_name, usecols=["AvgL", "AvgW"])
+        df = df.dropna()
+
+        # Loser avg and Winner avg
+        x = df.iloc[:, 1].values
+        y = df.iloc[:, 0].values
+
+        self.x_train = torch.from_numpy(x)
+        self.y_train = torch.from_numpy(y)
+        self.x_train.type(torch.LongTensor)
+
+    def __len__(self):
+        return len(self.y_train)
+
+    def __getitem__(self, idx):
+        return self.x_train[idx].float(), self.y_train[idx].float()
+
+
+class MLP(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.layers = nn.Sequential(
+            nn.Linear(1, 64),
+            nn.ReLU(),
+            nn.Linear(64, 32),
+            nn.ReLU(),
+            nn.Linear(32, 1),
+        )
+
+    def forward(self, x):
+        x = x.view(x.size(0), -1)
+        return self.layers(x)
+
+
+def train(dataloader, model, loss_fn, optimizer):
+    size = len(dataloader.dataset)
+    model.train()
+    for batch, (X, y) in enumerate(dataloader):
+        X, y = X.to(device), y.to(device)
+        pred = model(X)
+        loss = loss_fn(pred, y)
+        optimizer.zero_grad()
+        loss.backward()
+        optimizer.step()
+        if batch % 100 == 0:
+            loss, current = loss.item(), batch * len(X)
+            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
+
+
+def test(dataloader, model, loss_fn):
+    num_batches = len(dataloader)
+    model.eval()
+    test_loss, correct = 0, 0
+    with torch.no_grad():
+        for X, y in dataloader:
+            X, y = X.to(device), y.to(device)
+            pred = model(X)
+            test_loss += loss_fn(pred, y).item()
+    test_loss /= num_batches
+    print(f"Avg loss (using {loss_fn}): {test_loss:>8f} \n")
+    return test_loss
+
+
+def setup_args():
+    args_parser = argparse.ArgumentParser(prefix_chars="-")
+    args_parser.add_argument("-b", "--batchSize", type=int, default=default_batch_size)
+    args_parser.add_argument("-e", "--epochs", type=int, default=default_epochs)
+    return args_parser.parse_args()
+
+
+def main(batch_size, epochs):
+    print(f"Using {device} device")
+
+    plant_test = AtpDataset("atp_test.csv")
+    plant_train = AtpDataset("atp_train.csv")
+
+    train_dataloader = DataLoader(plant_train, batch_size=batch_size)
+    test_dataloader = DataLoader(plant_test, batch_size=batch_size)
+
+    for i, (data, labels) in enumerate(train_dataloader):
+        print(data.shape, labels.shape)
+        print(data, labels)
+        break
+
+    model = MLP()
+    print(model)
+
+    loss_fn = nn.MSELoss()
+    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+    for t in range(epochs):
+        print(f"Epoch {t + 1}\n-------------------------------")
+        train(train_dataloader, model, loss_fn, optimizer)
+        test(test_dataloader, model, loss_fn)
+    print("Finish!")
+
+    torch.save(model.state_dict(), "./model.zip")
+    print("Model saved in ./model.zip file.")
+
+
+def setup_experiment():
+    ex = Experiment('Simple Experiment')
+    ex.observers.append(FileStorageObserver('sacred_runs'))
+    # ex.observers.append(MongoObserver(url='mongodb://mongo_user:mongo_password_IUM_2022@localhost:27017',
+    #                                   db_name='sacred'))
+    return ex
+
+
+ex = setup_experiment()
+
+
+@ex.config
+def experiment_config():
+    batch_size = 64
+    epochs = 5
+
+
+@ex.automain
+def run(batch_size, epochs):
+    main(batch_size, epochs)
+    ex.add_artifact('model.zip')
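Since Sacred also accepts config files on the command line, a recorded run can in principle be replayed with something like `python3 neutral_network.py with sacred_runs/1/config.json` (assuming the atp_train.csv and atp_test.csv data files are present).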