add pytorch exercise
parent 332e14bfdb
commit 379b3622c5
Dockerfile
@@ -6,11 +6,13 @@ RUN apt update && apt install python3-pip -y
 RUN pip3 install kaggle && pip3 install pandas && pip3 install scikit-learn && pip3 install matplotlib
 RUN apt install -y curl
 RUN pip3 install --user wget
+RUN pip3 install torch torchvision torchaudio

 WORKDIR /app

 COPY ./init.py ./
 COPY ./stats.py ./
+COPY ./pytorch-example.py ./

 RUN mkdir /.kaggle
 RUN chmod -R 777 /.kaggle
Jenkinsfile
@@ -22,13 +22,20 @@ node {
   "KAGGLE_KEY=${params.KAGGLE_KEY}", "CUTOFF=${params.CUTOFF}" ]) {
     checkout([$class: 'GitSCM', branches: [[name: '*/master']], doGenerateSubmoduleConfigurations: false, extensions: [], submoduleCfg: [], userRemoteConfigs: [[url: 'https://git.wmi.amu.edu.pl/s440058/ium_440058']]])

+    checkout scm

+    def image = docker.build("s440058/ium")
+    image.inside {
+      sh 'python3 ./pytorch-example.py > model.txt'
+      sh 'python3 ./init.py > model.txt'
      sh "chmod 777 ./bash.sh"
      sh "./bash.sh"

      archiveArtifacts "courses.data.dev"
      archiveArtifacts "courses.data.test"
      archiveArtifacts "courses.data.train"
+      archiveArtifacts 'model.txt'
+    }
   }
  }
 }
pytorch-example.py (new file, 66 lines)
@@ -0,0 +1,66 @@
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset, random_split
from sklearn import preprocessing


# Single-layer logistic regression: a linear map followed by a sigmoid.
class LogisticRegressionModel(torch.nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LogisticRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.linear(x)
        return self.sigmoid(out)


results = pd.read_csv('diabetes2.csv')

# dropna() returns a new frame, so keep the result.
results = results.dropna()

# Shuffle, then split into 60% train / 20% validation / 20% test.
data_train, data_valid, data_test = np.split(results.sample(frac=1), [int(.6*len(results)), int(.8*len(results))])
columns_to_train = ['Glucose', 'BloodPressure', 'Insulin', 'Age']

x_train = data_train[columns_to_train].astype(np.float32)
y_train = data_train['Outcome'].astype(np.float32)

x_test = data_test[columns_to_train].astype(np.float32)
y_test = data_test['Outcome'].astype(np.float32)

fTrain = torch.from_numpy(x_train.values)
# Reshape targets to a column vector to match the model output shape.
tTrain = torch.from_numpy(y_train.values.reshape(-1, 1))

fTest = torch.from_numpy(x_test.values)
tTest = torch.from_numpy(y_test.values)

batch_size = 95
n_iters = 900
num_epochs = int(n_iters / (len(x_train) / batch_size))
learning_rate = 0.005
input_dim = 4
output_dim = 1

model = LogisticRegressionModel(input_dim, output_dim)

criterion = torch.nn.BCELoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Full-batch gradient descent over the training tensors.
for epoch in range(num_epochs):
    print("Epoch #", epoch)
    model.train()
    optimizer.zero_grad()
    # Forward pass
    y_pred = model(fTrain)
    # Compute Loss
    loss = criterion(y_pred, tTrain)
    print(loss.item())
    # Backward pass
    loss.backward()
    optimizer.step()


y_pred = model(fTest)

# Pickle the whole trained model (architecture + weights).
torch.save(model, 'diabetes.pkl')
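The script only prints the training loss and saves the full model with torch.save, so the following is a small follow-up sketch (not part of this commit) of how diabetes.pkl could be reloaded and scored. It assumes the same diabetes2.csv columns used above; because the whole module is pickled, the LogisticRegressionModel class must be available at load time.

# Hypothetical evaluation sketch (not part of the commit): reload diabetes.pkl
# and compute accuracy on the same feature columns used for training.
import numpy as np
import pandas as pd
import torch
import torch.nn as nn


# The class must be defined (or importable) here, because torch.save(model, ...)
# pickles a reference to it rather than the weights alone.
class LogisticRegressionModel(torch.nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LogisticRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        return self.sigmoid(self.linear(x))


# On newer PyTorch releases this may need torch.load('diabetes.pkl', weights_only=False).
model = torch.load('diabetes.pkl')
model.eval()

data = pd.read_csv('diabetes2.csv').dropna()
columns_to_train = ['Glucose', 'BloodPressure', 'Insulin', 'Age']

features = torch.from_numpy(data[columns_to_train].astype(np.float32).values)
labels = torch.from_numpy(data['Outcome'].astype(np.float32).values.reshape(-1, 1))

with torch.no_grad():
    probs = model(features)          # sigmoid outputs in [0, 1]
    preds = (probs > 0.5).float()    # threshold at 0.5 to get class labels
    accuracy = (preds == labels).float().mean().item()

print('accuracy:', round(accuracy, 3))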