add Jenkinsfile_train
Some checks failed: s444018-training/pipeline/head (there was a failure building this commit)
This commit is contained in:
parent ed0399be0c
commit a804509a00
.gitignore (vendored, 2 changed lines)
@@ -1,2 +1,2 @@
.idea
*.csv
Jenkinsfile_train (new file, 38 lines)
@@ -0,0 +1,38 @@
pipeline {
    agent {
        dockerfile {
            additionalBuildArgs "--build-arg KAGGLE_USERNAME=${params.KAGGLE_USERNAME} --build-arg KAGGLE_KEY=${params.KAGGLE_KEY} --build-arg CUTOFF=${params.CUTOFF} -t docker_image"
        }
    }
    parameters {
        string(
            defaultValue: '1000',
            description: 'Number of epochs',
            name: 'EPOCHS',
            trim: false
        )
    }
    stages {
        stage('Script') {
            steps {
                copyArtifacts filter: '*', projectName: 's444018-create-dataset'
                sh 'python3 ./biblioteka_DL/dllib.py $EPOCHS'
                archiveArtifacts artifacts: 'model.pkl', followSymlinks: false
                build job: 's444018-evaluation/master/'
            }
        }
    }
    post {
        success {
            emailext body: 'SUCCESS', subject: 's444018-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        failure {
            emailext body: 'FAILURE', subject: 's444018-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        unstable {
            emailext body: 'UNSTABLE', subject: 's444018-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        changed {
            emailext body: 'CHANGED', subject: 's444018-training', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
    }
}
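The sh step above hands the EPOCHS build parameter to the training script as a positional argument. A minimal sketch of the receiving side, matching the sys.argv change to biblioteka_DL/dllib.py in the next hunk (the len() guard is an editorial addition so the script also runs without Jenkins):

import sys

# EPOCHS arrives as a string such as '1000' (the parameter default);
# the fallback is an assumption, not part of the committed code.
num_epochs = int(sys.argv[1]) if len(sys.argv) > 1 else 1000
print(f"training for {num_epochs} epochs")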
biblioteka_DL/dllib.py (4 hunks)
@@ -1,3 +1,5 @@
+import sys
+
 import torch
 import torch.nn as nn
 import pandas as pd
@@ -54,7 +56,7 @@ def normalize_gross(imbd_data):
 
 
 def prepare_dataset():
-    df = pd.read_csv('../imdb_top_1000.csv')
+    df = pd.read_csv('imdb_top_1000.csv')
     df = drop_relevant_columns(df)
     df_lowercase = lowercase_columns_names(df)
     df = data_to_numeric(df_lowercase)
@@ -98,7 +100,9 @@ l = nn.MSELoss()
 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 
 
-num_epochs = 1000
+# num_epochs = 1000
+num_epochs = int(sys.argv[1])
+
 for epoch in range(num_epochs):
     # forward feed
     y_pred = model(X_train.requires_grad_())
@@ -123,6 +127,9 @@ predicted = model(X_train).detach().numpy()
 pred = pd.DataFrame(predicted)
 pred.to_csv('result.csv')
 
+# save model
+torch.save(model, "model.pkl")
+
 # plt.scatter(X_train.detach().numpy() , y_train.detach().numpy())
 # plt.plot(X_train.detach().numpy() , predicted , "red")
 # plt.xlabel("Meta_score")
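torch.save(model, "model.pkl") pickles the whole nn.Module, so whatever loads the artifact needs a compatible torch install (and, for custom classes, the class definition) on its import path. A minimal sketch of how the downstream s444018-evaluation job could consume the archived file; the evaluation code itself is not part of this diff:

import torch

# Load the pickled model archived by the training pipeline and switch to
# inference mode; the model here is a bare nn.Linear, so no custom class
# is needed at load time.
model = torch.load("model.pkl")
model.eval()
prediction = model(torch.tensor([[76.0]]))  # e.g. a Meta_score of 76
print(prediction.item())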
biblioteka_DL/imdb_top_1000.csv (new file, 1001 lines)
File diff suppressed because it is too large.
lab5/Dockerfile (new file, 29 lines)
@@ -0,0 +1,29 @@
FROM ubuntu:latest

RUN apt-get update
RUN apt-get install -y python3-pip
RUN apt-get install -y unzip

RUN pip3 install kaggle
RUN pip3 install pandas
RUN pip3 install sklearn
RUN pip3 install numpy

RUN pip3 install matplotlib
RUN pip3 install torch

ARG CUTOFF
ARG KAGGLE_USERNAME
ARG KAGGLE_KEY
ENV CUTOFF=${CUTOFF}
ENV KAGGLE_USERNAME=${KAGGLE_USERNAME}
ENV KAGGLE_KEY=${KAGGLE_KEY}

WORKDIR /app

COPY lab2/download.sh .
COPY biblioteka_DL/dllib.py .

RUN chmod +x ./download.sh
RUN ./download.sh
#CMD python3 ./dllib.py
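The ARG/ENV pairs make the Kaggle credentials and the CUTOFF value available to download.sh at image build time; the kaggle CLI reads KAGGLE_USERNAME and KAGGLE_KEY from the environment when no ~/.kaggle/kaggle.json is present. A small sanity-check sketch (not part of the image) for verifying the build args actually reached the environment:

import os

# Report which of the expected variables made it into the container;
# never echo the secret value itself.
for var in ("KAGGLE_USERNAME", "KAGGLE_KEY", "CUTOFF"):
    print(f"{var}: {'set' if os.environ.get(var) else 'MISSING'}")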
lab5/Jenkinsfile (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
pipeline {
    agent {
        dockerfile {
            additionalBuildArgs "--build-arg KAGGLE_USERNAME=${params.KAGGLE_USERNAME} --build-arg KAGGLE_KEY=${params.KAGGLE_KEY} --build-arg CUTOFF=${params.CUTOFF} -t docker_image"
        }
    }
    parameters {
        string(
            defaultValue: 'szymonparafinski',
            description: 'Kaggle username',
            name: 'KAGGLE_USERNAME',
            trim: false
        )
        password(
            defaultValue: '',
            description: 'Kaggle token taken from kaggle.json file, as described in https://github.com/Kaggle/kaggle-api#api-credentials',
            name: 'KAGGLE_KEY'
        )
        string(
            defaultValue: '100',
            description: 'Cutoff lines',
            name: 'CUTOFF'
        )
    }
    stages {
        stage('Script') {
            steps {
                archiveArtifacts artifacts: 'data_test.csv, data_train.csv, data_dev.csv', followSymlinks: false
            }
        }
    }
}
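The three CSVs archived here match the split written by create_train_dev_test in lab5/biblioteka_DL/dllib.py (listed further down): a fixed 230-row holdout is split off first and then halved into test and dev. With the full 1000-row IMDB file that works out to 770/115/115; any rows removed by dropna() beforehand shrink only the train share, since both holdout sizes are absolute. A self-contained check of the arithmetic:

import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.DataFrame({"x": range(1000)})  # stand-in for the IMDB frame
train, rest = train_test_split(df, test_size=230, random_state=1, shuffle=True)
test, dev = train_test_split(rest, test_size=115, random_state=1, shuffle=True)
print(len(train), len(test), len(dev))  # 770 115 115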
lab5/Jenkinsfile_stats (new file, 24 lines)
@@ -0,0 +1,24 @@
pipeline {
    agent {
        docker {
            image 'docker_image'
        }
    }
    parameters {
        buildSelector(
            defaultSelector: lastSuccessful(),
            name: 'BUILD_SELECTOR',
            description: 'Which build to use for copying artifacts'
        )
    }
    stages {
        stage("Script") {
            steps {
                copyArtifacts fingerprintArtifacts: true, projectName: 's444018-create-dataset', selector: buildParameter('BUILD_SELECTOR')
                sh 'chmod +x ./lab2/stats.sh'
                sh "./lab2/stats.sh"
                archiveArtifacts 'stats.txt'
            }
        }
    }
}
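lab2/stats.sh itself is not part of this diff, so its contents are an assumption; a typical stats step for this pipeline would count the records in the artifacts copied from s444018-create-dataset and write them to stats.txt, roughly equivalent to:

# Hypothetical Python equivalent of the unshown lab2/stats.sh.
with open("stats.txt", "w") as out:
    for name in ("data_train.csv", "data_dev.csv", "data_test.csv"):
        with open(name) as f:
            rows = sum(1 for _ in f) - 1  # subtract the CSV header row
        out.write(f"{name}: {rows} rows\n")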
lab5/biblioteka_DL/dllib.py (new file, 130 lines)
@@ -0,0 +1,130 @@
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score


def drop_relevant_columns(imbd_data):
    imbd_data.drop(columns=["Poster_Link"], inplace=True)
    imbd_data.drop(columns=["Overview"], inplace=True)
    imbd_data.drop(columns=["Certificate"], inplace=True)
    return imbd_data


def lowercase_columns_names(imbd_data):
    imbd_data["Series_Title"] = imbd_data["Series_Title"].str.lower()
    imbd_data["Genre"] = imbd_data["Genre"].str.lower()
    imbd_data["Director"] = imbd_data["Director"].str.lower()
    imbd_data["Star1"] = imbd_data["Star1"].str.lower()
    imbd_data["Star2"] = imbd_data["Star2"].str.lower()
    imbd_data["Star3"] = imbd_data["Star3"].str.lower()
    imbd_data["Star4"] = imbd_data["Star4"].str.lower()
    return imbd_data


def data_to_numeric(imbd_data):
    imbd_data = imbd_data.replace(np.nan, '', regex=True)
    imbd_data["Gross"] = imbd_data["Gross"].str.replace(',', '')
    imbd_data["Gross"] = pd.to_numeric(imbd_data["Gross"], errors='coerce')
    imbd_data["Runtime"] = imbd_data["Runtime"].str.replace(' min', '')
    imbd_data["Runtime"] = pd.to_numeric(imbd_data["Runtime"], errors='coerce')
    imbd_data["IMDB_Rating"] = pd.to_numeric(imbd_data["IMDB_Rating"], errors='coerce')
    imbd_data["Meta_score"] = pd.to_numeric(imbd_data["Meta_score"], errors='coerce')
    imbd_data["Released_Year"] = pd.to_numeric(imbd_data["Released_Year"], errors='coerce')
    imbd_data = imbd_data.dropna()
    imbd_data = imbd_data.reset_index()
    imbd_data.drop(columns=["index"], inplace=True)
    return imbd_data


def create_train_dev_test(imbd_data):
    data_train, data_test = train_test_split(imbd_data, test_size=230, random_state=1, shuffle=True)
    data_test, data_dev = train_test_split(data_test, test_size=115, random_state=1, shuffle=True)
    data_test.to_csv("data_test.csv", encoding="utf-8", index=False)
    data_dev.to_csv("data_dev.csv", encoding="utf-8", index=False)
    data_train.to_csv("data_train.csv", encoding="utf-8", index=False)


def normalize_gross(imbd_data):
    imbd_data[["Gross"]] = imbd_data[["Gross"]] / 10000000
    return imbd_data


def prepare_dataset():
    df = pd.read_csv('../imdb_top_1000.csv')
    df = drop_relevant_columns(df)
    df_lowercase = lowercase_columns_names(df)
    df = data_to_numeric(df_lowercase)
    df = normalize_gross(df)
    return df


class LinearRegressionModel(torch.nn.Module):

    def __init__(self):
        super(LinearRegressionModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # One in and one out

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred


df = prepare_dataset()
data_train, data_test = train_test_split(df, random_state=1, shuffle=True)

X_train = pd.DataFrame(data_train["Meta_score"], dtype=np.float64)
X_train = X_train.to_numpy()

y_train = pd.DataFrame(data_train["Gross"], dtype=np.float64)
y_train = y_train.to_numpy()

X_train = X_train.reshape(-1, 1)
y_train = y_train.reshape(-1, 1)

X_train = torch.from_numpy(X_train.astype(np.float32)).view(-1, 1)
y_train = torch.from_numpy(y_train.astype(np.float32)).view(-1, 1)

input_size = 1
output_size = 1

model = nn.Linear(input_size, output_size)

learning_rate = 0.0001
l = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)


num_epochs = 1000
for epoch in range(num_epochs):
    # forward feed
    y_pred = model(X_train.requires_grad_())

    # calculate the loss
    loss = l(y_pred, y_train)

    # backward propagation: calculate gradients
    loss.backward()

    # update the weights
    optimizer.step()

    # clear out the gradients from the last step loss.backward()
    optimizer.zero_grad()

    if epoch % 100 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))

predicted = model(X_train).detach().numpy()

pred = pd.DataFrame(predicted)
pred.to_csv('result.csv')

# plt.scatter(X_train.detach().numpy() , y_train.detach().numpy())
# plt.plot(X_train.detach().numpy() , predicted , "red")
# plt.xlabel("Meta_score")
# plt.ylabel("Gross")
# plt.show()
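Two details of this script are worth noting. First, LinearRegressionModel is defined but never instantiated; the training loop uses a bare nn.Linear(1, 1), which is the same model. Second, calling X_train.requires_grad_() in the loop is unnecessary for training, since only the model parameters need gradients. A self-contained sketch of the same SGD loop on synthetic data (names and constants here are illustrative, not from the repo):

import torch
import torch.nn as nn

torch.manual_seed(1)
X = torch.rand(100, 1)                          # stand-in for Meta_score
y = 3.0 * X + 0.5 + 0.01 * torch.randn(100, 1)  # stand-in for Gross

model = nn.Linear(1, 1)
loss_fn = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(1000):
    y_pred = model(X)            # forward pass
    loss = loss_fn(y_pred, y)    # mean squared error
    loss.backward()              # backpropagate gradients
    optimizer.step()             # SGD parameter update
    optimizer.zero_grad()        # clear gradients for the next step

print(model.weight.item(), model.bias.item())   # close to 3.0 and 0.5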