Add Jenkinsfile and Dockerfile
This commit is contained in:
parent
615d310500
commit
5cf865c15c
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@ -0,0 +1 @@
|
||||
.idea
|
32
Dockerfile
32
Dockerfile
@ -1,16 +1,26 @@
|
||||
# Build image for the IMDB top-1000 preprocessing pipeline.
FROM ubuntu:latest

# One apt layer: update + install together so the package index is never stale,
# and clean the lists to keep the layer small. The original ran apt-get update
# twice and installed pip twice (python3-pip and pip are the same tool here).
RUN apt-get update --fix-missing && \
    apt-get install -y python3-pip nano unzip && \
    rm -rf /var/lib/apt/lists/*

# Python dependencies in one layer. NOTE(review): the 'sklearn' PyPI name is a
# deprecated alias — install 'scikit-learn'. The original installed pandas and
# sklearn twice (once --user, once system-wide); once is enough.
RUN pip3 install pandas numpy scikit-learn matplotlib kaggle

# Kaggle credentials and the row cutoff are supplied at build time and exposed
# to the running container via ENV.
ARG CUTOFF
ARG KAGGLE_USERNAME
ARG KAGGLE_KEY
ENV CUTOFF=${CUTOFF} \
    KAGGLE_USERNAME=${KAGGLE_USERNAME} \
    KAGGLE_KEY=${KAGGLE_KEY}

RUN mkdir /dane

WORKDIR /app
COPY lab2/download.sh .
COPY lab2/main.py .
RUN ./download.sh

# Exactly ONE CMD: Docker only honors the last CMD in a Dockerfile, so the
# original's second CMD (echo 'Exiting docker') silently replaced the script
# run. NOTE(review): assumes main.py in /app is the intended entry point —
# confirm, since the old layout ran /dataset/script.py.
CMD ["python3", "./main.py"]
41
Jenkinsfile
vendored
41
Jenkinsfile
vendored
@ -1,12 +1,33 @@
|
||||
pipeline {
    // A declarative pipeline may declare exactly ONE top-level agent; the
    // original had both 'agent any' and 'agent { dockerfile ... }', which is
    // invalid. The dockerfile agent already builds the image, so the separate
    // 'docker build' stage (which could not run inside this agent anyway) is
    // dropped.
    agent {
        dockerfile {
            additionalBuildArgs "--build-arg KAGGLE_USERNAME=${params.KAGGLE_USERNAME} --build-arg KAGGLE_KEY=${params.KAGGLE_KEY} --build-arg CUTOFF=${params.CUTOFF} -t docker_image"
        }
    }
    parameters {
        string(
            defaultValue: 'szymonparafinski',
            description: 'Kaggle username',
            name: 'KAGGLE_USERNAME',
            trim: false
        )
        password(
            // SECURITY(review): a real-looking API key is committed here as a
            // default value. Rotate it on kaggle.com and inject it via Jenkins
            // credentials instead of source control.
            defaultValue: 'a95757bcf7f0def396b5294d971bf6dd',
            description: 'Kaggle token taken from kaggle.json file, as described in https://github.com/Kaggle/kaggle-api#api-credentials',
            name: 'KAGGLE_KEY'
        )
        string(
            defaultValue: '500',
            description: 'Cutoff lines',
            name: 'CUTOFF'
        )
    }
    stages {
        stage('Script') {
            steps {
                sh 'python3 ./lab2/main.py'
                // NOTE(review): main.py writes data_{train,dev,test}.csv, not
                // imdb_top_1000_*.csv — archive what the script actually
                // produces, or the artifacts will always be empty.
                archiveArtifacts artifacts: 'data_dev.csv, data_test.csv, data_train.csv', followSymlinks: false
            }
        }
    }
}
54
lab2/main.py
Normal file
54
lab2/main.py
Normal file
@ -0,0 +1,54 @@
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from sklearn.model_selection import train_test_split
|
||||
|
||||
|
||||
def drop_relevant_columns(df=None):
    """Drop columns useless for modelling (poster URL, free-text overview).

    NOTE(review): the name is misleading — it drops the IRRELEVANT columns.

    Args:
        df: optional DataFrame to operate on; defaults to the module-level
            ``imbd_data`` frame for backward compatibility with the
            original no-argument call.

    Returns:
        The mutated frame (dropped in place, matching original behaviour).
    """
    frame = imbd_data if df is None else df
    # One drop call instead of two separate ones — same resulting columns.
    frame.drop(columns=["Poster_Link", "Overview"], inplace=True)
    return frame
||||
|
||||
|
||||
def lowercase_columns_names(df=None):
    """Lower-case the values of every free-text column.

    Args:
        df: optional DataFrame to operate on; defaults to the module-level
            ``imbd_data`` frame for backward compatibility with the
            original no-argument call.

    Returns:
        The mutated frame.
    """
    frame = imbd_data if df is None else df
    # Loop replaces seven copy-pasted assignments from the original.
    for col in ("Series_Title", "Genre", "Director",
                "Star1", "Star2", "Star3", "Star4"):
        frame[col] = frame[col].str.lower()
    return frame
||||
|
||||
|
||||
def gross_to_numeric(df=None):
    """Convert the 'Gross' column from strings like ``'1,234,567'`` to numbers.

    Args:
        df: optional DataFrame to operate on. When omitted, operates on —
            and rebinds — the module-level ``imbd_data`` frame, preserving
            the original ``global`` behaviour.

    Returns:
        The converted frame (``replace`` returns a new object).
    """
    global imbd_data
    use_global = df is None
    frame = imbd_data if use_global else df
    # Blank out NaNs first so the .str accessor does not choke on missing values.
    frame = frame.replace(np.nan, '', regex=True)
    frame["Gross"] = frame["Gross"].str.replace(',', '')
    # The blanked-out values become NaN again via errors='coerce'.
    frame["Gross"] = pd.to_numeric(frame["Gross"], errors='coerce')
    if use_global:
        imbd_data = frame  # original rebinding of the module-level frame
    return frame
||||
|
||||
|
||||
def create_train_dev_test(df=None, test_size=230, dev_size=115):
    """Split the data into train/dev/test sets, write CSVs, print summaries.

    Two-stage split with a fixed seed for reproducibility: first carve off
    ``test_size`` rows, then split those into test and dev halves.

    Args:
        df: optional DataFrame; defaults to the module-level ``imbd_data``
            frame for backward compatibility with the no-argument call.
        test_size: rows carved off for test+dev combined (original constant 230).
        dev_size: rows of that carve-out that become the dev set (original 115).

    Returns:
        Tuple ``(data_train, data_dev, data_test)`` for convenience; the
        original returned ``None``, so this is backward compatible.
    """
    frame = imbd_data if df is None else df
    data_train, data_test = train_test_split(frame, test_size=test_size, random_state=1)
    data_test, data_dev = train_test_split(data_test, test_size=dev_size, random_state=1)
    print("Dataset successfully divided into test/dev/train sets\n")
    # NOTE(review): these filenames must match the Jenkins archiveArtifacts list.
    data_test.to_csv("data_test.csv", encoding="utf-8", index=False)
    data_dev.to_csv("data_dev.csv", encoding="utf-8", index=False)
    data_train.to_csv("data_train.csv", encoding="utf-8", index=False)

    print("Data train description: ")
    print(data_train.describe(include="all"))
    print("\nData test description: ")
    print(data_test.describe(include="all"))
    print("\nData dev description: ")
    print(data_dev.describe(include="all"))
    return data_train, data_dev, data_test
||||
|
||||
|
||||
def _main():
    """Load the raw IMDB CSV and run the full preprocessing pipeline."""
    global imbd_data
    # NOTE(review): path is relative to the working directory; the Dockerfile
    # copies main.py into /app — confirm the CSV really lives one level up.
    imbd_data = pd.read_csv('../imdb_top_1000.csv')
    drop_relevant_columns()
    lowercase_columns_names()
    # dropna() between the cleaning steps, exactly as the original flat script.
    imbd_data = imbd_data.dropna()
    gross_to_numeric()
    create_train_dev_test()


# Entry-point guard: Jenkins runs `python3 ./lab2/main.py`, which behaves
# exactly as before; importing the module no longer triggers file I/O.
if __name__ == "__main__":
    _main()
30
script.py
30
script.py
@ -1,30 +0,0 @@
|
||||
"""Preprocess the IMDB top-1000 dataset and split it into train/dev/test CSVs."""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split

imbd_data = pd.read_csv('imdb_top_1000.csv')

# Remove columns that carry no modelling signal.
imbd_data.drop(columns=["Poster_Link"], inplace=True)
imbd_data.drop(columns=["Overview"], inplace=True)

# Normalise every free-text column to lower case.
for _col in ["Series_Title", "Genre", "Director",
             "Star1", "Star2", "Star3", "Star4"]:
    imbd_data[_col] = imbd_data[_col].str.lower()

# Make 'Gross' numeric: blank out NaNs so .str works, strip thousand
# separators, then coerce (blanks turn back into NaN).
imbd_data = imbd_data.replace(np.nan, '', regex=True)
imbd_data["Gross"] = imbd_data["Gross"].str.replace(',', '')
imbd_data["Gross"] = pd.to_numeric(imbd_data["Gross"], errors='coerce')

imbd_data = imbd_data.dropna()

# Fixed-seed two-stage split: 230 rows carved off, then halved into test/dev.
data_train, data_test = train_test_split(imbd_data, test_size=230, random_state=1)
data_test, data_dev = train_test_split(data_test, test_size=115, random_state=1)

print("Dataset successfully divided into test/dev/train sets ")

for _name, _frame in (("test", data_test), ("dev", data_dev), ("train", data_train)):
    _frame.to_csv(f"data_{_name}.csv", encoding="utf-8", index=False)
Loading…
Reference in New Issue
Block a user