add IUM-05

parent c1207ceda5
commit 0ce98b3ed7
@@ -12,4 +12,12 @@ and prints a short summary of the dataset as well as its subsets.
 ### Task 2
 add Jenkinsfiles and mock data preprocessing
 
+### Task 5
+added the lab4 directory with a new Python script and an updated Dockerfile.
+The container downloads the dataset and installs the required software,
+then trains and evaluates a model on the dataset.
+Loss and accuracy are saved to the model_eval.txt file.
+
+
+
 ium01.ipynb is a notebook used to develop previously mentioned scripts.
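The container described above can also be built and run locally; a minimal sketch, assuming the illustrative tag ium-lab4 and placeholder Kaggle credentials:

    docker build -t ium-lab4 ./lab4
    docker run --name lab4-run -e KAGGLE_USERNAME=<user> -e KAGGLE_KEY=<key> ium-lab4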
ium01.ipynb (678 lines)
File diff suppressed because one or more lines are too long
@@ -14,7 +14,9 @@ pipeline {
         }
         stage('docker') {
             agent {
-                dockerfile true
+                docker {
+                    image 'kubakonieczny/ium:v1.0'
+                }
             }
             stages {
                 stage('script') {
@@ -1,11 +1,37 @@
-node {
-    docker.image('kubakonieczny/ium:v1.0').withRun("-t -e KAGGLE_USERNAME=kubakonieczny -e KAGGLE_KEY=${params.KAGGLE_KEY}") { c ->
-    docker.image('kubakonieczny/ium:v1.0').inside {
-        stage('Test') {
-            sh 'cat /etc/issue'
-            sh 'ls -lah'
-        }
-    }
-    }
-    sh 'docker logs ${c.id}'
-}
+pipeline {
+    agent none
+    stages {
+        stage('copy files') {
+            agent any
+            steps {
+                sh '''
+                cp ./lab3/script.sh .
+                cp ./lab3/python_script.py .
+                cp ./lab3/Dockerfile .
+                cp ./lab3/requirements.txt .
+                '''
+            }
+        }
+        stage('docker') {
+            agent {
+                docker {
+                    image 'kubakonieczny/ium:v1.0'
+                }
+            }
+            stages {
+                stage('script') {
+                    steps {
+                        sh '''
+                        chmod +x script.sh
+                        ./script.sh > stats.txt'''
+                    }
+                }
+                stage('archive artifact') {
+                    steps {
+                        archiveArtifacts 'stats.txt'
+                    }
+                }
+            }
+        }
+    }
+}
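The docker agent above assumes the kubakonieczny/ium:v1.0 image can be pulled; Jenkins fetches it on demand, but it can also be prefetched manually:

    docker pull kubakonieczny/ium:v1.0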
lab4/Dockerfile (new file, 19 lines)
@@ -0,0 +1,19 @@
+FROM ubuntu:latest
+
+RUN apt update >>/dev/null
+RUN apt install -y apt-utils >>/dev/null
+RUN apt install -y python3.8 >>/dev/null
+RUN apt install -y python3-pip >>/dev/null
+RUN apt install -y unzip >>/dev/null
+
+WORKDIR /app
+
+COPY ./test_eval.py ./
+COPY ./script.sh ./
+RUN chmod +x script.sh
+
+COPY ./requirements.txt ./
+
+RUN pip3 install -r requirements.txt >>/dev/null
+
+CMD ./script.sh
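Note that >>/dev/null only hides stdout; apt errors still reach the build log on stderr. A quieter shell-level equivalent of the install lines (a sketch, not part of the commit) would be:

    apt-get update -qq
    apt-get install -y -qq apt-utils python3.8 python3-pip unzip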
lab4/requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
+kaggle
+numpy~=1.19.2
+pandas
+sklearn
+tensorflow
lab4/script.sh (new file, 6 lines)
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+kaggle datasets download -d 'pcbreviglieri/smart-grid-stability'
+unzip smart-grid-stability.zip >>/dev/null 2>&1
+
+python3 test_eval.py
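script.sh assumes the kaggle CLI can authenticate. Besides ~/.kaggle/kaggle.json, the CLI reads the KAGGLE_USERNAME and KAGGLE_KEY environment variables, which is how the earlier pipeline supplied them; a sketch with placeholder values:

    export KAGGLE_USERNAME=<user>
    export KAGGLE_KEY=<key>
    ./script.sh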
lab4/test_eval.py (new file, 61 lines)
@@ -0,0 +1,61 @@
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+
+from sklearn import preprocessing
+from sklearn.model_selection import train_test_split
+from tensorflow.keras import layers
+
+
+def onezero(label):
+    return 0 if label == 'unstable' else 1
+
+
+df = pd.read_csv('smart_grid_stability_augmented.csv')
+
+scaler = preprocessing.StandardScaler().fit(df.iloc[:, 0:-1])
+df_norm_array = scaler.transform(df.iloc[:, 0:-1])
+df_norm = pd.DataFrame(data=df_norm_array,
+                       columns=df.columns[:-1])
+df_norm['stabf'] = df['stabf']
+
+df_norm_data = df_norm.copy()
+df_norm_data = df_norm_data.drop('stab', axis=1)
+df_norm_labels = df_norm_data.pop('stabf')
+
+X_train, X_testAndValid, Y_train, Y_testAndValid = train_test_split(
+    df_norm_data,
+    df_norm_labels,
+    test_size=0.2,
+    random_state=42)
+
+X_test, X_valid, Y_test, Y_valid = train_test_split(
+    X_testAndValid,
+    Y_testAndValid,
+    test_size=0.5,
+    random_state=42)
+
+model = tf.keras.Sequential([
+    layers.Input(shape=(12,)),
+    layers.Dense(32),
+    layers.Dense(16),
+    layers.Dense(2, activation='softmax')
+])
+
+model.compile(
+    loss=tf.losses.BinaryCrossentropy(),
+    optimizer=tf.optimizers.Adam(),
+    metrics=[tf.keras.metrics.BinaryAccuracy()])
+
+Y_train_one_zero = [onezero(x) for x in Y_train]
+Y_train_onehot = np.eye(2)[Y_train_one_zero]
+
+Y_test_one_zero = [onezero(x) for x in Y_test]
+Y_test_onehot = np.eye(2)[Y_test_one_zero]
+
+history = model.fit(tf.convert_to_tensor(X_train, np.float32), Y_train_onehot, epochs=5)
+
+results = model.evaluate(X_test, Y_test_onehot, batch_size=64)
+f = open('model_eval.txt', 'w')
+f.write('test loss: ' + str(results[0]) + '\n' + 'test acc: ' + str(results[1]))
+f.close()
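test_eval.py writes model_eval.txt into the container's working directory (/app), so after a run it can be copied out of the stopped container; a sketch reusing the illustrative container name from above:

    docker cp lab4-run:/app/model_eval.txt .
    cat model_eval.txt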