Compare commits

...

52 Commits

Author SHA1 Message Date
e64175dbb2 Update 'Dockerfile' 2022-05-15 18:11:39 +02:00
95d1232a8f Update 'sacred_training.py' 2022-05-09 17:52:53 +02:00
c0b179f47e Update 'sacred_training.py' 2022-05-09 17:49:21 +02:00
abbca96dbe Update 'sacred_training.py' 2022-05-08 12:23:41 +02:00
82d9e078dc Update 'sacred_training.py' 2022-05-08 12:21:36 +02:00
d88133827f Update 'sacred_training.py' 2022-05-08 12:19:20 +02:00
5314f1039f Update 'sacred_training.py' 2022-05-08 12:18:54 +02:00
8aedc5a1e1 Update 'sacred_training.py' 2022-05-08 12:14:57 +02:00
fc3caf4d57 Update 'Dockerfile' 2022-05-08 12:12:28 +02:00
c893a1f348 Update 'sacred_training.py' 2022-05-08 12:11:07 +02:00
f57843e875 Update 'Jenkinsfile_train' 2022-05-08 12:05:03 +02:00
e7efd18cec Update 'Jenkinsfile_train' 2022-05-08 12:01:13 +02:00
e56e70ebf7 Update 'Jenkinsfile_train' 2022-05-08 12:00:15 +02:00
e3fd58cf37 Update 'sacred_training.py' 2022-05-08 11:57:40 +02:00
59790b4bf1 Update 'sacred_training.py' 2022-05-08 11:56:12 +02:00
fc0267cad2 Update 'Jenkinsfile_train' 2022-05-08 11:53:41 +02:00
bfc5bdffc2 Update 'Dockerfile' 2022-05-08 11:51:58 +02:00
55be77b806 Update 'Jenkinsfile_train' 2022-05-08 11:51:03 +02:00
a26ffe67fe sacred 2022-05-08 11:50:40 +02:00
11bdcb2a23 Update 'Jenkinsfile_evaluation' 2022-05-01 20:07:18 +02:00
a3c5996c9f Update 'evaluation.py' 2022-05-01 20:06:52 +02:00
9bdf7be638 Update 'Jenkinsfile_train' 2022-05-01 19:57:17 +02:00
50f3849829 Update 'Dockerfile' 2022-05-01 19:56:43 +02:00
szymonj98 f416349645 plot 2022-05-01 19:50:46 +02:00
d7af94d7f9 Update 'Jenkinsfile_evaluation' 2022-04-28 20:26:02 +02:00
f90df94db5 Update 'Jenkinsfile_evaluation' 2022-04-28 20:21:35 +02:00
659f72e27d Update 'evaluation.py' 2022-04-28 20:19:31 +02:00
cc8e0d3630 Update 'Jenkinsfile_evaluation' 2022-04-28 19:55:54 +02:00
97dc0891c0 Update 'Jenkinsfile_evaluation' 2022-04-28 19:42:22 +02:00
6e150f2009 Update 'Jenkinsfile_train' 2022-04-28 19:35:43 +02:00
7395076989 Update 'Jenkinsfile_train' 2022-04-28 19:33:58 +02:00
szymonj98 76a6537844 iteration parameter 2022-04-27 21:25:40 +02:00
szymonj98 dcb52af614 evaluation test 2022-04-27 21:17:43 +02:00
szymonj98 f59e2e540b evaluation test 2022-04-27 21:12:46 +02:00
szymonj98 9d70bec54e evaluation test 2022-04-27 21:08:39 +02:00
szymonj98 a2e4417a02 evaluation test 2022-04-27 21:06:57 +02:00
szymonj98 78da89f86f evaluation test 2022-04-27 21:06:37 +02:00
szymonj98 b693a63331 evaluation jenkinsfile 2022-04-27 20:37:23 +02:00
szymonj98 1ea4cf0f27 evaluation jenkinsfile 2022-04-27 20:30:04 +02:00
szymonj98 a5e5ba743d evaluation jenkinsfile 2022-04-27 20:28:53 +02:00
szymonj98 b00a5c3f37 evaluation jenkinsfile 2022-04-27 20:25:37 +02:00
szymonj98 9dbac84880 evaluation jenkinsfile 2022-04-27 20:24:05 +02:00
szymonj98 edb6b8b3b2 evaluation jenkinsfile 2022-04-27 20:18:00 +02:00
szymonj98 80f0fbf88a archive model 2022-04-27 20:06:38 +02:00
szymonj98 d07c6fd4a3 archive model 2022-04-27 20:03:46 +02:00
szymonj98 fdd9858321 archive model 2022-04-27 20:03:25 +02:00
szymonj98 93d69f32f8 test 2022-04-27 19:54:10 +02:00
szymonj98 f6f6017d98 test 2022-04-27 19:53:01 +02:00
szymonj98 129c498b2d test 2022-04-27 19:51:21 +02:00
szymonj98 88514fa942 train script 2022-04-27 19:50:21 +02:00
szymonj98 690e59ac0c jenkins train 2022-04-27 19:49:15 +02:00
szymonj98 27c2cb7956 prepare 2022-04-27 19:42:20 +02:00
13 changed files with 278 additions and 200178 deletions

View File

@@ -8,5 +8,7 @@ RUN pip3 install pandas
 RUN pip3 install numpy
 RUN pip3 install sklearn
 RUN pip3 install tensorflow
-COPY ./steam-200k.csv ./
-COPY ./biblioteki_dl.py ./
+RUN pip3 install matplotlib
+RUN pip3 install sacred
+RUN pip3 install pymongo
+RUN pip3 install mlflow
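
As a quick sanity check of the rebuilt image, a minimal Python sketch can verify that every library this Dockerfile installs actually imports. The script itself is a hypothetical helper, not part of this repository; the module list simply mirrors the pip3 installs above.

# smoke_test.py -- hypothetical helper, not part of this repository.
# Verifies that every library installed by the Dockerfile above imports cleanly.
import importlib

modules = ["pandas", "numpy", "sklearn", "tensorflow",
           "matplotlib", "sacred", "pymongo", "mlflow"]

for name in modules:
    try:
        importlib.import_module(name)
        print(f"OK   {name}")
    except ImportError as e:
        print(f"FAIL {name}: {e}")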

41
Jenkinsfile vendored
View File

@@ -1,41 +0,0 @@
pipeline {
    parameters {
        string(
            defaultValue: 'szymonjadczak',
            description: 'Kaggle username',
            name: 'KAGGLE_USERNAME',
            trim: false
        )
        password(
            defaultValue: '',
            description: 'Kaggle token taken from kaggle.json file, as described in https://github.com/Kaggle/kaggle-api#api-credentials',
            name: 'KAGGLE_KEY'
        )
        string(
            defaultValue: '',
            description: 'Value for head command',
            name: 'CUTOFF'
        )
    }
    environment {
        KAGGLE_USERNAME="$params.KAGGLE_USERNAME"
        KAGGLE_KEY="$params.KAGGLE_KEY"
        CUTOFF="$params.CUTOFF"
    }
    agent {
        dockerfile {
            additionalBuildArgs "-t ium"
        }
    }
    stages {
        stage('Stage 1') {
            steps {
                echo 'Hello world!!!'
                checkout([$class: 'GitSCM', branches: [[name: '*/master']], extensions: [], userRemoteConfigs: [[url: 'https://git.wmi.amu.edu.pl/s444386/ium_444386']]])
                sh "chmod u+x ./dataset_download.sh"
                sh "KAGGLE_USERNAME=${KAGGLE_USERNAME} KAGGLE_KEY=${KAGGLE_KEY} CUTOFF=${CUTOFF} ./dataset_download.sh"
                archiveArtifacts 'data.csv'
            }
        }
    }
}

View File

@@ -1,21 +0,0 @@
pipeline{
    agent {
        docker { image 'ium' }
    }
    parameters {
        buildSelector(
            defaultSelector: lastSuccessful(),
            description: 'Which build to use for copying artifacts',
            name: 'BUILD_SELECTOR')
    }
    stages{
        stage('copy artefacts') {
            steps {
                copyArtifacts filter: 'data.csv', fingerprintArtifacts: true, projectName: 's444386-create-dataset', selector: lastSuccessful()
                sh 'chmod u+x ./kagle.py'
                sh 'python3 kagle.py'
            }
        }
    }
}

42
Jenkinsfile_evaluation Normal file
View File

@@ -0,0 +1,42 @@
pipeline {
    agent {
        dockerfile true
    }
    parameters {
        gitParameter branchFilter: 'origin/(.*)', defaultValue: 'training-evaluation', name: 'BRANCH', type: 'PT_BRANCH'
        buildSelector(
            defaultSelector: upstream(),
            description: 'Which build to use for copying artifacts',
            name: 'BUILD_SELECTOR'
        )
    }
    stages {
        stage('Stage 1') {
            steps {
                git branch: "${params.BRANCH}", url: 'https://git.wmi.amu.edu.pl/s444386/ium_444386.git'
                copyArtifacts filter: 'model.tar.gz', projectName: "s444386-training/${BRANCH}/", selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts filter: 'xtest.csv', projectName: "s444386-training/${BRANCH}/", selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts filter: 'ytest.csv', projectName: "s444386-training/${BRANCH}/", selector: buildParameter('BUILD_SELECTOR')
                copyArtifacts filter: 'eval_results.txt', projectName: 's444386-evaluation/training-evaluation/', optional: true
                sh 'tar xvzf model.tar.gz'
                sh 'python3 evaluation.py'
                archiveArtifacts 'evaluation_acuraccy.txt'
                archiveArtifacts 'accuraccy.png'
            }
        }
    }
    post {
        success {
            emailext body: 'SUCCESS', subject: 's444386-eval-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        failure {
            emailext body: 'FAILURE', subject: 's444386-eval-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        unstable {
            emailext body: 'UNSTABLE', subject: 's444386-eval-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        changed {
            emailext body: 'CHANGED', subject: 's444386-eval-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
    }
}

43
Jenkinsfile_train Normal file
View File

@@ -0,0 +1,43 @@
pipeline {
    agent {
        dockerfile true
    }
    parameters {
        string(
            defaultValue: '5',
            description: 'learning iterations',
            name: 'epoch'
        )
    }
    stages {
        stage('Stage 1') {
            steps {
                copyArtifacts filter: 'data.csv', fingerprintArtifacts: true, projectName: 's444386-create-dataset', selector: lastSuccessful()
                sh 'chmod u+x ./sacred_training.py'
                sh 'python3 sacred_training.py $epoch'
                sh 'tar -czf model.tar.gz model/'
                archiveArtifacts 'model.tar.gz'
                archiveArtifacts 'xtest.csv'
                archiveArtifacts 'ytest.csv'
                dir('training') {
                    archiveArtifacts artifacts: '**/**'
                }
            }
        }
    }
    post {
        success {
            emailext body: 'SUCCESS', subject: 's444386-train-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
            build job: 's444386-evaluation/training-evaluation/'
        }
        failure {
            emailext body: 'FAILURE', subject: 's444386-train-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        unstable {
            emailext body: 'UNSTABLE', subject: 's444386-train-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
        changed {
            emailext body: 'CHANGED', subject: 's444386-train-status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
        }
    }
}

View File

@@ -4,11 +4,14 @@ import pandas as pd
 import numpy as np
 import csv
 from sklearn.model_selection import train_test_split
+import sys

-os.system("kaggle datasets download -d tamber/steam-video-games")
-os.system("unzip -o steam-video-games.zip")
+# os.system("kaggle datasets download -d tamber/steam-video-games")
+# os.system("unzip -o steam-video-games.zip")

-steam=pd.read_csv('steam-200k.csv',usecols=[0,1,2,3],names=['userId','game','behavior','hoursPlayed'])
+epoch = int(sys.argv[1])
+
+steam=pd.read_csv('data.csv',usecols=[0,1,2,3],names=['userId','game','behavior','hoursPlayed'])
 steam.isnull().values.any()
 steam['userId'] = steam.userId.astype(str)
 purchaseCount = steam[steam["behavior"] != "play"]["game"].value_counts()

@@ -71,17 +74,24 @@ y_train = steam_train['game']
 x_test = steam_test[['hoursPlayed','purchaseCount','playCount','playerPlayCount','playerPurchaseCount']]
 y_test = steam_test['game']

 x_train = np.array(x_train)
 y_train = np.array(y_train)
 x_test = np.array(x_test)
 y_test = np.array(y_test)

+with open('xtest.csv','w',encoding='UTF-8',newline='') as xtest:
+    writer = csv.writer(xtest)
+    for i in x_test:
+        writer.writerow(i)
+
 for i,j in enumerate(y_train):
     y_train[i] = games[j]

 for i,j in enumerate(y_test):
     y_test[i] = games[j]

+np.savetxt("ytest.csv",y_test,delimiter=",",fmt='%d')

@@ -102,7 +112,7 @@ y_test = np.array(y_test).astype(np.float32)
-model.fit(x_train, y_train, epochs=100)
+model.fit(x_train, y_train, epochs=epoch)
 model.evaluate(x_test, y_test)

 prediction = model.predict(x_test)
 classes_x=np.argmax(prediction,axis=1)

@@ -118,4 +128,4 @@ with open('results.csv','w',encoding='UTF-8',newline='') as f:
     for row in rows:
         writer.writerow(row)
-
+model.save('./model')
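
Since xtest.csv and ytest.csv are written here without header rows (plain csv.writer rows and np.savetxt output), the evaluation side has to read them back accordingly. A minimal sketch of the read-back, assuming only the file names from the diff above; the helper itself is hypothetical and not part of this repository:

# read_test_split.py -- hypothetical helper, not part of this repository.
# xtest.csv / ytest.csv are produced above without header rows, so
# header=None is required on the way back in; otherwise pandas silently
# consumes the first sample as column names.
import pandas as pd

x_test = pd.read_csv('xtest.csv', header=None)
y_test = pd.read_csv('ytest.csv', header=None)

assert len(x_test) == len(y_test), "feature/label row counts must match"
print(x_test.shape, y_test.shape)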

View File

@@ -1,23 +0,0 @@
151603712,"The Elder Scrolls V Skyrim",purchase,1.0,0
151603712,"The Elder Scrolls V Skyrim",play,273.0,0
151603712,"Fallout 4",purchase,1.0,0
151603712,"Fallout 4",play,87.0,0
151603712,"Spore",purchase,1.0,0
151603712,"Spore",play,14.9,0
151603712,"Fallout New Vegas",purchase,1.0,0
151603712,"Fallout New Vegas",play,12.1,0
151603712,"Left 4 Dead 2",purchase,1.0,0
151603712,"Left 4 Dead 2",play,8.9,0
151603712,"HuniePop",purchase,1.0,0
151603712,"HuniePop",play,8.5,0
151603712,"Path of Exile",purchase,1.0,0
151603712,"Path of Exile",play,8.1,0
151603712,"Poly Bridge",purchase,1.0,0
151603712,"Poly Bridge",play,7.5,0
151603712,"Left 4 Dead",purchase,1.0,0
151603712,"Left 4 Dead",play,3.3,0
151603712,"Team Fortress 2",purchase,1.0,0
151603712,"Team Fortress 2",play,2.8,0
151603712,"Tomb Raider",purchase,1.0,0
151603712,"Tomb Raider",play,2.5,0
151603712,"The Banner Saga",purchase,1.0,0

View File

@@ -1,6 +0,0 @@
kaggle datasets download -d tamber/steam-video-games
unzip -o steam-video-games.zip
> data.csv
head -n $CUTOFF steam-200k.csv >> data.csv

View File

@@ -1 +0,0 @@
wc -l data.csv >> number_of_lines.txt

27
evaluation.py Normal file
View File

@@ -0,0 +1,27 @@
import tensorflow as tf
import os
import pandas as pd
import numpy as np
import csv
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt

# The CSVs are written by the training job without header rows,
# so header=None keeps the first sample from being read as column names.
x_test = pd.read_csv('xtest.csv', header=None)
y_test = pd.read_csv('ytest.csv', header=None)

model = tf.keras.models.load_model('./model')
res = model.evaluate(x_test, y_test, verbose=0)

with open('evaluation_acuraccy.txt', 'a+') as f:
    f.write(str(res[1]) + '\n')

with open('evaluation_acuraccy.txt') as f:
    scores = [float(line) for line in f if line.strip()]
print(scores)

builds = list(range(1, len(scores) + 1))
plot = plt.plot(builds, scores)
plt.xlabel('Build')
plt.xticks(range(1, len(scores) + 1))
plt.ylabel('Accuraccy')
# save before show(): calling savefig after show() can write an empty canvas,
# especially on a headless Jenkins agent
plt.savefig('accuraccy.png')
plt.show()

View File

@@ -1,79 +0,0 @@
import os
import pandas as pd
from sklearn.model_selection import train_test_split

# os.system("kaggle datasets download -d tamber/steam-video-games")
# os.system("unzip -o steam-video-games.zip")

steam=pd.read_csv('data.csv',usecols=[0,1,2,3],names=['userId','game','behavior','hoursPlayed'])
steam.isnull().values.any()
steam['userId'] = steam.userId.astype(str)

print("Dataset:")
print(steam)
print("Describe:")
print(steam.describe(include='all'),"\n\n")

print("Most active players:")
print(steam["userId"].value_counts(),"\n\n")

print("Players with the most purchased games:")
print(steam[steam["behavior"] != "play"]["userId"].value_counts())
print("Median:")
print(steam[steam["behavior"] != "play"]["userId"].value_counts().median(),"\n\n")

print("Players who played the most games:")
print(steam[steam["behavior"] != "purchase"]["userId"].value_counts())
print("Median:")
print(steam[steam["behavior"] != "purchase"]["userId"].value_counts().median(),"\n\n")

print("Games:")
print(steam["game"].value_counts(),"\n\n")

print("Mean playtime per game:")
print(steam[steam["behavior"] != "purchase"].groupby("game").mean().sort_values(by="hoursPlayed",ascending=False))
print("Median:")
print(steam[steam["behavior"] != "purchase"].groupby("game").mean().sort_values(by="hoursPlayed",ascending=False).median(),"\n\n")

print("Most purchased game:")
print(steam[steam["behavior"] != "play"]["game"].value_counts())
print("Median:")
print(steam[steam["behavior"] != "play"]["game"].value_counts().median(),"\n\n")

print("Game played by the most players:")
print(steam[steam["behavior"] != "purchase"]["game"].value_counts())
print("Median:")
print(steam[steam["behavior"] != "purchase"]["game"].value_counts().median(),"\n\n")

print("Number of games purchased and number of games played:")
print(steam["behavior"].value_counts(),"\n\n")

print("Game with the most hours for a single player:")
print(steam[steam["behavior"] != "purchase"][["userId","hoursPlayed","game"]].sort_values(by="hoursPlayed",ascending=False))
print("Median:")
print(steam[steam["behavior"] != "purchase"]["hoursPlayed"].sort_values(ascending=False).median(),"\n\n")

print("Total hours played per game:")
print(steam[steam["behavior"] != "purchase"].groupby("game").sum().sort_values(by="hoursPlayed",ascending=False))
print("Median:")
print(steam[steam["behavior"] != "purchase"].groupby("game").sum().sort_values(by="hoursPlayed",ascending=False).median(),"\n\n")

# drop games with fewer than 10 rows
steam = steam.groupby("game").filter(lambda x: len(x)>10)

# test and dev set sizes, 8:1:1 split
size=int(len(steam)/10)
steam_train, steam_test = train_test_split(steam, test_size=size, random_state=1, stratify=steam["game"])
steam_train, steam_dev = train_test_split(steam_train, test_size=size, random_state=1, stratify=steam_train["game"])

print("Training set")
print(steam_train["game"].value_counts(),"\n")
print("Test set")
print(steam_test["game"].value_counts(),"\n")
print("Dev set")
print(steam_dev["game"].value_counts(),"\n")

147
sacred_training.py Normal file
View File

@@ -0,0 +1,147 @@
import tensorflow as tf
import os
import pandas as pd
import numpy as np
import csv
from sklearn.model_selection import train_test_split
import sys
from sacred.observers import MongoObserver
from sacred.observers import FileStorageObserver
from sacred import Experiment

ex = Experiment("444386 sacred_scopes", interactive=True, save_git_info=False)
ex.observers.append(MongoObserver(url='mongodb://admin:IUM_2021@172.17.0.1:27017', db_name='sacred'))
ex.observers.append(FileStorageObserver('training'))

epochs = int(sys.argv[1])

@ex.config
def my_config():
    epoch = epochs
    layerDenseRelu = 256
    layerDropout = 0.01
    layerDenseSoftMax = 1000.0

#ex.add_config("config.json")

@ex.capture
def prepare_data():
    steam=pd.read_csv('data.csv',usecols=[0,1,2,3],names=['userId','game','behavior','hoursPlayed'])
    steam.isnull().values.any()
    steam['userId'] = steam.userId.astype(str)

    purchaseCount = steam[steam["behavior"] != "play"]["game"].value_counts()
    playCount = steam[steam["behavior"] != "purchase"]["game"].value_counts()
    playerPurchaseCount = steam[steam["behavior"] != "play"]["userId"].value_counts()
    playerPlayCount = steam[steam["behavior"] != "purchase"]["userId"].value_counts()

    steam = steam[steam['behavior'] != 'purchase']
    steam = steam.groupby("game").filter(lambda x: len(x)>10)
    size=int(len(steam)/10)

    meanGame = steam[steam["behavior"] != "purchase"].groupby("game").mean()
    meanGame = meanGame.to_dict()
    meanGame = meanGame['hoursPlayed']

    purchaseCount = purchaseCount.to_dict()
    playCount = playCount.to_dict()
    playerPurchaseCount = playerPurchaseCount.to_dict()
    playerPlayCount = playerPlayCount.to_dict()

    steam['meanTime'] = 0
    steam['purchaseCount'] = 0
    steam['playCount'] = 0
    steam['playerPurchaseCount'] = 0
    steam['playerPlayCount'] = 0
    steam['playPercent'] = 0

    for i in steam.index:
        steam.at[i,'meanTime'] = meanGame[steam.at[i,'game']]
        steam.at[i,'purchaseCount'] = purchaseCount[steam.at[i,'game']]
        steam.at[i,'playCount'] = playCount[steam.at[i,'game']]
        steam.at[i,'playerPurchaseCount'] = playerPurchaseCount[steam.at[i,'userId']]
        steam.at[i,'playerPlayCount'] = playerPlayCount[steam.at[i,'userId']]
        steam.at[i,'playPercent'] = playerPlayCount[steam.at[i,'userId']]/playerPurchaseCount[steam.at[i,'userId']]

    steam_train, steam_test = train_test_split(steam, test_size=size, random_state=1, stratify=steam["game"])
    steam_train, steam_dev = train_test_split(steam_train, test_size=size, random_state=1, stratify=steam_train["game"])

    games = {}
    for i in steam['game']:
        games[i] = 0

    j=0
    for key,game in games.items():
        games[key]=j
        j=j+1

    for i in steam['game']:
        i = games[i]

    invGames = {v: k for k, v in games.items()}

    x_train = steam_train[['hoursPlayed','purchaseCount','playCount','playerPlayCount','playerPurchaseCount']]
    y_train = steam_train['game']
    x_test = steam_test[['hoursPlayed','purchaseCount','playCount','playerPlayCount','playerPurchaseCount']]
    y_test = steam_test['game']

    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_test = np.array(x_test)
    y_test = np.array(y_test)

    with open('xtest.csv','w',encoding='UTF-8',newline='') as xtest:
        writer = csv.writer(xtest)
        for i in x_test:
            writer.writerow(i)

    for i,j in enumerate(y_train):
        y_train[i] = games[j]

    for i,j in enumerate(y_test):
        y_test[i] = games[j]

    y_train = np.array(y_train).astype(np.float32)
    y_test = np.array(y_test).astype(np.float32)
    np.savetxt("ytest.csv",y_test,delimiter=",",fmt='%d')

    return x_train, y_train, x_test, y_test, invGames

@ex.main
def my_main(epoch,layerDenseRelu,layerDropout,layerDenseSoftMax,_run):
    x_train, y_train, x_test, y_test, invGames = prepare_data()

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(5,1)),
        tf.keras.layers.Dense(layerDenseRelu, activation='relu'),
        tf.keras.layers.Dropout(layerDropout),
        tf.keras.layers.Dense(layerDenseSoftMax, activation='softmax')
    ])

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train, y_train, epochs=epoch)
    evaluation = model.evaluate(x_test, y_test)
    _run.log_scalar("training.loss", evaluation[0])
    _run.log_scalar("training.accuracy", evaluation[1])

    prediction = model.predict(x_test)
    classes_x=np.argmax(prediction,axis=1)

    rows = []
    for j,i in enumerate(classes_x):
        row = [invGames[i],invGames[y_test[j]]]
        rows.append(row)

    with open('results.csv','w',encoding='UTF-8',newline='') as f:
        writer = csv.writer(f)
        writer.writerow(["predicted", "expected"])
        for row in rows:
            writer.writerow(row)

    model.save('./model')
    ex.add_artifact('./model/saved_model.pb')

ex.run()
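
The MongoObserver above writes each run into the 'sacred' database. A minimal sketch for inspecting logged runs afterwards; the connection string is taken from the script, the 'runs' and 'metrics' collection names are Sacred's MongoObserver defaults, and the exact field layout should be treated as an assumption:

# inspect_runs.py -- hypothetical helper, not part of this repository.
# Lists recent Sacred runs and the scalars logged via _run.log_scalar().
from pymongo import MongoClient

client = MongoClient('mongodb://admin:IUM_2021@172.17.0.1:27017')
db = client['sacred']

# one document per experiment run
for run in db['runs'].find().sort('_id', -1).limit(5):
    print(run['_id'], run.get('status'), run.get('config', {}).get('epoch'))

# scalar metrics live in a separate 'metrics' collection
for metric in db['metrics'].find({'name': 'training.accuracy'}).limit(5):
    print(metric['name'], metric['values'])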

File diff suppressed because it is too large