diff --git a/Jenkinsfile_eval b/Jenkinsfile_eval
new file mode 100644
index 0000000..445d053
--- /dev/null
+++ b/Jenkinsfile_eval
@@ -0,0 +1,37 @@
+pipeline {
+    agent {
+        dockerfile true
+    }
+    parameters {
+        string(
+            defaultValue: '200',
+            description: 'number of epochs',
+            name: 'EPOCH'
+        )
+    }
+
+    stages {
+        stage('Stage 1') {
+            steps {
+                echo 'Hello world!'
+            }
+        }
+
+        stage('Copy from different Pipeline') {
+            steps {
+                copyArtifacts fingerprintArtifacts: true, projectName: 's444517-create-dataset', selector: lastSuccessful()
+                copyArtifacts fingerprintArtifacts: true, projectName: 's444517-training/master', selector: lastSuccessful()
+                copyArtifacts fingerprintArtifacts: true, projectName: 's444517-evaluation/master', selector: lastSuccessful(), optional: true
+            }
+        }
+
+        stage('Get data save artifacts') {
+            steps {
+                sh 'python3 ./nn_train_eval.py'
+                archiveArtifacts artifacts: 'my_model/saved_model.pb, metrics.txt'
+
+            }
+        }
+    }
+}
+
diff --git a/Jenkinsfile_train b/Jenkinsfile_train
index bedb43d..af065ca 100644
--- a/Jenkinsfile_train
+++ b/Jenkinsfile_train
@@ -31,10 +31,10 @@ pipeline {
             }
         }
     }
-    post {
-        always {
-            emailext body: "${currentBuild.currentResult}", subject: 's444517_build_status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
-        }
-    }
+    //post {
+    //    always {
+    //        emailext body: "${currentBuild.currentResult}", subject: 's444517_build_status', to: 'e19191c5.uam.onmicrosoft.com@emea.teams.ms'
+    //    }
+    //}
 }
 
diff --git a/nn_train_eval.py b/nn_train_eval.py
new file mode 100644
index 0000000..3fa623f
--- /dev/null
+++ b/nn_train_eval.py
@@ -0,0 +1,42 @@
+
+
+from sklearn.metrics import accuracy_score, recall_score
+import matplotlib.pyplot as plt
+
+# read "prediction,label" pairs from a comma-separated results file
+def read_data(file_name):
+    y_pred = []
+    y_true = []
+    with open(file_name, encoding="utf-8") as file:
+        for line in file:
+            y_pred.append(line.split(",")[0])
+            y_true.append(line.split(",")[1].strip())
+    return y_pred, y_true
+
+# append this build's accuracy and macro recall to metrics.txt
+def new_metrics():
+    y_pred, y_true = read_data("results.txt")
+    acc = accuracy_score(y_true, y_pred)
+    recc = recall_score(y_true, y_pred, average='macro')
+
+    with open("metrics.txt", 'a') as f:
+        f.write(f"{acc},{recc}\n")
+
+# plot how both metrics evolve across builds
+def draw_plt():
+    acc, recc = read_data("metrics.txt")
+    # read_data returns strings, so convert the metrics to floats before plotting
+    acc = [float(a) for a in acc]
+    recc = [float(r) for r in recc]
+    no_of_entries = list(range(1, len(acc)+1))
+
+    plt.plot(no_of_entries, acc, color='green', lw=2, label='Accuracy')
+    plt.plot(no_of_entries, recc, color='blue', lw=2, label='Recall')
+    plt.xlabel('Number of builds')
+    plt.ylabel('Metrics')
+    plt.legend()
+    plt.savefig("output.jpg")
+
+
+new_metrics()
+draw_plt()
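
Note on the data contract between the stages above: read_data expects every line of results.txt to be one comma-separated "prediction,label" pair, and new_metrics appends one "accuracy,macro-recall" pair per build to metrics.txt, which draw_plt then plots against build number. A minimal sketch of that contract, with made-up sample values rather than output from a real build:

    # Illustrative only: sample contents matching what nn_train_eval.py expects.
    # results.txt -- one "prediction,label" pair per line:
    #   1,1
    #   1,0
    #   0,0
    # After one run of new_metrics(), metrics.txt gains one "accuracy,recall" line:
    #   0.6666666666666666,0.75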