forked from s464914/ium_464914
Jenkins evaluation
This commit is contained in:
parent
6a0b357945
commit
5dfd11b904
20
Jenkinsfile
vendored
20
Jenkinsfile
vendored
@ -6,7 +6,6 @@ pipeline {
|
||||
description: 'Build for copying artifacts',
|
||||
name: 'BUILD_SELECTOR'
|
||||
)
|
||||
string(name: 'EPOCHS', defaultValue: '10', description: 'epochs')
|
||||
}
|
||||
stages {
|
||||
stage('Git Checkout') {
|
||||
@ -16,16 +15,27 @@ pipeline {
|
||||
}
|
||||
stage('Copy Artifacts') {
|
||||
steps {
|
||||
copyArtifacts fingerprintArtifacts: true, projectName: 'z-s464914-create-dataset', selector: buildParameter('BUILD_SELECTOR')
|
||||
copyArtifacts filter: '*', projectName: 's464914-training/experiments/', selector: buildParameter('BUILD_SELECTOR')
|
||||
}
|
||||
}
|
||||
stage('Train') {
|
||||
stage('Prediction') {
|
||||
steps {
|
||||
script {
|
||||
def customImage = docker.build("custom-image")
|
||||
customImage.inside {
|
||||
sh 'python3 ./model.py ' + params.EPOCHS
|
||||
archiveArtifacts artifacts: 'model.pth, predictions.txt', onlyIfSuccessful: true
|
||||
sh 'python3 ./prediction.py'
|
||||
archiveArtifacts artifacts: 'predictions.txt', onlyIfSuccessful: true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('Metrics') {
|
||||
steps {
|
||||
script {
|
||||
def customImage = docker.build("custom-image")
|
||||
customImage.inside {
|
||||
sh 'python3 ./metrics.py'
|
||||
archiveArtifacts artifacts: 'metrics.txt', onlyIfSuccessful: true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
25
metrics.py
Normal file
25
metrics.py
Normal file
@ -0,0 +1,25 @@
|
||||
"""Compute classification metrics from predictions.txt and append them to metrics.txt.

Each line of predictions.txt is expected to look like (see prediction.py):
    predicted: <int> true_label: <int>
so after splitting on single spaces, parts[1] is the predicted class id and
parts[3] is the true class id.
"""
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, mean_squared_error
import numpy as np

true_labels = []
predicted_labels = []

# Use a context manager so the file handle is always closed — the original
# opened predictions.txt with a bare open() and never closed it.
with open("predictions.txt", "r") as f:
    for line in f:
        parts = line.strip().split(' ')
        # parts layout: ["predicted:", "<pred>", "true_label:", "<true>"]
        true_labels.append(int(parts[3]))
        predicted_labels.append(int(parts[1]))

accuracy = accuracy_score(true_labels, predicted_labels)
precision_micro = precision_score(true_labels, predicted_labels, average='micro')
recall_micro = recall_score(true_labels, predicted_labels, average='micro')
f1_micro = f1_score(true_labels, predicted_labels, average='micro')
# NOTE(review): RMSE over class ids only makes sense if the labels are
# ordinal/numeric — presumably true for this dataset; confirm.
rmse = np.sqrt(mean_squared_error(true_labels, predicted_labels))

# Append (mode 'a') so successive CI runs accumulate a metrics history.
with open(r'metrics.txt', 'a') as fp:
    fp.write(f"Accuracy: {accuracy}\n")
    fp.write(f"Micro-average Precision: {precision_micro}\n")
    fp.write(f"Micro-average Recall: {recall_micro}\n")
    fp.write(f"Micro-average F1-score: {f1_micro}\n")
    fp.write(f"RMSE: {rmse}\n")
    fp.write("--------------------\n")
|
@ -6,6 +6,8 @@ import pandas as pd
|
||||
from sklearn.model_selection import train_test_split
|
||||
from sklearn.preprocessing import LabelEncoder
|
||||
import torch.nn.functional as F
|
||||
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, mean_squared_error
|
||||
import numpy as np
|
||||
|
||||
device = (
|
||||
"cuda"
|
||||
@ -41,7 +43,6 @@ def predict(model, input_data):
|
||||
|
||||
return predicted_class.item() # Return the predicted class label
|
||||
|
||||
|
||||
def main():
|
||||
forest_test = pd.read_csv('forest_test.csv')
|
||||
|
||||
@ -55,15 +56,23 @@ def main():
|
||||
load_model(model, model_path)
|
||||
|
||||
predictions = []
|
||||
for input_data in X_test:
|
||||
predicted_class = predict(model, input_data)
|
||||
predictions.append(predicted_class)
|
||||
|
||||
correct = 0
|
||||
total = 0
|
||||
with torch.no_grad():
|
||||
for input_data, target in zip(X_test, y_test):
|
||||
output = model(input_data)
|
||||
_, predicted_class = torch.max(output, 0)
|
||||
prediction_entry = f"predicted: {predicted_class.item()} true_label: {target}"
|
||||
predictions.append(prediction_entry)
|
||||
total += 1
|
||||
if predicted_class.item() == target:
|
||||
correct += 1
|
||||
|
||||
|
||||
with open(r'predictions.txt', 'w') as fp:
|
||||
for item in predictions:
|
||||
# write each item on a new line
|
||||
fp.write("%s\n" % item)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
116203
predictions.txt
Normal file
116203
predictions.txt
Normal file
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue
Block a user