ium_495719/github_project/evaluate.py

import pandas as pd
import numpy as np
import sys
import os
import mlflow
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from keras.models import load_model
from helper import prepare_tensors
import matplotlib.pyplot as plt
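# Evaluate the trained model on the held-out test set, persist predictions
# and per-build metrics, and log the results to MLflow.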
# Build number, typically passed by CI as the first argument; defaults to 0.
if len(sys.argv) > 1:
    build_number = int(sys.argv[1])
else:
    build_number = 0
hp_test = pd.read_csv('./github_project/hp_test.csv')
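# Split the test frame into feature/target tensors via the project helper.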
X_test, Y_test = prepare_tensors(hp_test)
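# Absolute path matching the GitHub Actions runner workspace layout.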
model = load_model('/home/runner/work/ium/ium/github_project/hp_model.h5')
test_predictions = model.predict(X_test)
predictions_df = pd.DataFrame(test_predictions, columns=["Predicted_Price"])
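# Persist raw predictions for later inspection.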
predictions_df.to_csv('./github_project/hp_test_predictions.csv', index=False)
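# Standard regression metrics on the test set.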
rmse = np.sqrt(mean_squared_error(Y_test, test_predictions))
mae = mean_absolute_error(Y_test, test_predictions)
r2 = r2_score(Y_test, test_predictions)
metrics_df = pd.DataFrame({
    'Build_Number': [build_number],
    'RMSE': [rmse],
    'MAE': [mae],
    'R2': [r2]
})
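# Append this build's metrics to the running history, creating the file on the first run.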
metrics_file = './github_project/hp_test_metrics.csv'
if os.path.isfile(metrics_file):
    existing_metrics_df = pd.read_csv(metrics_file)
    updated_metrics_df = pd.concat([existing_metrics_df, metrics_df], ignore_index=True)
else:
    updated_metrics_df = metrics_df
updated_metrics_df.to_csv(metrics_file, index=False)
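# Plot how each metric evolves across builds, saving one figure per metric.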
metrics = ['RMSE', 'MAE', 'R2']
for metric in metrics:
    plt.plot(updated_metrics_df['Build_Number'], updated_metrics_df[metric], marker='o')
    plt.title(f'{metric} vs Builds')
    plt.xlabel('Build Number')
    plt.ylabel(metric)
    plt.grid(True)
    plot_file = f'plot_{metric.lower()}.png'
    plt.savefig(plot_file)
    plt.close()
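# Log the same metrics to MLflow for experiment tracking.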
with mlflow.start_run() as run:
    mlflow.log_metric('RMSE', rmse)
    mlflow.log_metric('MAE', mae)
    mlflow.log_metric('R2', r2)