ium_s449288/evaluate.py

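"""Evaluate the saved LEGO price regression model on the held-out test set.

Loads the Keras model from 'lego_reg_model', computes the mean absolute error
on 'lego_sets_clean_test.csv', appends the score to 'eval_results.txt', and
plots the error across builds into 'error_plot.jpg'.
"""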
import tensorflow as tf
from tensorflow import keras
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
import pandas as pd
# Load the trained model from file
model = keras.models.load_model('lego_reg_model')
# Load the test set
data_test = pd.read_csv('lego_sets_clean_test.csv')
test_piece_counts = np.array(data_test['piece_count'])
test_prices = np.array(data_test['list_price'])
# Simple evaluation (mean absolute error)
test_results = model.evaluate(
    test_piece_counts,
    test_prices, verbose=0)
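# NOTE: Keras evaluate() returns a single scalar when the model was compiled
# with one loss and no extra metrics; if extra metrics were added at compile
# time it returns a list, and the relevant element would need to be picked
# out before writing it below.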
# Append the numeric value of the metric to the results file
with open('eval_results.txt', 'a+') as f:
    f.write(str(test_results) + '\n')
# Generate the plot of errors across builds and save it to a file
with open('eval_results.txt') as f:
    scores = [float(line) for line in f if line.strip()]
builds = list(range(1, len(scores) + 1))
plt.plot(builds, scores)
plt.xlabel('Build number')
plt.xticks(builds)
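# Alternative sketch: the MaxNLocator imported above can enforce integer
# ticks without listing them by hand, e.g.:
#   plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))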
plt.ylabel('Mean absolute error')
plt.title('Model error by build')
plt.savefig('error_plot.jpg')
plt.show()