# %% [markdown]
# # Prediction on the test data

# %%
import os
import pandas as pd
import numpy as np
import tensorflow as tf

# Load the VGG19 model saved during training
model = tf.keras.models.load_model('VGG19_model.hdf5')
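
# Quick sanity check (optional): the loaded network should expect 224x224 RGB
# inputs, which is what the resizing further down assumes.
print(model.input_shape)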

# Get the list of all files and directories
path = "test_data_own_cropped"
dir_list = os.listdir(path)

print(dir_list)

# %%
class_names = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'del', 'nothing', 'space']

img_height = 224
img_width = 224
actual = []  # true labels, taken from the folder names
pred = []    # labels predicted by the model

img_size = [img_height, img_width]

# Walk every class folder, run the model on each image and record the
# true and predicted labels
for i in dir_list:
    for j in os.listdir(path + '/' + i):
        file_path = path + '/' + i + '/' + j
        actual.append(i)
        test_image = tf.keras.utils.load_img(file_path, target_size=img_size)
        test_image = tf.keras.utils.img_to_array(test_image)
        test_image = np.expand_dims(test_image, axis=0)
        result = model.predict(test_image)
        pred.append(class_names[np.argmax(result)])

from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score

print("Test accuracy =", accuracy_score(actual, pred))
print("Classification report:\n", classification_report(actual, pred))
# %%
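# A possible speed-up, sketched here under two assumptions: the test folder
# keeps the one-sub-folder-per-class layout used above, and the model's output
# order matches `class_names`. Reading the folder as a batched dataset avoids
# calling model.predict once per image.
test_ds = tf.keras.utils.image_dataset_from_directory(
    path, image_size=(img_height, img_width), shuffle=False)

# Integer labels follow the folder order stored in test_ds.class_names
ds_labels = np.concatenate([y.numpy() for _, y in test_ds])
batched_actual = [test_ds.class_names[k] for k in ds_labels]
batched_pred = [class_names[k] for k in np.argmax(model.predict(test_ds), axis=1)]

print("Batched test accuracy =", accuracy_score(batched_actual, batched_pred))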