import os
import random

import joblib
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, Dense, Flatten
from tensorflow.keras.models import Sequential


class Neural_network:

    def __init__(self):
        # Check for available GPUs and, if one is present, enable memory
        # growth so TensorFlow does not allocate all GPU memory up front.
        physical_devices = tf.config.experimental.list_physical_devices('GPU')
        print("Num GPUs Available: ", len(physical_devices))
        if physical_devices:
            tf.config.experimental.set_memory_growth(physical_devices[0], enable=True)

    def build_model(self, image_size=(25, 25), batch_size=8):
        # Generate the training and validation datasets from the image
        # folders (one sub-folder per class).
        self.train_ds = tf.keras.preprocessing.image_dataset_from_directory(
            "core/resources/data/train",
            validation_split=0.2,
            subset="training",
            shuffle=False,
            image_size=image_size,
            batch_size=batch_size,
        )
        self.val_ds = tf.keras.preprocessing.image_dataset_from_directory(
            "core/resources/data/validate",
            validation_split=0.2,
            subset="validation",
            shuffle=False,
            image_size=image_size,
            batch_size=batch_size,
        )

        self.class_names = self.train_ds.class_names
        print(self.class_names)

        # Configure the datasets for performance: prefetching overlaps data
        # preparation with model execution.
        AUTOTUNE = tf.data.AUTOTUNE
        train_ds = self.train_ds.prefetch(buffer_size=AUTOTUNE)
        val_ds = self.val_ds.prefetch(buffer_size=AUTOTUNE)

        input_shape = (25, 25, 3)
        # Create the model: a stack of convolutions followed by dense layers,
        # ending in a softmax over the 9 classes.
        model = Sequential()
        model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
        model.add(Conv2D(32, kernel_size=(5, 5), activation='relu'))
        model.add(Conv2D(64, kernel_size=(5, 5), activation='relu'))
        model.add(Conv2D(128, kernel_size=(5, 5), activation='relu'))
        model.add(Flatten())
        model.add(Dense(16, activation='relu'))
        model.add(Dense(9, activation='softmax'))

        model.compile(
            optimizer=keras.optimizers.Adam(),
            loss="sparse_categorical_crossentropy",
            metrics=["accuracy"],
        )

        # model.summary()

        # Train the model.
        model.fit(
            train_ds, epochs=15, validation_data=val_ds,
        )

        self.model = model
        self.model.save("core/saved_model.h5")
        joblib.dump(self.class_names, "saved_model_classes.joblib")

    # Load a random image from the folder and predict its class.
    def predict(self):
        folder = "core/resources/data/Mushrooms/jadalne"

        filename = random.choice(os.listdir(folder))
        print(filename)

        # Load the image, resized to the size the network was trained on.
        img = keras.preprocessing.image.load_img(
            os.path.join(folder, filename), color_mode='rgb',
            target_size=(25, 25), interpolation='nearest'
        )
        # Convert to an array and add a batch dimension.
        img_array = keras.preprocessing.image.img_to_array(img)
        img_array = np.array([img_array])
        # Load the saved model and its class names.
        self.model = tf.keras.models.load_model("core/saved_model.h5")
        self.class_names = joblib.load("saved_model_classes.joblib")
        # Predict the class. The output layer is already softmax, so
        # predictions[0] is a probability distribution over the classes.
        predictions = self.model.predict(img_array)
        result = predictions[0]
        print(result)
        print("Predicted class:", self.class_names[int(np.argmax(result))])
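

# A minimal usage sketch, not part of the original file: build (train) the
# network once, then classify a random image from the folder hard-coded in
# predict(). It assumes the data directories referenced above exist, laid
# out with one sub-folder per class.
if __name__ == "__main__":
    nn = Neural_network()
    nn.build_model(image_size=(25, 25), batch_size=8)
    nn.predict()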