import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation, Conv2D, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import keras_tuner as kt
import keras
Configuration
train_data_dir = "./Trees"
validation_data_dir = "./Trees"
batch_size = 16
img_height, img_width = 60, 80
Image generation
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,        # shear to mimic viewing the trees from different angles
    zoom_range=0.2,
    horizontal_flip=True,   # randomly flip images horizontally
    vertical_flip=True,     # randomly flip images vertically
    validation_split=0.2)   # reserve 20% of the images for validation
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset='training')        # set as training data
validation_generator = train_datagen.flow_from_directory(
    validation_data_dir,      # same directory as the training data
    target_size=(img_height, img_width),
    batch_size=batch_size,
    class_mode='categorical',
    subset='validation')      # set as validation data
Found 759 images belonging to 3 classes.
Found 187 images belonging to 3 classes.
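As a quick sanity check (not part of the original run), the class-to-index mapping used for the one-hot labels can be printed; class_indices and samples are standard attributes of the generators returned by flow_from_directory:
# Hedged sketch: confirm how the three class folders map to output indices.
print(train_generator.class_indices)   # likely {'AS12_7': 0, 'EA01a': 1, 'EU43_6': 2} (alphabetical order)
print(train_generator.samples, validation_generator.samples)   # 759 and 187 images, matching the output above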
def plot_image(fname, label, col):
    img = mpimg.imread(train_data_dir + '/' + fname)
    plt.subplot(1, 3, col)
    plt.title(label)
    plt.imshow(img)
plt.figure(figsize=(10,5))
cat1_files = ['AS12_7/18.png','AS12_7/103.png','AS12_7/225.png']
for idx, f in enumerate(cat1_files):
    plot_image(f, f'Category 1: {idx+1}', idx + 1)
plt.show()
plt.figure(figsize=(10,5))
cat2_files = ['EA01a/15.png','EA01a/5.png','EA01a/12.png']
for idx, f in enumerate(cat2_files):
    plot_image(f, f'Category 2: {idx+1}', idx + 1)
plt.show()
plt.figure(figsize=(10,5))
cat3_files = ['EU43_6/135.png','EU43_6/195.png','EU43_6/16.png']
for idx, f in enumerate(cat3_files):
    plot_image(f, f'Category 3: {idx+1}', idx + 1)
plt.show()
CNN
model = Sequential()
model.add(Conv2D(32, (3,3), activation='relu', input_shape=(img_height, img_width, 3)))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(512, activation='relu'))
model.add(Dense(3, activation='softmax'))
plot_model(model, to_file='model1_plot.png', show_shapes=True, show_dtype=True, show_layer_names=True, expand_nested=True)
print(model.summary())
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 58, 78, 32) 896 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 29, 39, 32) 0 _________________________________________________________________ conv2d_1 (Conv2D) (None, 27, 37, 64) 18496 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 13, 18, 64) 0 _________________________________________________________________ conv2d_2 (Conv2D) (None, 11, 16, 64) 36928 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 5, 8, 64) 0 _________________________________________________________________ flatten (Flatten) (None, 2560) 0 _________________________________________________________________ dense (Dense) (None, 512) 1311232 _________________________________________________________________ dense_1 (Dense) (None, 512) 262656 _________________________________________________________________ dense_2 (Dense) (None, 3) 1539 ================================================================= Total params: 1,631,747 Trainable params: 1,631,747 Non-trainable params: 0 _________________________________________________________________ None
model.compile(optimizer=RMSprop(learning_rate=0.0001),
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
history = model.fit(train_generator, epochs=16, verbose=1,
                    validation_data=validation_generator, validation_steps=4)
Epoch 1/16  48/48 [==============================] - 21s 419ms/step - loss: 0.8317 - accuracy: 0.6440 - val_loss: 0.7841 - val_accuracy: 0.8125
Epoch 2/16  48/48 [==============================] - 18s 376ms/step - loss: 0.6134 - accuracy: 0.7810 - val_loss: 0.3542 - val_accuracy: 0.8438
Epoch 3/16  48/48 [==============================] - 18s 377ms/step - loss: 0.4492 - accuracy: 0.8211 - val_loss: 0.2147 - val_accuracy: 0.9531
Epoch 4/16  48/48 [==============================] - 18s 367ms/step - loss: 0.2990 - accuracy: 0.9016 - val_loss: 0.4539 - val_accuracy: 0.8125
Epoch 5/16  48/48 [==============================] - 18s 379ms/step - loss: 0.2767 - accuracy: 0.8925 - val_loss: 0.0954 - val_accuracy: 0.9531
Epoch 6/16  48/48 [==============================] - 17s 364ms/step - loss: 0.2011 - accuracy: 0.9363 - val_loss: 0.1089 - val_accuracy: 0.9688
Epoch 7/16  48/48 [==============================] - 17s 362ms/step - loss: 0.2036 - accuracy: 0.9311 - val_loss: 0.0708 - val_accuracy: 1.0000
Epoch 8/16  48/48 [==============================] - 17s 361ms/step - loss: 0.1124 - accuracy: 0.9751 - val_loss: 0.5785 - val_accuracy: 0.7500
Epoch 9/16  48/48 [==============================] - 17s 360ms/step - loss: 0.1146 - accuracy: 0.9699 - val_loss: 0.0354 - val_accuracy: 1.0000
Epoch 10/16 48/48 [==============================] - 16s 341ms/step - loss: 0.1671 - accuracy: 0.9596 - val_loss: 0.0151 - val_accuracy: 1.0000
Epoch 11/16 48/48 [==============================] - 16s 327ms/step - loss: 0.0743 - accuracy: 0.9792 - val_loss: 0.0216 - val_accuracy: 1.0000
Epoch 12/16 48/48 [==============================] - 16s 332ms/step - loss: 0.0752 - accuracy: 0.9779 - val_loss: 0.0099 - val_accuracy: 1.0000
Epoch 13/16 48/48 [==============================] - 16s 328ms/step - loss: 0.1131 - accuracy: 0.9679 - val_loss: 0.0610 - val_accuracy: 0.9844
Epoch 14/16 48/48 [==============================] - 15s 318ms/step - loss: 0.1129 - accuracy: 0.9591 - val_loss: 0.0253 - val_accuracy: 1.0000
Epoch 15/16 48/48 [==============================] - 16s 329ms/step - loss: 0.0445 - accuracy: 0.9877 - val_loss: 0.0213 - val_accuracy: 1.0000
Epoch 16/16 48/48 [==============================] - 17s 346ms/step - loss: 0.0665 - accuracy: 0.9746 - val_loss: 0.0353 - val_accuracy: 0.9844
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
[Plot: training vs. validation accuracy per epoch for the baseline CNN]
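The curve above summarizes training, but a single number for the whole validation split can also be obtained with Keras' standard evaluate call (a sketch; this cell is not in the original notebook):
# Hedged sketch: score the trained CNN on the entire validation split,
# not just the 4 batches sampled each epoch during fit().
val_loss, val_acc = model.evaluate(validation_generator, verbose=0)
print(f"validation loss: {val_loss:.4f}, validation accuracy: {val_acc:.4f}")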
CNN + hyperparameter tuning
def build_model(hp):
    inputs = keras.Input(shape=(img_height, img_width, 3))
    x = inputs
    for i in range(hp.Int("cnn_layers", 1, 3)):
        x = Conv2D(
            hp.Int(f"filters_{i}", 32, 128, step=32),
            kernel_size=(3, 3),
            activation="relu",
        )(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Flatten()(x)
    # A hyperparameter for whether to use a dropout layer.
    if hp.Boolean("dropout"):
        x = Dropout(0.5)(x)
    outputs = Dense(units=3, activation="softmax")(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    # Compile the model.
    model.compile(
        optimizer=RMSprop(learning_rate=0.0001),
        loss=tf.keras.losses.CategoricalCrossentropy(),
        metrics=['accuracy'],
    )
    return model
hp = kt.HyperParameters()
model = build_model(hp)
plot_model(model, to_file='model2_plot.png', show_shapes=True, show_dtype=True, show_layer_names=True, expand_nested=True)
history = model.fit(train_generator, verbose=1, epochs=16,
                    validation_data=validation_generator, validation_steps=4)
Epoch 1/16  48/48 [==============================] - 17s 342ms/step - loss: 0.7536 - accuracy: 0.6303 - val_loss: 0.5446 - val_accuracy: 0.9375
Epoch 2/16  48/48 [==============================] - 18s 369ms/step - loss: 0.4898 - accuracy: 0.8538 - val_loss: 0.3223 - val_accuracy: 0.9531
Epoch 3/16  48/48 [==============================] - 17s 362ms/step - loss: 0.3330 - accuracy: 0.8921 - val_loss: 0.1827 - val_accuracy: 0.9844
Epoch 4/16  48/48 [==============================] - 18s 380ms/step - loss: 0.2685 - accuracy: 0.9094 - val_loss: 0.2703 - val_accuracy: 0.9844
Epoch 5/16  48/48 [==============================] - 19s 403ms/step - loss: 0.2051 - accuracy: 0.9460 - val_loss: 0.2149 - val_accuracy: 0.9844
Epoch 6/16  48/48 [==============================] - 19s 403ms/step - loss: 0.1838 - accuracy: 0.9561 - val_loss: 0.1424 - val_accuracy: 0.9531
Epoch 7/16  48/48 [==============================] - 19s 394ms/step - loss: 0.1609 - accuracy: 0.9720 - val_loss: 0.1484 - val_accuracy: 0.9688
Epoch 8/16  48/48 [==============================] - 19s 404ms/step - loss: 0.1325 - accuracy: 0.9680 - val_loss: 0.0957 - val_accuracy: 0.9844
Epoch 9/16  48/48 [==============================] - 19s 406ms/step - loss: 0.1129 - accuracy: 0.9808 - val_loss: 0.1412 - val_accuracy: 0.9844
Epoch 10/16 48/48 [==============================] - 19s 401ms/step - loss: 0.1178 - accuracy: 0.9695 - val_loss: 0.0513 - val_accuracy: 1.0000
Epoch 11/16 48/48 [==============================] - 20s 408ms/step - loss: 0.0963 - accuracy: 0.9795 - val_loss: 0.0421 - val_accuracy: 1.0000
Epoch 12/16 48/48 [==============================] - 18s 387ms/step - loss: 0.0932 - accuracy: 0.9893 - val_loss: 0.0515 - val_accuracy: 1.0000
Epoch 13/16 48/48 [==============================] - 20s 416ms/step - loss: 0.0915 - accuracy: 0.9801 - val_loss: 0.1114 - val_accuracy: 0.9844
Epoch 14/16 48/48 [==============================] - 17s 356ms/step - loss: 0.0663 - accuracy: 0.9935 - val_loss: 0.0428 - val_accuracy: 1.0000
Epoch 15/16 48/48 [==============================] - 17s 360ms/step - loss: 0.0889 - accuracy: 0.9767 - val_loss: 0.0825 - val_accuracy: 0.9844
Epoch 16/16 48/48 [==============================] - 17s 347ms/step - loss: 0.0807 - accuracy: 0.9835 - val_loss: 0.0292 - val_accuracy: 1.0000
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
[Plot: training vs. validation accuracy per epoch for the tuner-defined CNN]
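Note that build_model(kt.HyperParameters()) only instantiates the default point of the search space (one Conv2D block with 32 filters and no dropout), so the model trained above is still a fixed architecture rather than a tuned one. A minimal sketch of an actual search with KerasTuner's RandomSearch follows; the trial budget and directory names are illustrative, and it reuses the generators defined earlier (this cell was not executed in the original notebook):
# Hedged sketch: search the hyperparameter space defined in build_model.
tuner = kt.RandomSearch(
    build_model,
    objective="val_accuracy",
    max_trials=5,              # illustrative trial budget
    directory="kt_trials",     # illustrative output directory
    project_name="trees_cnn",
)
tuner.search(train_generator, epochs=16,
             validation_data=validation_generator, validation_steps=4)
best_model = tuner.get_best_models(num_models=1)[0]   # model from the best trial
tuner.results_summary()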
MLP
model = Sequential()
model.add(Flatten(input_shape=(img_height, img_width, 3), name="Input_layer"))
model.add(Dense(350, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(3, activation='softmax'))
plot_model(model, to_file='model3_plot.png', show_shapes=True, show_dtype=True, show_layer_names=True, expand_nested=True)
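A quick back-of-the-envelope check (arithmetic only, not notebook output): flattening a 60x80x3 image yields 14,400 inputs, so the first Dense layer dominates the parameter count and this MLP is roughly three times larger than the 1.63M-parameter CNN above.
# Parameter-count sketch for the MLP above.
n_in = img_height * img_width * 3      # 60 * 80 * 3 = 14,400 inputs after Flatten
p1 = n_in * 350 + 350                  # Dense(350): 5,040,350 parameters
p2 = 350 * 50 + 50                     # Dense(50):  17,550 parameters
p3 = 50 * 3 + 3                        # Dense(3):   153 parameters
print(p1 + p2 + p3)                    # 5,058,053 total, vs 1,631,747 for the CNN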
model.compile(
    optimizer=RMSprop(learning_rate=0.0001),
    loss=tf.keras.losses.CategoricalCrossentropy(),
    metrics=['accuracy'],
)
history = model.fit(train_generator, verbose=1, epochs=16,
                    validation_data=validation_generator, validation_steps=4)
Epoch 1/16  48/48 [==============================] - 19s 388ms/step - loss: 1.2904 - accuracy: 0.5216 - val_loss: 0.6917 - val_accuracy: 0.6562
Epoch 2/16  48/48 [==============================] - 19s 404ms/step - loss: 0.6465 - accuracy: 0.7112 - val_loss: 0.7890 - val_accuracy: 0.6719
Epoch 3/16  48/48 [==============================] - 18s 383ms/step - loss: 0.5448 - accuracy: 0.7737 - val_loss: 0.4680 - val_accuracy: 0.8594
Epoch 4/16  48/48 [==============================] - 18s 368ms/step - loss: 0.4313 - accuracy: 0.8452 - val_loss: 0.5577 - val_accuracy: 0.7500
Epoch 5/16  48/48 [==============================] - 24s 501ms/step - loss: 0.4464 - accuracy: 0.8121 - val_loss: 0.2900 - val_accuracy: 0.7969
Epoch 6/16  48/48 [==============================] - 21s 438ms/step - loss: 0.3362 - accuracy: 0.8622 - val_loss: 0.2690 - val_accuracy: 0.7969
Epoch 7/16  48/48 [==============================] - 20s 426ms/step - loss: 0.3143 - accuracy: 0.8954 - val_loss: 0.3525 - val_accuracy: 0.8438
Epoch 8/16  48/48 [==============================] - 20s 426ms/step - loss: 0.2822 - accuracy: 0.9002 - val_loss: 0.2506 - val_accuracy: 0.9688
Epoch 9/16  48/48 [==============================] - 19s 398ms/step - loss: 0.2282 - accuracy: 0.9134 - val_loss: 0.3262 - val_accuracy: 0.9219
Epoch 10/16 48/48 [==============================] - 20s 409ms/step - loss: 0.1698 - accuracy: 0.9452 - val_loss: 0.3116 - val_accuracy: 0.9375
Epoch 11/16 48/48 [==============================] - 20s 409ms/step - loss: 0.2168 - accuracy: 0.9184 - val_loss: 0.2565 - val_accuracy: 0.9062
Epoch 12/16 48/48 [==============================] - 20s 409ms/step - loss: 0.2145 - accuracy: 0.9224 - val_loss: 0.1434 - val_accuracy: 1.0000
Epoch 13/16 48/48 [==============================] - 20s 425ms/step - loss: 0.2573 - accuracy: 0.9295 - val_loss: 0.1047 - val_accuracy: 0.9688
Epoch 14/16 48/48 [==============================] - 19s 405ms/step - loss: 0.1623 - accuracy: 0.9460 - val_loss: 0.1506 - val_accuracy: 0.9844
Epoch 15/16 48/48 [==============================] - 19s 395ms/step - loss: 0.1781 - accuracy: 0.9512 - val_loss: 0.0769 - val_accuracy: 0.9844
Epoch 16/16 48/48 [==============================] - 18s 383ms/step - loss: 0.2260 - accuracy: 0.9164 - val_loss: 0.2339 - val_accuracy: 0.9844
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
[Plot: training vs. validation accuracy per epoch for the MLP]
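Finally, to apply a trained model to a single photo (a sketch not in the original notebook; load_img and img_to_array are standard Keras utilities, and the file below is simply one of the training images reused for illustration), the image must be resized and rescaled exactly as the generators do:
# Hedged sketch: classify one image with the most recently trained model (here the MLP).
from tensorflow.keras.preprocessing.image import load_img, img_to_array

img = load_img(train_data_dir + '/EA01a/15.png', target_size=(img_height, img_width))
x = img_to_array(img) / 255.0                  # same 1/255 rescaling as ImageDataGenerator
probs = model.predict(x[np.newaxis, ...])[0]   # shape (3,): class probabilities
class_names = list(train_generator.class_indices)
print(class_names[int(np.argmax(probs))], probs)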