symulowanie_cnn/cnn.ipynb
2021-12-06 11:54:14 +01:00

32 KiB

import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
import os
from sklearn.model_selection import train_test_split
import cv2
import numpy as np
import matplotlib.pyplot as plt
def preprocess(img, scale_percent=10):
    """Downscale an image to ``scale_percent`` % of its original size.

    Parameters
    ----------
    img : numpy.ndarray
        (H, W, C) BGR image as returned by ``cv2.imread``.
    scale_percent : int, optional
        Target size as a percentage of the original dimensions.
        Defaults to 10, matching the previous hard-coded behavior.

    Returns
    -------
    numpy.ndarray
        The resized image. INTER_AREA interpolation is used because it
        is the recommended choice when shrinking an image.
    """
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    return cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

def readData(data_links):
    """Load, downscale, and label the images at the given paths.

    Each path is expected to look like ``data/<token>_<label>_...``: the
    second underscore-separated token of the filename is the class label.
    Labels '3', '5', '8' map to classes 0, 1, 2; any other label maps to
    the catch-all class 4.  (NOTE(review): the model downstream has only
    3 output units, so class 4 is presumably never hit in practice —
    confirm the dataset contains only 3/5/8.)

    Parameters
    ----------
    data_links : iterable of str
        Relative paths of the image files to load.

    Returns
    -------
    tuple[list, list]
        ``(x, y)`` — the preprocessed images and their integer labels.
    """
    label_map = {'3': 0, '5': 1, '8': 2}
    x, y = [], []
    for link in data_links:
        img = cv2.imread(link, cv2.IMREAD_COLOR)
        if img is None:
            # cv2.imread returns None (no exception) for unreadable or
            # non-image files; skip them instead of crashing in preprocess().
            continue
        img = preprocess(img)
        label = link.split("/")[1].split('_')[1]
        x.append(img)
        y.append(label_map.get(label, 4))

    return x, y

# Collect every file under data/ as a relative path "data/<filename>".
data_links = os.listdir("data/")
data_links = ["data/" + x for x in data_links]
x, y = readData(data_links)
# Hold out 30% of the samples; fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
# Sanity check of one sample's shape (next line is the notebook cell output).
X_train[0].shape
(60, 80, 3)
# Scale pixel values from [0, 255] into [0.0, 1.0] and convert to arrays;
# labels become int64 as expected by SparseCategoricalCrossentropy.
X_train = np.array([i / 255.0 for i in X_train], dtype=np.float64)
X_test = np.array([i / 255.0 for i in X_test], dtype=np.float64)
y_train = np.array(y_train, dtype=np.int64)
y_test = np.array(y_test, dtype=np.int64)
# CNN: three conv/pool stages, a fourth conv layer, then a small dense head.
# Input shape (60, 80, 3) matches the 10%-downscaled BGR images.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(60, 80, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))

model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))

model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
# BUG FIX: the output layer used 'sigmoid', but SparseCategoricalCrossentropy
# with its default from_logits=False expects a probability distribution over
# the 3 classes; 'softmax' normalizes the outputs so they sum to 1.
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])

# Train for 20 epochs; the held-out split doubles as the validation set,
# so val_accuracy below is measured on the same data as the final evaluate().
history = model.fit(X_train, y_train, epochs=20, 
                    validation_data=(X_test, y_test))
Epoch 1/20
20/20 [==============================] - 2s 70ms/step - loss: 1.0857 - accuracy: 0.3730 - val_loss: 1.0336 - val_accuracy: 0.4926
Epoch 2/20
20/20 [==============================] - 1s 64ms/step - loss: 0.9725 - accuracy: 0.5127 - val_loss: 0.8676 - val_accuracy: 0.6037
Epoch 3/20
20/20 [==============================] - 1s 65ms/step - loss: 0.8294 - accuracy: 0.6524 - val_loss: 0.7766 - val_accuracy: 0.6852
Epoch 4/20
20/20 [==============================] - 1s 65ms/step - loss: 0.8491 - accuracy: 0.6476 - val_loss: 0.7487 - val_accuracy: 0.6815
Epoch 5/20
20/20 [==============================] - 1s 66ms/step - loss: 0.7602 - accuracy: 0.6667 - val_loss: 0.8408 - val_accuracy: 0.5741
Epoch 6/20
20/20 [==============================] - 1s 66ms/step - loss: 0.7095 - accuracy: 0.7048 - val_loss: 0.6505 - val_accuracy: 0.7296
Epoch 7/20
20/20 [==============================] - 1s 65ms/step - loss: 0.6341 - accuracy: 0.7349 - val_loss: 0.5629 - val_accuracy: 0.7852
Epoch 8/20
20/20 [==============================] - 1s 68ms/step - loss: 0.5052 - accuracy: 0.7921 - val_loss: 0.4398 - val_accuracy: 0.8444
Epoch 9/20
20/20 [==============================] - 1s 70ms/step - loss: 0.3686 - accuracy: 0.8508 - val_loss: 0.3397 - val_accuracy: 0.8593
Epoch 10/20
20/20 [==============================] - 1s 70ms/step - loss: 0.2249 - accuracy: 0.9317 - val_loss: 0.2998 - val_accuracy: 0.8889
Epoch 11/20
20/20 [==============================] - 1s 70ms/step - loss: 0.4271 - accuracy: 0.8365 - val_loss: 0.4476 - val_accuracy: 0.8296
Epoch 12/20
20/20 [==============================] - 1s 67ms/step - loss: 0.2457 - accuracy: 0.9270 - val_loss: 0.2269 - val_accuracy: 0.9074
Epoch 13/20
20/20 [==============================] - 1s 67ms/step - loss: 0.1181 - accuracy: 0.9635 - val_loss: 0.1664 - val_accuracy: 0.9407
Epoch 14/20
20/20 [==============================] - 1s 68ms/step - loss: 0.0965 - accuracy: 0.9714 - val_loss: 0.1522 - val_accuracy: 0.9444
Epoch 15/20
20/20 [==============================] - 1s 70ms/step - loss: 0.0577 - accuracy: 0.9810 - val_loss: 0.1595 - val_accuracy: 0.9444
Epoch 16/20
20/20 [==============================] - 1s 68ms/step - loss: 0.1496 - accuracy: 0.9444 - val_loss: 0.3178 - val_accuracy: 0.8963
Epoch 17/20
20/20 [==============================] - 1s 66ms/step - loss: 0.0731 - accuracy: 0.9794 - val_loss: 0.0890 - val_accuracy: 0.9704
Epoch 18/20
20/20 [==============================] - 1s 66ms/step - loss: 0.0253 - accuracy: 0.9937 - val_loss: 0.0744 - val_accuracy: 0.9704
Epoch 19/20
20/20 [==============================] - 1s 65ms/step - loss: 0.0138 - accuracy: 0.9984 - val_loss: 0.0656 - val_accuracy: 0.9778
Epoch 20/20
20/20 [==============================] - 1s 65ms/step - loss: 0.0092 - accuracy: 0.9984 - val_loss: 0.0616 - val_accuracy: 0.9741
# Visualize training progress: train vs. validation accuracy per epoch.
for series in ('accuracy', 'val_accuracy'):
    plt.plot(history.history[series], label=series)
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

# Final evaluation on the held-out split (same data used as validation above).
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)

print(test_loss)
print(test_acc)
9/9 - 0s - loss: 0.0616 - accuracy: 0.9741 - 153ms/epoch - 17ms/step
0.06160588562488556
0.9740740656852722