SW-Wiktor-Bombola/SW-Unity/Plants Neural Network.ipynb

import cv2
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import re
def preprocessing(image):
    # Downscale the captured frame to 10% of its original size to keep the network input small.
    scale_percent = 10
    width = int(image.shape[1] * scale_percent / 100)
    height = int(image.shape[0] * scale_percent / 100)
    dim = (width, height)
    return cv2.resize(image, dim, interpolation=cv2.INTER_AREA)


def read_data(data_images):
    x, y = [], []
    for image in data_images:
        img = cv2.imread(image, cv2.IMREAD_COLOR)
        img = preprocessing(img)
        # The class label is the single character between the first two dashes
        # of the file name, e.g. "capturedframe/tree-1-....png" -> "1".
        y_label = re.search(r"(?<=-).(?=-)", image).group(0)
        x.append(img)
        y.append(y_label)
    return x, y
location = "capturedframe/"
data_images = os.listdir(location)
# One-off renaming of the captured frames (kept for reference):
# for x in data_images:
#     os.rename(location+x, "tree-1-"+ x[13:])
data_images = [location + x for x in data_images if x.endswith(".png")]

x, y = read_data(data_images)
print(y)
['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1']
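The printed labels above are all '1', so before splitting the data it may be worth confirming that more than one class is actually present. A minimal sanity check, assuming y holds the string labels returned by read_data:

from collections import Counter
# Count how many samples each label has; a single key here would mean the
# two-unit classifier below only ever sees one class.
print(Counter(y))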
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=81)
X_train[0].shape
(60, 80, 3)
# Scale pixel values to [0, 1] and cast the string labels to integers for the sparse loss.
X_train = np.array([x / 255.0 for x in X_train], dtype=np.float64)
X_test = np.array([x / 255.0 for x in X_test], dtype=np.float64)

y_train = np.array(y_train, dtype=np.int64)
y_test = np.array(y_test, dtype=np.int64)
print((X_train[0]))
[[[0.00073818 0.00086121 0.00070742]
  [0.0009381  0.00112265 0.0009381 ]
  [0.00104575 0.00129181 0.00107651]
  ...
  [0.00246059 0.00273741 0.00247597]
  [0.00229143 0.00267589 0.00241446]
  [0.00232218 0.00276817 0.00247597]]

 [[0.00089196 0.00099962 0.00081507]
  [0.00107651 0.00130719 0.00109189]
  [0.0009381  0.00112265 0.0009381 ]
  ...
  [0.00244521 0.00276817 0.00250673]
  [0.00218378 0.00270665 0.0023837 ]
  [0.00219915 0.002599   0.0023837 ]]

 [[0.0012303  0.00124567 0.00103037]
  [0.00113802 0.00132257 0.00110727]
  [0.00099962 0.0012303  0.00103037]
  ...
  [0.00233756 0.00279892 0.00249135]
  [0.00226067 0.00264514 0.00232218]
  [0.00226067 0.00267589 0.00236832]]

 ...

 [[0.00084583 0.00101499 0.00083045]
  [0.00090734 0.00112265 0.00092272]
  [0.00090734 0.00109189 0.00089196]
  ...
  [0.00229143 0.00292195 0.002599  ]
  [0.00210688 0.00255286 0.00224529]
  [0.00226067 0.00270665 0.00250673]]

 [[0.00087659 0.00101499 0.00079969]
  [0.00079969 0.0009381  0.00075356]
  [0.00089196 0.00107651 0.00089196]
  ...
  [0.00247597 0.00290657 0.00264514]
  [0.00236832 0.00270665 0.00246059]
  [0.00235294 0.00293733 0.002599  ]]

 [[0.0009381  0.00112265 0.00092272]
  [0.00084583 0.00099962 0.00079969]
  [0.00084583 0.00099962 0.00081507]
  ...
  [0.00282968 0.00315263 0.00290657]
  [0.00276817 0.0031065  0.0028912 ]
  [0.00224529 0.00278354 0.00230681]]]
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=X_train[0].shape))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
# Two output units with softmax probabilities, matching the SparseCategoricalCrossentropy loss used below.
model.add(Dense(2, activation='softmax'))
print(model.summary())
Model: "sequential_6"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_16 (Conv2D)           (None, 58, 78, 32)        896       
_________________________________________________________________
max_pooling2d_12 (MaxPooling (None, 29, 39, 32)        0         
_________________________________________________________________
conv2d_17 (Conv2D)           (None, 27, 37, 64)        18496     
_________________________________________________________________
max_pooling2d_13 (MaxPooling (None, 13, 18, 64)        0         
_________________________________________________________________
conv2d_18 (Conv2D)           (None, 11, 16, 32)        18464     
_________________________________________________________________
max_pooling2d_14 (MaxPooling (None, 5, 8, 32)          0         
_________________________________________________________________
flatten_6 (Flatten)          (None, 1280)              0         
_________________________________________________________________
dense_12 (Dense)             (None, 256)               327936    
_________________________________________________________________
dense_13 (Dense)             (None, 2)                 514       
=================================================================
Total params: 366,306
Trainable params: 366,306
Non-trainable params: 0
_________________________________________________________________
None
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=10,
                    validation_data=(X_test, y_test))
Epoch 1/10
9/9 [==============================] - 1s 62ms/step - loss: 0.4567 - accuracy: 0.9173 - val_loss: 0.0150 - val_accuracy: 1.0000
Epoch 2/10
9/9 [==============================] - 0s 52ms/step - loss: 0.0021 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 3/10
9/9 [==============================] - 0s 50ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 4/10
9/9 [==============================] - 0s 50ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 5/10
9/9 [==============================] - 0s 51ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 6/10
9/9 [==============================] - 0s 50ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 7/10
9/9 [==============================] - 0s 53ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 8/10
9/9 [==============================] - 0s 52ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 9/10
9/9 [==============================] - 0s 50ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
Epoch 10/10
9/9 [==============================] - 0s 49ms/step - loss: 0.0000e+00 - accuracy: 1.0000 - val_loss: 0.0000e+00 - val_accuracy: 1.0000
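As a quick check on the run above, one could plot the recorded training curves; a minimal sketch, assuming matplotlib is installed:

import matplotlib.pyplot as plt

# Plot training and validation accuracy per epoch from the History object returned by model.fit.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()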
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
3/3 - 0s - loss: 0.0000e+00 - accuracy: 1.0000
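Beyond the single accuracy number, per-class precision and recall can be computed with the classification_report imported at the top. A sketch, assuming the model and test arrays from the cells above:

# model.predict returns one probability per class; argmax picks the predicted class index.
y_pred = np.argmax(model.predict(X_test), axis=1)
print(classification_report(y_test, y_pred))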