Symulowanie-wizualne/sw_lab5.ipynb
2023-01-11 16:34:19 +01:00


Aleksandra Jonas, Aleksandra Gronowska, Iwona Christop

Generating additional images based on edge filters

import os
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import json
%matplotlib inline
def fix_float_img(img):
    # Rescale a float image to 0-255 and cast to 8-bit so cv.imwrite can save it
    img_normed = 255 * (img - img.min()) / (img.max() - img.min())
    img_normed = img_normed.astype(np.uint8)
    return img_normed
directory = r"C:\Users\jonas\OneDrive\Pulpit\train_test_sw\train_sw"
subdirs = [r"\Tomato", r"\Lemon", r"\Beech", r"\Mean", r"\Gardenia"]

json_entries = []

for sub in subdirs:
    path = directory + sub
    
    for filename in os.listdir(path):
        f = os.path.join(path, filename)
        
        if os.path.isfile(f):
            img = cv.imread(f)

            # edge detection using a Laplacian of Gaussian filter
            img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
            img_blurred = cv.GaussianBlur(img_gray, (3, 3), 0, 0)
            img_laplacian = cv.Laplacian(img_blurred, cv.CV_32F, ksize=3)

            cv.normalize(img_laplacian, img_laplacian, 0, 1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)

            filename_edge = f[:-4] + 'K.png'
            final_edge = fix_float_img(img_laplacian)
            cv.imwrite(filename_edge, final_edge)

#             # rotating images
#             img_rotated = cv.rotate(img, cv.ROTATE_90_CLOCKWISE)
#             img_rot_4 = cv.cvtColor(img_rotated, cv.COLOR_RGB2RGBA)
#             img_rot_4[:, :, 3] = np.zeros((256,1))
#             filename_rotated = f[:-4] + 'R.png'
#             cv.imwrite(filename_rotated, img_rotated)
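Canny is another common edge filter for this kind of augmentation. A minimal sketch of a Canny-based variant (the make_canny_variant name, the 'C.png' suffix and the 100/200 thresholds are illustrative assumptions, not part of the lab code):

def make_canny_variant(path, low=100, high=200):
    # Read, convert to grayscale and blur slightly to suppress noise before Canny
    img = cv.imread(path)
    img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    img_blurred = cv.GaussianBlur(img_gray, (3, 3), 0)
    # cv.Canny already returns an 8-bit edge map, so no extra normalization is needed
    edges = cv.Canny(img_blurred, low, high)
    cv.imwrite(path[:-4] + 'C.png', edges)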

MLP

import sys
import subprocess
import pkg_resources
import numpy as np

required = {'scikit-image'}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed

if missing: 
    python = sys.executable
    subprocess.check_call([python, '-m', 'pip', 'install', *missing], stdout=subprocess.DEVNULL)

def load_train_data(input_dir, newSize=(64,64)):
    import numpy as np
    import pandas as pd
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import random
    from shutil import copyfile, rmtree
    import json

    import seaborn as sns
    import matplotlib.pyplot as plt

    import matplotlib
    
    image_dir = Path(input_dir)
    categories_name = []
    for file in os.listdir(image_dir):
        d = os.path.join(image_dir, file)
        if os.path.isdir(d):
            categories_name.append(file)

    folders = [directory for directory in image_dir.iterdir() if directory.is_dir()]

    train_img = []
    categories_count=[]
    labels=[]
    for i, direc in enumerate(folders):
        count = 0
        for obj in direc.iterdir():
            if os.path.isfile(obj) and os.path.basename(os.path.normpath(obj)) != 'desktop.ini':
                labels.append(os.path.basename(os.path.normpath(direc)))
                count += 1
                img = imread(obj)  # returns an ndarray of shape (ySize, xSize, colorDepth)
                if img.ndim == 2:  # grayscale image: replicate it into 4 identical channels
                    img = np.repeat(img[..., np.newaxis], 4, axis=2)
                elif img.shape[-1] == 3:  # RGB image: append an opaque alpha channel
                    img = np.dstack((img, np.full(img.shape[:2], 255, dtype=img.dtype)))
                img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)  # returns an ndarray
                img = img / 255  # normalization to [0, 1]
                train_img.append(img)
        categories_count.append(count)
    X={}
    X["values"] = np.array(train_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"]=labels
    return X

def load_test_data(input_dir, newSize=(256,256)):
    import numpy as np
    import pandas as pd
    import os
    from skimage.io import imread
    import cv2 as cv
    from pathlib import Path
    import random
    from shutil import copyfile, rmtree
    import json

    import seaborn as sns
    import matplotlib.pyplot as plt

    import matplotlib

    image_path = Path(input_dir)

    labels_path = image_path.parents[0] / 'test_labels.json'

    jsonString = labels_path.read_text()
    objects = json.loads(jsonString)

    categories_name = []
    categories_count=[]
    count = 0
    c = objects[0]['value']
    for e in  objects:
        if e['value'] != c:
            categories_count.append(count)
            c = e['value']
            count = 1
        else:
            count += 1
        if not e['value'] in categories_name:
            categories_name.append(e['value'])

    categories_count.append(count)
    
    test_img = []

    labels=[]
    for e in objects:
        p = image_path / e['filename']
        img = imread(p)  # returns an ndarray of shape (ySize, xSize, colorDepth)
        img = cv.resize(img, newSize, interpolation=cv.INTER_AREA)  # returns an ndarray
        img = img / 255  # normalization to [0, 1]
        test_img.append(img)
        labels.append(e['value'])

    X={}
    X["values"] = np.array(test_img)
    X["categories_name"] = categories_name
    X["categories_count"] = categories_count
    X["labels"]=labels
    return X
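Before encoding and training, it can help to sanity-check what the loaders return. A minimal sketch (summarize_dataset is an illustrative helper, not part of the lab code):

def summarize_dataset(data):
    # Print the image tensor shape and the number of images found per category
    print("values:", data["values"].shape)
    for name, count in zip(data["categories_name"], data["categories_count"]):
        print(f"  {name}: {count} images")

# e.g. summarize_dataset(load_train_data("train_test_sw/train_sw", newSize=(16,16)))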

from sklearn.preprocessing import LabelEncoder

# Data load
data_train = load_train_data("train_test_sw/train_sw", newSize=(16,16))
X_train = data_train['values']
y_train = data_train['labels']

data_test = load_test_data("train_test_sw/test_sw", newSize=(16,16))
X_test = data_test['values']
y_test = data_test['labels']

class_le = LabelEncoder()
y_train_enc = class_le.fit_transform(y_train)
y_test_enc = class_le.transform(y_test)  # reuse the encoding fitted on the training labels

X_train = X_train.flatten().reshape(X_train.shape[0], int(np.prod(X_train.shape) / X_train.shape[0]))
X_test = X_test.flatten().reshape(X_test.shape[0], int(np.prod(X_test.shape) / X_test.shape[0]))
X_train.shape
(4708, 1024)
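The 1024 columns come from flattening each resized image: 16 × 16 pixels × 4 channels = 1024 features per sample (the loader pads every image to 4 channels). The flatten().reshape(...) pair above is equivalent to a single reshape:

# Equivalent one-liners, shown only for reference:
# X_train = X_train.reshape(X_train.shape[0], -1)
# X_test = X_test.reshape(X_test.shape[0], -1)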
X = X_train.T   # shape (n_features, n_samples) = (1024, 4708)
n, m = X.shape  # n = number of features, m = number of training examples
def init_params():
    # 1024 input features (16x16x4) -> 5 hidden units -> 5 output classes
    W1 = np.random.rand(5, 1024) - 0.5
    b1 = np.random.rand(5, 1) - 0.5
    W2 = np.random.rand(5, 5) - 0.5
    b2 = np.random.rand(5, 1) - 0.5
    return W1, b1, W2, b2

def ReLU(Z):
    return np.maximum(Z, 0)

def softmax(Z):
    # Subtract the column-wise maximum before exponentiating for numerical stability
    expZ = np.exp(Z - Z.max(axis=0, keepdims=True))
    return expZ / expZ.sum(axis=0, keepdims=True)
    
def forward_prop(W1, b1, W2, b2, X):
    Z1 = W1.dot(X) + b1
    A1 = ReLU(Z1)
    Z2 = W2.dot(A1) + b2
    A2 = softmax(Z2)
    return Z1, A1, Z2, A2
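
# Shape check (illustrative): with X of shape (1024, m),
#   Z1 = W1.dot(X) + b1  -> (5, m)  hidden pre-activations
#   A1 = ReLU(Z1)        -> (5, m)
#   Z2 = W2.dot(A1) + b2 -> (5, m)  one row per class
#   A2 = softmax(Z2)     -> (5, m)  column-wise class probabilities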

def ReLU_deriv(Z):
    return Z > 0

def one_hot(Y):
    one_hot_Y = np.zeros((Y.size, Y.max() + 1))
    one_hot_Y[np.arange(Y.size), Y] = 1
    one_hot_Y = one_hot_Y.T
    return one_hot_Y
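
# Tiny worked example (illustrative): one_hot(np.array([1, 2, 0])) has shape (3, 3),
# with one column per sample and one row per class: column 0 encodes class 1,
# column 1 encodes class 2 and column 2 encodes class 0.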

def backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y):
    one_hot_Y = one_hot(Y)
    dZ2 = A2 - one_hot_Y
    dW2 = 1 / m * dZ2.dot(A1.T)
    db2 = 1 / m * np.sum(dZ2, axis=1, keepdims=True)  # per-unit bias gradient, shape (5, 1)
    dZ1 = W2.T.dot(dZ2) * ReLU_deriv(Z1)
    dW1 = 1 / m * dZ1.dot(X.T)
    db1 = 1 / m * np.sum(dZ1, axis=1, keepdims=True)  # per-unit bias gradient, shape (5, 1)
    return dW1, db1, dW2, db2

def update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha):
    W1 = W1 - alpha * dW1
    b1 = b1 - alpha * db1    
    W2 = W2 - alpha * dW2  
    b2 = b2 - alpha * db2    
    return W1, b1, W2, b2
def get_predictions(A2):
    return np.argmax(A2, 0)

def get_accuracy(predictions, Y):
    print(predictions, Y)
    return np.sum(predictions == Y) / Y.size

def gradient_descent(X, Y, alpha, iterations):
    W1, b1, W2, b2 = init_params()
    for i in range(iterations):
        Z1, A1, Z2, A2 = forward_prop(W1, b1, W2, b2, X)
        dW1, db1, dW2, db2 = backward_prop(Z1, A1, Z2, A2, W1, W2, X, Y)
        W1, b1, W2, b2 = update_params(W1, b1, W2, b2, dW1, db1, dW2, db2, alpha)
        if i % 10 == 0:
            print("Iteration: ", i)
            predictions = get_predictions(A2)
            print(get_accuracy(predictions, Y))
    return W1, b1, W2, b2
W1, b1, W2, b2 = gradient_descent(X, y_train_enc, 0.10, 1000)
Iteration:  0
[0 0 0 ... 0 0 0] [0 0 0 ... 4 4 4]
0.20348343245539507
Iteration:  10
[2 2 2 ... 2 2 2] [0 0 0 ... 4 4 4]
0.2296091758708581
(the output repeats verbatim every 10 iterations up to iteration 990: every training sample is predicted as class 2 and the accuracy stays at 0.2296091758708581)
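Accuracy alone does not show whether the weights are still moving, so a stalled run like the one above is easier to spot if a cross-entropy loss is printed alongside it. A minimal sketch reusing one_hot (cross_entropy and the small epsilon are additions assumed here, not part of the lab code):

def cross_entropy(A2, Y):
    # Mean negative log-likelihood of the true class; columns of A2 are samples
    eps = 1e-9
    return -np.mean(np.sum(one_hot(Y) * np.log(A2 + eps), axis=0))

# Inside gradient_descent this could be printed next to the accuracy, e.g.:
# print("loss:", cross_entropy(A2, Y))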
def make_predictions(X, W1, b1, W2, b2):
    _, _, _, A2 = forward_prop(W1, b1, W2, b2, X)
    predictions = get_predictions(A2)
    return predictions
dev_predictions = make_predictions(X_test.T, W1, b1, W2, b2)
get_accuracy(dev_predictions, y_test_enc)
[2 2 2 ... 2 2 2] [0 0 0 ... 4 4 4]
(the full printout shows every test sample predicted as class 2, while the true labels span classes 0-4)
0.20077220077220076
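The test predictions above collapse onto a single class, which a confusion matrix makes explicit. A minimal sketch using scikit-learn (an added diagnostic, not part of the original notebook):

from sklearn.metrics import confusion_matrix

# Rows are true classes, columns are predicted classes; a single non-zero
# column confirms that every test sample was assigned to the same class.
print(class_le.classes_)
print(confusion_matrix(y_test_enc, dev_predictions))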