Symulowanie-wizualne/sw_lab3.ipynb


from load_data import get_dataset
import numpy as np
from tabulate import tabulate

X_train, y_train, X_test, y_test = get_dataset(new_size=64)

Task 1 (2 pts)

Extend the logistic regression algorithm from lab 1 by adding a regularization term to it.
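The regularization term used below is the standard L2 (ridge) penalty; with $\lambda$ denoting the l2 parameter of the class, the cost and gradient implemented in J and dJ correspond to:

$$J(\theta) = -\frac{1}{m}\sum_{i=1}^{m}\Bigl[y^{(i)}\log h_\theta(x^{(i)}) + \bigl(1 - y^{(i)}\bigr)\log\bigl(1 - h_\theta(x^{(i)})\bigr)\Bigr] + \frac{\lambda}{2m}\sum_{j}\theta_j^2$$

$$\nabla_\theta J(\theta) = \frac{1}{m}\Bigl(X^T\bigl(h_\theta(X) - y\bigr) + \lambda\theta\Bigr)$$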

class LogisticRegression():
    def __init__(self, l2=1):
        self.l2 = l2

    def mapY(self, y, cls):
        m = len(y)
        yBi = np.matrix(np.zeros(m)).reshape(m, 1)
        yBi[y == cls] = 1.
        return yBi

    def indicatorMatrix(self, y):
        classes = np.unique(y.tolist())
        m, k = len(y), len(classes)
        Y = np.matrix(np.zeros((m, k)))
        for i, cls in enumerate(classes):
            Y[:, i] = self.mapY(y, cls)
        return Y
    
    # Matrix form of the softmax function, shifted by the max for numerical stability
    def softmax(self, X):
        e = np.exp(X - np.max(X))
        return e / np.sum(e)
    
    # Logistic (sigmoid) function
    def h(self, theta, X):
        return 1.0 / (1.0 + np.exp(-X * theta))
    
    # Cost function for logistic regression with an L2 penalty
    def J(self, h, theta, X, y):
        m = len(y)
        # clip predictions to avoid log(0)
        h_val = np.clip(h(theta, X), 1e-10, 1 - 1e-10)
        s1 = np.multiply(y, np.log(h_val))
        s2 = np.multiply((1 - y), np.log(1 - h_val))
        s3 = np.sum(s1 + s2, axis=0) / m
        s4 = (self.l2 * np.sum(np.square(theta))) / (2 * m)
        return -s3 + s4

    # Gradient of the regularized cost
    def dJ(self, h, theta, X, y):
        return (X.T * (h(theta, X) - y) + self.l2 * theta) / len(y)

    # Gradient descent for logistic regression
    def GD(self, h, fJ, fdJ, theta, X, y, alpha=0.01, eps=10**-3, maxSteps=10000):
        errorCurr = fJ(h, theta, X, y)
        errors = [[errorCurr, theta]]
        while True:
            # compute the new theta
            theta = theta - alpha * fdJ(h, theta, X, y)
            # track the error level
            errorCurr, errorPrev = fJ(h, theta, X, y), errorCurr
            # stopping criteria
            if abs(errorPrev - errorCurr) <= eps:
                break
            if len(errors) > maxSteps:
                break
            errors.append([errorCurr, theta])
        return theta

    def trainMaxEnt(self, X, Y):
        n = X.shape[1]
        thetas = []
        for c in range(Y.shape[1]):
            YBi = Y[:, c]
            theta = np.matrix(np.random.random(n)).reshape(n, 1)
            # The theta parameters are fitted separately for each class (one-vs-rest).
            thetas.append(self.GD(self.h, self.J, self.dJ, theta, X, YBi, alpha=0.1, eps=10**-4))
        return thetas

    def classify(self, thetas, X):
        regs = np.array([(X * theta).item() for theta in thetas])
        return np.argmax(self.softmax(regs))

    def class_score(self, expected, predicted):
        # accuracy = (TP + TN) / (TP + TN + FP + FN)
        accuracy = sum(1 for exp, pred in zip(expected, predicted) if exp == pred) / len(expected)
        # precision = TP / (TP + FP)
        precision = sum(
            1 for exp, pred in zip(expected, predicted) if exp == 1.0 and pred == 1.0) / sum(
                1 for exp, pred in zip(expected, predicted) if pred == 1.0)
        # recall = TP / (TP + FN)
        recall = sum(
            1 for exp, pred in zip(expected, predicted) if exp == 1.0 and pred == 1.0) / sum(
                1 for exp, pred in zip(expected, predicted) if exp == 1.0)
        f1 = (2 * precision * recall) / (precision + recall)
        return accuracy, precision, recall, f1

    def fit(self, X_train, y_train):
        self.thetas = self.trainMaxEnt(X_train, self.indicatorMatrix(y_train))

    def predict(self, X_test):
        return np.array([self.classify(self.thetas, x) for x in X_test])

    def accuracy(self, expected, predicted):
        return sum(1 for x, y in zip(expected, predicted) if x == y) / len(expected)
# 16x16, l2=0.01 -> 5m 11.4s
# 32x32, l2=0.1 -> 20m 31.3s
# 64x64, l2=0.1 -> 219m 10.8s
logreg = LogisticRegression(l2=0.1)
logreg.fit(X_train, y_train)
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:33: RuntimeWarning: divide by zero encountered in log
  s2 = np.multiply((1 - y), np.log(1 - h_val))
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:33: RuntimeWarning: invalid value encountered in multiply
  s2 = np.multiply((1 - y), np.log(1 - h_val))
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:26: RuntimeWarning: overflow encountered in exp
  return 1.0 /(1.0 + np.exp(-X * theta))
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:32: RuntimeWarning: divide by zero encountered in log
  s1 = np.multiply(y, np.log(h_val))
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:32: RuntimeWarning: invalid value encountered in multiply
  s1 = np.multiply(y, np.log(h_val))
logreg.accuracy(y_test, logreg.predict(X_test))
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:22: RuntimeWarning: overflow encountered in exp
  return np.exp(X) / np.sum(np.exp(X))
/var/folders/7c/v61kq2b95dzbt7s47fxy0grm0000gn/T/ipykernel_25260/2085424405.py:22: RuntimeWarning: invalid value encountered in true_divide
  return np.exp(X) / np.sum(np.exp(X))
0.5173745173745173
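class_score is defined above but never called in the notebook; below is a minimal, hypothetical usage sketch for a single class (class 0 here), treating the multi-class predictions as a one-vs-rest binary problem. It assumes class 0 actually occurs among both the true and the predicted labels of the test split loaded above.

# Hypothetical per-class evaluation with class_score (one-vs-rest for class 0).
predicted = logreg.predict(X_test)
expected_bin = (np.asarray(y_test).ravel() == 0).astype(float)
predicted_bin = (np.asarray(predicted).ravel() == 0).astype(float)
acc, prec, rec, f1 = logreg.class_score(expected_bin, predicted_bin)
print(tabulate([[0, acc, prec, rec, f1]],
               headers=['class', 'accuracy', 'precision', 'recall', 'f1']))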

Task 2 (4 pts)

Implement the SVM algorithm with a soft margin (regularization).
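The implementation below follows a standard soft-margin (hinge-loss) formulation minimized by per-sample (sub)gradient steps, with $\lambda$ = lambda_param and learning rate $\eta$ = lr:

$$\min_{w,\,b}\;\; \lambda\lVert w\rVert^2 + \frac{1}{m}\sum_{i=1}^{m}\max\bigl(0,\; 1 - y_i(w \cdot x_i - b)\bigr), \qquad y_i \in \{-1, +1\}$$

For each sample: if $y_i(w \cdot x_i - b) \ge 1$, then $w \leftarrow w - \eta \cdot 2\lambda w$; otherwise $w \leftarrow w - \eta\,(2\lambda w - y_i x_i)$ and $b \leftarrow b - \eta\, y_i$. Multi-class input is handled one-vs-rest: a separate $(w, b)$ pair is trained for every class.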

class SVM():
    def __init__(self, lr=0.001, lambda_param=10**-6, n_iters=1000):
        self.lr = lr
        self.lambda_param = lambda_param
        self.n_iters = n_iters

    def _mapY(self, y, cls):
        m = len(y)
        yBi = np.matrix(np.zeros(m)).reshape(m, 1)
        yBi[y == cls] = 1.
        return yBi

    def _indicatorMatrix(self, y):
        classes = np.unique(y.tolist())
        m, k = len(y), len(classes)
        Y = np.matrix(np.zeros((m, k)))
        for i, cls in enumerate(classes):
            Y[:, i] = self._mapY(y, cls)
        return Y

    def fit(self, X, y):
        n_classes = len(np.unique(y))
        # one-vs-rest targets: indicator matrix mapped to {-1, +1}
        y = self._indicatorMatrix(y)
        y = np.where(y == 0, -1, 1)

        n_features = X.shape[1]
        self.weights, self.biases = [], []

        for cls in range(n_classes):
            y_ = y[:,cls]
            y_ = np.where(y_ <= 0, -1, 1)

            w, b = np.zeros(n_features), 0

            for _ in range(self.n_iters):
                for idx, x_i in enumerate(X):
                    # correctly classified outside the margin: only the L2 penalty shrinks w;
                    # otherwise take a hinge-loss (sub)gradient step on w and b
                    condition = y_[idx] * (np.dot(x_i, w) - b) >= 1
                    if condition:
                        w -= self.lr * (2 * self.lambda_param * w)
                    else:
                        w -= self.lr * (2 * self.lambda_param * w - np.dot(x_i, y_[idx]))
                        b -= self.lr * y_[idx]
            self.weights.append(w)
            self.biases.append(b)

    def _classify(self, x):
        # one-vs-rest decision: the first classifier whose output is +1 wins, class 0 otherwise
        cls = [np.sign(np.dot(x, self.weights[i]) - self.biases[i]) for i in range(len(self.biases))]
        return cls.index(1.0) if 1.0 in cls else 0

    def predict(self, X):
        return [self._classify(x) for x in X]

    def accuracy(self, expected, predicted):
        return sum(1 for x, y in zip(expected, predicted) if x == y) / len(expected)
# 16x16 -> 1m 28.8s
# 32x32 -> 2m 2.2s
# 64x64 -> 4m 55.3s

svm = SVM()
svm.fit(X_train, y_train)
svm.accuracy(y_test, svm.predict(X_test))
0.32432432432432434