import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score

# Quick look at the iris dataset (not used by the rest of the script).
data = load_iris()
data.target[[10, 25, 50]]
list(data.target_names)


def generate_data():
    # Keep results deterministic
    np.random.seed(1234)
    X, y = datasets.make_moons(200, noise=0.25)
    # X, y = datasets.make_classification(200, 2, 2, 0)
    return X, y


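# Illustrative only (not part of the original script): make_moons(200, noise=0.25)
# yields 200 two-dimensional points with binary labels, so a quick sanity check
# on the generated data looks like this:
#
#     X, y = generate_data()
#     assert X.shape == (200, 2) and y.shape == (200,)
#     assert set(np.unique(y)) == {0, 1}

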
def visualize(X, y, model=None):
    # Plot the data points and, if a model is given, its decision regions.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    h = 0.01
    xx, yy = np.meshgrid(
        np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    if model:
        Z = predict(model, np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)
        plt.contourf(xx, yy, Z, cmap=plt.cm.viridis)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.viridis)
    plt.show()


def initialize_model(dim_in=2, dim_hid=3, dim_out=2):
    # Keep results deterministic
    np.random.seed(1234)
    W1 = np.random.randn(dim_in, dim_hid) / np.sqrt(dim_in)
    b1 = np.zeros((1, dim_hid))
    W2 = np.random.randn(dim_hid, dim_out) / np.sqrt(dim_hid)
    b2 = np.zeros((1, dim_out))
    return W1, b1, W2, b2


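# For reference (added note, not original code): with the defaults dim_in=2,
# dim_hid=3, dim_out=2 the parameter shapes are W1 (2, 3), b1 (1, 3),
# W2 (3, 2), b2 (1, 2); the 1/sqrt(dim_in) scaling keeps the initial tanh
# activations away from saturation.

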
def softmax(X):
    # Subtract the row-wise max before exponentiating for numerical stability;
    # this does not change the result, since softmax is shift-invariant.
    e = np.exp(X - np.max(X, axis=1, keepdims=True))
    return e / np.sum(e, axis=1, keepdims=True)


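# Worked example (illustrative, not in the original file): for a single row
# [1.0, 2.0], softmax returns approximately [0.269, 0.731], and every row of
# the output sums to 1:
#
#     softmax(np.array([[1.0, 2.0]]))  # -> array([[0.26894142, 0.73105858]])

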
def predict(model, X):
    W1, b1, W2, b2 = model
    # Forward pass: hidden tanh layer followed by a softmax output layer.
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    probs = softmax(z2)
    # Return the most probable class index for each sample.
    return np.argmax(probs, axis=1)


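# Usage sketch (illustrative, mirrors the main block below):
#
#     y_pred = predict(model, X)   # array of class indices 0/1, shape (len(X),)

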
def calculate_cost(model, X, y):
    W1, b1, W2, b2 = model
    z1 = X.dot(W1) + b1
    a1 = np.tanh(z1)
    z2 = a1.dot(W2) + b2
    probs = softmax(z2)
    preds = probs[:, 1]
    return -1. / len(y) * np.sum(
        np.multiply(y, np.log(preds)) + np.multiply(1 - y, np.log(1 - preds)),
        axis=0)


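# The cost above is the binary cross-entropy computed from P(class 1):
#
#     J = -(1/m) * sum_i [ y_i * log(p_i) + (1 - y_i) * log(1 - p_i) ],
#
# with p_i = probs[i, 1]; this assumes exactly two output classes and labels
# in {0, 1}. (Added note, not part of the original file.)

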
# def accuracy(model, X, y):
#     predicted = predict(model, X)
#     return len([1 for x, y in predict(model, X) if x==y])/len(y)


def accuracy(model, X, y):
    y_pred = predict(model, X)
    return accuracy_score(y, y_pred)


# def accuracy1(x, y, model):
#     y_pred = (model.predict(x)).type(torch.FloatTensor)
#     y = y.unsqueeze(1)
#     correct = (y_pred == y).type(torch.FloatTensor)
#     return correct.mean()


def train(model, X, y, alpha=0.01, epochs=10000, debug=False):
    W1, b1, W2, b2 = model
    m = len(X)

    for i in range(epochs):
        # Forward propagation
        z1 = X.dot(W1) + b1
        a1 = np.tanh(z1)
        z2 = a1.dot(W2) + b2
        probs = softmax(z2)

        # Backpropagation
        delta3 = probs
        delta3[range(m), y] -= 1
        dW2 = (a1.T).dot(delta3)
        db2 = np.sum(delta3, axis=0, keepdims=True)
        delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
        dW1 = np.dot(X.T, delta2)
        db1 = np.sum(delta2, axis=0)

        # Parameter update
        W1 -= alpha * dW1
        b1 -= alpha * db1
        W2 -= alpha * dW2
        b2 -= alpha * db2

        # Periodically report cost and accuracy
        if debug and i % 1000 == 0:
            model = (W1, b1, W2, b2)
            print("Cost after iteration {}: {:.4f}".format(i, calculate_cost(
                model, X, y)))
            print("Accuracy iteration {}: {:.4f}".format(i, accuracy(model, X, y)))

    return W1, b1, W2, b2


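# Note on the gradients above (added comment, not original code): for a softmax
# output trained with cross-entropy, dJ/dz2 is probs with 1 subtracted at the
# true class (delta3), and the hidden-layer term uses tanh'(z1) = 1 - tanh(z1)**2,
# which is the (1 - a1**2) factor in delta2.

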
if __name__ == '__main__':
    X, y = generate_data()
    visualize(X, y)

    model = train(initialize_model(dim_hid=5), X, y, debug=True)
    visualize(X, y, model)

    print("Classifier accuracy:", accuracy(model, X, y))

    model = train(initialize_model(dim_hid=1), X, y, debug=True)
    visualize(X, y, model)

    print("Classifier accuracy for hidden layer size 1:", accuracy(model, X, y))

    model = train(initialize_model(dim_hid=2), X, y, debug=True)
    visualize(X, y, model)

    print("Classifier accuracy for hidden layer size 2:", accuracy(model, X, y))

    model = train(initialize_model(dim_hid=5), X, y, debug=True)
    visualize(X, y, model)

    print("Classifier accuracy for hidden layer size 5:", accuracy(model, X, y))

    model = train(initialize_model(dim_hid=10), X, y, debug=True)
    visualize(X, y, model)

    print("Classifier accuracy for hidden layer size 10:", accuracy(model, X, y))

    model = train(initialize_model(dim_hid=15), X, y, debug=True)
    visualize(X, y, model)

    print("Classifier accuracy for hidden layer size 15:", accuracy(model, X, y))
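
    # The repeated blocks above could equivalently be written as a loop over
    # hidden layer sizes (sketch only, same behavior assumed):
    #
    #     for dim_hid in (1, 2, 5, 10, 15):
    #         model = train(initialize_model(dim_hid=dim_hid), X, y, debug=True)
    #         visualize(X, y, model)
    #         print("Classifier accuracy for hidden layer size {}:".format(dim_hid),
    #               accuracy(model, X, y))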