neural_networks
BIN  src/cyfry/1.png  (new file, 246 B)
BIN  src/cyfry/2.png  (new file, 381 B)
BIN  src/cyfry/3.png  (new file, 577 B)
BIN  src/cyfry/4.png  (new file, 455 B)
BIN  src/cyfry/5.png  (new file, 367 B)
BIN  src/cyfry/6.png  (new file, 346 B)
BIN  src/cyfry/7.png  (new file, 337 B)
BIN  src/cyfry/8.png  (new file, 619 B)
BIN  src/cyfry/9.png  (new file, 541 B)
src/neural_network.py  (new file, 150 lines)
@@ -0,0 +1,150 @@
from emnist import extract_training_samples, extract_test_samples
import numpy as np
import scipy.special

# The imports below are not used by the active code: torch and imshow appear
# only in commented-out experiments, and glob/imageio are presumably intended
# for loading the PNG digits added under src/cyfry/.
import torch
from matplotlib.pyplot import imshow
import glob
import imageio
# Load the EMNIST digit and letter splits (28x28 greyscale images plus labels).
dig_train_images, dig_train_labels = extract_training_samples('digits')
dig_test_images, dig_test_labels = extract_test_samples('digits')
let_train_images, let_train_labels = extract_training_samples('letters')
let_test_images, let_test_labels = extract_test_samples('letters')


# def plotdigit(image):
#     img = np.reshape(image, (-1, 28))
#     imshow(img, cmap='Greys', vmin=0, vmax=255)

print(dig_train_images.shape)
"""
|
||||
dig_train_images = dig_train_images / 255
|
||||
dig_test_images = dig_test_images / 255
|
||||
let_train_images = let_train_images / 255
|
||||
let_test_images = let_test_images / 255
|
||||
|
||||
dig_train_images = [torch.tensor(image, dtype=torch.float32) for image in dig_train_images]
|
||||
"""
|
||||
#print(dig_train_images[0])
|
||||
# Flatten each 28x28 image into a 784-element vector and work on small
# subsets to keep training time short.
dig_train_images = dig_train_images.reshape(len(dig_train_images), 28 * 28)
d_train = dig_train_images[:1000]
d_labels = dig_train_labels[:1000]

dig_test_images = dig_test_images.reshape(len(dig_test_images), 28 * 28)
d_test = dig_test_images[:600]
d_labelstest = dig_test_labels[:600]

print(d_test.shape)
print(d_labelstest)
class NeuralNetwork:
    """A minimal fully connected network with one hidden layer, trained with
    plain backpropagation and a sigmoid activation."""

    def __init__(self, inputNodes, hiddenNodes, outputNodes, learningRate):
        self.inodes = inputNodes
        self.hnodes = hiddenNodes
        self.onodes = outputNodes

        # Weight matrices (input->hidden and hidden->output), initialised
        # uniformly in [-0.5, 0.5).
        self.weights = np.random.rand(self.hnodes, self.inodes) - 0.5
        self.hidden = np.random.rand(self.onodes, self.hnodes) - 0.5

        self.lr = learningRate

        # Sigmoid activation function.
        self.activationFunction = lambda x: scipy.special.expit(x)
    def train(self, inputsList, targetsList):
        inputs = np.array(inputsList, ndmin=2).T
        targets = np.array(targetsList, ndmin=2).T

        # Forward pass (identical to query()).
        hiddenInputs = np.dot(self.weights, inputs)
        hiddenOutputs = self.activationFunction(hiddenInputs)

        finalInputs = np.dot(self.hidden, hiddenOutputs)
        finalOutputs = self.activationFunction(finalInputs)

        # Backpropagation: output error, then the error attributed to the
        # hidden layer through the hidden->output weights.
        outputErrors = targets - finalOutputs
        hiddenErrors = np.dot(self.hidden.T, outputErrors)

        # Gradient-descent updates using the sigmoid derivative y * (1 - y).
        self.hidden += self.lr * np.dot(outputErrors * finalOutputs * (1.0 - finalOutputs),
                                        hiddenOutputs.T)
        self.weights += self.lr * np.dot(hiddenErrors * hiddenOutputs * (1.0 - hiddenOutputs),
                                         inputs.T)
    def query(self, inputsList):
        # Forward pass only; returns the output-layer activations as a column vector.
        inputs = np.array(inputsList, ndmin=2).T

        hiddenInputs = np.dot(self.weights, inputs)
        hiddenOutputs = self.activationFunction(hiddenInputs)

        finalInputs = np.dot(self.hidden, hiddenOutputs)
        finalOutputs = self.activationFunction(finalInputs)

        return finalOutputs
"""
|
||||
def getAccurancy(predictons,Y):
|
||||
print(predictons,Y)
|
||||
return np.sum(predictons=Y)/Y.size
|
||||
|
||||
def getPredictions(A2):
|
||||
return np.argmax(A2,0)
|
||||
"""
|
||||
|
||||
|
||||
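# Added sketch, not part of the original file: corrected versions of the
# disabled helpers above. The draft's np.sum(predictons=Y) passed a keyword
# argument instead of comparing element-wise, so == is used here. They are
# used by the evaluation added after training below.
def getPredictions(A2):
    # A2 holds one column of output activations per sample; argmax over axis 0
    # picks the most active output node for each column.
    return np.argmax(A2, 0)


def getAccuracy(predictions, Y):
    return np.sum(predictions == Y) / Y.size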
# 784 input pixels, 200 hidden nodes, 10 output classes (digits 0-9).
# n = NeuralNetwork(inputNodes=3, hiddenNodes=5, outputNodes=2, learningRate=0.2)
n = NeuralNetwork(inputNodes=784, hiddenNodes=200, outputNodes=10, learningRate=0.1)
def trainNetwork(n):
    epochs = 10
    outputNodes = 10
    for e in range(epochs):
        print('Epoch', e + 1)

        for m, record in enumerate(d_train):
            # Scale raw 0-255 pixel values into the range 0.01-1.00.
            inputs = (np.asarray(record, dtype=float) / 255 * 0.99) + 0.01

            # One-hot style target: 0.99 for the correct digit, 0.01 elsewhere.
            targets = np.zeros(outputNodes) + 0.01
            targets[d_labels[m]] = 0.99

            n.train(inputs, targets)


trainNetwork(n)
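# Added sketch, not part of the original file: accuracy on the d_test subset,
# using getPredictions/getAccuracy defined above. query() returns a (10, 1)
# column per sample, so the columns are stacked into a (10, len(d_test)) array.
testOutputs = np.hstack([
    n.query(np.asarray(record, dtype=float) / 255 * 0.99 + 0.01)
    for record in d_test
])
print('Test accuracy:', getAccuracy(getPredictions(testOutputs), d_labelstest))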
# Query a single test sample and compare the output with its label.
record = d_test[0]
print('Label', d_labelstest[0])
inputs = np.asarray(record, dtype=float) / 255 * 0.99 + 0.01
print(n.query(inputs))
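# Added sketch, not part of the original file: classify the hand-drawn digits
# added under src/cyfry/ using the glob/imageio imports at the top. Assumes the
# script runs from the repository root and that each PNG is a 28x28 image with
# dark strokes on a light background, hence the 255 - value inversion to match
# the EMNIST convention.
for fileName in sorted(glob.glob('src/cyfry/*.png')):
    img = np.asarray(imageio.imread(fileName), dtype=float)
    if img.ndim == 3:                      # collapse RGB/RGBA to greyscale
        img = img[..., :3].mean(axis=2)
    pixels = 255.0 - img.reshape(784)      # 28*28 = 784, inverted
    ownInputs = pixels / 255 * 0.99 + 0.01
    print(fileName, '->', np.argmax(n.query(ownInputs)))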