Merge branch 'main' of https://git.wmi.amu.edu.pl/s452639/psi
Commit 6721c16d3d

.vscode/launch.json (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "type": "pwa-chrome",
            "request": "launch",
            "name": "Launch Chrome against localhost",
            "url": "http://localhost:8080",
            "webRoot": "${workspaceFolder}"
        }
    ]
}
BIN  src/Dhidden.npy        (new file; binary file not shown)
BIN  src/Dweights.npy       (new file; binary file not shown)
BIN  src/Lhidden_test.npy   (new file; binary file not shown)
BIN  src/Lweights_test.npy  (new file; binary file not shown)
BIN  src/litery/1.png       (new file; 383 B)
BIN  src/litery/2.png       (new file; 402 B)
BIN  src/litery/3.png       (new file; 403 B)
BIN  src/litery/4.png       (new file; 454 B)
BIN  src/litery/5.png       (new file; 376 B)
@@ -1,96 +1,78 @@
from emnist import list_datasets
from emnist import extract_test_samples
from emnist import extract_training_samples
import numpy as np
import torch
from torch import nn
from torch import optim
import scipy.special
from matplotlib.pyplot import imshow
import glob
import imageio

""" fetch the digit and letter images from the EMNIST library """
dig_train_images, dig_train_labels = extract_training_samples('digits')
dig_test_images, dig_test_labels = extract_test_samples('digits')
let_train_images, let_train_labels = extract_training_samples('letters')
let_test_images, let_test_labels = extract_test_samples('letters')

#print(dig_train_images[0])
""" reshape the image arrays: 28x28 pixels -> flat vectors of 784 values """
dig_train_images = dig_train_images.reshape(len(dig_train_images), 28*28)
d_train = dig_train_images[:1000]
d_labels = dig_train_labels[:1000]

dig_test_images = dig_test_images.reshape(len(dig_test_images), 28*28)
d_test = dig_test_images[:600]
d_labelstest = dig_test_labels[:600]

print(d_test.shape)
print(d_labelstest)
#print(dig_train_images[0])
#print(dig_train_images.shape)
let_train_images = let_train_images.reshape(len(let_train_images), 28*28)
let_test_images = let_test_images.reshape(len(let_test_images), 28*28)

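# Quick shape sanity check (illustrative only; the sample counts assume the
# standard EMNIST 'digits' split and are not asserted anywhere in this script):
#   dig_train_images  -> (240000, 784) after the reshape above
#   d_train           -> (1000, 784),  d_labels      -> (1000,)
#   d_test            -> (600, 784),   d_labelstest  -> (600,)
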

class NeuralNetwork:
    """ neural network initialisation """
    def __init__(self, inputNodes, hiddenNodes, outputNodes, learningGrade, fileWeight, fileHidden):
        self.inodes = inputNodes
        self.hnodes = hiddenNodes
        self.onodes = outputNodes

        """ use the random initialisation while training the network;
            once trained, switch to the np.load lines to restore the saved weights """
        self.weights = (np.random.rand(self.hnodes, self.inodes) - 0.5)  # input -> hidden weights
        self.hidden = (np.random.rand(self.onodes, self.hnodes) - 0.5)   # hidden -> output weights
        """ used when loading an already trained network from file """
        # self.weights = np.load(fileWeight)
        # self.hidden = np.load(fileHidden)

        #print('Matrix1 \n', self.weights)
        #print('Matrix2 \n', self.hidden)

        self.lr = learningGrade

        """ activation function (sigmoid) """
        self.activationFunction = lambda x: scipy.special.expit(x)

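    # What the activation does (a small illustrative sketch, not extra behaviour):
    # scipy.special.expit(x) is the logistic sigmoid 1 / (1 + exp(-x)), so e.g.
    #   expit(0.0) -> 0.5,   expit(4.0) -> ~0.982,   expit(-4.0) -> ~0.018
    # i.e. every layer's raw sums are squashed into the open interval (0, 1).
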
    """ neural network training """
    def train(self, inputsList, targetsList):

        """ convert the input lists to 2d arrays """
        inputs = np.array(inputsList, ndmin=2).T
        targets = np.array(targetsList, ndmin=2).T

        """ forward pass """
        hiddenInputs = np.dot(self.weights, inputs)        # input -> hidden layer
        hiddenOutputs = self.activationFunction(hiddenInputs)

        finalInputs = np.dot(self.hidden, hiddenOutputs)   # hidden -> output layer
        finalOutputs = self.activationFunction(finalInputs)

        """ backward pass """
        outputErrors = targets - finalOutputs
        #print(outputErrors.shape)
        #x = self.weights.T
        #print(x.shape)
        hiddenErrors = np.dot(self.hidden.T, outputErrors)  # errors propagated back to the hidden layer

        #print('OutputErrors', outputErrors.shape)
        #print('finalOutputs', finalOutputs.shape)
        self.hidden += self.lr * np.dot((outputErrors * finalOutputs * (1.0 - finalOutputs)), np.transpose(hiddenOutputs))
        self.weights += self.lr * np.dot((hiddenErrors * hiddenOutputs * (1.0 - hiddenOutputs)), np.transpose(inputs))

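    # The two in-place updates above are plain gradient descent with the sigmoid
    # derivative out * (1 - out); written out (a sketch of the formula the code
    # implements, using the variable names above):
    #   hidden  += lr * (outputErrors * finalOutputs  * (1 - finalOutputs))  @ hiddenOutputs.T
    #   weights += lr * (hiddenErrors * hiddenOutputs * (1 - hiddenOutputs)) @ inputs.T
    # where hiddenErrors = hidden.T @ outputErrors.
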
    """ save the trained network to files """
    def saveTraining(self, fileWeight, fileHidden):
        np.save(fileWeight, self.weights)
        np.save(fileHidden, self.hidden)

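    # Note: the arrays written here are the ones __init__ reads back with np.load
    # when the "loading an already trained network" lines are uncommented, e.g.
    # (illustrative only):
    #   self.weights = np.load(fileWeight)
    #   self.hidden = np.load(fileHidden)
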
""" wykorzystanie sieci """
|
||||
def query(self, inputsList):
|
||||
|
||||
""" konwersja listy na tablicę 2d """
|
||||
inputs = np.array(inputsList, ndmin=2).T
|
||||
|
||||
|
||||
hiddenInputs = np.dot(self.weights, inputs)
|
||||
hiddenOutputs = self.activationFunction(hiddenInputs)
|
||||
|
||||
@@ -100,25 +82,26 @@ class NeuralNetwork:
        return finalOutputs


""" TODO: add an array of letters """
#n = NeuralNetwork(inputNodes=3, hiddenNodes=5, outputNodes=2, learningGrade=0.2)
""" the neural network instances """
digitNetwork = NeuralNetwork(inputNodes=784, hiddenNodes=200, outputNodes=10, learningGrade=0.1, fileWeight="Dweights.npy", fileHidden="Dhidden.npy")
letterNetwork = NeuralNetwork(inputNodes=784, hiddenNodes=200, outputNodes=27, learningGrade=0.1, fileWeight="Lweights.npy", fileHidden="Lhidden.npy")

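# Example of how the two networks above are driven (a sketch only; the actual
# training/testing calls appear further down, mostly commented out):
#   trainNetwork(digitNetwork, "Dweights.npy", "Dhidden.npy", d_train, d_labels)
#   testing(digitNetwork, d_test, d_labelstest)
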
# trainNetwork(digitNetwork, "Dweights_test.npy", "Dhidden_test.npy", let_train_images, let_train_labels)
def trainNetwork(n, fWeight, fHidden, trainingSamples, trainingLabels):
    epochs = 10
    outputNodes = n.onodes   # 10 for the digit network, 27 for the letter network
    for e in range(epochs):
        m = 0
        print('Epoch', e+1)

        for record in trainingSamples:
            """ rescale the pixel values from [0,255] into [0.01,1.0] """
            inputs = (np.asfarray(record[0:])/255 * 0.99) + 0.01
            #print(inputs.shape)

            targets = np.zeros(outputNodes) + 0.01
            targets[trainingLabels[m]] = 0.99
            #print(targets)
            n.train(inputs, targets)

            m += 1
@@ -127,14 +110,135 @@ def trainNetwork(n, fWeight, fHidden, trainingSamples, trainingLabels):
    n.saveTraining(fileWeight=fWeight, fileHidden=fHidden)

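# Worked example of the one-hot targets built inside trainNetwork above: with
# outputNodes = 10 and a label of 3, the loop produces
#   [0.01, 0.01, 0.01, 0.99, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
# i.e. 0.99 at index 3 and 0.01 everywhere else.
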
def testing(n, testingSamples, testingLabels):
    scorecard = []
    k = 0
    for record in testingSamples:
        inputs = (np.asfarray(record[0:])/255 * 0.99) + 0.01
        correctLabels = testingLabels[k]

        outputs = n.query(inputs)
        label = np.argmax(outputs)

        if label == correctLabels:
            scorecard.append(1)
        else:
            scorecard.append(0)
        k += 1

    scorecardArray = np.asfarray(scorecard)
    print('Performance', scorecardArray.sum() / scorecardArray.size)

##################################### RUN TRAINING (older call, kept for reference)
#trainNetwork(digitNetwork, "Dweights.npy", "Dhidden.npy", d_train)

# single-sample debug check:
#record = d_test[0]
#print('Label', d_labelstest[0])
#inputs = np.asfarray(record[0:])/ 255 * 0.99 + 0.01
#print(n.query(inputs))

testing(digitNetwork, dig_test_images, dig_test_labels)
testing(letterNetwork, let_test_images, let_test_labels)

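# The 'Performance' value printed above is simply the fraction of correct answers:
# e.g. a scorecard of [1, 0, 1, 1] gives 3 / 4 = 0.75.
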
li = []
ourOwnDataset = []
record_cache = None

def testCase(inputWord):
    length = len(inputWord)   # was `len = len(inputWord)`, which shadowed the builtin

    word = ""
    # all characters except the last two are treated as letters
    for i in range(0, length-2):
        # NOTE: imageFileName is never set in this function; it presumably has to
        # be built from inputWord[i] (e.g. the path of that character's PNG)
        imgArray = imageio.imread(imageFileName, as_gray=True)
        imgData = 255 - imgArray.reshape(784)
        imgData = (imgData/255 * 0.99) + 0.01
        #inputWord[i]
        word = word + recognizeLet(letterNetwork, imgData)

    # the last two characters are treated as digits
    for i in range(length-2, length):
        imgArray = imageio.imread(imageFileName, as_gray=True)
        imgData = 255 - imgArray.reshape(784)
        imgData = (imgData/255 * 0.99) + 0.01
        #inputWord[i]
        word = word + recognizeNum(digitNetwork, imgData)

    #testing
    #assert record_cache.shape == ourOwnDataset[0].shape
    #labelInput = np.asfarray(li)
    #print(labelInput)
    print('slowo: ', word)   # 'slowo' = 'word'


def recognizeLet(n, imgData):
    letters = ['','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
    #record = np.append(label,imgData)
    outputs = n.query(imgData)
    label = np.argmax(outputs)
    return letters[int(label)]

def recognizeNum(n, imgData):
    #record = np.append(label,imgData)
    outputs = n.query(imgData)
    #print('Record: ',record)
    #ourOwnDataset.append(record)
    #if record_cache is None:
    #    record_cache = record
    #print(ood[0])
    #li.append(label)
    label = np.argmax(outputs)
    return str(label)

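# Usage sketch for the two helpers above (imgData is assumed to be a flattened,
# normalised 784-element array exactly as prepared in testCase):
#   recognizeNum(digitNetwork, imgData)    # -> a digit as a string, e.g. '7'
#   recognizeLet(letterNetwork, imgData)   # -> a lowercase letter, e.g. 'k'
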

# older standalone test loop on our own 'cyfry/?.png' digit images, kept disabled:
"""
li = []
#ourOwnDataset = np.asfarray(ood)
ourOwnDataset = []

record_cache = None
for imageFileName in glob.glob('cyfry/?.png'):
    label = int(imageFileName[-5:-4])
    print('loading...', imageFileName)

    imgArray = imageio.imread(imageFileName, as_gray=True)
    #print(' imgArray: ', imgArray)
    imgData = 255 - imgArray.reshape(784)
    #print('imgData1: ',imgData)
    imgData = (imgData/255 * 0.99) + 0.01
    #print('imgData2: ',imgData)

    #print(np.min(imgData))
    #print(np.max(imgData))

    record = np.append(label,imgData)
    #print('Record: ',record)
    ourOwnDataset.append(record)
    if record_cache is None:
        record_cache = record
    #print(ood[0])
    li.append(label)

assert record_cache.shape == ourOwnDataset[0].shape
labelInput = np.asfarray(li)
#print(labelInput)


word = ""
for item in range(0,9):
    correctLabels = labelInput[item]
    outputs = n.query(ourOwnDataset[item][1:])
    print(outputs)

    label = np.argmax(outputs)
    #print('Network says: ', label)
    #labelString = np.array_str(label)
    word = word + str(label)

print('slowo: ', word)
"""


##################################### RUN TRAINING
#trainNetwork(letterNetwork, "Lweights_test.npy", "Lhidden_test.npy", let_train_images, let_train_labels)
# trainNetwork(digitNetwork, "Dweights_test.npy", "Dhidden_test.npy", let_train_images, let_train_labels)
@@ -64,6 +64,12 @@ function randomFromSet(set) {
    unreachable();
}

function mapToJson(map) {
    return JSON.stringify([...map]);
}
function jsonToMap(jsonStr) {
    return new Map(JSON.parse(jsonStr));
}

function nice(v) { return `${(v * 100).toFixed(1)}%` }

@@ -75,7 +81,9 @@ async function requestJSONCached(url, params = {}) {
    const response = await fetch(url, params);
    const json = await response.json();
    requestJSONCached.cache.set(key, json);
    const cache = mapToJson(requestJSONCached.cache);
    localStorage.setItem("cache", cache);
    return json;
}

// restore the cache persisted in localStorage (an empty Map if nothing is stored yet)
requestJSONCached.cache = jsonToMap(localStorage.getItem("cache"));

@@ -12,8 +12,25 @@ class OrdersView {
        [...document.querySelectorAll('.orders-window .canvases canvas')].map((canv, index) => {
            return new Promise((resolve, reject) => {
                canv.toBlob(blob => {
                    let blobUrl = URL.createObjectURL(blob);

                    const img = new Image();
                    img.src = blobUrl;

                    img.onload = () => {
                        // redraw the full-size canvas onto a 28x28 canvas
                        const canvas = document.createElement('canvas');
                        canvas.width = 28;
                        canvas.height = 28;

                        const ctx = canvas.getContext('2d');
                        ctx.drawImage(img, 0, 0, 28, 28);

                        canvas.toBlob((blob) => {
                            // blobUrl = URL.createObjectURL(blob);
                            formData.append(`file-${index}`, blob, `file-${index}.png`);
                            resolve();
                        }, 'image/png');
                    };
                });
            });
        })
