Compare commits
46 Commits
put_packag ... master

Commits (SHA1):
ad0e642964, 175055c84a, c918e1007b, e4d7f6e37d, 07340db2b8, e496375238,
c8859b34f8, f27ff12d88, 1a2c00c6e2, b5a2e4bd8b, ce6bd4c800, 972480f501,
88f5a1acd0, 8cf63dc56d, e4bc5e2063, fc3296b57a, bb8f28ad40, 51d8327040,
255e756dd3, 55742839d9, f7cc21a386, 23d5dbd614, 9172aa397f, 28b120769d,
9b1d9c1f13, 133e54ab12, aa340e68d3, 19c2898b4e, fe6e9ff081, 48480c717e,
dcda3bbfef, c946313bcd, 497b01ccb3, 57d299e767, 40ad5222a1, d3d2fcfb0b,
bdea00c5b2, 3f65be482c, d1ba1659c1, 7bf768e6af, 4670845a00, 5341f5330b,
01e3724d8e, 7495d47f7f, 230b94064b, 87ec5eba2b
5 .vscode/settings.json vendored Normal file
@@ -0,0 +1,5 @@
{
    "python.analysis.extraPaths": [
        "./DecisionTree"
    ]
}
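Note: python.analysis.extraPaths tells the Pylance language server to also search ./DecisionTree when resolving imports, so editor analysis can find the modules in that folder; it does not change the runtime sys.path.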
Deleted file:
@@ -1,57 +0,0 @@
import graphviz
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz

plikZPrzecinkami = open("training_data.txt", 'w')

with open('200permutations_table.txt', 'r') as plik:
    for linia in plik:
        liczby = linia.strip()
        wiersz = ""
        licznik = 0
        for liczba in liczby:
            wiersz += liczba
            wiersz += ";"
        wiersz = wiersz[:-1]
        wiersz += '\n'
        plikZPrzecinkami.write(wiersz)

plikZPrzecinkami.close()

x = pd.read_csv('training_data.txt', delimiter=';',
                names=['wielkosc', 'waga,', 'priorytet', 'ksztalt', 'kruchosc', 'dolna', 'gorna', 'g > d'])
y = pd.read_csv('decisions.txt', names=['polka'])
# X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)  # 70% training and 30% test

# Create an instance of the ID3 classifier
clf = DecisionTreeClassifier(criterion='entropy')

# Train the classifier
clf.fit(x.values, y.values)
# clf.fit(X_train, y_train)

# Prediction on new data
new_data = [[2, 2, 1, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]]
predictions = clf.predict(new_data)
# y_pred = clf.predict(X_test)

print(predictions)
# print("Accuracy:", clf.score(new_data, predictions))
# print("Accuracy:", metrics.accuracy_score(y_test, y_pred))

# Generate a .dot file representing the tree
dot_data = export_graphviz(clf, out_file=None, feature_names=list(x.columns), class_names=['0', '1'], filled=True,
                           rounded=True)

# Create a graphviz object from the .dot data
graph = graphviz.Source(dot_data)

# Display the tree
graph.view()

z = pd.concat([x, y], axis=1)
z.to_csv('dane.csv', index=False)
BIN DecisionTree/wyuczone_drzewo.pkl Normal file
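Note: the new DecisionTree/wyuczone_drzewo.pkl presumably stores the trained decision tree from the script removed above. A minimal sketch of loading and querying such a pickled scikit-learn classifier (pickle usage and the 8-feature input shape are assumptions based on the removed code, not shown in this diff):

import pickle

# load the pickled decision tree (assumes it was written with pickle.dump)
with open('DecisionTree/wyuczone_drzewo.pkl', 'rb') as f:
    clf = pickle.load(f)

# the removed script used 8 feature columns (wielkosc, waga, priorytet, ksztalt, kruchosc, dolna, gorna, g > d)
print(clf.predict([[2, 2, 1, 0, 1, 1, 0, 0]]))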
BIN NeuralNetwork/best_model.pth Normal file
31 NeuralNetwork/learning_results.txt Normal file
@@ -0,0 +1,31 @@
Epoch: 1 Train Loss: 65 Train Accuracy: 0.5754245754245755
Epoch: 2 Train Loss: 25 Train Accuracy: 0.7457542457542458
Epoch: 3 Train Loss: 8 Train Accuracy: 0.8431568431568431
Epoch: 4 Train Loss: 2 Train Accuracy: 0.9010989010989011
Epoch: 5 Train Loss: 1 Train Accuracy: 0.9335664335664335
Epoch: 6 Train Loss: 0 Train Accuracy: 0.9545454545454546
Epoch: 7 Train Loss: 0 Train Accuracy: 0.972027972027972
Epoch: 8 Train Loss: 0 Train Accuracy: 0.9820179820179821
Epoch: 9 Train Loss: 0 Train Accuracy: 0.994005994005994
Epoch: 10 Train Loss: 0 Train Accuracy: 0.9945054945054945

Epoch: 1 Train Loss: 42 Train Accuracy: 0.6428571428571429
Epoch: 2 Train Loss: 11 Train Accuracy: 0.8306693306693307
Epoch: 3 Train Loss: 3 Train Accuracy: 0.8921078921078921
Epoch: 4 Train Loss: 2 Train Accuracy: 0.8891108891108891
Epoch: 5 Train Loss: 1 Train Accuracy: 0.9335664335664335
Epoch: 6 Train Loss: 0 Train Accuracy: 0.952047952047952
Epoch: 7 Train Loss: 0 Train Accuracy: 0.9545454545454546
Epoch: 8 Train Loss: 0 Train Accuracy: 0.9655344655344655
Epoch: 9 Train Loss: 0 Train Accuracy: 0.9815184815184815
Epoch: 10 Train Loss: 0 Train Accuracy: 0.9805194805194806
Epoch: 11 Train Loss: 0 Train Accuracy: 0.9855144855144855
Epoch: 12 Train Loss: 0 Train Accuracy: 0.989010989010989
Epoch: 13 Train Loss: 0 Train Accuracy: 0.9925074925074925
Epoch: 14 Train Loss: 0 Train Accuracy: 0.9915084915084915
Epoch: 15 Train Loss: 0 Train Accuracy: 0.9885114885114885
Epoch: 16 Train Loss: 0 Train Accuracy: 0.994005994005994
Epoch: 17 Train Loss: 0 Train Accuracy: 0.997002997002997
Epoch: 18 Train Loss: 0 Train Accuracy: 0.9965034965034965
Epoch: 19 Train Loss: 0 Train Accuracy: 0.999000999000999
Epoch: 20 Train Loss: 0 Train Accuracy: 1.0
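Note: the two blocks above appear to be two separate training runs, the first stopped after 10 epochs and the second after 20, which matches num_epochs = 20 in neural_network_learning.py below.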
60 NeuralNetwork/neural_network_learning.py Normal file
@@ -0,0 +1,60 @@
import glob
from src.torchvision_resize_dataset import combined_dataset, images_path, classes
import src.data_model
from torch.optim import Adam
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

train_loader = DataLoader(
    combined_dataset,  # dataset of images
    batch_size=256,    # images per batch
    shuffle=True       # random order
)

model = src.data_model.DataModel(num_objects=2).to(device)

# optimizer
optimizer = Adam(model.parameters(), lr=0.001, weight_decay=0.0001)
# loss function
criterion = nn.CrossEntropyLoss()

num_epochs = 20
# train_size = len(glob.glob(images_path+'*.jpg'))
train_size = 2002

go_to_accuracy = 0.0
for epoch in range(num_epochs):
    # training on dataset
    model.train()
    train_accuracy = 0.0
    train_loss = 0.0
    for i, (images, labels) in enumerate(train_loader):
        if torch.cuda.is_available():
            # move the batch to the GPU
            images = images.cuda()
            labels = labels.cuda()
        # clearing the optimizer gradients
        optimizer.zero_grad()

        outputs = model(images)            # prediction
        loss = criterion(outputs, labels)  # loss calculation
        loss.backward()
        optimizer.step()

        train_loss += loss.cpu().data * images.size(0)
        _, prediction = torch.max(outputs.data, 1)

        train_accuracy += int(torch.sum(prediction == labels.data))

    train_accuracy = train_accuracy / train_size
    train_loss = train_loss / train_size

    model.eval()

    print('Epoch: ' + str(epoch+1) + ' Train Loss: ' + str(int(train_loss)) + ' Train Accuracy: ' + str(train_accuracy))

    if train_accuracy > go_to_accuracy:
        go_to_accuracy = train_accuracy
        torch.save(model.state_dict(), "best_model.pth")
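Note: train_size is hard-coded to 2002, while the commented-out glob line suggests it was once derived from the image folder. Since combined_dataset is a plain torchvision ImageFolder, its length can be read directly; a minimal alternative sketch (an illustration, not part of the commit):

from src.torchvision_resize_dataset import combined_dataset

train_size = len(combined_dataset)  # total number of images ImageFolder discovered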
BIN NeuralNetwork/older_best_model.pth Normal file
147 NeuralNetwork/prediction.py Normal file
@@ -0,0 +1,147 @@
import torch
import torch.nn as nn
from torchvision.transforms import transforms
import numpy as np
from torch.autograd import Variable
from torchvision.models import squeezenet1_1
import torch.functional as F
from io import open
import os
from PIL import Image
import pathlib
import glob
from tkinter import Tk, Label
from PIL import Image, ImageTk

absolute_path = os.path.abspath('NeuralNetwork/src/train_images')
train_path = absolute_path
absolute_path = os.path.abspath('Images/Items_test')
pred_path = absolute_path

root = pathlib.Path(train_path)
classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])


class DataModel(nn.Module):
    def __init__(self, num_classes):
        super(DataModel, self).__init__()
        # input (batch=256, nr of channels rgb=3, size=224x224)

        # convolution
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, stride=1, padding=1)
        # shape (256, 12, 224x224)

        # batch normalization
        self.bn1 = nn.BatchNorm2d(num_features=12)
        # shape (256, 12, 224x224)
        self.reul1 = nn.ReLU()

        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # reduce image size by factor 2
        # pooling window moves by 2 pixels at a time instead of 1
        # shape (256, 12, 112x112)

        self.conv2 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(num_features=24)
        self.reul2 = nn.ReLU()
        # shape (256, 24, 112x112)

        self.conv3 = nn.Conv2d(in_channels=24, out_channels=48, kernel_size=3, stride=1, padding=1)
        # shape (256, 48, 112x112)
        self.bn3 = nn.BatchNorm2d(num_features=48)
        # shape (256, 48, 112x112)
        self.reul3 = nn.ReLU()

        # connected layer
        self.fc = nn.Linear(in_features=48*112*112, out_features=num_classes)

    def forward(self, input):
        output = self.conv1(input)
        output = self.bn1(output)
        output = self.reul1(output)

        output = self.pool(output)
        output = self.conv2(output)
        output = self.bn2(output)
        output = self.reul2(output)

        output = self.conv3(output)
        output = self.bn3(output)
        output = self.reul3(output)

        # output shape matrix (256, 48, 112x112)
        # print(output.shape)
        # print(self.fc.weight.shape)

        output = output.view(-1, 48*112*112)
        output = self.fc(output)

        return output


script_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(script_dir, 'best_model.pth')
checkpoint = torch.load(file_path)
model = DataModel(num_classes=2)
model.load_state_dict(checkpoint)
model.eval()

transformer = transforms.Compose([
    transforms.Resize((224, 224)),  # Resize images to (224, 224)
    transforms.ToTensor(),          # Convert images to tensors, 0-255 to 0-1
    # transforms.RandomHorizontalFlip(),  # 0.5 chance to flip the image
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])


def prediction(img_path, transformer):
    image = Image.open(img_path)
    image_tensor = transformer(image).float()
    image_tensor = image_tensor.unsqueeze_(0)

    if torch.cuda.is_available():
        image_tensor.cuda()

    input = Variable(image_tensor)
    output = model(input)
    index = output.data.numpy().argmax()
    pred = classes[index]

    return pred


def prediction_keys():
    # function that returns the paths of every file in the folder as a list
    images_path = glob.glob(pred_path + '/*.jpg')
    pred_list = []
    for i in images_path:
        pred_list.append(i)

    return pred_list


def predict_one(path):
    # display the image after each picked-up item
    root = Tk()
    root.title("Okno z obrazkiem")

    image = Image.open(path)
    photo = ImageTk.PhotoImage(image)
    label = Label(root, image=photo)
    label.pack()

    root.mainloop()

    # run the check whether the picture shows a package or a letter
    pred_print = prediction(path, transformer)
    print('Zdjecie jest: ' + pred_print)
    return pred_print
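Note: for orientation, a minimal sketch of driving these helpers together from the repository root (the loop and the import path are illustrative assumptions, not part of the commit; it expects .jpg files in Images/Items_test):

from NeuralNetwork.prediction import prediction_keys, predict_one

# classify every test image; each call opens a Tk window with the photo and prints the predicted class
for img_path in prediction_keys():
    predict_one(img_path)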
61 NeuralNetwork/src/data_model.py Normal file
@@ -0,0 +1,61 @@
import torch.nn as nn
import torch


class DataModel(nn.Module):
    def __init__(self, num_objects):
        super(DataModel, self).__init__()
        # input (batch=256, nr of channels rgb=3, size=224x224)

        # convolution
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=3, stride=1, padding=1)
        # shape (256, 12, 224x224)

        # batch normalization
        self.bn1 = nn.BatchNorm2d(num_features=12)
        # shape (256, 12, 224x224)
        self.reul1 = nn.ReLU()

        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # reduce image size by factor 2
        # pooling window moves by 2 pixels at a time instead of 1
        # shape (256, 12, 112x112)

        self.conv2 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(num_features=24)
        self.reul2 = nn.ReLU()
        # shape (256, 24, 112x112)

        self.conv3 = nn.Conv2d(in_channels=24, out_channels=48, kernel_size=3, stride=1, padding=1)
        # shape (256, 48, 112x112)
        self.bn3 = nn.BatchNorm2d(num_features=48)
        # shape (256, 48, 112x112)
        self.reul3 = nn.ReLU()

        # connected layer
        self.fc = nn.Linear(in_features=48*112*112, out_features=num_objects)

    def forward(self, input):
        output = self.conv1(input)
        output = self.bn1(output)
        output = self.reul1(output)

        output = self.pool(output)
        output = self.conv2(output)
        output = self.bn2(output)
        output = self.reul2(output)

        output = self.conv3(output)
        output = self.bn3(output)
        output = self.reul3(output)

        # output shape matrix (256, 48, 112x112)
        # print(output.shape)
        # print(self.fc.weight.shape)

        output = output.view(-1, 48*112*112)
        output = self.fc(output)

        return output
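Note: a quick way to sanity-check the 48*112*112 flatten size assumed by the fully connected layer is to push a dummy batch through the model; a minimal sketch (assuming it is run from NeuralNetwork/ so src.data_model resolves):

import torch
from src.data_model import DataModel

model = DataModel(num_objects=2)
dummy = torch.randn(1, 3, 224, 224)  # one RGB image at the 224x224 size produced by the Resize transform
print(model(dummy).shape)            # expected: torch.Size([1, 2])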
31 NeuralNetwork/src/torchvision_resize_dataset.py Normal file
@@ -0,0 +1,31 @@
import glob
import pathlib
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
from torch.utils.data import ConcatDataset

# images have to be the same size for the algorithm to work
transform = transforms.Compose([
    transforms.Resize((224, 224)),  # Resize images to (224, 224)
    transforms.ToTensor(),          # Convert images to tensors, 0-255 to 0-1
    # transforms.RandomHorizontalFlip(),  # 0.5 chance to flip the image
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

letters_path = 'C:/Users/wojmed/Documents/VS repositories/Inteligentny_Wozek/NeuralNetwork/src/train_images/letters'
package_path = 'C:/Users/wojmed/Documents/VS repositories/Inteligentny_Wozek/NeuralNetwork/src/train_images/package'
images_path = 'C:/Users/wojmed/Documents/VS repositories/Inteligentny_Wozek/NeuralNetwork/src/train_images'

# # Load images from folders
# letter_folder = ImageFolder(letters_path, transform=transform)
# package_folder = ImageFolder(package_path, transform=transform)

# Combine both datasets into a single dataset
# combined_dataset = ConcatDataset([letter_folder, package_folder])
combined_dataset = ImageFolder(images_path, transform=transform)

# image classes
path = pathlib.Path(images_path)
classes = sorted([i.name.split("/")[-1] for i in path.iterdir()])

# print(classes)
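Note: since combined_dataset is an ImageFolder, the class names and their label indices can also be read straight from it instead of being rebuilt with pathlib; a small sketch for illustration (not part of the commit):

from src.torchvision_resize_dataset import combined_dataset

# ImageFolder discovers the class folders itself; keys are folder names, values are label indices
print(combined_dataset.classes)       # e.g. ['letters', 'package']
print(combined_dataset.class_to_idx)  # e.g. {'letters': 0, 'package': 1}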
BIN NeuralNetwork/src/train_images/letters/1.jpg Normal file (2.7 MiB)
BIN NeuralNetwork/src/train_images/letters/10.jpg Normal file (1.8 MiB)
BIN NeuralNetwork/src/train_images/letters/100.jpg Normal file (193 KiB)
BIN NeuralNetwork/src/train_images/letters/1000.jpg Normal file (59 KiB)
BIN NeuralNetwork/src/train_images/letters/101.jpg Normal file (8.0 KiB)
BIN NeuralNetwork/src/train_images/letters/102.jpg Normal file (36 KiB)
BIN NeuralNetwork/src/train_images/letters/103.jpg Normal file (12 KiB)
BIN NeuralNetwork/src/train_images/letters/104.jpg Normal file (20 KiB)
BIN NeuralNetwork/src/train_images/letters/105.jpg Normal file (41 KiB)
BIN NeuralNetwork/src/train_images/letters/106.jpg Normal file (24 KiB)
BIN NeuralNetwork/src/train_images/letters/107.jpg Normal file (34 KiB)
BIN NeuralNetwork/src/train_images/letters/108.jpg Normal file (37 KiB)
BIN NeuralNetwork/src/train_images/letters/109.jpg Normal file (36 KiB)
BIN NeuralNetwork/src/train_images/letters/11.jpg Normal file (7.7 MiB)
BIN NeuralNetwork/src/train_images/letters/110.jpg Normal file (40 KiB)
BIN NeuralNetwork/src/train_images/letters/111.jpg Normal file (28 KiB)
BIN NeuralNetwork/src/train_images/letters/112.jpg Normal file (64 KiB)
BIN NeuralNetwork/src/train_images/letters/113.jpg Normal file (32 KiB)
BIN NeuralNetwork/src/train_images/letters/114.jpg Normal file (135 KiB)
BIN NeuralNetwork/src/train_images/letters/115.jpg Normal file (34 KiB)
BIN NeuralNetwork/src/train_images/letters/116.jpg Normal file (60 KiB)
BIN NeuralNetwork/src/train_images/letters/117.jpg Normal file (76 KiB)
BIN NeuralNetwork/src/train_images/letters/118.jpg Normal file (62 KiB)
BIN NeuralNetwork/src/train_images/letters/119.jpg Normal file (78 KiB)
BIN NeuralNetwork/src/train_images/letters/12.jpg Normal file (6.6 MiB)
BIN NeuralNetwork/src/train_images/letters/120.jpg Normal file (91 KiB)
BIN NeuralNetwork/src/train_images/letters/121.jpg Normal file (36 KiB)
BIN NeuralNetwork/src/train_images/letters/122.jpg Normal file (24 KiB)
BIN NeuralNetwork/src/train_images/letters/123.jpg Normal file (24 KiB)
BIN NeuralNetwork/src/train_images/letters/124.jpg Normal file (44 KiB)
BIN NeuralNetwork/src/train_images/letters/125.jpg Normal file (35 KiB)
BIN NeuralNetwork/src/train_images/letters/126.jpg Normal file (85 KiB)
BIN NeuralNetwork/src/train_images/letters/127.jpg Normal file (53 KiB)
BIN NeuralNetwork/src/train_images/letters/128.jpg Normal file (6.3 KiB)
BIN NeuralNetwork/src/train_images/letters/129.jpg Normal file (74 KiB)
BIN NeuralNetwork/src/train_images/letters/13.jpg Normal file (1.8 MiB)
BIN NeuralNetwork/src/train_images/letters/130.jpg Normal file (99 KiB)
BIN NeuralNetwork/src/train_images/letters/131.jpg Normal file (38 KiB)
BIN NeuralNetwork/src/train_images/letters/132.jpg Normal file (1004 KiB)
BIN NeuralNetwork/src/train_images/letters/133.jpg Normal file (976 B)
BIN NeuralNetwork/src/train_images/letters/134.jpg Normal file (434 KiB)
BIN NeuralNetwork/src/train_images/letters/135.jpg Normal file (131 KiB)
BIN NeuralNetwork/src/train_images/letters/136.jpg Normal file (111 KiB)
BIN NeuralNetwork/src/train_images/letters/137.jpg Normal file (17 KiB)
BIN NeuralNetwork/src/train_images/letters/138.jpg Normal file (3.6 KiB)
BIN NeuralNetwork/src/train_images/letters/139.jpg Normal file (73 KiB)
BIN NeuralNetwork/src/train_images/letters/14.jpg Normal file (1.6 MiB)
BIN NeuralNetwork/src/train_images/letters/140.jpg Normal file (5.9 KiB)
BIN NeuralNetwork/src/train_images/letters/141.jpg Normal file (5.4 KiB)
BIN NeuralNetwork/src/train_images/letters/142.jpg Normal file (4.2 KiB)
BIN NeuralNetwork/src/train_images/letters/143.jpg Normal file (27 KiB)
BIN NeuralNetwork/src/train_images/letters/144.jpg Normal file (6.7 KiB)
BIN NeuralNetwork/src/train_images/letters/145.jpg Normal file (38 KiB)
BIN NeuralNetwork/src/train_images/letters/146.jpg Normal file (555 KiB)
BIN NeuralNetwork/src/train_images/letters/147.jpg Normal file (52 KiB)
BIN NeuralNetwork/src/train_images/letters/148.jpg Normal file (51 KiB)
BIN NeuralNetwork/src/train_images/letters/149.jpg Normal file (362 KiB)
BIN NeuralNetwork/src/train_images/letters/15.jpg Normal file (1.6 MiB)
BIN NeuralNetwork/src/train_images/letters/150.jpg Normal file (153 KiB)
BIN NeuralNetwork/src/train_images/letters/151.jpg Normal file (108 KiB)
BIN NeuralNetwork/src/train_images/letters/152.jpg Normal file (24 KiB)
BIN NeuralNetwork/src/train_images/letters/153.jpg Normal file (12 KiB)
BIN NeuralNetwork/src/train_images/letters/154.jpg Normal file (14 KiB)
BIN NeuralNetwork/src/train_images/letters/155.jpg Normal file (3.7 KiB)
BIN NeuralNetwork/src/train_images/letters/156.jpg Normal file (15 KiB)
BIN NeuralNetwork/src/train_images/letters/157.jpg Normal file (60 KiB)
BIN NeuralNetwork/src/train_images/letters/158.jpg Normal file (66 KiB)
BIN NeuralNetwork/src/train_images/letters/159.jpg Normal file (150 KiB)
BIN NeuralNetwork/src/train_images/letters/16.jpg Normal file (3.0 MiB)
BIN NeuralNetwork/src/train_images/letters/160.jpg Normal file (20 KiB)
BIN NeuralNetwork/src/train_images/letters/161.jpg Normal file (120 KiB)
BIN NeuralNetwork/src/train_images/letters/162.jpg Normal file (3.7 KiB)
BIN NeuralNetwork/src/train_images/letters/163.jpg Normal file (9.8 KiB)
BIN NeuralNetwork/src/train_images/letters/164.jpg Normal file (59 KiB)
BIN NeuralNetwork/src/train_images/letters/165.jpg Normal file (59 KiB)
BIN NeuralNetwork/src/train_images/letters/166.jpg Normal file (9.4 KiB)
BIN NeuralNetwork/src/train_images/letters/167.jpg Normal file (7.5 KiB)
BIN NeuralNetwork/src/train_images/letters/168.jpg Normal file (8.8 KiB)
BIN NeuralNetwork/src/train_images/letters/169.jpg Normal file (88 KiB)
BIN NeuralNetwork/src/train_images/letters/17.jpg Normal file (1.1 MiB)
BIN NeuralNetwork/src/train_images/letters/170.jpg Normal file (96 KiB)
BIN NeuralNetwork/src/train_images/letters/171.jpg Normal file (9.2 KiB)
BIN NeuralNetwork/src/train_images/letters/172.jpg Normal file (55 KiB)
BIN NeuralNetwork/src/train_images/letters/173.jpg Normal file (17 KiB)
BIN NeuralNetwork/src/train_images/letters/174.jpg Normal file (27 KiB)
BIN NeuralNetwork/src/train_images/letters/175.jpg Normal file (78 KiB)
BIN NeuralNetwork/src/train_images/letters/176.jpg Normal file (30 KiB)
BIN NeuralNetwork/src/train_images/letters/177.jpg Normal file (280 KiB)
BIN NeuralNetwork/src/train_images/letters/178.jpg Normal file (264 KiB)
BIN NeuralNetwork/src/train_images/letters/179.jpg Normal file (56 KiB)