XsedoX committed on 2022-05-26 13:19:17 +02:00
commit 5c1a1605b8 (parent e8c32ade2a)
11 changed files with 31 additions and 21 deletions

.gitignore (vendored)

@@ -150,3 +150,4 @@ cython_debug/
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
 .idea/
+/algorithms/neural_network/data/

7 binary image files deleted (previews not shown). Sizes before: 814 B, 820 B, 789 B, 1.0 KiB, 760 B, 2.2 KiB, 725 B.


@@ -10,23 +10,33 @@ from common.constants import DEVICE, BATCH_SIZE, NUM_EPOCHS, LEARNING_RATE, SETU
 class NeuralNetwork(pl.LightningModule):
     def __init__(self, numChannels=3, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, num_classes=4):
-        super().__init__()
-        self.layer = nn.Sequential(
-            nn.Linear(36*36*3, 300),
-            nn.ReLU(),
-            nn.Linear(300, 4),
-            nn.LogSoftmax(dim=-1)
-        )
+        super(NeuralNetwork, self).__init__()
+        self.conv1 = nn.Conv2d(numChannels, 24, (3, 3), padding=1)
+        self.relu1 = nn.ReLU()
+        self.maxpool1 = nn.MaxPool2d((2, 2), stride=2)
+        self.conv2 = nn.Conv2d(24, 48, (3, 3), padding=1)
+        self.relu2 = nn.ReLU()
+        self.fc1 = nn.Linear(48*18*18, 4)
+        self.relu3 = nn.ReLU()
+        self.fc2 = nn.Linear(500, num_classes)
+        self.logSoftmax = nn.LogSoftmax(dim=1)
         self.batch_size = batch_size
         self.learning_rate = learning_rate

     def forward(self, x):
+        x = self.conv1(x)
+        x = self.relu1(x)
+        x = self.maxpool1(x)
+        x = self.conv2(x)
+        x = self.relu2(x)
         x = x.reshape(x.shape[0], -1)
-        x = self.layer(x)
+        x = self.fc1(x)
+        x = self.logSoftmax(x)
         return x

     def configure_optimizers(self):
-        optimizer = SGD(self.parameters(), lr=self.learning_rate)
+        optimizer = Adam(self.parameters(), lr=self.learning_rate)
         return optimizer

     def training_step(self, batch, batch_idx):
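The new architecture's flattened size checks out: for 36x36 RGB input, padding=1 keeps conv1 at 36x36, the 2x2 max-pool halves it to 18x18, and conv2 preserves 18x18 with 48 channels, so fc1 correctly expects 48*18*18 = 15552 features. Two layers look like dead code, though: relu3 and fc2 = nn.Linear(500, num_classes) are defined but never called in forward, and fc1 already maps straight to the 4 class scores (nothing produces a 500-dim input for fc2). A minimal shape-check sketch, assuming only torch (not part of this diff):

    import torch
    import torch.nn as nn

    x = torch.randn(1, 3, 36, 36)                # one dummy 36x36 RGB tile
    x = nn.Conv2d(3, 24, (3, 3), padding=1)(x)   # padding=1 preserves 36x36
    x = nn.MaxPool2d((2, 2), stride=2)(x)        # 36x36 -> 18x18
    x = nn.Conv2d(24, 48, (3, 3), padding=1)(x)  # still 18x18, now 48 channels
    print(x.shape)                               # torch.Size([1, 48, 18, 18])
    print(x.reshape(x.shape[0], -1).shape)       # torch.Size([1, 15552]) == 48*18*18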


@@ -100,7 +100,7 @@ def what_is_it(img_path, show_img=False):
     plt.imshow(plt.imread(img_path))
     plt.show()
     image = SETUP_PHOTOS(image).unsqueeze(0)
-    model = NeuralNetwork.load_from_checkpoint('./lightning_logs/version_3/checkpoints/epoch=8-step=810.ckpt')
+    model = NeuralNetwork.load_from_checkpoint('./lightning_logs/version_13/checkpoints/epoch=4-step=405.ckpt')
     with torch.no_grad():
         model.eval()
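The checkpoint path above is pinned to version_13/epoch=4-step=405, which goes stale every time training reruns. A hedged alternative, not in the source, that resolves the newest checkpoint under Lightning's default lightning_logs/version_N/checkpoints/ layout (assumes at least one .ckpt exists and NeuralNetwork is imported as in this file):

    from pathlib import Path

    # newest checkpoint by modification time, instead of a hardcoded path
    ckpts = sorted(Path('./lightning_logs').glob('version_*/checkpoints/*.ckpt'),
                   key=lambda p: p.stat().st_mtime)
    model = NeuralNetwork.load_from_checkpoint(str(ckpts[-1]))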
@@ -109,17 +109,17 @@ def what_is_it(img_path, show_img=False):
     CNN = NeuralNetwork()
-    #trainer = pl.Trainer(accelerator='gpu', devices=1, callbacks=[EarlyStopping('val_loss')], max_epochs=NUM_EPOCHS)
-    trainer = pl.Trainer(accelerator='gpu', devices=1, auto_scale_batch_size=True, callbacks=[EarlyStopping('val_loss')], max_epochs=NUM_EPOCHS)
+    common.helpers.createCSV()
+    trainer = pl.Trainer(accelerator='gpu', devices=1, auto_lr_find=True, max_epochs=NUM_EPOCHS)
+    #trainer = pl.Trainer(accelerator='gpu', devices=1, auto_lr_find=True, max_epochs=NUM_EPOCHS)
     trainset = WaterSandTreeGrass('./data/train_csv_file.csv', transform=SETUP_PHOTOS)
     testset = WaterSandTreeGrass('./data/test_csv_file.csv', transform=SETUP_PHOTOS)
     train_loader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
     test_loader = DataLoader(testset, batch_size=BATCH_SIZE)
-    #trainer.fit(CNN, train_loader, test_loader)
+    trainer.fit(CNN, train_loader, test_loader)
     #trainer.tune(CNN, train_loader, test_loader)
-    check_accuracy_tiles()
-    print(what_is_it('../../resources/textures/sand.png', True))
+    #check_accuracy_tiles()
+    #print(what_is_it('../../resources/textures/sand.png', True))
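Two things worth noting in this hunk. First, the commit turns real training on (trainer.fit uncommented) while commenting out the accuracy check and the sample prediction. Second, with trainer.tune(...) still commented out, auto_lr_find=True is inert: in the PyTorch Lightning 1.x API used here, the learning-rate finder only runs inside Trainer.tune(), not Trainer.fit(). The new hardcoded LEARNING_RATE below suggests the finder was run once and its suggestion pasted into constants. A sketch of the tune-then-fit flow, reusing this file's names:

    # tune() runs the LR finder (because auto_lr_find=True) and writes the
    # suggestion back to CNN.learning_rate; fit() then trains with that rate
    trainer = pl.Trainer(accelerator='gpu', devices=1, auto_lr_find=True, max_epochs=NUM_EPOCHS)
    trainer.tune(CNN, train_loader, test_loader)
    trainer.fit(CNN, train_loader, test_loader)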


@@ -77,19 +77,18 @@ BAR_HEIGHT_MULTIPLIER = 0.1
 #NEURAL_NETWORK
-LEARNING_RATE = 0.13182567385564073
+LEARNING_RATE = 0.00478630092322638
 BATCH_SIZE = 64
-NUM_EPOCHS = 50
+NUM_EPOCHS = 20
 DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
 print("Using ", DEVICE)
 CLASSES = ['grass', 'sand', 'tree', 'water']
 SETUP_PHOTOS = transforms.Compose([
-    transforms.Resize(36),
-    transforms.CenterCrop(36),
     transforms.ToPILImage(),
     transforms.ToTensor(),
+    transforms.Resize((36, 36)),
     transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
 ])
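The transform change is more than reordering. Resize(36) scales the shorter side to 36 and keeps the aspect ratio, so the old pipeline needed CenterCrop(36) to get a square; Resize((36, 36)) forces the exact size in one step, distorting non-square inputs instead of cropping them. Running Resize after ToTensor is fine in torchvision 0.8+, which accepts tensors in most transforms. Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) is unchanged and maps values from [0, 1] to [-1, 1]. The training budget also drops from 50 to 20 epochs, with the learning rate swapped for the tuner's suggestion. An illustration of the two sizing behaviors on a deliberately non-square input (hypothetical; the game tiles may already be square):

    from PIL import Image
    from torchvision import transforms

    img = Image.new('RGB', (80, 50))  # width x height, not square
    print(transforms.Resize(36)(img).size)                             # (57, 36): aspect kept
    print(transforms.CenterCrop(36)(transforms.Resize(36)(img)).size)  # (36, 36): square via crop
    print(transforms.Resize((36, 36))(img).size)                       # (36, 36): exact, may distort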