diff --git a/.gitignore b/.gitignore
index f73443c..a7c7cae 100644
--- a/.gitignore
+++ b/.gitignore
@@ -149,4 +149,5 @@ cython_debug/
 # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
 # and can be added to the global gitignore or merged into this file. For a more nuclear
 # option (not recommended) you can uncomment the following to ignore the entire idea folder.
-.idea/
\ No newline at end of file
+.idea/
+/algorithms/neural_network/data/
diff --git a/algorithms/neural_network/data/test/grass/grass1.png b/algorithms/neural_network/data/test/grass/grass1.png
deleted file mode 100644
index dd88981..0000000
Binary files a/algorithms/neural_network/data/test/grass/grass1.png and /dev/null differ
diff --git a/algorithms/neural_network/data/test/grass/grass2.png b/algorithms/neural_network/data/test/grass/grass2.png
deleted file mode 100644
index c07ab8f..0000000
Binary files a/algorithms/neural_network/data/test/grass/grass2.png and /dev/null differ
diff --git a/algorithms/neural_network/data/test/grass/grass3.png b/algorithms/neural_network/data/test/grass/grass3.png
deleted file mode 100644
index 4fac085..0000000
Binary files a/algorithms/neural_network/data/test/grass/grass3.png and /dev/null differ
diff --git a/algorithms/neural_network/data/test/grass/grass4.png b/algorithms/neural_network/data/test/grass/grass4.png
deleted file mode 100644
index 74cb3d1..0000000
Binary files a/algorithms/neural_network/data/test/grass/grass4.png and /dev/null differ
diff --git a/algorithms/neural_network/data/test/sand/sand.png b/algorithms/neural_network/data/test/sand/sand.png
deleted file mode 100644
index 51c072d..0000000
Binary files a/algorithms/neural_network/data/test/sand/sand.png and /dev/null differ
diff --git a/algorithms/neural_network/data/test/tree/grass_with_tree.jpg b/algorithms/neural_network/data/test/tree/grass_with_tree.jpg
deleted file mode 100644
index 56af6b8..0000000
Binary files a/algorithms/neural_network/data/test/tree/grass_with_tree.jpg and /dev/null differ
diff --git a/algorithms/neural_network/data/test/water/water.png b/algorithms/neural_network/data/test/water/water.png
deleted file mode 100644
index 28b45db..0000000
Binary files a/algorithms/neural_network/data/test/water/water.png and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_0/checkpoints/epoch=6-step=630.ckpt b/algorithms/neural_network/lightning_logs/version_0/checkpoints/epoch=6-step=630.ckpt
deleted file mode 100644
index e3fcb43..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_0/checkpoints/epoch=6-step=630.ckpt and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_0/events.out.tfevents.1653283421.DESKTOP-97QK98R.19372.0 b/algorithms/neural_network/lightning_logs/version_0/events.out.tfevents.1653283421.DESKTOP-97QK98R.19372.0
deleted file mode 100644
index 8f13c49..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_0/events.out.tfevents.1653283421.DESKTOP-97QK98R.19372.0 and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_1/checkpoints/epoch=7-step=720.ckpt b/algorithms/neural_network/lightning_logs/version_1/checkpoints/epoch=7-step=720.ckpt
deleted file mode 100644
index bb7429b..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_1/checkpoints/epoch=7-step=720.ckpt and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_1/events.out.tfevents.1653288273.DESKTOP-97QK98R.9316.0 b/algorithms/neural_network/lightning_logs/version_1/events.out.tfevents.1653288273.DESKTOP-97QK98R.9316.0
deleted file mode 100644
index 07da97d..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_1/events.out.tfevents.1653288273.DESKTOP-97QK98R.9316.0 and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_1/hparams.yaml b/algorithms/neural_network/lightning_logs/version_1/hparams.yaml
deleted file mode 100644
index 0967ef4..0000000
--- a/algorithms/neural_network/lightning_logs/version_1/hparams.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/algorithms/neural_network/lightning_logs/version_2/checkpoints/epoch=0-step=90.ckpt b/algorithms/neural_network/lightning_logs/version_2/checkpoints/epoch=0-step=90.ckpt
deleted file mode 100644
index 46e80c2..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_2/checkpoints/epoch=0-step=90.ckpt and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_2/events.out.tfevents.1653290364.DESKTOP-97QK98R.8432.0 b/algorithms/neural_network/lightning_logs/version_2/events.out.tfevents.1653290364.DESKTOP-97QK98R.8432.0
deleted file mode 100644
index 37da598..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_2/events.out.tfevents.1653290364.DESKTOP-97QK98R.8432.0 and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_2/hparams.yaml b/algorithms/neural_network/lightning_logs/version_2/hparams.yaml
deleted file mode 100644
index 0967ef4..0000000
--- a/algorithms/neural_network/lightning_logs/version_2/hparams.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/algorithms/neural_network/lightning_logs/version_23/checkpoints/epoch=3-step=324.ckpt b/algorithms/neural_network/lightning_logs/version_23/checkpoints/epoch=3-step=324.ckpt
new file mode 100644
index 0000000..592f965
Binary files /dev/null and b/algorithms/neural_network/lightning_logs/version_23/checkpoints/epoch=3-step=324.ckpt differ
diff --git a/algorithms/neural_network/lightning_logs/version_23/events.out.tfevents.1653922926.DESKTOP-97QK98R.9912.0 b/algorithms/neural_network/lightning_logs/version_23/events.out.tfevents.1653922926.DESKTOP-97QK98R.9912.0
new file mode 100644
index 0000000..bc7cd86
Binary files /dev/null and b/algorithms/neural_network/lightning_logs/version_23/events.out.tfevents.1653922926.DESKTOP-97QK98R.9912.0 differ
diff --git a/algorithms/neural_network/lightning_logs/version_0/hparams.yaml b/algorithms/neural_network/lightning_logs/version_23/hparams.yaml
similarity index 100%
rename from algorithms/neural_network/lightning_logs/version_0/hparams.yaml
rename to algorithms/neural_network/lightning_logs/version_23/hparams.yaml
diff --git a/algorithms/neural_network/lightning_logs/version_3/checkpoints/epoch=8-step=810.ckpt b/algorithms/neural_network/lightning_logs/version_3/checkpoints/epoch=8-step=810.ckpt
deleted file mode 100644
index beb5dcd..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_3/checkpoints/epoch=8-step=810.ckpt and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_3/events.out.tfevents.1653310536.DESKTOP-97QK98R.15588.0 b/algorithms/neural_network/lightning_logs/version_3/events.out.tfevents.1653310536.DESKTOP-97QK98R.15588.0
deleted file mode 100644
index 8e95fd1..0000000
Binary files a/algorithms/neural_network/lightning_logs/version_3/events.out.tfevents.1653310536.DESKTOP-97QK98R.15588.0 and /dev/null differ
diff --git a/algorithms/neural_network/lightning_logs/version_3/hparams.yaml b/algorithms/neural_network/lightning_logs/version_3/hparams.yaml
deleted file mode 100644
index 0967ef4..0000000
--- a/algorithms/neural_network/lightning_logs/version_3/hparams.yaml
+++ /dev/null
@@ -1 +0,0 @@
-{}
diff --git a/algorithms/neural_network/neural_network.py b/algorithms/neural_network/neural_network.py
index d439d5b..fd561d1 100644
--- a/algorithms/neural_network/neural_network.py
+++ b/algorithms/neural_network/neural_network.py
@@ -10,23 +10,39 @@ from common.constants import DEVICE, BATCH_SIZE, NUM_EPOCHS, LEARNING_RATE, SETU
 
 class NeuralNetwork(pl.LightningModule):
     def __init__(self, numChannels=3, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE, num_classes=4):
-        super().__init__()
-        self.layer = nn.Sequential(
-            nn.Linear(36*36*3, 300),
-            nn.ReLU(),
-            nn.Linear(300, 4),
-            nn.LogSoftmax(dim=-1)
-        )
+        super(NeuralNetwork, self).__init__()
+        self.conv1 = nn.Conv2d(numChannels, 24, (3, 3), padding=1)
+        self.relu1 = nn.ReLU()
+        self.maxpool1 = nn.MaxPool2d((2, 2), stride=2)
+        self.conv2 = nn.Conv2d(24, 48, (3, 3), padding=1)
+        self.relu2 = nn.ReLU()
+
+        self.fc1 = nn.Linear(48*18*18, 800)
+        self.relu3 = nn.ReLU()
+        self.fc2 = nn.Linear(800, 400)
+        self.relu4 = nn.ReLU()
+        self.fc3 = nn.Linear(400, 4)
+        self.logSoftmax = nn.LogSoftmax(dim=1)
+
         self.batch_size = batch_size
         self.learning_rate = learning_rate
 
     def forward(self, x):
+        x = self.conv1(x)
+        x = self.relu1(x)
+        x = self.maxpool1(x)
+        x = self.conv2(x)
+        x = self.relu2(x)
         x = x.reshape(x.shape[0], -1)
-        x = self.layer(x)
+        x = self.fc1(x)
+        x = self.relu3(x)
+        x = self.fc2(x)
+        x = self.relu4(x)
+        x = self.fc3(x)
+        x = self.logSoftmax(x)
         return x
 
     def configure_optimizers(self):
-        optimizer = SGD(self.parameters(), lr=self.learning_rate)
+        optimizer = Adam(self.parameters(), lr=self.learning_rate)
         return optimizer
 
     def training_step(self, batch, batch_idx):
diff --git a/algorithms/neural_network/neural_network_interface.py b/algorithms/neural_network/neural_network_interface.py
index 5098e45..56905a3 100644
--- a/algorithms/neural_network/neural_network_interface.py
+++ b/algorithms/neural_network/neural_network_interface.py
@@ -10,44 +10,8 @@ from torch.optim import Adam
 import matplotlib.pyplot as plt
 import pytorch_lightning as pl
 from pytorch_lightning.callbacks import EarlyStopping
-
-
-def train(model):
-    model = model.to(DEVICE)
-    model.train()
-    trainset = WaterSandTreeGrass('./data/train_csv_file.csv', transform=SETUP_PHOTOS)
-    testset = WaterSandTreeGrass('./data/test_csv_file.csv', transform=SETUP_PHOTOS)
-    train_loader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
-    test_loader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=True)
-
-    criterion = nn.CrossEntropyLoss()
-    optimizer = Adam(model.parameters(), lr=LEARNING_RATE)
-
-    for epoch in range(NUM_EPOCHS):
-        for batch_idx, (data, targets) in enumerate(train_loader):
-            data = data.to(device=DEVICE)
-            targets = targets.to(device=DEVICE)
-
-            scores = model(data)
-            loss = criterion(scores, targets)
-
-            optimizer.zero_grad()
-            loss.backward()
-
-            optimizer.step()
-
-            if batch_idx % 4 == 0:
-                print("epoch: %d loss: %.4f" % (epoch, loss.item()))
-
-    print("FINISHED TRAINING!")
-    torch.save(model.state_dict(), "./learnednetwork.pth")
-
-    print("Checking accuracy for the train set.")
-    check_accuracy(train_loader)
-    print("Checking accuracy for the test set.")
-    check_accuracy(test_loader)
-    print("Checking accuracy for the tiles.")
-    check_accuracy_tiles()
+import torchvision.transforms.functional as F
+from PIL import Image
 
 
 def check_accuracy_tiles():
@@ -95,12 +59,13 @@ def check_accuracy_tiles():
 
 
 def what_is_it(img_path, show_img=False):
-    image = read_image(img_path, mode=ImageReadMode.RGB)
+    image = Image.open(img_path).convert('RGB')
     if show_img:
-        plt.imshow(plt.imread(img_path))
+        plt.imshow(image)
        plt.show()
 
+    image = SETUP_PHOTOS(image).unsqueeze(0)
-    model = NeuralNetwork.load_from_checkpoint('./lightning_logs/version_3/checkpoints/epoch=8-step=810.ckpt')
+    model = NeuralNetwork.load_from_checkpoint('./lightning_logs/version_20/checkpoints/epoch=3-step=324.ckpt')
 
     with torch.no_grad():
         model.eval()
@@ -108,18 +73,53 @@
     return ID_TO_CLASS[idx]
 
 
-CNN = NeuralNetwork()
+def check_accuracy(tset):
+    model = NeuralNetwork.load_from_checkpoint('./lightning_logs/version_23/checkpoints/epoch=3-step=324.ckpt')
+    num_correct = 0
+    num_samples = 0
+    model = model.to(DEVICE)
+    model.eval()
+
+    with torch.no_grad():
+        for photo, label in tset:
+            photo = photo.to(DEVICE)
+            label = label.to(DEVICE)
+
+            scores = model(photo)
+            predictions = scores.argmax(dim=1)
+            num_correct += (predictions == label).sum()
+            num_samples += predictions.size(0)
+
+    print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}%')
 
-trainer = pl.Trainer(accelerator='gpu', devices=1, auto_scale_batch_size=True, callbacks=[EarlyStopping('val_loss')], max_epochs=NUM_EPOCHS)
 
+def check_accuracy_data():
+    trainset = WaterSandTreeGrass('./data/train_csv_file.csv', transform=SETUP_PHOTOS)
+    testset = WaterSandTreeGrass('./data/test_csv_file.csv', transform=SETUP_PHOTOS)
+    train_loader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
+    test_loader = DataLoader(testset, batch_size=BATCH_SIZE)
+
+    print("Accuracy of train_set:")
+    check_accuracy(train_loader)
+    print("Accuracy of test_set:")
+    check_accuracy(test_loader)
+
+
+#CNN = NeuralNetwork()
+#common.helpers.createCSV()
+
+#trainer = pl.Trainer(accelerator='gpu', callbacks=EarlyStopping('val_loss'), devices=1, max_epochs=NUM_EPOCHS)
 #trainer = pl.Trainer(accelerator='gpu', devices=1, auto_lr_find=True, max_epochs=NUM_EPOCHS)
 
-trainset = WaterSandTreeGrass('./data/train_csv_file.csv', transform=SETUP_PHOTOS)
-testset = WaterSandTreeGrass('./data/test_csv_file.csv', transform=SETUP_PHOTOS)
-train_loader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
-test_loader = DataLoader(testset, batch_size=BATCH_SIZE)
-
+#trainset = WaterSandTreeGrass('./data/train_csv_file.csv', transform=SETUP_PHOTOS)
+#testset = WaterSandTreeGrass('./data/test_csv_file.csv', transform=SETUP_PHOTOS)
+#train_loader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
+#test_loader = DataLoader(testset, batch_size=BATCH_SIZE)
 #trainer.fit(CNN, train_loader, test_loader)
 #trainer.tune(CNN, train_loader, test_loader)
-check_accuracy_tiles()
-print(what_is_it('../../resources/textures/sand.png', True))
+
+
+#print(what_is_it('../../resources/textures/grass2.png', True))
+
+#check_accuracy_data()
+
+#check_accuracy_tiles()
diff --git a/algorithms/neural_network/watersandtreegrass.py b/algorithms/neural_network/watersandtreegrass.py
index 93525d0..9c1838e 100644
--- a/algorithms/neural_network/watersandtreegrass.py
+++ b/algorithms/neural_network/watersandtreegrass.py
@@ -3,6 +3,7 @@ from torch.utils.data import Dataset
 import pandas as pd
 from torchvision.io import read_image, ImageReadMode
 from common.helpers import createCSV
+from PIL import Image
 
 
 class WaterSandTreeGrass(Dataset):
@@ -15,7 +16,8 @@ class WaterSandTreeGrass(Dataset):
         return len(self.img_labels)
 
     def __getitem__(self, idx):
-        image = read_image(self.img_labels.iloc[idx, 0], mode=ImageReadMode.RGB)
+        image = Image.open(self.img_labels.iloc[idx, 0]).convert('RGB')
+
         label = torch.tensor(int(self.img_labels.iloc[idx, 1]))
 
         if self.transform:
diff --git a/common/constants.py b/common/constants.py
index ca7f43d..d4b40d8 100644
--- a/common/constants.py
+++ b/common/constants.py
@@ -77,19 +77,17 @@ BAR_HEIGHT_MULTIPLIER = 0.1
 
 
 #NEURAL_NETWORK
-LEARNING_RATE = 0.13182567385564073
+LEARNING_RATE = 0.000630957344480193
 BATCH_SIZE = 64
-NUM_EPOCHS = 50
+NUM_EPOCHS = 9
 DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
 print("Using ", DEVICE)
 CLASSES = ['grass', 'sand', 'tree', 'water']
 
 SETUP_PHOTOS = transforms.Compose([
-    transforms.Resize(36),
-    transforms.CenterCrop(36),
-    transforms.ToPILImage(),
     transforms.ToTensor(),
+    transforms.Resize((36, 36)),
     transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
 ])
 
 