Compare commits

...

13 Commits

Author SHA1 Message Date
09cdf6cc3d Merge pull request 'Neural_network' (#4) from Neural_network into master
Reviewed-on: #4
2024-06-04 16:58:57 +02:00
Marek
57c6facea1 added photo display next to the field 2024-06-04 16:55:27 +02:00
Marek
3d2a88d1ea fixes 2024-05-27 05:28:48 +02:00
MarRac
df7c553c59 modified the set of photos and types of plants 2024-05-27 04:32:31 +02:00
MarRac
8ddba0c828 modified the CNN to get better results 2024-05-27 04:27:46 +02:00
MarRac
35d7a6ab6e added use of the neural network in main 2024-05-27 00:32:20 +02:00
MarRac
9667655a2a changed neural network to CNN and added some tests 2024-05-26 23:28:22 +02:00
MarRac
b45c2e0f1f added functions for loading images, model and testing 2024-05-26 19:56:18 +02:00
MarRac
fb0ec5057c added useful functions for tile selection and more 2024-05-26 19:54:46 +02:00
MarRac
9e978d6032 added testing of the network 2024-05-25 22:30:04 +02:00
MarRac
c363b09f85 small fixes 2024-05-25 18:41:25 +02:00
b5d25d710d Added train and accuracy functions and a model with two hidden layers 2024-05-25 16:33:34 +02:00
MarRac
2ec3f1a89a added datasets and loading data 2024-05-25 02:07:27 +02:00
4847 changed files with 216 additions and 24 deletions

BIN
source/CNN_model.pth Normal file

Binary file not shown.


27
source/NN/model.py Normal file

@@ -0,0 +1,27 @@
import torch.nn as nn
import torch
import torch.nn.functional as F

class Conv_Neural_Network_Model(nn.Module):
    def __init__(self, num_classes=5, hidden_layer1=512, hidden_layer2=256):
        super(Conv_Neural_Network_Model, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(64*25*25, hidden_layer1)
        self.fc2 = nn.Linear(hidden_layer1, hidden_layer2)
        self.out = nn.Linear(hidden_layer2, num_classes)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool1(F.relu(self.conv2(x)))
        x = x.view(-1, 64*25*25)  # <---- flattening the image
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.relu(x)
        x = self.out(x)
        return F.log_softmax(x, dim=-1)
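For reference (illustrative, not part of this diff): a minimal shape-check sketch of the new model. With 100x100 RGB inputs, the two conv + 2x2 max-pool stages halve the spatial size twice (100 -> 50 -> 25), which is why fc1 expects 64*25*25 features. The dummy tensor below is hypothetical.

import torch
from NN.model import Conv_Neural_Network_Model

model = Conv_Neural_Network_Model()
dummy_batch = torch.randn(1, 3, 100, 100)  # hypothetical batch matching the 100x100 resize used in the data transform
log_probs = model(dummy_batch)
print(log_probs.shape)  # torch.Size([1, 5]): one log-probability per output class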

120
source/NN/neural_network.py Normal file

@@ -0,0 +1,120 @@
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, utils
from torchvision.transforms import Compose, Lambda, ToTensor
import matplotlib.pyplot as plt
from NN.model import *
from PIL import Image
import pygame

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

#data transform to tensors:
data_transformer = transforms.Compose([
    transforms.Resize((100, 100)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

#loading data:
train_set = datasets.ImageFolder(root='resources/train', transform=data_transformer)
test_set = datasets.ImageFolder(root='resources/test', transform=data_transformer)

# this could even be moved into the train function:
# train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
# test_loader = DataLoader(test_set, batch_size=32, shuffle=True)
#function for training model
def train(model, dataset, iter=100, batch_size=64):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.NLLLoss()
    train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    model.train()
    for epoch in range(iter):
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            output = model(inputs.to(device))
            loss = criterion(output, labels.to(device))
            loss.backward()
            optimizer.step()
        if epoch % 10 == 0:
            print('epoch: %3d loss: %.4f' % (epoch, loss))

#function for getting accuracy
def accuracy(model, dataset):
    model.eval()
    with torch.no_grad():
        correct = sum([
            (model(inputs.to(device)).argmax(dim=1) == labels.to(device)).sum()
            for inputs, labels in DataLoader(dataset, batch_size=64, shuffle=True)
        ])
    return correct.float() / len(dataset)

# model = Conv_Neural_Network_Model()
# model.to(device)

#loading the already saved model:
# model.load_state_dict(torch.load('CNN_model.pth'))
# model.eval()

# #training the model:
# train(model, train_set)
# print(f"Accuracy of the network is: {100*accuracy(model, test_set)}%")
# torch.save(model.state_dict(), 'CNN_model.pth')
def load_model():
    model = Conv_Neural_Network_Model()
    model.load_state_dict(torch.load('CNN_model.pth', map_location=torch.device('cpu')))
    model.eval()
    return model

def load_image(image_path):
    testImage = Image.open(image_path).convert('RGB')
    testImage = data_transformer(testImage)
    testImage = testImage.unsqueeze(0)
    return testImage

def display_image(screen, image_path, position):
    image = pygame.image.load(image_path)
    image = pygame.transform.scale(image, (250, 250))
    screen.blit(image, position)

def display_result(screen, position, predicted_class):
    font = pygame.font.Font(None, 30)
    displayed_text = font.render("The predicted image is: " + str(predicted_class), 1, (255, 255, 255))
    screen.blit(displayed_text, position)

def guess_image(model, image_tensor):
    with torch.no_grad():
        testOutput = model(image_tensor)
        _, predicted = torch.max(testOutput, 1)
        predicted_class = train_set.classes[predicted.item()]
    return predicted_class

#TEST - loading the image and getting results:
# testImage_path = 'resources/images/plant_photos/1c76aa4d-11f4-47d1-8bdd-2cb78deeeccf.jpg'
# testImage = Image.open(testImage_path)
# testImage = data_transformer(testImage)
# testImage = testImage.unsqueeze(0)
# testImage = testImage.to(device)
# model.load_state_dict(torch.load('CNN_model.pth'))
# model.to(device)
# model.eval()
# testOutput = model(testImage)
# _, predicted = torch.max(testOutput, 1)
# predicted_class = train_set.classes[predicted.item()]
# print(f'The predicted class is: {predicted_class}')
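For reference (illustrative, not part of this diff): a minimal usage sketch of the helpers above, assuming the script is run from the project root so that CNN_model.pth, resources/train and resources/test resolve; the image path below is hypothetical.

from NN.neural_network import load_model, load_image, guess_image

model = load_model()                     # CPU-mapped weights from CNN_model.pth
image_tensor = load_image('resources/images/plant_photos/example.jpg')  # hypothetical path
print(guess_image(model, image_tensor))  # prints the class name predicted by the CNN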

Binary file not shown.


@@ -53,3 +53,10 @@ def get_tile_coordinates(index):
         return tile.x, tile.y
     else:
         return None
+
+def get_tile_index():
+    valid_indices = []
+    for index, tile in enumerate(tiles):
+        if tile.image == "resources/images/sampling.png":
+            valid_indices.append(index)
+    return random.choice(valid_indices)


@@ -3,17 +3,21 @@ import time
 import random
 import pandas as pd
 import joblib
 from area.constants import WIDTH, HEIGHT, TILE_SIZE
 from area.field import drawWindow
 from area.tractor import Tractor, do_actions
 from area.field import tiles, fieldX, fieldY
-from area.field import get_tile_coordinates
+from area.field import get_tile_coordinates, get_tile_index
 from ground import Dirt
 from plant import Plant
 from bfs import graphsearch, Istate, succ
 from astar import a_star
-WIN = pygame.display.set_mode((WIDTH, HEIGHT))
+from NN.neural_network import load_model, load_image, guess_image, display_image, display_result
+from PIL import Image
+
+pygame.init()
+WIN_WIDTH = WIDTH + 300
+WIN = pygame.display.set_mode((WIN_WIDTH, HEIGHT))
 pygame.display.set_caption('Intelligent tractor')

@@ -23,7 +27,7 @@ def main():
     pygame.display.update()

     #getting coordinates of our "goal tile":
-    tile_index=127
+    tile_index = get_tile_index()
     tile_x, tile_y = get_tile_coordinates(tile_index)
     if tile_x is not None and tile_y is not None:
         print(f"Coordinates of tile {tile_index} are: ({tile_x}, {tile_y})")

@@ -62,14 +66,39 @@ def main():
     for event in pygame.event.get():
         if event.type == pygame.QUIT:
             run = False

-    #small test of work_on_field method:
     time.sleep(1)
-    tile1 = tiles[0]
+    # movement based on route-planning (test):
+    tractor.draw_tractor(WIN)
+    time.sleep(1)
+    if moves != False:
+        do_actions(tractor, WIN, moves)
+
+    #guessing the image under the tile:
+    goalTile = tiles[tile_index]
+    image_path = goalTile.photo
+    display_image(WIN, goalTile.photo, (WIDTH-20, 300)) #displays photo next to the field
+    pygame.display.update()
+    image_tensor = load_image(image_path)
+    prediction = guess_image(load_model(), image_tensor)
+    display_result(WIN, (WIDTH - 50, 600), prediction) #display text under the photo
+    pygame.display.update()
+    print(f"The predicted image is: {prediction}")
+
     p1 = Plant('wheat', 'cereal', random.randint(1,100), random.randint(1,100), random.randint(1,100))
+    goalTile.plant = p1
     d1 = Dirt(random.randint(1, 100), random.randint(1,100))
     d1.pests_and_weeds()
-    tile1.ground=d1
+    goalTile.ground=d1
+
+    #getting the name and type of the recognized plant:
+    p1.update_name(prediction)
+
+    #decission tree test:
     if d1.pest:
         pe = 1
     else:

@@ -116,19 +145,13 @@ def main():
     model = joblib.load('model.pkl')
     nowe_dane = pd.read_csv('model_data.csv')
     predykcje = model.predict(nowe_dane)

-    # movement based on route-planning (test):
-    tractor.draw_tractor(WIN)
-    time.sleep(1)
-    if moves != False:
-        do_actions(tractor, WIN, moves)

     print(predykcje)
-    #work on field:
     if predykcje == 'work':
-        tractor.work_on_field(tile1, d1, p1)
-    time.sleep(30)
+        tractor.work_on_field(goalTile, d1, p1)
+    time.sleep(50)
     print("\n")


@@ -19,7 +19,19 @@ class Plant:
         else:
             print("Unable to grow due to bad condition of the ground")

-    # more properties
-    # add init, getters,setters
+    def update_name(self, predicted_class):
+        if predicted_class == "Apple":
+            self.name = "apple"
+            self.plant_type = 'fruit'
+        elif predicted_class == "Radish":
+            self.name = "radish"
+            self.plant_type = 'vegetable'
+        elif predicted_class == "Cauliflower":
+            self.name = "cauliflower"
+            self.plant_type = 'vegetable'
+        elif predicted_class == "Wheat":
+            self.name = "wheat"
+            self.plant_type = 'cereal'
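For reference (illustrative, not part of this diff): the chain of elif branches above could also be written as a lookup table. A sketch of that alternative, keeping the same class names and falling back to the current values for unknown classes:

PLANT_INFO = {
    "Apple": ("apple", "fruit"),
    "Radish": ("radish", "vegetable"),
    "Cauliflower": ("cauliflower", "vegetable"),
    "Wheat": ("wheat", "cereal"),
}

def update_name(self, predicted_class):
    # keep the existing name/type if the predicted class is not in the table
    name, plant_type = PLANT_INFO.get(predicted_class, (self.name, self.plant_type))
    self.name = name
    self.plant_type = plant_type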

The remaining changes in this diff are binary image files (plant photos) that were added or replaced; previews are not shown. Some files were not shown because too many files have changed in this diff.