Neural_network #4
BIN  source/CNN_model.pth  Normal file
BIN  source/NN/__pycache__/model.cpython-311.pyc  Normal file
BIN  source/NN/__pycache__/neural_network.cpython-311.pyc  Normal file

source/NN/model.py  Normal file (27 lines added)
@@ -0,0 +1,27 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class Conv_Neural_Network_Model(nn.Module):
    def __init__(self, num_classes=5, hidden_layer1=512, hidden_layer2=256):
        super(Conv_Neural_Network_Model, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1)

        self.fc1 = nn.Linear(64*25*25, hidden_layer1)
        self.fc2 = nn.Linear(hidden_layer1, hidden_layer2)
        self.out = nn.Linear(hidden_layer2, num_classes)

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool1(F.relu(self.conv2(x)))
        x = x.view(-1, 64*25*25)  # <---- flattening the image
        x = self.fc1(x)
        x = torch.relu(x)
        x = self.fc2(x)
        x = torch.relu(x)
        x = self.out(x)
        return F.log_softmax(x, dim=-1)
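A note on the fc1 input size: 64*25*25 assumes the 100x100 RGB inputs produced by the Resize transform in neural_network.py, which the two 2x2 max-pool steps halve twice (100 -> 50 -> 25) over 64 channels. A minimal sanity-check sketch (not part of the commit; assumes the NN package layout used by the imports in this PR):

import torch
from NN.model import Conv_Neural_Network_Model

model = Conv_Neural_Network_Model()
dummy = torch.randn(1, 3, 100, 100)   # one fake RGB image at the 100x100 training resolution
out = model(dummy)
print(out.shape)                      # torch.Size([1, 5]) -- log-probabilities for the 5 classes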
source/NN/neural_network.py  Normal file (120 lines added)
@@ -0,0 +1,120 @@
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms, utils
from torchvision.transforms import Compose, Lambda, ToTensor
import matplotlib.pyplot as plt
from NN.model import *
from PIL import Image
import pygame

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

#data transform to tensors:
data_transformer = transforms.Compose([
    transforms.Resize((100, 100)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])


#loading data:
train_set = datasets.ImageFolder(root='resources/train', transform=data_transformer)
test_set = datasets.ImageFolder(root='resources/test', transform=data_transformer)


#this could even be moved into the train function:
# train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
# test_loader = DataLoader(test_set, batch_size=32, shuffle=True)


#function for training the model
def train(model, dataset, iter=100, batch_size=64):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    criterion = nn.NLLLoss()
    train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    model.train()

    for epoch in range(iter):
        for inputs, labels in train_loader:
            optimizer.zero_grad()
            output = model(inputs.to(device))
            loss = criterion(output, labels.to(device))
            loss.backward()
            optimizer.step()
        if epoch % 10 == 0:
            print('epoch: %3d loss: %.4f' % (epoch, loss))


#function for computing accuracy
def accuracy(model, dataset):
    model.eval()
    with torch.no_grad():
        correct = sum([
            (model(inputs.to(device)).argmax(dim=1) == labels.to(device)).sum()
            for inputs, labels in DataLoader(dataset, batch_size=64, shuffle=True)
        ])
    return correct.float() / len(dataset)


# model = Conv_Neural_Network_Model()
# model.to(device)

#loading the already saved model:
# model.load_state_dict(torch.load('CNN_model.pth'))
# model.eval()

# #training the model:
# train(model, train_set)
# print(f"Accuracy of the network is: {100*accuracy(model, test_set)}%")
# torch.save(model.state_dict(), 'CNN_model.pth')


def load_model():
    model = Conv_Neural_Network_Model()
    model.load_state_dict(torch.load('CNN_model.pth', map_location=torch.device('cpu')))
    model.eval()
    return model


def load_image(image_path):
    testImage = Image.open(image_path).convert('RGB')
    testImage = data_transformer(testImage)
    testImage = testImage.unsqueeze(0)
    return testImage


def display_image(screen, image_path, position):
    image = pygame.image.load(image_path)
    image = pygame.transform.scale(image, (250, 250))
    screen.blit(image, position)


def display_result(screen, position, predicted_class):
    font = pygame.font.Font(None, 30)
    displayed_text = font.render("The predicted image is: " + str(predicted_class), 1, (255, 255, 255))
    screen.blit(displayed_text, position)


def guess_image(model, image_tensor):
    with torch.no_grad():
        testOutput = model(image_tensor)
        _, predicted = torch.max(testOutput, 1)
        predicted_class = train_set.classes[predicted.item()]
    return predicted_class


#TEST - loading the image and getting results:
# testImage_path = 'resources/images/plant_photos/1c76aa4d-11f4-47d1-8bdd-2cb78deeeccf.jpg'
# testImage = Image.open(testImage_path)
# testImage = data_transformer(testImage)
# testImage = testImage.unsqueeze(0)
# testImage = testImage.to(device)

# model.load_state_dict(torch.load('CNN_model.pth'))
# model.to(device)
# model.eval()

# testOutput = model(testImage)
# _, predicted = torch.max(testOutput, 1)
# predicted_class = train_set.classes[predicted.item()]
# print(f'The predicted class is: {predicted_class}')
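For reference, the inference path that main.py builds on top of these helpers reduces to roughly the following (a sketch; the image path is a placeholder, not a file from this commit):

from NN.neural_network import load_model, load_image, guess_image

model = load_model()                       # restores weights from CNN_model.pth on the CPU
image_tensor = load_image('resources/images/plant_photos/example.jpg')   # placeholder path
print(guess_image(model, image_tensor))    # class name looked up in train_set.classes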
BIN  source/__pycache__/astar.cpython-311.pyc  Normal file
@@ -53,3 +53,10 @@ def get_tile_coordinates(index):
         return tile.x, tile.y
     else:
         return None
+
+def get_tile_index():
+    valid_indices = []
+    for index, tile in enumerate(tiles):
+        if tile.image == "resources/images/sampling.png":
+            valid_indices.append(index)
+    return random.choice(valid_indices)
@@ -3,17 +3,21 @@ import time
 import random
 import pandas as pd
 import joblib
 
 from area.constants import WIDTH, HEIGHT, TILE_SIZE
 from area.field import drawWindow
 from area.tractor import Tractor, do_actions
 from area.field import tiles, fieldX, fieldY
-from area.field import get_tile_coordinates
+from area.field import get_tile_coordinates, get_tile_index
 from ground import Dirt
 from plant import Plant
 from bfs import graphsearch, Istate, succ
 from astar import a_star
-WIN = pygame.display.set_mode((WIDTH, HEIGHT))
+from NN.neural_network import load_model, load_image, guess_image, display_image, display_result
+from PIL import Image
+
+pygame.init()
+WIN_WIDTH = WIDTH + 300
+WIN = pygame.display.set_mode((WIN_WIDTH, HEIGHT))
 pygame.display.set_caption('Intelligent tractor')
 
 
@@ -23,7 +27,7 @@ def main():
     pygame.display.update()
 
     #getting coordinates of our "goal tile":
-    tile_index = 127
+    tile_index = get_tile_index()
     tile_x, tile_y = get_tile_coordinates(tile_index)
     if tile_x is not None and tile_y is not None:
         print(f"Coordinates of tile {tile_index} are: ({tile_x}, {tile_y})")
@@ -62,14 +66,39 @@ def main():
         for event in pygame.event.get():
             if event.type == pygame.QUIT:
                 run = False
 
-        #small test of work_on_field method:
         time.sleep(1)
-        tile1 = tiles[0]
 
+        # movement based on route-planning (test):
+        tractor.draw_tractor(WIN)
+        time.sleep(1)
+        if moves != False:
+            do_actions(tractor, WIN, moves)
+
+        #guessing the image under the tile:
+        goalTile = tiles[tile_index]
+        image_path = goalTile.photo
+        display_image(WIN, goalTile.photo, (WIDTH - 20, 300))  #displays photo next to the field
+        pygame.display.update()
+
+        image_tensor = load_image(image_path)
+        prediction = guess_image(load_model(), image_tensor)
+
+        display_result(WIN, (WIDTH - 50, 600), prediction)  #display text under the photo
+        pygame.display.update()
+        print(f"The predicted image is: {prediction}")
+
         p1 = Plant('wheat', 'cereal', random.randint(1,100), random.randint(1,100), random.randint(1,100))
+        goalTile.plant = p1
         d1 = Dirt(random.randint(1, 100), random.randint(1,100))
         d1.pests_and_weeds()
-        tile1.ground = d1
+        goalTile.ground = d1
+
+        #getting the name and type of the recognized plant:
+        p1.update_name(prediction)
+
+        #decision tree test:
         if d1.pest:
             pe = 1
         else:
@@ -116,19 +145,13 @@ def main():
 
         model = joblib.load('model.pkl')
         nowe_dane = pd.read_csv('model_data.csv')
 
         predykcje = model.predict(nowe_dane)
 
-        # movement based on route-planning (test):
-
-        tractor.draw_tractor(WIN)
-        time.sleep(1)
-        if moves != False:
-            do_actions(tractor, WIN, moves)
         print(predykcje)
 
+        #work on field:
         if predykcje == 'work':
-            tractor.work_on_field(tile1, d1, p1)
-        time.sleep(30)
+            tractor.work_on_field(goalTile, d1, p1)
+        time.sleep(50)
         print("\n")
 
 
@@ -19,7 +19,19 @@ class Plant:
         else:
             print("Unable to grow due to bad condition of the ground")
 
-    # more properties
-
-
-    # add init, getters,setters
+    def update_name(self, predicted_class):
+        if predicted_class == "Apple":
+            self.name = "apple"
+            self.plant_type = 'fruit'
+
+        elif predicted_class == "Radish":
+            self.name = "radish"
+            self.plant_type = 'vegetable'
+
+        elif predicted_class == "Cauliflower":
+            self.name = "cauliflower"
+            self.plant_type = 'vegetable'
+
+        elif predicted_class == "Wheat":
+            self.name = "wheat"
+            self.plant_type = 'cereal'
BIN  source/resources/images/plant_photos/0000000004079.webp  Normal file (39 KiB)
BIN  source/resources/images/plant_photos/5243790-apples-c-rex.jpg  Normal file (22 KiB)
BIN  source/resources/images/plant_photos/91FfdTLrL7L.jpg  Normal file (542 KiB)
BIN  source/resources/images/plant_photos/apple.jpeg  Normal file (7.0 KiB)
BIN  source/resources/images/plant_photos/ccc.jpeg  Normal file (8.3 KiB)
BIN  source/resources/images/plant_photos/honeycrisp2_5184x.webp  Normal file (1.2 MiB)
BIN  source/resources/images/plant_photos/images.jpeg  Normal file (6.8 KiB)
BIN  source/resources/images/plant_photos/imawes.jpeg  Normal file (8.5 KiB)
BIN  source/resources/images/plant_photos/imewges.jpeg  Normal file (9.7 KiB)
BIN  source/resources/images/plant_photos/pexels-photo-12801993.jpeg  Normal file (197 KiB)
BIN  source/resources/images/plant_photos/radd.jpeg  Normal file (11 KiB)
BIN  source/resources/images/plant_photos/s-l1200.webp  Normal file (62 KiB)
BIN  source/resources/test/Apple/img_1001.jpeg  Normal file (7.1 KiB)
BIN  source/resources/test/Apple/img_1071.jpeg  Normal file (7.4 KiB)
BIN  source/resources/test/Apple/img_1091.jpeg  Normal file (3.7 KiB)
BIN  source/resources/test/Apple/img_1101.jpeg  Normal file (4.5 KiB)
BIN  source/resources/test/Apple/img_1111.jpeg  Normal file (3.2 KiB)
BIN  source/resources/test/Apple/img_1121.jpeg  Normal file (8.6 KiB)
BIN  source/resources/test/Apple/img_1131.jpeg  Normal file (4.5 KiB)
BIN  source/resources/test/Apple/img_1141.jpeg  Normal file (5.8 KiB)
BIN  source/resources/test/Apple/img_1181.jpeg  Normal file (5.0 KiB)
BIN  source/resources/test/Apple/img_1201.jpeg  Normal file (10 KiB)
BIN  source/resources/test/Apple/img_1211.jpeg  Normal file (9.8 KiB)
BIN  source/resources/test/Apple/img_1221.jpeg  Normal file (6.4 KiB)
BIN  source/resources/test/Apple/img_1241.jpeg  Normal file (7.1 KiB)
BIN  source/resources/test/Apple/img_1251.jpeg  Normal file (6.8 KiB)
BIN  source/resources/test/Apple/img_1261.jpeg  Normal file (5.8 KiB)
BIN  source/resources/test/Apple/img_1271.jpeg  Normal file (7.8 KiB)
BIN  source/resources/test/Apple/img_1281.jpeg  Normal file (9.1 KiB)
BIN  source/resources/test/Apple/img_1301.jpeg  Normal file (3.8 KiB)
BIN  source/resources/test/Apple/img_1311.jpeg  Normal file (4.9 KiB)
BIN  source/resources/test/Apple/img_1321.jpeg  Normal file (12 KiB)
BIN  source/resources/test/Apple/img_1331.jpeg  Normal file (8.8 KiB)
BIN  source/resources/test/Apple/img_1341.jpeg  Normal file (6.9 KiB)
BIN  source/resources/test/Apple/img_1351.jpeg  Normal file (6.7 KiB)
BIN  source/resources/test/Apple/img_1361.jpeg  Normal file (5.9 KiB)
BIN  source/resources/test/Apple/img_1401.jpeg  Normal file (3.8 KiB)
BIN  source/resources/test/Apple/img_1411.jpeg  Normal file (12 KiB)
BIN  source/resources/test/Apple/img_1451.jpeg  Normal file (5.5 KiB)
BIN  source/resources/test/Apple/img_1461.jpeg  Normal file (6.5 KiB)
BIN  source/resources/test/Apple/img_1521.jpeg  Normal file (10 KiB)
BIN  source/resources/test/Apple/img_1531.jpeg  Normal file (8.7 KiB)
BIN  source/resources/test/Apple/img_1541.jpeg  Normal file (4.6 KiB)
BIN  source/resources/test/Apple/img_1571.jpeg  Normal file (7.0 KiB)
BIN  source/resources/test/Apple/img_1581.jpeg  Normal file (5.0 KiB)
BIN  source/resources/test/Apple/img_1591.jpeg  Normal file (5.3 KiB)
BIN  source/resources/test/Apple/img_1601.jpeg  Normal file (9.3 KiB)
BIN  source/resources/test/Apple/img_1611.jpeg  Normal file (10 KiB)
BIN  source/resources/test/Apple/img_1621.jpeg  Normal file (11 KiB)
BIN  source/resources/test/Apple/img_1671.jpeg  Normal file (3.2 KiB)
BIN  source/resources/test/Apple/img_1711.jpeg  Normal file (7.4 KiB)
BIN  source/resources/test/Apple/img_1741.jpeg  Normal file (12 KiB)
BIN  source/resources/test/Apple/img_1771.jpeg  Normal file (7.6 KiB)
BIN  source/resources/test/Apple/img_1781.jpeg  Normal file (12 KiB)
BIN  source/resources/test/Apple/img_1801.jpeg  Normal file (6.6 KiB)
BIN  source/resources/test/Apple/img_1811.jpeg  Normal file (6.3 KiB)
BIN  source/resources/test/Apple/img_1821.jpeg  Normal file (5.4 KiB)
BIN  source/resources/test/Apple/img_1831.jpeg  Normal file (5.3 KiB)
BIN  source/resources/test/Apple/img_1841.jpeg  Normal file (10 KiB)
BIN  source/resources/test/Apple/img_1851.jpeg  Normal file (5.8 KiB)
BIN  source/resources/test/Apple/img_1911.jpeg  Normal file (8.7 KiB)
BIN  source/resources/test/Apple/img_1921.jpeg  Normal file (4.4 KiB)
BIN  source/resources/test/Apple/img_1931.jpeg  Normal file (7.0 KiB)
BIN  source/resources/test/Apple/img_1941.jpeg  Normal file (8.1 KiB)
BIN  source/resources/test/Apple/img_2011.jpeg  Normal file (4.4 KiB)
BIN  source/resources/test/Apple/img_2021.jpeg  Normal file (9.1 KiB)