Compare commits


2 Commits

Author         SHA1         Message                    Date
Zofia Lorenc   82ab417bfc   added photo recognition    2024-05-26 22:34:44 +02:00
Zofia Lorenc   4955e737c5   photos for predictions     2024-05-26 17:44:01 +02:00
6 changed files with 106 additions and 5 deletions

src/import torch.py Normal file
View File

@@ -0,0 +1,3 @@
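# quick sanity check that PyTorch is installed: build and print a random 5x3 tensor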
import torch
x = torch.rand(5, 3)
print(x)

View File

@@ -1,4 +1,3 @@
import sys
import pygame
from field import Field
import os

View File

@@ -4,6 +4,10 @@ from kb import tractor_kb
import pytholog as pl
import random
from config import TILE_SIZE, FREE_TILES
import torch
import torchvision.transforms as transforms
from PIL import Image
class Tile(pygame.sprite.Sprite):
@@ -26,15 +30,40 @@ class Tile(pygame.sprite.Sprite):
self.set_type(random_vegetable)
self.water_level = random.randint(1, 5) * 10
self.stage = 'planted' # this used to be self.faza = 'posadzono', but I decided English is better
classes = [
"bób", "brokuł", "brukselka", "burak", "cebula",
"cukinia", "dynia", "fasola", "groch", "jarmuż",
"kalafior", "kalarepa", "kapusta", "marchew",
"ogórek", "papryka", "pietruszka", "pomidor",
"por", "rzepa", "rzodkiewka", "sałata", "seler",
"szpinak", "ziemniak"]
model = torch.load("veggies_recognition/best_model.pth")
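# per-channel RGB mean/std, presumably the normalization statistics the model was trained with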
mean = [0.5322, 0.5120, 0.3696]
std = [0.2487, 0.2436, 0.2531]
image_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(torch.Tensor(mean),torch.Tensor(std))
])
self.prediction = self.predict(model, image_transforms, self.image_path, classes)
else:
if random.randint(1, 10) % 3 == 0:
self.set_type('water')
self.water_level = 100
self.stage = 'no_plant'
self.prediction = 'water'
else:
self.set_type('grass')
self.water_level = random.randint(1, 5) * 10
self.stage = 'no_plant'
self.prediction = 'grass'
self.rect = self.image.get_rect()
@@ -44,6 +73,17 @@ class Tile(pygame.sprite.Sprite):
def draw(self, surface):
self.tiles.draw(surface)
def get_random_image_from_folder(self):
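# pick a random test image matching this tile's vegetable type (it stands in for a photo of the field)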
folder_path = f"veggies_recognition/veggies/testing/{self.type}"
files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
random_file = random.choice(files)
#image_path = os.path.join(folder_path, random_file)
image_path = folder_path + "/" + random_file
#print(image_path)
return image_path
def set_type(self, type):
self.type = type
if self.type == 'grass':
@@ -51,9 +91,26 @@ class Tile(pygame.sprite.Sprite):
elif self.type == 'water':
image_path = "images/water.png"
else:
image_path = f"images/vegetables/{self.type}.png"
#image_path = f"images/vegetables/{self.type}.png"
image_path = self.get_random_image_from_folder()
if not os.path.exists(image_path):
image_path = "images/question.jpg"
self.image_path = image_path
self.image = pygame.image.load(image_path).convert()
self.image = pygame.transform.scale(self.image, (TILE_SIZE, TILE_SIZE))
def predict(self, model, image_transforms, image_path, classes):
model = model.eval()
image = Image.open(image_path)
image = image.convert("RGB")
image = image_transforms(image).float()
image = image.unsqueeze(0)
output = model(image)
_, predicted = torch.max(output.data, 1)
#print("Rozpoznano: ", classes[predicted.item()])
return classes[predicted.item()]
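
Because torch.load("veggies_recognition/best_model.pth") unpickles a complete model object, the class defining that model's architecture has to be importable when the tile is created. The snippet below is a minimal standalone sketch of the same inference path as Tile.predict, not part of the commit: predict_tile is an illustrative name, the model / image_transforms / classes arguments are the objects built in __init__ above, and the torch.no_grad() guard is an addition that simply skips gradient tracking during inference.

import torch
from PIL import Image

def predict_tile(model, image_transforms, image_path, classes):
    # same steps as Tile.predict above, wrapped in no_grad for inference
    model.eval()                                    # disable dropout / use running batch-norm stats
    image = Image.open(image_path).convert("RGB")   # force 3 channels before the transforms
    batch = image_transforms(image).unsqueeze(0)    # (1, 3, 224, 224): batch of one image
    with torch.no_grad():                           # no gradient tracking needed at inference time
        output = model(batch)
    _, predicted = torch.max(output, 1)             # index of the highest-scoring class
    return classes[predicted.item()]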

View File

@@ -67,7 +67,9 @@ class Tractor(pygame.sprite.Sprite):
neighbors.append('grass')
input_data = {
'tile_type': self.get_current_tile().type,
#here it will get the information from the photo
'tile_type': self.get_current_tile().prediction,
#'tile_type': self.get_current_tile().type,
'water_level': self.get_current_tile().water_level,
"plant_stage": self.get_current_tile().stage,
"neighbor_N": neighbors[0],
@@ -180,6 +182,7 @@ class Tractor(pygame.sprite.Sprite):
if (self.get_current_tile().type != 'grass' or self.get_current_tile().type == 'water'): action = 'move'
self.prev_action = action
match (action):
case ('move'):
pass
@@ -240,7 +243,10 @@ class Tractor(pygame.sprite.Sprite):
self.get_current_tile().set_type('ziemniak')
self.move_2()
#self.action_index += 1
print(action)
print("Rozpoznano: ", self.get_current_tile().prediction)
print("Co jest faktycznie: ", self.get_current_tile().type)
print("\n")
return
def log_info(self):

View File

@@ -0,0 +1,36 @@
# import torch
# import torchvision.transforms as transforms
# from PIL import Image
# classes = [
# "bób", "brokuł", "brukselka", "burak", "cebula",
# "cukinia", "dynia", "fasola", "groch", "jarmuż",
# "kalafior", "kalarepa", "kapusta", "marchew",
# "ogórek", "papryka", "pietruszka", "pomidor",
# "por", "rzepa", "rzodkiewka", "sałata", "seler",
# "szpinak", "ziemniak"]
# model = torch.load("best_model.pth")
# mean = [0.5322, 0.5120, 0.3696]
# std = [0.2487, 0.2436, 0.2531]
# image_transforms = transforms.Compose([
# transforms.Resize((224, 224)),
# transforms.ToTensor(),
# transforms.Normalize(torch.Tensor(mean),torch.Tensor(std))
# ])
# def predict(model, image_transforms, image_path, classes):
# model = model.eval()
# image = Image.open(image_path)
# print(image_path)
# image = image_transforms(image).float()
# image = image.unsqueeze(0)
# output = model(image)
# _, predicted = torch.max(output.data, 1)
# print(classes[predicted.item()])
# predict(model, image_transforms, "veggies/marchew_118.jpg", classes)

Binary file not shown.

After: 9.2 KiB