Compare commits
No commits in common. "82ab417bfc648edfe4fcace83f14b0a166791255" and "d2ad851cab3583dc20097425fc9a0e9cc7bcc39a" have entirely different histories.
82ab417bfc
...
d2ad851cab
@ -1,3 +0,0 @@
|
||||
import torch
|
||||
x = torch.rand(5, 3)
|
||||
print(x)
|
@ -1,3 +1,4 @@
|
||||
import sys
|
||||
import pygame
|
||||
from field import Field
|
||||
import os
|
||||
|
59
src/tile.py
59
src/tile.py
@ -4,10 +4,6 @@ from kb import tractor_kb
|
||||
import pytholog as pl
|
||||
import random
|
||||
from config import TILE_SIZE, FREE_TILES
|
||||
import torch
|
||||
import torchvision.transforms as transforms
|
||||
from PIL import Image
|
||||
|
||||
|
||||
class Tile(pygame.sprite.Sprite):
|
||||
|
||||
@ -30,40 +26,15 @@ class Tile(pygame.sprite.Sprite):
|
||||
self.set_type(random_vegetable)
|
||||
self.water_level = random.randint(1, 5) * 10
|
||||
self.stage = 'planted' # wczesniej to była self.faza = 'posadzono' ale stwierdzilem ze lepiej po angielsku???
|
||||
|
||||
classes = [
|
||||
"bób", "brokuł", "brukselka", "burak", "cebula",
|
||||
"cukinia", "dynia", "fasola", "groch", "jarmuż",
|
||||
"kalafior", "kalarepa", "kapusta", "marchew",
|
||||
"ogórek", "papryka", "pietruszka", "pomidor",
|
||||
"por", "rzepa", "rzodkiewka", "sałata", "seler",
|
||||
"szpinak", "ziemniak"]
|
||||
|
||||
model = torch.load("veggies_recognition/best_model.pth")
|
||||
|
||||
mean = [0.5322, 0.5120, 0.3696]
|
||||
std = [0.2487, 0.2436, 0.2531]
|
||||
|
||||
image_transforms = transforms.Compose([
|
||||
transforms.Resize((224, 224)),
|
||||
transforms.ToTensor(),
|
||||
transforms.Normalize(torch.Tensor(mean),torch.Tensor(std))
|
||||
])
|
||||
|
||||
self.prediction = self.predict(model, image_transforms, self.image_path, classes)
|
||||
|
||||
|
||||
else:
|
||||
if random.randint(1, 10) % 3 == 0:
|
||||
self.set_type('water')
|
||||
self.water_level = 100
|
||||
self.stage = 'no_plant'
|
||||
self.prediction = 'water'
|
||||
else:
|
||||
self.set_type('grass')
|
||||
self.water_level = random.randint(1, 5) * 10
|
||||
self.stage = 'no_plant'
|
||||
self.prediction = 'grass'
|
||||
|
||||
|
||||
self.rect = self.image.get_rect()
|
||||
@ -73,17 +44,6 @@ class Tile(pygame.sprite.Sprite):
|
||||
def draw(self, surface):
    """Render every sprite held in this tile's sprite group onto *surface*."""
    sprite_group = self.tiles
    sprite_group.draw(surface)
|
||||
|
||||
def get_random_image_from_folder(self):
    """Return the path of a randomly chosen image for this tile's type.

    Looks in ``veggies_recognition/veggies/testing/<self.type>`` (relative
    to the working directory) and picks one regular file at random.

    Returns:
        str: path to the chosen image file.

    Raises:
        FileNotFoundError: if the folder for this tile type does not exist.
        IndexError: if the folder contains no regular files.
    """
    folder_path = f"veggies_recognition/veggies/testing/{self.type}"

    # Keep only regular files; sub-directories are skipped.
    files = [f for f in os.listdir(folder_path)
             if os.path.isfile(os.path.join(folder_path, f))]
    random_file = random.choice(files)

    # os.path.join builds a portable path (the old "+"-concatenation
    # hard-coded "/" and broke on non-POSIX separators).
    return os.path.join(folder_path, random_file)
|
||||
|
||||
def set_type(self, type):
|
||||
self.type = type
|
||||
if self.type == 'grass':
|
||||
@ -91,26 +51,9 @@ class Tile(pygame.sprite.Sprite):
|
||||
elif self.type == 'water':
|
||||
image_path = "images/water.png"
|
||||
else:
|
||||
#image_path = f"images/vegetables/{self.type}.png"
|
||||
image_path = self.get_random_image_from_folder()
|
||||
image_path = f"images/vegetables/{self.type}.png"
|
||||
if not os.path.exists(image_path):
|
||||
image_path = "images/question.jpg"
|
||||
|
||||
self.image_path = image_path
|
||||
self.image = pygame.image.load(image_path).convert()
|
||||
self.image = pygame.transform.scale(self.image, (TILE_SIZE, TILE_SIZE))
|
||||
|
||||
def predict(self, model, image_transforms, image_path, classes):
    """Classify the image at *image_path* and return its predicted label.

    Parameters:
        model: trained torch module used for inference.
        image_transforms: torchvision transform pipeline producing a tensor.
        image_path (str): path of the image file to classify.
        classes: sequence of label strings indexed by the model's output class.

    Returns:
        The element of *classes* with the highest predicted score.
    """
    model.eval()  # inference mode: freezes dropout / batch-norm statistics
    image = Image.open(image_path).convert("RGB")  # force 3 channels
    batch = image_transforms(image).float().unsqueeze(0)  # add batch dim

    # no_grad(): skip building the autograd graph during inference —
    # less memory and faster than the original, identical outputs.
    with torch.no_grad():
        output = model(batch)
    # torch.max over dim 1 yields (values, argmax indices); `.data` access
    # is deprecated and unnecessary under no_grad().
    _, predicted = torch.max(output, 1)

    return classes[predicted.item()]
|
||||
|
||||
|
||||
|
@ -67,9 +67,7 @@ class Tractor(pygame.sprite.Sprite):
|
||||
neighbors.append('grass')
|
||||
|
||||
input_data = {
|
||||
#tutaj będzie dostawał informację ze zdjęcia
|
||||
'tile_type': self.get_current_tile().prediction,
|
||||
#'tile_type': self.get_current_tile().type,
|
||||
'tile_type': self.get_current_tile().type,
|
||||
'water_level': self.get_current_tile().water_level,
|
||||
"plant_stage": self.get_current_tile().stage,
|
||||
"neighbor_N": neighbors[0],
|
||||
@ -182,7 +180,6 @@ class Tractor(pygame.sprite.Sprite):
|
||||
if (self.get_current_tile().type != 'grass' or self.get_current_tile().type == 'water'): action = 'move'
|
||||
self.prev_action = action
|
||||
|
||||
|
||||
match (action):
|
||||
case ('move'):
|
||||
pass
|
||||
@ -243,10 +240,7 @@ class Tractor(pygame.sprite.Sprite):
|
||||
self.get_current_tile().set_type('ziemniak')
|
||||
self.move_2()
|
||||
#self.action_index += 1
|
||||
print("Rozpoznano: ", self.get_current_tile().prediction)
|
||||
print("Co jest faktycznie: ", self.get_current_tile().type)
|
||||
print("\n")
|
||||
|
||||
print(action)
|
||||
return
|
||||
|
||||
def log_info(self):
|
||||
|
@ -1,36 +0,0 @@
|
||||
# import torch
|
||||
# import torchvision.transforms as transforms
|
||||
# from PIL import Image
|
||||
|
||||
# classes = [
|
||||
# "bób", "brokuł", "brukselka", "burak", "cebula",
|
||||
# "cukinia", "dynia", "fasola", "groch", "jarmuż",
|
||||
# "kalafior", "kalarepa", "kapusta", "marchew",
|
||||
# "ogórek", "papryka", "pietruszka", "pomidor",
|
||||
# "por", "rzepa", "rzodkiewka", "sałata", "seler",
|
||||
# "szpinak", "ziemniak"]
|
||||
|
||||
# model = torch.load("best_model.pth")
|
||||
|
||||
# mean = [0.5322, 0.5120, 0.3696]
|
||||
# std = [0.2487, 0.2436, 0.2531]
|
||||
|
||||
# image_transforms = transforms.Compose([
|
||||
# transforms.Resize((224, 224)),
|
||||
# transforms.ToTensor(),
|
||||
# transforms.Normalize(torch.Tensor(mean),torch.Tensor(std))
|
||||
# ])
|
||||
|
||||
# def predict(model, image_transforms, image_path, classes):
|
||||
# model = model.eval()
|
||||
# image = Image.open(image_path)
|
||||
# print(image_path)
|
||||
# image = image_transforms(image).float()
|
||||
# image = image.unsqueeze(0)
|
||||
|
||||
# output = model(image)
|
||||
# _, predicted = torch.max(output.data, 1)
|
||||
|
||||
# print(classes[predicted.item()])
|
||||
|
||||
# predict(model, image_transforms, "veggies/marchew_118.jpg", classes)
|
Binary file not shown.
Before Width: | Height: | Size: 9.2 KiB |
Loading…
Reference in New Issue
Block a user