Merge pull request 'recognize_garbage_photos' (#36) from recognize_garbage_photos into master
Reviewed-on: #36
[20 binary image entries (14-38 KiB each); the diff viewer captured only size metadata for these photos, no filenames.]
@@ -1,8 +1,11 @@
+import os
 from keras.preprocessing.image import ImageDataGenerator
 from keras.models import Sequential
 from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
 
 train_data_dir = "garbage_photos"
+location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
+train_data_dir = os.path.join(location, train_data_dir)
 
 input_shape = (150, 150, 3)
 num_classes = 5
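The hunk above edits the Keras training script for the photo classifier (its filename is not captured in this view) so that train_data_dir resolves relative to the script's own directory instead of the current working directory. For orientation only, here is a minimal sketch of how the remainder of such a script typically proceeds, given the imports, the (150, 150, 3) input shape and the five classes declared above; the layer widths, batch size, epoch count and save path are illustrative assumptions, not values taken from this PR.

# Sketch only: a typical Sequential CNN trained from train_data_dir.
# Layer sizes, batch_size, epochs and the save path are assumptions.
train_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=input_shape[:2],   # (150, 150)
    batch_size=32,
    class_mode="categorical")      # one sub-folder per class -> one-hot labels

model = Sequential([
    Conv2D(32, (3, 3), activation="relu", input_shape=input_shape),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation="relu"),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(128, activation="relu"),
    Dense(num_classes, activation="softmax")])

model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics=["accuracy"])
model.fit(train_generator, epochs=10)
model.save("machine_learning/neuralModel.h5")  # the file movement.py loads below

The 1/255 rescaling matters because movement.py (below) divides each image array by 255 before prediction; the training and inference preprocessing need to match.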
machine_learning/photos_not_from_train_set/21.jpg (new binary file, 5.3 KiB)
machine_learning/photos_not_from_train_set/7.jpg (new binary file, 11 KiB)
[19 further binary image entries with identical before/after sizes; the diff viewer captured only size metadata for these, no filenames.]
movement.py (46 changed lines)
@@ -2,7 +2,7 @@ import joblib
 from sklearn.calibration import LabelEncoder
 from agentActionType import AgentActionType
 import time
-from garbage import GarbageType, RecognizedGarbage
+from garbage import Garbage, GarbageType, RecognizedGarbage
 from garbageCan import GarbageCan
 from turnCar import turn_left_orientation, turn_right_orientation
 from garbageTruck import GarbageTruck
@@ -14,6 +14,12 @@ import pygame
 from bfs import find_path_to_nearest_can
 from agentState import AgentState
+
+import tensorflow as tf
+from keras.models import load_model
+import keras.utils as image
+from keras.optimizers import Adam
+import numpy as np
 
 
 def collect_garbage(game_context: GameContext) -> None:
     while True:
@@ -30,11 +36,12 @@ def collect_garbage(game_context: GameContext) -> None:
             pass
 
 def _recognize_garbage(dust_car: GarbageTruck, can: GarbageCan) -> None:
-    loaded_model = joblib.load('machine_learning/model.pkl')
+    tree_model = joblib.load('machine_learning/model.pkl')
+    optimizer = Adam(learning_rate=0.001)
+    neural_model = load_model('machine_learning/neuralModel.h5', compile=False)
+    neural_model.compile(optimizer=optimizer)
     for garbage in can.garbage:
-        attributes = [garbage.shape, garbage.flexibility, garbage.does_smell, garbage.weight, garbage.size, garbage.color, garbage.softness, garbage.does_din]
-        encoded = attributes_to_floats(attributes)
-        predicted_class = loaded_model.predict([encoded])[0]
+        predicted_class = predict_class(garbage, tree_model, neural_model)
         garbage_type: GarbageType = None
         if predicted_class == 'PAPER':
             garbage_type = GarbageType.PAPER
@@ -50,6 +57,35 @@ def _recognize_garbage(dust_car: GarbageTruck, can: GarbageCan) -> None:
         recognized_garbage = RecognizedGarbage(garbage, garbage_type)
         dust_car.sort_garbage(recognized_garbage)
 
+
+def predict_class(garbage: Garbage, tree_model, neural_model) -> str:
+    if garbage.img is None:
+        return predict_class_from_tree(garbage, tree_model)
+    return predict_class_from_neural_model(garbage, neural_model)
+
+def predict_class_from_tree(garbage: Garbage, tree_model) -> str:
+    attributes = [garbage.shape, garbage.flexibility, garbage.does_smell, garbage.weight, garbage.size, garbage.color, garbage.softness, garbage.does_din]
+    encoded = attributes_to_floats(attributes)
+    return tree_model.predict([encoded])[0]
+
+def predict_class_from_neural_model(garbage: Garbage, neural_model) -> str:
+    img = image.load_img(garbage.img, target_size=(150, 150))
+    img_array = image.img_to_array(img)
+    img_array = np.expand_dims(img_array, axis=0)
+    img_array /= 255.
+
+    predictions = neural_model.predict(img_array)
+    prediction = np.argmax(predictions[0])
+    if prediction == 0:
+        return "BIO"
+    if prediction == 1:
+        return "GLASS"
+    if prediction == 2:
+        return "MIXED"
+    if prediction == 3:
+        return "PAPER"
+    if prediction == 4:
+        return "PLASTIC_AND_METAL"
 def attributes_to_floats(attributes: list[str]) -> list[float]:
     output: list[float] = []
     if attributes[0] == 'Longitiudonal':
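The new movement.py helpers route attribute-only garbage (garbage.img is None) to the decision tree loaded from model.pkl, and image-backed garbage to the CNN loaded from neuralModel.h5, resizing each photo to 150x150 and dividing by 255 to mirror the training preprocessing. The index-to-label chain at the end depends on the class ordering fixed at training time; if the classes come from flow_from_directory, folders are indexed in alphabetical order, which matches BIO, GLASS, MIXED, PAPER, PLASTIC_AND_METAL. As a sketch only (not part of the PR), the same mapping can be written as a lookup table:

# Sketch, not in the PR: equivalent rewrite of predict_class_from_neural_model
# using a lookup table. Assumes the alphabetical class order produced by
# flow_from_directory at training time.
CLASS_LABELS = ["BIO", "GLASS", "MIXED", "PAPER", "PLASTIC_AND_METAL"]

def predict_class_from_neural_model(garbage: Garbage, neural_model) -> str:
    img = image.load_img(garbage.img, target_size=(150, 150))
    img_array = np.expand_dims(image.img_to_array(img), axis=0) / 255.0  # same scaling as training
    prediction = int(np.argmax(neural_model.predict(img_array)[0]))
    return CLASS_LABELS[prediction]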
startup.py (20 changed lines)
@@ -36,12 +36,19 @@ def create_city() -> City:
     streets = create_streets()
     trashcans = create_trashcans()
     bumps = create_speed_bumps()
-    garbage_pieces = create_garbage_pieces()
+    garbage_pieces = create_garbage_pieces_witout_imgs()
     garbage_pieces_counter = 0
     for s in streets:
         city.add_street(s)
     for t in trashcans:
-        for i in range(4):
+        for _ in range(4):
             t.add_garbage(garbage_pieces[garbage_pieces_counter])
             garbage_pieces_counter = garbage_pieces_counter + 1
         city.add_can(t)
+    garbage_pieces = create_garbage_pieces_with_images()
+    garbage_pieces_counter = 0
+    for t in trashcans:
+        for _ in range(3):
+            t.add_garbage(garbage_pieces[garbage_pieces_counter])
+            garbage_pieces_counter = garbage_pieces_counter + 1
+        city.add_can(t)
@@ -50,16 +57,21 @@ def create_city() -> City:
     return city
 
 
-def create_garbage_pieces() -> List[Garbage]:
+def create_garbage_pieces_witout_imgs() -> List[Garbage]:
     garbage_pieces = []
     with open('machine_learning/garbage_infill.csv', 'r') as file:
         lines = file.readlines()
         for line in lines[1:]:
             param = line.strip().split(',')
             garbage_pieces.append(
-                Garbage('img', param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7].strip()))
+                Garbage(None, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7].strip()))
     return garbage_pieces
 
+def create_garbage_pieces_with_images() -> list[Garbage]:
+    garbage_pieces = []
+    for i in range(1, 22):
+        garbage_pieces.append(Garbage('machine_learning/photos_not_from_train_set/' + str(i) + '.jpg', None, None, None, None, None, None, None, None))
+    return garbage_pieces
 
 def create_streets() -> List[Street]:
     streets = []
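With these startup.py changes, create_city fills every trashcan with four attribute-only pieces read from garbage_infill.csv (img=None, so the decision tree classifies them) and three image-backed pieces pointing at 1.jpg through 21.jpg in machine_learning/photos_not_from_train_set (so the CNN classifies them); range(1, 22) yields 21 photos, which is three per can if the map has seven cans. Note that both fill loops end with city.add_can(t) for the same trashcans, so each can appears to be registered twice. A purely illustrative alternative (the helper name is hypothetical, not from the PR) that fills both kinds of garbage in one pass and registers each can once:

# Sketch, not in the PR: fill each can with attribute-only and image-backed pieces,
# then register the can with the city exactly once.
def _fill_trashcans(city, trashcans, plain_pieces, image_pieces,
                    plain_per_can=4, images_per_can=3):
    plain_iter = iter(plain_pieces)
    image_iter = iter(image_pieces)
    for can in trashcans:
        for _ in range(plain_per_can):
            can.add_garbage(next(plain_iter))   # raises StopIteration if pieces run out
        for _ in range(images_per_can):
            can.add_garbage(next(image_iter))
        city.add_can(can)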