Compare commits

...

11 Commits

46 changed files with 109 additions and 20 deletions

22
bfs.py
View File

@@ -8,16 +8,16 @@ from queue import Queue, PriorityQueue
from turnCar import turn_left_orientation, turn_right_orientation
class Successor:
class Successor: # represents a successor: a state and the action that can be taken from it
def __init__(self, state: AgentState, action: AgentActionType, cost: int, predicted_cost: int) -> None:
self.state = state
self.action = action
self.cost = cost
self.predicted_cost = cost
self.predicted_cost = predicted_cost
class SuccessorList:
class SuccessorList: # a list of successors, i.e. the possible paths from a given state
succ_list: list[Successor]
def __init__(self, succ_list: list[Successor]) -> None:
@@ -31,17 +31,17 @@ class SuccessorList:
def find_path_to_nearest_can(startState: AgentState, grid: Dict[Tuple[int, int], GridCellType], city: City) -> List[
AgentActionType]:
AgentActionType]: # finds a path to the nearest garbage can
visited: List[AgentState] = []
queue: PriorityQueue[SuccessorList] = PriorityQueue()
queue: PriorityQueue[SuccessorList] = PriorityQueue() # priority queue storing successor lists
queue.put(SuccessorList([Successor(startState, AgentActionType.UNKNOWN, 0, _heuristics(startState.position, city))]))
while not queue.empty():
while not queue.empty(): # while the queue is not empty, take the current element from it
current = queue.get()
previous = current.succ_list[-1]
visited.append(previous.state)
if is_state_success(previous.state, grid):
if is_state_success(previous.state, grid): # if the last state in the list is a goal state (the agent has reached a garbage can)
return extract_actions(current)
successors = get_successors(previous, grid, city)
@@ -61,7 +61,7 @@ def find_path_to_nearest_can(startState: AgentState, grid: Dict[Tuple[int, int],
return []
def extract_actions(successors: SuccessorList) -> list[AgentActionType]:
def extract_actions(successors: SuccessorList) -> list[AgentActionType]: # extracts the actions from a successor list, skipping UNKNOWN
output: list[AgentActionType] = []
for s in successors.succ_list:
if s.action != AgentActionType.UNKNOWN:
@@ -70,7 +70,7 @@ def extract_actions(successors: SuccessorList) -> list[AgentActionType]:
def get_successors(succ: Successor, grid: Dict[Tuple[int, int], GridCellType], city: City) -> List[Successor]:
result: List[Successor] = []
result: List[Successor] = [] # generates the successors of the given state
turn_left_cost = 1 + succ.cost
turn_left_state = AgentState(succ.state.position, turn_left_orientation(succ.state.orientation))
@@ -128,7 +128,7 @@ def get_next_cell(state: AgentState) -> Tuple[int, int]:
def is_state_success(state: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> bool:
next_cell = get_next_cell(state)
try:
return grid[next_cell] == GridCellType.GARBAGE_CAN
return grid[next_cell] == GridCellType.GARBAGE_CAN # the agent has reached a garbage can
except KeyError:
return False
@@ -137,7 +137,7 @@ def get_cost_for_action(action: AgentActionType, cell_type: GridCellType) -> int
if action in [AgentActionType.TURN_LEFT, AgentActionType.TURN_RIGHT]:
return 1
if cell_type == GridCellType.SPEED_BUMP and action == AgentActionType.MOVE_FORWARD:
return 10
return -10000
if action == AgentActionType.MOVE_FORWARD:
return 3
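
The predicted_cost fix above matters because PriorityQueue decides which partial path to expand next by comparing SuccessorList objects directly. The comparison method itself sits outside the hunks shown; a minimal sketch of what it could look like, assuming an A*-style ordering on f = cost + predicted_cost of the last successor in each list:

# Hypothetical sketch; the project's actual __lt__ is not part of this diff.
class SuccessorList:
    def __init__(self, succ_list: list) -> None:
        self.succ_list = succ_list

    def __lt__(self, other: "SuccessorList") -> bool:
        # order partial paths by f = g + h of their most recent successor
        a, b = self.succ_list[-1], other.succ_list[-1]
        return a.cost + a.predicted_cost < b.cost + b.predicted_cost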

Binary image files deleted in this comparison; previews not shown.

View File

@@ -0,0 +1,41 @@
import os
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
train_data_dir = "garbage_photos"
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
train_data_dir = os.path.join(location, train_data_dir)
input_shape = (150, 150, 3)
num_classes = 5
batch_size = 32
epochs = 20
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(input_shape[0], input_shape[1]),
batch_size=batch_size,
class_mode='categorical'
)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_generator, epochs=epochs)
classes = train_generator.class_indices
model.save("neuralModel.h5")

Image files changed or added; before/after previews not shown.

View File

@@ -2,7 +2,7 @@ import joblib
from sklearn.calibration import LabelEncoder
from agentActionType import AgentActionType
import time
from garbage import GarbageType, RecognizedGarbage
from garbage import Garbage, GarbageType, RecognizedGarbage
from garbageCan import GarbageCan
from turnCar import turn_left_orientation, turn_right_orientation
from garbageTruck import GarbageTruck
@@ -14,6 +14,12 @@ import pygame
from bfs import find_path_to_nearest_can
from agentState import AgentState
import tensorflow as tf
from keras.models import load_model
import keras.utils as image
from keras.optimizers import Adam
import numpy as np
def collect_garbage(game_context: GameContext) -> None:
while True:
@@ -30,11 +36,12 @@ def collect_garbage(game_context: GameContext) -> None:
pass
def _recognize_garbage(dust_car: GarbageTruck, can: GarbageCan) -> None:
loaded_model = joblib.load('machine_learning/model.pkl')
tree_model = joblib.load('machine_learning/model.pkl')
optimizer = Adam(learning_rate=0.001)
neural_model = load_model('machine_learning/neuralModel.h5', compile=False)
neural_model.compile(optimizer=optimizer)
for garbage in can.garbage:
attributes = [garbage.shape, garbage.flexibility, garbage.does_smell, garbage.weight, garbage.size, garbage.color, garbage.softness, garbage.does_din]
encoded = attributes_to_floats(attributes)
predicted_class = loaded_model.predict([encoded])[0]
predicted_class = predict_class(garbage, tree_model, neural_model)
garbage_type: GarbageType = None
if predicted_class == 'PAPER':
garbage_type = GarbageType.PAPER
@@ -50,6 +57,35 @@ def _recognize_garbage(dust_car: GarbageTruck, can: GarbageCan) -> None:
recognized_garbage = RecognizedGarbage(garbage, garbage_type)
dust_car.sort_garbage(recognized_garbage)
def predict_class(garbage: Garbage, tree_model, neural_model) -> str:
if garbage.img is None:
return predict_class_from_tree(garbage, tree_model)
return predict_class_from_neural_model(garbage, neural_model)
def predict_class_from_tree(garbage: Garbage, tree_model) -> str:
attributes = [garbage.shape, garbage.flexibility, garbage.does_smell, garbage.weight, garbage.size, garbage.color, garbage.softness, garbage.does_din]
encoded = attributes_to_floats(attributes)
return tree_model.predict([encoded])[0]
def predict_class_from_neural_model(garbage: Garbage, neural_model) -> str:
img = image.load_img(garbage.img, target_size=(150, 150))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
img_array /= 255.
predictions = neural_model.predict(img_array)
prediction = np.argmax(predictions[0])
if prediction == 0:
return "BIO"
if prediction == 1:
return "GLASS"
if prediction == 2:
return "MIXED"
if prediction == 3:
return "PAPER"
if prediction == 4:
return "PLASTIC_AND_METAL"
def attributes_to_floats(attributes: list[str]) -> list[float]:
output: list[float] = []
if attributes[0] == 'Longitiudonal':
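
A hypothetical call site tying the two classifiers together (predict_class and the model paths come from the diff above, the 9-argument Garbage constructor from the city setup below; the module that exports predict_class is not named in this comparison):

# Hypothetical usage sketch: a piece of garbage with an image path is routed
# to the CNN; attribute-only pieces would go to the decision tree instead.
import joblib
from keras.models import load_model
from garbage import Garbage

tree_model = joblib.load('machine_learning/model.pkl')
neural_model = load_model('machine_learning/neuralModel.h5', compile=False)

with_image = Garbage('machine_learning/photos_not_from_train_set/1.jpg',
                     None, None, None, None, None, None, None, None)
print(predict_class(with_image, tree_model, neural_model))  # prints one of the five class names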

View File

@@ -36,12 +36,19 @@ def create_city() -> City:
streets = create_streets()
trashcans = create_trashcans()
bumps = create_speed_bumps()
garbage_pieces = create_garbage_pieces()
garbage_pieces = create_garbage_pieces_witout_imgs()
garbage_pieces_counter = 0
for s in streets:
city.add_street(s)
for t in trashcans:
for i in range(4):
for _ in range(4):
t.add_garbage(garbage_pieces[garbage_pieces_counter])
garbage_pieces_counter = garbage_pieces_counter + 1
city.add_can(t)
garbage_pieces = create_garbage_pieces_with_images()
garbage_pieces_counter = 0
for t in trashcans:
for _ in range(3):
t.add_garbage(garbage_pieces[garbage_pieces_counter])
garbage_pieces_counter = garbage_pieces_counter + 1
city.add_can(t)
@@ -50,16 +57,21 @@ def create_city() -> City:
return city
def create_garbage_pieces() -> List[Garbage]:
def create_garbage_pieces_witout_imgs() -> List[Garbage]:
garbage_pieces = []
with open('machine_learning/garbage_infill.csv', 'r') as file:
lines = file.readlines()
for line in lines[1:]:
param = line.strip().split(',')
garbage_pieces.append(
Garbage('img', param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7].strip()))
Garbage(None, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7].strip()))
return garbage_pieces
def create_garbage_pieces_with_images() -> list[Garbage]:
garbage_pieces = []
for i in range(1, 22):
garbage_pieces.append(Garbage('machine_learning/photos_not_from_train_set/' + str(i) + '.jpg', None, None, None, None, None, None, None, None))
return garbage_pieces
def create_streets() -> List[Street]:
streets = []