diff --git a/AI_brain/genetic_algorytm.py b/AI_brain/genetic_algorytm.py
index b6170d7..f8c3075 100644
--- a/AI_brain/genetic_algorytm.py
+++ b/AI_brain/genetic_algorytm.py
@@ -11,46 +11,127 @@
 from domain.world import World
 from AI_brain.rotate_and_go_aStar import RotateAndGoAStar, State
 
-steps_distance_cashed = {}
+hits = 0
+misses = 0
+
+
+class Cashed_sub_paths(dict):
+    def __init__(self):
+        super().__init__()
+
+    def __missing__(self, key):
+        self[key] = Cashed_sub_paths()
+        return self[key]
+
+
+class Cashed_sub_path:
+    def __init__(self, sub_path: list[str] = [], distance: int = 0):
+        self.sub_path = sub_path
+        self.distance = distance
+
+
+steps_distance_cashed: dict[tuple[int, int], Cashed_sub_path] = Cashed_sub_paths()
 
 
 class Path:
     def __init__(self):
         self.walk = []
+        self.permutation = []
+        self.real_path = []
         self.distance = 0
 
     def random_walk(self, dusts: list[Entity]):
-        random_permutation = generate_random_permutation(len(dusts))
-        self.walk = addStopsForStopStation(
-            random_permutation, config.getint("CONSTANT", "BananaFilling")
+        permutation = generate_random_permutation(len(dusts))
+        self.permutation = permutation
+
+        self.walk = addStartAndStation(
+            permutation, config.getint("CONSTANT", "BananaFilling")
         )
 
     def calculate_distance(self, world: World):
         distance = 0
         for i in range(len(self.walk) - 1):
-            distance += self.step_distance(self.walk[i], self.walk[i + 1], world)
+            next_distance, next_real_path = self.step_distance(
+                self.walk[i], self.walk[i + 1], world
+            )
+            distance += next_distance
+
+            # BUG this part is not working and is not used, B.1 must be resolved
+            self.real_path = self.real_path + ["DEFAULT_ROTATION"] + next_real_path
+
         self.distance = distance
 
-    def step_distance(self, from_id: int, to_id: int, world: World) -> int:
+    def step_distance(
+        self, from_id: int, to_id: int, world: World
+    ) -> tuple[int, list[str]]:
+        global hits, misses
         if (from_id, to_id) in steps_distance_cashed:
-            return steps_distance_cashed[(from_id, to_id)]
+            hits += 1
+            distance = steps_distance_cashed[(from_id, to_id)].distance
+            sub_path = steps_distance_cashed[(from_id, to_id)].sub_path
+            return distance, sub_path
+        misses += 1
         path_searcher = RotateAndGoAStar(
             world,
-            self.getPosition(from_id, world.dustList, world.doc_station),
-            self.getPosition(to_id, world.dustList, world.doc_station),
+            self.getPosition(from_id, world.dustList),
+            self.getPosition(to_id, world.dustList),
         )
         path_searcher.search()
-        number_of_go = path_searcher.number_of_moves_forward()
-        steps_distance_cashed[(from_id, to_id)] = path_searcher.cost
-        steps_distance_cashed[(to_id, from_id)] = path_searcher.cost
-        return path_searcher.cost
-
-    def getPosition(self, number: int, dusts: list[Entity], station: Entity) -> State:
+        steps_distance_cashed[(from_id, to_id)] = Cashed_sub_path(
+            path_searcher.actions, path_searcher.cost
+        )
+
+        # BUG B.1 inverse path
+        inverse_sub_path = path_searcher.actions.copy()
+        steps_distance_cashed[(to_id, from_id)] = Cashed_sub_path(
+            inverse_sub_path, path_searcher.cost
+        )
+        return path_searcher.cost, path_searcher.actions
+
+    @staticmethod
+    def inverse_sub_path(sub_path: list[str]) -> list[str]:
+        # Intended for BUG B.1 (not wired in yet): reverse the command order
+        # and swap the rotation directions.
+        swapped = {"RL": "RR", "RR": "RL"}
+        return [swapped.get(command, command) for command in reversed(sub_path)]
+
+    def getPosition(
+        self,
+        number: int,
+        dustList: list[Entity],
+    ) -> State:
         if number == -1:
-            return State(station.x, station.y)
+            dock_start_x, dock_start_y = config.get(
+                "CONSTANT", "DockStationStartPosition"
+            ).split(",")
+            dock_start_x, dock_start_y = int(dock_start_x), int(dock_start_y)
-        return State(dusts[number].x, dusts[number].y)
+            return State(dock_start_x, dock_start_y)
+
+        if number == -2:
+            vacuum_start_x, vacuum_start_y = config.get(
+                "CONSTANT", "RobotStartPosition"
+            ).split(",")
+
+            vacuum_start_x, vacuum_start_y = int(vacuum_start_x), int(vacuum_start_y)
+            return State(vacuum_start_x, vacuum_start_y)
+
+        return State(dustList[number].x, dustList[number].y)
+
+    def get_real_path(self, world: World):
+        full_path = []
+
+        for index_place in range(len(self.walk) - 1):
+            path_searcher = RotateAndGoAStar(
+                world,
+                self.getPosition(self.walk[index_place], world.dustList),
+                self.getPosition(self.walk[index_place + 1], world.dustList),
+            )
+            path_searcher.search()
+            full_path = full_path + ["DEFAULT_ROTATION"] + path_searcher.actions
+
+        self.real_path = full_path
+        return full_path
 
 
 def generate_random_permutation(n):
@@ -63,12 +144,104 @@ def generate_random_permutation(n):
     return numbers
 
 
-def addStopsForStopStation(permutation: list[int], bananaFilling: int):
+# BUG solution: inverse direction at the last step
+def addStartAndStation(permutation: list[int], bananaFilling: int):
     frequency = math.ceil(100 / bananaFilling)
     numer_of_stops = math.ceil(len(permutation) / frequency)
+    walk = permutation.copy()
     for i in range(1, numer_of_stops):
-        permutation.insert((frequency + 1) * i - 1, -1)
-    permutation.insert(len(permutation), -1)
+        walk.insert((frequency + 1) * i - 1, -1)
+    walk.insert(len(walk), -1)
+    walk.insert(0, -2)
 
-    return permutation
+    return walk
+
+
+class GeneticAlgorytm:
+    def __init__(self, world: World):
+        self.world = world
+        self.population_size = config.getint("GENETIC_ALGORITHM", "PopulationSize")
+        self.mutation_probability = config.getfloat(
+            "GENETIC_ALGORITHM", "MutationProbability"
+        )
+        self.iteration_number = config.getint("GENETIC_ALGORITHM", "IterationNumber")
+        self.descendants_number = config.getint(
+            "GENETIC_ALGORITHM", "DescendantsNumber"
+        )
+        self.dusts = world.dustList
+        self.doc_station = world.doc_station
+        self.paths: list[Path] = []
+        self.checked_permutations = {}
+
+        self.best_path = None
+        self.best_distance = math.inf
+        self.best_real_path = []
+
+    def generate_population(self):
+        for i in range(self.population_size):
+            path = Path()
+            path.random_walk(self.dusts)
+            self.checked_permutations[tuple(path.permutation)] = True
+            path.calculate_distance(self.world)
+            self.paths.append(path)
+
+    def evaluate_population(self):
+        self.paths.sort(key=lambda x: x.distance, reverse=False)
+
+        self.best_distance = self.paths[0].distance
+        self.best_path = self.paths[0]
+
+        for path in self.paths[self.population_size :]:
+            del self.checked_permutations[tuple(path.permutation)]
+
+        self.paths = self.paths[: self.population_size]
+
+    def create_child(self, parent1: Path, parent2: Path) -> Path:
+        child = Path()
+
+        child.permutation = parent1.permutation[: len(parent1.permutation) // 2]
+
+        # Add missing items from parent2 in the order they appear
+        for item in parent2.permutation:
+            if item not in child.permutation:
+                child.permutation.append(item)
+
+        child.walk = addStartAndStation(
+            child.permutation, config.getint("CONSTANT", "BananaFilling")
+        )
+
+        child.calculate_distance(self.world)
+
+        return child
+
+    def run(self):
+        self.generate_population()
+
+        for i in range(self.iteration_number):
+            self.crossover()
+            # self.mutate()
+
+        self.evaluate_population()
+        self.best_real_path = self.paths[0].get_real_path(self.world)
+
+        print(hits, (misses + hits))
+
+        print(hits / (misses + hits))
+
+    def crossover(self):
+        for i in range(self.descendants_number):
+            parent1 = self.paths[random.randint(0, self.population_size - 1)]
+
+            parent2 = self.paths[random.randint(0, self.population_size - 1)]
+
+            child = self.create_child(parent1, parent2)
+            while tuple(child.permutation) in self.checked_permutations:
+                parent1 = self.paths[random.randint(0, self.population_size - 1)]
+                parent2 = self.paths[random.randint(0, self.population_size - 1)]
+                child = self.create_child(parent1, parent2)
+
+            self.checked_permutations[tuple(child.permutation)] = True
+            self.paths.append(child)
+
+        self.evaluate_population()
diff --git a/config.ini b/config.ini
index 5fdee30..366c466 100644
--- a/config.ini
+++ b/config.ini
@@ -8,10 +8,19 @@ NumberOfBananas = 5
 NumberOfEarrings = 3
 NumberOfPlants = 5
 BananaFilling = 25
+RobotStartPosition = 1, 1
+DockStationStartPosition = 9, 8
+#9,8
 
 [NEURAL_NETWORK]
 is_neural_network_off = True
 
 [AI_BRAIN]
 mode = full_clean
-#accept: full_clean, to_station
\ No newline at end of file
+#accept: full_clean, to_station
+
+[GENETIC_ALGORITHM]
+PopulationSize = 20
+DescendantsNumber = 5
+MutationProbability = 0.1
+IterationNumber = 100
diff --git a/domain/world.py b/domain/world.py
index cfd9f78..40d3cf0 100644
--- a/domain/world.py
+++ b/domain/world.py
@@ -8,7 +8,7 @@ class World:
         self.width = width
         self.height = height
         self.dust = [[[] for j in range(height)] for i in range(width)]
-        self.dustList = []
+        self.dustList: list[Entity] = []
         self.obstacles = [[[] for j in range(height)] for i in range(width)]
         self.entity = [[[] for j in range(height)] for i in range(width)]
diff --git a/main.py b/main.py
index b4f2341..44ff3db 100644
--- a/main.py
+++ b/main.py
@@ -16,7 +16,7 @@ from domain.entities.earring import Earring
 from domain.entities.docking_station import Doc_Station
 from domain.world import World
 from view.renderer import Renderer
-from AI_brain.genetic_algorytm import Path
+from AI_brain.genetic_algorytm import GeneticAlgorytm, Path
 
 if not config.getboolean("NEURAL_NETWORK", "is_neural_network_off"):
     from AI_brain.image_recognition import VacuumRecognizer
@@ -53,9 +53,6 @@ class Main:
     def run_robot(self):
         self.renderer.render(self.world)
 
-        # path_searcher = GoAnyDirectionBFS(self.world, start_state, end_state)
-        # path_searcher = RotateAndGoBFS(self.world, start_state, end_state)
-
         if config["AI_BRAIN"]["mode"] == "to_station":
             start_state = State(self.world.vacuum.x, self.world.vacuum.y)
             end_state = State(self.world.doc_station.x, self.world.doc_station.y)
@@ -67,13 +64,20 @@
                 exit(0)
             print(path_searcher.actions)
             print(path_searcher.cost)
+            robot_actions = path_searcher.actions
+
         elif config["AI_BRAIN"]["mode"] == "full_clean":
-            x = Path()
-            x.random_walk(self.world.dustList)
-            x.calculate_distance(self.world)
-            print(x.walk)
-            print(x.distance)
-            exit(0)
+            genetic_searcher = GeneticAlgorytm(self.world)
+            genetic_searcher.run()
+
+            print(
+                str(genetic_searcher.best_path.walk)
+                + ": "
+                + str(genetic_searcher.best_distance)
+            )
+
+            robot_actions = genetic_searcher.best_path.real_path
+
         else:
             print("Wrong mode")
             exit(0)
@@ -83,8 +87,8 @@
             if event.type == pygame.QUIT:
                 self.running = False
 
-            if len(path_searcher.actions) > 0:
-                action_direction = path_searcher.actions.pop(0)
+            if len(robot_actions) > 0:
+                action_direction = robot_actions.pop(0)
 
                 # self.handle_action1(action_direction)
                 self.handle_action2(action_direction)
@@ -129,6 +133,8 @@
                 self.world.vacuum.direction[1],
                 -self.world.vacuum.direction[0],
             )
+        elif action == "DEFAULT_ROTATION":
+            self.world.vacuum.direction = (1, 0)
 
     def process_input(self):
         for event in pygame.event.get():
@@ -163,8 +169,14 @@
 def generate_world(tiles_x: int, tiles_y: int) -> World:
     if config.getboolean("NEURAL_NETWORK", "is_neural_network_off"):
         world = World(tiles_x, tiles_y)
-        world.vacuum = Vacuum(1, 1)
-        world.doc_station = Doc_Station(9, 8)
+
+        x, y = config.get("CONSTANT", "RobotStartPosition").split(",")
+        x, y = int(x), int(y)
+        world.vacuum = Vacuum(x, y)
+
+        x, y = config.get("CONSTANT", "DockStationStartPosition").split(",")
+        x, y = int(x), int(y)
+        world.doc_station = Doc_Station(x, y)
         if config.getboolean("APP", "cat"):
             world.cat = Cat(7, 8)
             world.add_entity(world.cat)
@@ -176,6 +188,8 @@
         world.add_entity(Entity(3, 4, "PLANT2"))
         world.add_entity(Entity(8, 8, "PLANT2"))
        world.add_entity(Entity(9, 3, "PLANT3"))
+
+        numberOfEarrings = config.getint("CONSTANT", "NumberOfEarrings")
        world.add_entity(Earring(9, 7))
        world.add_entity(Earring(5, 5))
        world.add_entity(Earring(4, 6))
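
Note: run() above calls self.crossover() but leaves "# self.mutate()" commented out, even though MutationProbability is read from the new [GENETIC_ALGORITHM] section and stored as self.mutation_probability. A minimal sketch of such an operator is shown below, assuming a simple swap mutation on Path.permutation; the method name mutate and its wiring are illustrative assumptions, not part of this patch, and a full version would also have to keep checked_permutations in sync.

    # Hypothetical swap mutation for GeneticAlgorytm (illustration only).
    def mutate(self):
        for path in self.paths:
            # Mutate each path with probability self.mutation_probability.
            if random.random() >= self.mutation_probability:
                continue
            if len(path.permutation) < 2:
                continue
            i, j = random.sample(range(len(path.permutation)), 2)
            path.permutation[i], path.permutation[j] = (
                path.permutation[j],
                path.permutation[i],
            )
            path.walk = addStartAndStation(
                path.permutation, config.getint("CONSTANT", "BananaFilling")
            )
            path.calculate_distance(self.world)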
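
Note: the "x, y" parsing of RobotStartPosition and DockStationStartPosition is now duplicated in Path.getPosition and in main.generate_world. A small shared helper could remove that duplication; parse_position below is only a sketch, and its name and location are assumptions rather than part of this diff.

    def parse_position(raw: str) -> tuple[int, int]:
        # Turn a config value such as "9, 8" into an (x, y) tuple of ints.
        x_str, y_str = raw.split(",")
        return int(x_str), int(y_str)

    # Possible usage in generate_world:
    # x, y = parse_position(config.get("CONSTANT", "RobotStartPosition"))
    # world.vacuum = Vacuum(x, y)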