import heapq
from queue import PriorityQueue

DEFAULT_COST_VALUE = 1


def is_border(x, y, max_x, max_y):
    # True when (x, y) lies inside the grid bounds
    return 0 <= x < max_x and 0 <= y < max_y


def is_obstacle(x, y, obstacles):
    return (x, y) in obstacles


def succ(current_state, max_x, max_y, obstacles):
    # Generate the successors of (x, y, direction) together with the action that reaches them
    successors = []
    x, y, direction = current_state

    # Action: move forward
    direction_x, direction_y = {'N': (0, -1), 'E': (1, 0), 'S': (0, 1), 'W': (-1, 0)}[direction]  # offset for each facing direction
    new_x, new_y = x + direction_x, y + direction_y
    if is_border(new_x, new_y, max_x, max_y) and not is_obstacle(new_x, new_y, obstacles):
        successors.append(((new_x, new_y, direction), 'Go Forward'))

    # Action: turn left
    left_turns = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}  # direction after a left turn
    successors.append(((x, y, left_turns[direction]), 'Turn Left'))

    # Action: turn right
    right_turns = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}  # direction after a right turn
    successors.append(((x, y, right_turns[direction]), 'Turn Right'))

    return successors


def graphsearch(istate, goal, max_x, max_y, obstacles, cost_map):
    # A* graph search over (x, y, direction) states; returns the action sequence or False
    fringe = PriorityQueue()
    explored = set()
    fringe.put((0, (istate, None, None)))  # a node is (state, parent_node, action)

    while not fringe.empty():
        _, node = fringe.get()
        state, _, _ = node

        if goaltest(state, goal):
            return build_action_sequence(node)

        explored.add(state)

        for new_state, action in succ(state, max_x, max_y, obstacles):
            new_node = (new_state, node, action)
            # f(n) = g(n) + h(n): accumulated path cost of the new node plus the heuristic of its state
            p_new_state = current_cost(new_node, cost_map) + heuristic(new_state, goal)

            if not is_state_in_queue(new_state, fringe) and new_state not in explored:
                fringe.put((p_new_state, new_node))
            elif is_state_in_queue(new_state, fringe):
                # Decrease-key: replace the queued entry when the new path is cheaper
                for i, (p_existing_state, (existing_state, _, _)) in enumerate(fringe.queue):
                    if existing_state == new_state:
                        if p_existing_state > p_new_state:
                            fringe.queue[i] = (p_new_state, new_node)
                            heapq.heapify(fringe.queue)  # restore heap order after the in-place update
                        break

    return False


def is_state_in_queue(state, queue):
    for _, (s, _, _) in queue.queue:
        if s == state:
            return True
    return False


def build_action_sequence(node):
    actions = []
    while node[1] is not None:  # until we reach the root node
        _, parent, action = node
        actions.append(action)
        node = parent
    actions.reverse()
    return actions


def goaltest(state, goal):
    x, y, _ = state
    goal_x, goal_y = goal
    return (x, y) == (goal_x, goal_y)


def current_cost(node, cost_map):
    # Walk back to the root node, summing the cost of every action along the path
    cost = 0
    while node[1] is not None:  # until we reach the root node
        _, parent, action = node
        # Add the field cost from the cost map only when the action is 'Go Forward'
        if action == 'Go Forward':
            state, _, _ = node
            cost += cost_map.get(state[:2], DEFAULT_COST_VALUE)  # cost of entering the cell, or the default when undefined
        if action == 'Turn Right' or action == 'Turn Left':
            cost += DEFAULT_COST_VALUE
        node = parent  # move to the parent node
    return cost


def heuristic(state, goal):
    x, y, _ = state
    goal_x, goal_y = goal
    return abs(x - goal_x) + abs(y - goal_y)  # Manhattan distance to the goal


def generate_cost_map(Animals, Terrain_Obstacles, cost_map=None):
    if cost_map is None:  # avoid sharing a mutable default argument between calls
        cost_map = {}

    adult_animal_cost = 15  # Default: 15
    baby_animal_cost = 10   # Default: 10
    puddle_cost = 50        # Default: 50
    bush_cost = 20          # Default: 20

    for animal in Animals:
        if animal.adult:
            # cost_map[(animal.x + 1, animal.y + 1)] = adult_animal_cost
            # cost_map[(animal.x + 1, animal.y)] = adult_animal_cost
            # cost_map[(animal.x, animal.y + 1)] = adult_animal_cost
            cost_map[(animal.x, animal.y)] = adult_animal_cost
        else:
            cost_map[(animal.x, animal.y)] = baby_animal_cost

    for terrain_obstacle in Terrain_Obstacles:
        if terrain_obstacle.type == 'puddle':
            cost_map[(terrain_obstacle.x, terrain_obstacle.y)] = puddle_cost
        else:
            cost_map[(terrain_obstacle.x, terrain_obstacle.y)] = bush_cost

    return cost_map
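

# --- Usage sketch (illustrative only, not part of the original module) ---
# Minimal example of how graphsearch can be called. The grid size, obstacle set,
# cost map, start state and goal below are made-up values chosen just to show the
# calling convention; in the project, cost_map would normally come from
# generate_cost_map with real Animals / Terrain_Obstacles objects.
if __name__ == '__main__':
    obstacles = {(2, 2), (3, 2)}              # cells the agent may never enter
    cost_map = {(1, 1): 20, (4, 3): 50}       # extra traversal cost for selected cells
    start_state = (0, 0, 'E')                 # (x, y, facing direction)
    goal = (5, 4)

    actions = graphsearch(start_state, goal, max_x=6, max_y=6,
                          obstacles=obstacles, cost_map=cost_map)
    print(actions)  # e.g. ['Go Forward', 'Turn Right', ...] or False if the goal is unreachable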