Implement A*
This commit is contained in:
parent a8814a763b
commit 2698b33a99
33 main.py
@@ -174,13 +174,29 @@ def main_fields_tests():
         pygame.display.flip()
 # endregion
 
 
+cost_map = {}
+
+def generate_cost_map():
+    adult_animal_cost = 10
+    baby_animal_cost = 5
+
+    for animal in Animals:
+        if animal.adult:
+            cost_map[(animal.x + 1, animal.y + 1)] = baby_animal_cost
+            cost_map[(animal.x + 1, animal.y)] = baby_animal_cost
+            cost_map[(animal.x, animal.y + 1)] = baby_animal_cost
+            cost_map[(animal.x, animal.y)] = adult_animal_cost
+        else:
+            cost_map[(animal.x, animal.y)] = baby_animal_cost
+
+    # Other fields with a different cost
+    # cost_map[(x, y)] = cost_value
+
+
 # region Main Code
 def main():
     initial_state = (0,0,'S')
     agent = Agent(initial_state, 'images/agent1.png', GRID_SIZE)
 
     obstacles = generate_obstacles()
 
     actions = []
     clock = pygame.time.Clock()
@@ -199,6 +215,7 @@ def main():
         draw_gates()
         if not spawned:
             spawn_all_animals()
+            generate_cost_map()
             for animal in Animals:
                 animal._feed = 2  # Make the animal hungry
             spawned = True
@@ -213,13 +230,21 @@ def main():
             pygame.time.wait(200)
         else:
             animal = random.choice(Animals)
-            actions = graphsearch(agent.istate, (animal.x, animal.y), GRID_WIDTH, GRID_HEIGHT, obstacles)
+            goal = (animal.x, animal.y)
+
+            # --- Mark the goal ---
+            pygame.draw.rect(screen, (255, 0, 0), (animal.x * GRID_SIZE, animal.y * GRID_SIZE, GRID_SIZE, GRID_SIZE))
+            pygame.display.flip()
+            pygame.time.delay(2000)
+            # ------------------------
+
+            actions = graphsearch(agent.istate, goal, GRID_WIDTH, GRID_HEIGHT, obstacles, cost_map)
 # endregion
 
 
 if __name__ == "__main__":
-    debug_mode = False  # If True, show the available fields
+    DEBUG_MODE = False  # If True, show the available fields
 
-    if debug_mode:
+    if DEBUG_MODE:
         main_fields_tests()
     else:
         main()
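For reference, a minimal standalone sketch of the cost-map idea used above. The Animal dataclass, coordinates and the reading of the four assignments as a 2x2 "adult footprint" are illustrative assumptions, not the project's actual classes:

# Standalone cost-map sketch; names and values here are assumptions.
from dataclasses import dataclass

@dataclass
class Animal:
    x: int
    y: int
    adult: bool

ADULT_COST, BABY_COST = 10, 5

def build_cost_map(animals):
    cost_map = {}
    for a in animals:
        if a.adult:
            # the anchor tile is the most expensive, the three
            # neighbouring tiles of the footprint get the lower cost
            cost_map[(a.x, a.y)] = ADULT_COST
            for dx, dy in [(1, 0), (0, 1), (1, 1)]:
                cost_map[(a.x + dx, a.y + dy)] = BABY_COST
        else:
            cost_map[(a.x, a.y)] = BABY_COST
    return cost_map

print(build_cost_map([Animal(2, 3, True), Animal(5, 5, False)]))
# {(2, 3): 10, (3, 3): 5, (2, 4): 5, (3, 4): 5, (5, 5): 5}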
@@ -1,3 +1,7 @@
+from queue import PriorityQueue
+
+DEFAULT_COST_VALUE = 1
+
 def is_border(x, y, max_x, max_y):
     return 0 <= x < max_x and 0 <= y < max_y
 
@@ -25,32 +29,52 @@ def succ(current_state, max_x, max_y, obstacles):
     return successors
 
 
-def graphsearch(istate, goal, max_x, max_y, obstacles):
-    fringe = [{"state": istate, "parent": None, "action": None}]
+def graphsearch(istate, goal, max_x, max_y, obstacles, cost_map):
+    fringe = PriorityQueue()
     explored = set()
 
-    while fringe:
-        elem = fringe.pop(0)
-        state = elem["state"]
+    fringe.put((0, (istate, None, None)))
+
+    while not fringe.empty():
+        _, node = fringe.get()
+        state, _, _ = node
 
         if goaltest(state, goal):
-            return build_action_sequence(elem)
+            return build_action_sequence(node)
 
         explored.add(state)
 
         successors = succ(state, max_x, max_y, obstacles)
 
         for new_state, action in successors:
-            if new_state not in fringe and new_state not in explored:
-                fringe.append({"state": new_state, "parent": elem, "action": action})
+            new_node = (new_state, node, action)
+
+            p_new_state = current_cost(node, cost_map) + heuristic(state, goal)
+
+            if not is_state_in_queue(new_state, fringe) and new_state not in explored:
+                fringe.put((p_new_state, new_node))
+
+            elif is_state_in_queue(new_state, fringe):
+                for i, (p_existing_state, (existing_state, _, _)) in enumerate(fringe.queue):
+                    if existing_state == new_state and p_existing_state > p_new_state:
+                        fringe.queue[i] = (p_new_state, new_node)
+                    else:
+                        break
+
+    return False
+
+
+def is_state_in_queue(state, queue):
+    for _, (s, _, _) in queue.queue:
+        if s == state:
+            return True
     return False
 
 
 def build_action_sequence(node):
     actions = []
-    while node["parent"]:
-        actions.append(node["action"])
-        node = node["parent"]
+    while node[1] is not None:  # until we reach the root node
+        _, parent, action = node
+        actions.append(action)
+        node = parent
     actions.reverse()
     return actions
 
@@ -58,3 +82,19 @@ def goaltest(state, goal):
     x, y, _ = state
     goal_x, goal_y = goal
     return (x,y) == (goal_x, goal_y)
+
+def current_cost(node, cost_map):
+    cost = 0
+    while node[1] is not None:  # until we reach the root node
+        _, parent, action = node
+        # add the field's cost from the cost map only when the action is 'Go Forward'
+        if action == 'Go Forward':
+            state, _, _ = node
+            cost += cost_map.get(state[:2], DEFAULT_COST_VALUE)  # cost of crossing this field, or the default value if none is defined
+        node = parent  # move on to the parent
+    return cost
+
+def heuristic(state, goal):
+    x, y, _ = state
+    goal_x, goal_y = goal
+    return abs(x - goal_x) + abs(y - goal_y)  # Manhattan distance to the goal
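For reference, a self-contained sketch of the same f = g + h priority ordering on a plain 4-connected grid, using heapq instead of the module's PriorityQueue-based fringe. Grid size, obstacle set, cost values and helper names are illustrative assumptions, not the repo's API:

# Standalone A* sketch: nodes are ordered by f(n) = g(n) + h(n),
# with h(n) the Manhattan distance, as in the diff above.
import heapq

def manhattan(a, b):
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def astar(start, goal, width, height, obstacles, cost_map, default_cost=1):
    # fringe entries: (f, g, state, path so far)
    fringe = [(manhattan(start, goal), 0, start, [])]
    explored = set()
    while fringe:
        _, g, state, path = heapq.heappop(fringe)
        if state == goal:
            return path
        if state in explored:
            continue
        explored.add(state)
        x, y = state
        for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
            nxt = (x + dx, y + dy)
            if 0 <= nxt[0] < width and 0 <= nxt[1] < height and nxt not in obstacles:
                new_g = g + cost_map.get(nxt, default_cost)  # step cost from the cost map
                heapq.heappush(fringe, (new_g + manhattan(nxt, goal), new_g, nxt, path + [nxt]))
    return None

print(astar((0, 0), (3, 3), 5, 5, {(1, 1), (2, 2)}, {(3, 2): 10}))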