# SI_InteligentnyWozekWidlowy/pathfinding/PathFinderOnStates.py
from typing import List
from typing import Tuple
from data.GameConstants import GameConstants
from data.enum.Direction import Direction
from decision.ActionType import ActionType
from pathfinding.PathFinderState import PathFinderState
from pathfinding.PrioritizedItem import PrioritizedItem
from util.PathDefinitions import GridLocation
from util.PriorityQueue import PriorityQueue
class PathFinderOnStates:
    """A*-style search over forklift states (grid position + facing direction).

    A state is expanded by either moving one cell forward or rotating in
    place.  Each ``PathFinderState`` carries the full action history, so the
    cheapest action sequence from the root state to ``goal`` is read directly
    off the first goal state popped from the priority queue.
    """

    def __init__(self, game_constants: GameConstants, goal: GridLocation, root_state: PathFinderState):
        super().__init__()
        self.game_constants = game_constants
        self.goal = goal
        self.queue = PriorityQueue()
        # Seed the frontier with the root state, prioritised by its own cost.
        self.queue.put(PrioritizedItem(root_state.cost, root_state), root_state.cost)

    def heuristic(self, a: Tuple[int, int], b: Tuple[int, int]) -> float:
        """Manhattan distance between grid cells ``a`` and ``b``.

        Admissible: a forward move costs at least 1 per cell, and rotations
        only add cost on top, so this never over-estimates the true cost.
        """
        (x1, y1) = a
        (x2, y2) = b
        return abs(x1 - x2) + abs(y1 - y2)

    def evaluate(self, curr_state: PathFinderState) -> float:
        """A* evaluation f(n) = g(n) (cost accumulated so far) + h(n) (heuristic to goal)."""
        return curr_state.cost + self.heuristic(curr_state.agent_position, self.goal)

    def get_position_after_move(self, curr_state: PathFinderState) -> GridLocation:
        """Return the cell one step ahead of the agent in its current facing.

        Falls through (returns ``None``) for an unrecognised direction, which
        callers treat the same as an impossible move.
        """
        if curr_state.agent_direction == Direction.top:
            return curr_state.agent_position[0], curr_state.agent_position[1] + 1
        elif curr_state.agent_direction == Direction.down:
            return curr_state.agent_position[0], curr_state.agent_position[1] - 1
        elif curr_state.agent_direction == Direction.right:
            return curr_state.agent_position[0] + 1, curr_state.agent_position[1]
        elif curr_state.agent_direction == Direction.left:
            return curr_state.agent_position[0] - 1, curr_state.agent_position[1]

    def is_move_possible(self, curr_state: PathFinderState) -> bool:
        """True when the cell ahead is inside the grid and not a wall."""
        position_after_move = self.get_position_after_move(curr_state)
        if position_after_move in self.game_constants.walls:
            return False
        # NOTE(review): `>` permits index == grid_width/grid_height; whether
        # that is one past the last cell or the last valid index depends on
        # the grid convention elsewhere in the project — confirm before
        # tightening to `>=`.
        elif position_after_move[0] < 0 or position_after_move[0] > self.game_constants.grid_width:
            return False
        elif position_after_move[1] < 0 or position_after_move[1] > self.game_constants.grid_height:
            return False
        else:
            return True

    def create_state(self, curr_state: PathFinderState, action: ActionType) -> PathFinderState:
        """Build the successor of ``curr_state`` after applying ``action``.

        Costs: a forward move adds 1, or 20 when the agent currently stands
        on difficult terrain (puddle); any rotation adds 10.
        """
        if action == ActionType.MOVE:
            if curr_state.agent_position in self.game_constants.diffTerrain:
                # Extra cost for moving off a puddle / difficult-terrain cell.
                cost = curr_state.cost + 20
            else:
                cost = curr_state.cost + 1
            # TODO: if distinct carrying states are ever considered, split the
            # cost per possible rotation as well.
        else:
            cost = curr_state.cost + 10

        # Copy the history so sibling successors do not share one list.
        action_taken: List[ActionType] = list(curr_state.action_taken)
        action_taken.append(action)

        agent_position = curr_state.agent_position
        agent_direction = curr_state.agent_direction
        if action == ActionType.ROTATE_UP:
            agent_direction = Direction.top
        elif action == ActionType.ROTATE_DOWN:
            agent_direction = Direction.down
        elif action == ActionType.ROTATE_LEFT:
            agent_direction = Direction.left
        elif action == ActionType.ROTATE_RIGHT:
            agent_direction = Direction.right
        elif action == ActionType.MOVE:
            agent_position = self.get_position_after_move(curr_state)

        return PathFinderState(agent_position, agent_direction, cost, action, action_taken)

    # Successor function
    def expansion(self, curr_state: PathFinderState) -> List[PathFinderState]:
        """Return every state reachable from ``curr_state`` with one action.

        A forward move is offered only when the target cell is passable; the
        three rotations away from the current facing are always offered, in
        the same order as the original per-direction branches.
        """
        rotations_by_direction = {
            Direction.top: (ActionType.ROTATE_RIGHT, ActionType.ROTATE_LEFT, ActionType.ROTATE_DOWN),
            Direction.down: (ActionType.ROTATE_RIGHT, ActionType.ROTATE_LEFT, ActionType.ROTATE_UP),
            Direction.left: (ActionType.ROTATE_RIGHT, ActionType.ROTATE_UP, ActionType.ROTATE_DOWN),
            Direction.right: (ActionType.ROTATE_UP, ActionType.ROTATE_LEFT, ActionType.ROTATE_DOWN),
        }

        possible_next_states: List[PathFinderState] = []
        if self.is_move_possible(curr_state):
            possible_next_states.append(self.create_state(curr_state, ActionType.MOVE))
        # Unknown directions yield no rotations, matching the original fall-through.
        for rotation in rotations_by_direction.get(curr_state.agent_direction, ()):
            possible_next_states.append(self.create_state(curr_state, rotation))
        return possible_next_states

    def get_action_list(self) -> List[ActionType]:
        """Run the search and return the cheapest action sequence found.

        Pops states in order of f = g + h.  Terminates when the popped state
        stands on the goal, or — when the goal itself is a wall (e.g. a shelf
        the agent can only stand next to) — on a cell adjacent to it.  If the
        frontier empties first, the actions of the last popped state are
        returned (best effort, possibly an incomplete path).
        """
        already_visited = {}
        while not self.queue.empty():
            item: PrioritizedItem = self.queue.get()
            best_state: PathFinderState = item.item

            goal_is_wall_and_adjacent = (self.heuristic(best_state.agent_position, self.goal) == 1
                                         and self.goal in self.game_constants.walls)
            if best_state.agent_position == self.goal or goal_is_wall_and_adjacent:
                break

            for state in self.expansion(best_state):
                # Key on (x, y, facing): the same cell reached with a different
                # orientation is a distinct search state.
                s_tuple = (state.agent_position[0], state.agent_position[1], state.agent_direction)
                if s_tuple not in already_visited:
                    priority = self.evaluate(state)
                    self.queue.put(PrioritizedItem(priority, state), priority)
                    already_visited[s_tuple] = state

        return best_state.action_taken