import queue
from typing import List, Optional

from data.Direction import Direction
from data.GameConstants import GameConstants
from decision.ActionType import ActionType
from util.PathDefinitions import GridLocation
from util.PriorityQueue import PriorityQueue


class PathFinderState:
    """Snapshot of the agent during path search.

    Holds the agent's grid position, facing direction, the accumulated cost of
    reaching this state, the last action performed, and the full sequence of
    actions taken from the root state.
    """

    def __init__(self, agent_position: GridLocation, agent_direction: Direction, cost: float):
        super().__init__()
        self.agent_position = agent_position
        self.agent_direction = agent_direction
        self.cost = cost
        # BUG FIX: the originals were bare annotations ("self.last_action: ActionType"),
        # which declare a type but never create the attribute — any later read
        # would raise AttributeError. Initialize them explicitly.
        self.last_action: Optional[ActionType] = None
        self.action_taken: List[ActionType] = []

    def getActionTaken(self) -> List[ActionType]:
        """Return the sequence of actions that produced this state."""
        # BUG FIX: the original returned self.getActionTaken() — unconditional
        # infinite recursion. It was clearly meant to return the attribute.
        return self.action_taken


class PathFinderOnStates:
    """Best-first search over PathFinderState objects toward a goal cell.

    States are expanded in order of cost using a priority queue seeded with the
    root state. heuristic/evaluate/expansion are left as stubs to be filled in.
    """

    def __init__(self, game_constants: GameConstants, goal: GridLocation, root_state: PathFinderState):
        super().__init__()
        self.game_constants = game_constants
        self.goal = goal
        self.queue = PriorityQueue()
        self.queue.put(root_state, root_state.cost)

    def heuristic(self) -> float:
        # A heuristic can be used here, e.g. Manhattan distance (modified,
        # because turns change the relative cost of moves).
        pass

    def evaluate(self, state: PathFinderState) -> float:
        # Cost of reaching the given state.
        pass

    def expansion(self, state: PathFinderState) -> List[PathFinderState]:
        # For a state, determine which actions (ActionType) can be taken
        # from this position.
        pass

    def getActionList(self) -> List[ActionType]:
        """Run the search and return the action sequence of the best state found."""
        best_state: PathFinderState = self.queue.get()

        # BUG FIX: the original condition was
        #   "!= self.goal or self.queue.empty()"
        # — when the queue was empty the loop CONTINUED and the next get()
        # blocked forever. Search must continue only while the goal is not
        # reached AND unexplored states remain.
        while best_state.agent_position != self.goal and not self.queue.empty():
            # TODO: push successor states from expansion() here (ordered by cost).
            best_state = self.queue.get()

        return best_state.getActionTaken()