diff --git a/.gitignore b/.gitignore
index c151be6..0a858ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -50,7 +50,7 @@ MANIFEST
 pip-log.txt
 pip-delete-this-directory.txt

-# Unit test / coverage reports
+# Unit pathfinding / coverage reports
 htmlcov/
 .tox/
 .nox/
diff --git a/GameModel.py b/GameModel.py
index f1574e9..7b9b683 100644
--- a/GameModel.py
+++ b/GameModel.py
@@ -1,16 +1,18 @@
 from typing import List

-from mesa import Model, Agent
+from mesa import Model
 from mesa.space import MultiGrid
 from mesa.time import RandomActivation

 from AgentBase import AgentBase
 from ForkliftAgent import ForkliftAgent
+from InitialStateFactory import InitialStateFactory
 from PatchAgent import PatchAgent
 from PatchType import PatchType
-from util.PathDefinitions import inverse_y, GridLocation, GridWithWeights
-from util.PathVisualiser import draw_grid, reconstruct_path
-from util.Pathfinder import a_star_search
+from data.GameConstants import GameConstants
+from decision.ActionType import ActionType
+from pathfinding.dlaMarcina import PathFinderOnStates, PathFinderState
+from util.PathDefinitions import GridLocation, GridWithWeights


 class GameModel(Model):
@@ -26,13 +28,29 @@ class GameModel(Model):
         self.schedule.add(self.forklift_agent)
         self.agents.append(self.forklift_agent)

+        initial_state_factory = InitialStateFactory()
+
+        self.game_constants = GameConstants(
+            width,
+            height,
+            graph.walls
+        )
+
         # Add the agent to a random grid cell
         x = 5
         y = 5
         self.grid.place_agent(self.forklift_agent, (x, y))
         self.forklift_agent.current_position = (x, y)

-        # start, goal = (x, y), (2, 1)
+        start, goal = (x, y), (8, 8)
+
+        pathFinder = PathFinderOnStates(
+            self.game_constants,
+            goal,
+            PathFinderState(self.forklift_agent.current_position, self.forklift_agent.current_rotation, 0,
+                            ActionType.NONE, [])
+        )
+
         # came_from, cost_so_far = a_star_search(graph, start, goal)
         # draw_grid(graph, point_to=came_from, start=start, goal=goal)
         #
@@ -47,6 +65,10 @@ class GameModel(Model):
         self.place_patch_agents()
         self.place_walls_agents(graph.walls)

+        actions = pathFinder.getActionList()
+        print("PATHFINDING")
+        print(actions)
+
     def place_patch_agents(self):
         agent = PatchAgent(self, PatchType.pickUp)
         self.schedule.add(agent)
diff --git a/data/GameConstants.py b/data/GameConstants.py
index bdfd898..c22f08f 100644
--- a/data/GameConstants.py
+++ b/data/GameConstants.py
@@ -5,11 +5,18 @@ from util.PathDefinitions import GridLocation


 class GameConstants:
-    def __init__(self, grid_width: int, grid_height: int, delivery_pos: GridLocation, order_pos: GridLocation,
-                 special_positions: Dict[ItemType, GridLocation], walls: [GridLocation]):
+    def __init__(
+            self,
+            grid_width: int,
+            grid_height: int,
+            # delivery_pos: GridLocation,
+            # order_pos: GridLocation,
+            # special_positions: Dict[ItemType, GridLocation],
+            walls: [GridLocation]
+    ):
         self.grid_width = grid_width
         self.grid_height = grid_height
-        self.delivery_pos = delivery_pos
-        self.order_pos = order_pos
-        self.special_positions = special_positions
+        # self.delivery_pos = delivery_pos
+        # self.order_pos = order_pos
+        # self.special_positions = special_positions
         self.walls = walls
diff --git a/main.py b/main.py
index cd5d0a8..1c270db 100644
--- a/main.py
+++ b/main.py
@@ -64,5 +64,6 @@ if __name__ == '__main__':
                            [grid],
                            "Automatyczny Wózek Widłowy",
                            {"width": gridHeight, "height": gridWidth, "graph": diagram4})

+    server.port = 8888
     server.launch()
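
For reference, the new wiring in GameModel.__init__ above reduces to the short standalone sketch below. This is a minimal sketch rather than part of the commit: the constructor signatures come from the hunks in this diff, while the GridWithWeights(width, height) call and its walls attribute are assumed to behave as they do in util.PathDefinitions and main.py, and Direction.top stands in for the forklift's current_rotation.

# Minimal sketch (see assumptions above): build the constants, seed the
# pathfinder with a root state, then ask it for a plan.
from data.Direction import Direction
from data.GameConstants import GameConstants
from decision.ActionType import ActionType
from pathfinding.dlaMarcina import PathFinderOnStates, PathFinderState
from util.PathDefinitions import GridWithWeights

graph = GridWithWeights(10, 10)                      # assumed signature, as used in main.py
game_constants = GameConstants(10, 10, graph.walls)  # new (width, height, walls) signature

root = PathFinderState((5, 5), Direction.top, 0, ActionType.NONE, [])
path_finder = PathFinderOnStates(game_constants, (8, 8), root)

print(path_finder.getActionList())                   # planned ActionType sequence, printed as GameModel does
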
diff --git a/test/dlaMarcina.py b/pathfinding/dlaMarcina.py
similarity index 67%
rename from test/dlaMarcina.py
rename to pathfinding/dlaMarcina.py
index a96e218..4d51508 100644
--- a/test/dlaMarcina.py
+++ b/pathfinding/dlaMarcina.py
@@ -1,13 +1,18 @@
-import queue
-from typing import List
-from typing import Optional, Dict, Tuple
+from dataclasses import dataclass, field
+from typing import List, Any
+from typing import Tuple

 from data.Direction import Direction
 from data.GameConstants import GameConstants
 from decision.ActionType import ActionType
 from util.PathDefinitions import GridLocation
 from util.PriorityQueue import PriorityQueue
-from decision.StateTree import StateTree
+
+
+@dataclass(order=True)
+class PrioritizedItem:
+    priority: float
+    item: Any = field(compare=False)


 class PathFinderState:
@@ -21,14 +26,6 @@ class PathFinderState:
         self.last_action = last_action
         self.action_taken = action_taken

-    def getActionTaken(self):
-        return self.getActionTaken()
-
-
-#
-#
-#
-

 class PathFinderOnStates:
     def __init__(self, game_constants: GameConstants, goal: GridLocation, root_state: PathFinderState):
@@ -36,7 +33,7 @@ class PathFinderOnStates:
         self.game_constants = game_constants
         self.goal = goal
         self.queue = PriorityQueue()
-        self.queue.put(root_state, root_state.cost)
+        self.queue.put(PrioritizedItem(root_state.cost, root_state), root_state.cost)

     def heuristic(self, a: Tuple[int, int], b: Tuple[int, int]) -> float:
         # tutaj mozna uzyc heury np. manhatan distance (zmodyfikowany bo masz obroty a to zmienia oplacalnosc)
@@ -49,20 +46,17 @@ class PathFinderOnStates:
         return currState.cost + self.heuristic(currState.agent_position, self.goal)

     def getPositionAfterMove(self, currState: PathFinderState) -> GridLocation:
-        result: GridLocation = None
-        if currState.agent_position == Direction.top:
-            currState.agent_position[1] += 1
-            result = currState.agent_position
-        elif currState.agent_position == Direction.down:
-            currState.agent_position[1] -= 1
-            result = currState.agent_position
-        elif currState.agent_position == Direction.right:
-            currState.agent_position[0] += 1
-            result = currState.agent_position
-        elif currState.agent_position == Direction.left:
-            currState.agent_position[0] -= 1
-            result = currState.agent_position
-        return result
+        if currState.agent_direction == Direction.top:
+            return currState.agent_position[0], currState.agent_position[1] + 1
+
+        elif currState.agent_direction == Direction.down:
+            return currState.agent_position[0], currState.agent_position[1] - 1
+
+        elif currState.agent_direction == Direction.right:
+            return currState.agent_position[0] + 1, currState.agent_position[1]
+
+        elif currState.agent_direction == Direction.left:
+            return currState.agent_position[0] - 1, currState.agent_position[1]

     def isMovePossible(self, currState: PathFinderState) -> bool:
         positionAfterMove = self.getPositionAfterMove(currState)
@@ -80,6 +74,7 @@ class PathFinderOnStates:
         last_action = action
         action_taken: List[ActionType] = []
         action_taken.extend(currState.action_taken)
+        action_taken.append(last_action)
         agent_position = currState.agent_position
         agent_direction = currState.agent_direction

@@ -129,35 +124,45 @@ class PathFinderOnStates:
         return possibleNextStates

     def getActionList(self) -> List[ActionType]:
-        best_state: PathFinderState = self.queue.get()
+        already_visited = {}
+
+        while not self.queue.empty():
+            item: PrioritizedItem = self.queue.get()
+            best_state: PathFinderState = item.item
+
+            if best_state.agent_position == self.goal:
+                break

-        while best_state.agent_position != self.goal and not self.queue.empty():
             # dodajesz do kolejki stany z expansion (po cost)
             for state in self.expansion(best_state):
-                self.queue.put(state, self.evaluate(state))
+                s_tuple = (state.agent_position[0], state.agent_position[1], state.agent_direction)

-            best_state = self.queue.get()
+                if s_tuple not in already_visited:
+                    priority = self.evaluate(state)
+                    self.queue.put(PrioritizedItem(priority, state), priority)
+                    already_visited[s_tuple] = state

-        return best_state.getActionTaken()
-# do kosztu dokładam koszt starego stanu plus 1
-# def a_star(self,stateTree:StateTree, start: Tuple[int, int], goal: Tuple[int, int]):
-#     frontier = PriorityQueue()
-#     frontier.put(start, 0)
-#     came_from = dict()
-#     cost_so_far = dict()
-#     came_from[start] = None
-#     cost_so_far[start] = 0
-#
-#     while not frontier.empty():
-#         current = frontier.get()
-#
-#         if current == goal:
-#             break
-#
-#         for next in graph.neighbors(current):
-#             new_cost = cost_so_far[current] + graph.cost(current, next)
-#             if next not in cost_so_far or new_cost < cost_so_far[next]:
-#                 cost_so_far[next] = new_cost
-#                 priority = new_cost + heuristic(goal, next)
-#                 frontier.put(next, priority)
-#                 came_from[next] = current
+        return best_state.action_taken
+
+    # do kosztu dokładam koszt starego stanu plus 1
+    # def a_star(self,stateTree:StateTree, start: Tuple[int, int], goal: Tuple[int, int]):
+    #     frontier = PriorityQueue()
+    #     frontier.put(start, 0)
+    #     came_from = dict()
+    #     cost_so_far = dict()
+    #     came_from[start] = None
+    #     cost_so_far[start] = 0
+    #
+    #     while not frontier.empty():
+    #         current = frontier.get()
+    #
+    #         if current == goal:
+    #             break
+    #
+    #         for next in graph.neighbors(current):
+    #             new_cost = cost_so_far[current] + graph.cost(current, next)
+    #             if next not in cost_so_far or new_cost < cost_so_far[next]:
+    #                 cost_so_far[next] = new_cost
+    #                 priority = new_cost + heuristic(goal, next)
+    #                 frontier.put(next, priority)
+    #                 came_from[next] = current
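
The PrioritizedItem wrapper introduced above matches the pattern shown in Python's heapq documentation: with order=True the dataclass is ordered by priority alone, and field(compare=False) keeps the payload out of comparisons, so two queue entries with equal priority never try to compare their PathFinderState objects (which define no ordering and would raise TypeError). Below is a minimal sketch of that behaviour, assuming util.PriorityQueue is a thin heapq wrapper in the style of the commented-out a_star above; the dictionaries stand in for states.

import heapq
from dataclasses import dataclass, field
from typing import Any


@dataclass(order=True)
class PrioritizedItem:
    priority: float
    item: Any = field(compare=False)  # payload never takes part in <, so priority ties are safe


heap = []
heapq.heappush(heap, PrioritizedItem(2.0, {"pos": (1, 0)}))
heapq.heappush(heap, PrioritizedItem(1.0, {"pos": (0, 1)}))
heapq.heappush(heap, PrioritizedItem(1.0, {"pos": (0, 2)}))  # equal priority: the dicts are never compared

print(heapq.heappop(heap).item)  # {'pos': (0, 1)}, the lowest priority value comes out first
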
diff --git a/test/nastepnik.py b/pathfinding/nastepnik.py
similarity index 100%
rename from test/nastepnik.py
rename to pathfinding/nastepnik.py
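
The Polish comments kept in pathfinding/dlaMarcina.py translate as follows: the note in heuristic suggests using e.g. a Manhattan-distance heuristic, adjusted because rotations change how cheap a path is; "dodajesz do kolejki stany z expansion (po cost)" means "you add the states from expansion to the queue (by cost)"; and "do kosztu dokładam koszt starego stanu plus 1" means "I add the previous state's cost plus 1", i.e. every action appears to cost one unit. Under that unit-cost assumption, a rotation-aware Manhattan estimate could look like the hypothetical helper below (an illustration, not part of this commit).

from typing import Tuple


def manhattan_with_turns(a: Tuple[int, int], b: Tuple[int, int]) -> float:
    """Lower bound on the number of unit-cost actions needed to get from a to b.

    dx + dy moves are always required, and whenever both axes differ at least
    one rotation is unavoidable regardless of the current facing, so adding one
    extra unit keeps the estimate admissible without tracking direction here.
    """
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx + dy + (1 if dx > 0 and dy > 0 else 0)


# Example: (5, 5) -> (8, 8) needs 3 + 3 moves and at least one turn, so the estimate is 7.
print(manhattan_with_turns((5, 5), (8, 8)))
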