diff --git a/hero.py b/hero.py
index cbf26f5..3d1b6b6 100644
--- a/hero.py
+++ b/hero.py
@@ -1,4 +1,5 @@
 import random
+import heapq
 
 from mesa import Agent
 
@@ -6,6 +7,7 @@ from othercharacters import dice, Box, Creature, Armor, Weapon
 from actions import actions, actionsInterpreter
 from state import AgentState
 from direction import Direction
+from node import Node
 
 
 class Player(Creature):
@@ -117,7 +119,7 @@ class Player(Creature):
 
     def openChest(self, chest):
         self.gold = self.gold + chest.gold
-        print("Chest opened. Gold inside:", chest.gold)
+        print("------Chest opened. Gold inside:", chest.gold,"-----")
         chest.gold = 0
         self.openedchests += 1
         self.hasgoalchest = False
@@ -183,9 +185,15 @@ class Player(Creature):
             [actions["rotateRight"], rotateRight]
         ]
 
+    def heuristics(self, state, target_state):
+        # cost is initially step distance in manhattan metric
+        return abs(state.get_x() - target_state.get_x()) + abs(state.get_y() - target_state.get_y())
+
     def graphsearch(self, fringe, explored, istate, succesorFunction, goalState):
         finalActionList = []
-        fringe.append([None, istate])  # at beginning do nothing
+        init_state = [None, istate]
+        root = Node(None, init_state, 0)
+        heapq.heappush(fringe, (0, root))  # at beginning do nothing
 
         while len(fringe) != 0:
             _flag = True
@@ -193,41 +201,68 @@ class Player(Creature):
             if len(fringe) == 0:
                 return False
 
-            tmpState = fringe.pop(0)
+            tmpNode = (heapq.heappop(fringe))[1]  # node
 
             # build dictionary
-            parent = tmpState[1].get_predecessor() #fetch paren state
-            tmpState[1].set_predecessor(None) # clear predecessor - don't build a tree chain
-            if parent is None:
-                finalActionList.append([parent, tmpState])
-            else:
-                finalActionList.append([parent[1], tmpState]) # pair(key, value) - key: parent state, value: current state + action
+            # parent = tmpNode.get_predecessor()  # fetch parent state
+            # tmpNode.set_predecessor(None)  # clear predecessor - don't build a tree chain
+            # if parent is None:
+            #     finalActionList.append([parent, tmpNode])
+            # else:
+            #     finalActionList.append(
+            #         [parent[1], tmpNode])  # pair(key, value) - key: parent state, value: current state + action
 
+            if tmpNode._state.get_x() == goalState.get_x() and tmpNode._state.get_y() == goalState.get_y():
+                while tmpNode._parent is not None:
+                    finalActionList.append(tmpNode._action)
+                    tmpNode = tmpNode._parent
+                finalActionList = list(reversed(finalActionList))
+                return finalActionList  # TODO change step!
-            if tmpState[1].get_x() == goalState.get_x() and tmpState[1].get_y() == goalState.get_y():
-                return finalActionList
+            explored.append(tmpNode)
 
-            explored.append(tmpState)
-
-            tmpList = succesorFunction(tmpState[1])
+            tmpList = succesorFunction(tmpNode._state)
 
             for newState in tmpList:
                 _flag = True
+                _flagFringe = True
+                _flagExplored = True
                 if newState[1] is None:
                     continue
 
-                for fringeState in fringe:
-                    if fringeState[1].get_x() == newState[1].get_x() and fringeState[1].get_y() == newState[1].get_y() and fringeState[1].get_direction() == newState[1].get_direction():
+                # calculating priority
+                monster = 0
+                if any([thing.isCreature for thing in self.model.grid.get_cell_list_contents([(newState[1].get_x(), newState[1].get_y())])]):
+                    if newState[0] == 0:
+                        monster = 10
+                p = self.heuristics(newState[1], goalState) + tmpNode._cost + monster + 1
+
+                r = 0
+                counter = 0
+                pos = 0
+                for fringeNode in fringe:
+                    if fringeNode[1]._state.get_x() == newState[1].get_x() and fringeNode[1]._state.get_y() == newState[1].get_y() and fringeNode[1]._state.get_direction() == newState[1].get_direction():
+                        _flagFringe = False
+                        _flag = False
+                        r = fringeNode[0]
+                        pos = counter
+                    counter = counter + 1
+
+                for exploredNode in explored:
+                    if exploredNode._state.get_x() == newState[1].get_x() and exploredNode._state.get_y() == newState[1].get_y() and exploredNode._state.get_direction() == newState[1].get_direction():
+                        _flagExplored = False
                         _flag = False
 
-                for exploredState in explored:
-                    if exploredState[1].get_x() == newState[1].get_x() and exploredState[1].get_y() == newState[1].get_y() and exploredState[1].get_direction() == newState[1].get_direction():
-                        _flag = False
-
-                if _flag:
-                    newState[1].set_predecessor(tmpState)
-                    fringe.append(newState)
+                # if _flag:
+                #     newState[1].set_predecessor(tmpNode)
+                if _flagFringe and _flagExplored:
+                    newNode = Node(tmpNode, newState, tmpNode._cost + 1 + monster)
+                    heapq.heappush(fringe, (p, newNode))
+                elif not _flagFringe and (p < r):
+                    newNode = Node(tmpNode, newState, tmpNode._cost + 1 + monster)
+                    fringe[pos] = (p, newNode)  # heap entries are tuples (immutable), so replace the whole pair
+                    heapq.heapify(fringe)  # a priority was lowered, restore the heap invariant
 
         return None
 
@@ -256,38 +291,7 @@ class Player(Creature):
 
             if self.__actionsCollection is None:
                 raise Exception("CRITICAL ERROR - Algorithm error - Path doesn't exist!!! ://")
://") - else: #build list from dictionary by last element - goalActionsList = [] - #keysList = list(self.__actionsCollection.keys()) - #valuesList = list(self.__actionsCollection.values()) - - stateWithChest = self.__actionsCollection[-1] # fetch last item - - #stateWithChest: - # [ - # [0] key - parent ActionState: object, - # [1] value - node successor: - # { - # [0] action: string - action to get state - # [1] AgentState: object - current state - # } - # ] - - goalActionsList.append(stateWithChest[1][0]) # save action - tmpState = stateWithChest[0] - while tmpState is not None: # iterate while key (parent state) != None - - index = 0 - for valeState in self.__actionsCollection: # find new key(parent status) index in array data - if valeState[1][1].get_x() == tmpState.get_x() and valeState[1][1].get_y() == tmpState.get_y() and valeState[1][1].get_direction() == tmpState.get_direction(): - break - index += 1 - - - goalActionsList.append(self.__actionsCollection[index][1][0]) # get action - tmpState = self.__actionsCollection[index][0] # get state - and next we will find key equal it's value - - self.__actionsCollection = list(reversed(goalActionsList)) + else: self.__actionsCollection = [action for action in self.__actionsCollection if action is not None] # remove first None action else: raise Exception("WIN!!! :D") diff --git a/node.py b/node.py new file mode 100644 index 0000000..77f60ab --- /dev/null +++ b/node.py @@ -0,0 +1,15 @@ +class Node: + def __init__(self, parent, state_tuple, cost): + self._cost = cost + self._action = state_tuple[0] + self._state = state_tuple[1] + self._parent = parent + + def get_predecessor(self): + return self._parent + + def set_predecessor(self, predecessor): + self._parent = predecessor + + def __lt__(self, other): + return self._cost < other._cost diff --git a/server.py b/server.py index 3b46704..1d0b90d 100644 --- a/server.py +++ b/server.py @@ -18,5 +18,5 @@ server = ModularServer(GameMap, [grid], "Map", {"x":10, "y":10}) -server.port = 8080 +server.port = 8081 server.launch() \ No newline at end of file diff --git a/state.py b/state.py index 9e39e5b..38a29dc 100644 --- a/state.py +++ b/state.py @@ -1,9 +1,8 @@ class AgentState: - def __init__(self, x, y, direction, predecessor = None): + def __init__(self, x, y, direction): self.__x = x self.__y = y self.__direction = direction - self.__predecessor = predecessor def get_x(self): return self.__x @@ -28,9 +27,3 @@ class AgentState: def set_direction(self, direction): self.__direction = direction - - def get_predecessor(self): - return self.__predecessor - - def set_predecessor(self, predecessor): - self.__predecessor = predecessor \ No newline at end of file