implement astar, add pickle rick

This commit is contained in:
eugenep 2021-04-25 23:32:06 +02:00
parent 520efb84ea
commit 23d60003df
8 changed files with 175 additions and 46 deletions

112
astar2.py
View File

@ -1,5 +1,6 @@
from os import path
import heapq
import copy
from settings import *
from sprites import Direction
@ -27,16 +28,16 @@ class PlanRoute():
orientation = state.get_orientation()
# Prevent Bumps
if x == 1 and orientation == 'LEFT':
if y == 0 and orientation == 'LEFT':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
if y == 1 and orientation == 'DOWN':
if x == 0 and orientation == 'DOWN':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
if x == self.dimrow and orientation == 'RIGHT':
if y == self.dimrow and orientation == 'RIGHT':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
if y == self.dimrow and orientation == 'UP':
if x == self.dimrow and orientation == 'UP':
if 'Forward' in possible_actions:
possible_actions.remove('Forward')
@ -46,24 +47,24 @@ class PlanRoute():
""" Given state and action, return a new state that is the result of the action.
Action is assumed to be a valid action in the state """
x, y = state.get_location()
#proposed_loc = list()
proposed_loc = []
proposed_loc = list()
#proposed_loc = []
# Move Forward
if action == 'Forward':
if state.get_orientation() == 'UP':
proposed_loc = [x, y + 1]
elif state.get_orientation() == 'DOWN':
proposed_loc = [x, y - 1]
elif state.get_orientation() == 'LEFT':
proposed_loc = [x - 1, y]
elif state.get_orientation() == 'RIGHT':
proposed_loc = [x + 1, y]
elif state.get_orientation() == 'DOWN':
proposed_loc = [x - 1, y]
elif state.get_orientation() == 'LEFT':
proposed_loc = [x, y - 1]
elif state.get_orientation() == 'RIGHT':
proposed_loc = [x, y + 1]
else:
raise Exception('InvalidOrientation')
# Rotate counter-clockwise
elif action == 'Left':
elif action == 'Right':
if state.get_orientation() == 'UP':
state.set_orientation('LEFT')
elif state.get_orientation() == 'DOWN':
@ -76,7 +77,7 @@ class PlanRoute():
raise Exception('InvalidOrientation')
# Rotate clockwise
elif action == 'Right':
elif action == 'Left':
if state.get_orientation() == 'UP':
state.set_orientation('RIGHT')
elif state.get_orientation() == 'DOWN':
@ -87,9 +88,13 @@ class PlanRoute():
state.set_orientation('DOWN')
else:
raise Exception('InvalidOrientation')
if tuple(proposed_loc) in self.allowed:
state.set_location(proposed_loc[0], [proposed_loc[1]])
if(proposed_loc):
tupled_proposed_loc = tuple([proposed_loc[0], proposed_loc[1]])
if tupled_proposed_loc in self.allowed:
state.set_location(proposed_loc[0], proposed_loc[1])
return state
@ -115,10 +120,10 @@ class PlanRoute():
if location2 in self.puddles:
return c + 1
return c + 2
if location1 == location2 and state1 in self.puddles:
return c + 1
return c
return c + 2
return c+1
def h(self, node):
""" Return the heuristic value for a given state."""
@ -148,21 +153,30 @@ class Node:
def expand(self, problem):
"""List the nodes reachable in one step from this node."""
test_node_list = [self.child_node(problem, action)
for action in problem.actions(self.state)]
return [self.child_node(problem, action)
for action in problem.actions(self.state)]
def child_node(self, problem, action):
next_state = problem.result(self.state, action)
next_state = problem.result(copy.deepcopy(self.state), action)
next_node = Node(next_state, self, action, problem.path_cost(
self.path_cost, self.state, action, next_state))
print(problem.path_cost(
self.path_cost, self.state, action, next_state))
#print(problem.path_cost(
# self.path_cost, self.state, action, next_state))
return next_node
def __eq__(self, other):
return isinstance(other, Node) and self.state == other.state
def __lt__(self, other):
return isinstance(other, Node) and self.state == other.state
def __hash__(self):
# We use the hash value of the state
# stored in the node instead of the node
@ -203,7 +217,8 @@ class AgentPosition:
other.get_orientation() == self.get_orientation()):
return True
else:
return False
return False
def __hash__(self):
return hash((self.X, self.Y, self.orientation))
@ -362,7 +377,7 @@ class SweeperAgent:
agent_position = AgentPosition(x, y, self.orientation)
goal_position = AgentPosition(x1, y1, goal_orientation)
self.plan_route(agent_position, goal_position, self.allowed_points, self.puddle_points)
return self.plan_route(agent_position, goal_position, self.allowed_points, self.puddle_points)
@ -380,8 +395,8 @@ class SweeperAgent:
def plan_route(self, current, goals, allowed, puddles):
problem = PlanRoute(current, goals, allowed, puddles)
return SweeperAgent.astar_search(problem, problem.h).solution()
problem = PlanRoute(current, goals, allowed, puddles, MAP_SIZE-1)
return SweeperAgent.astar_search(problem, problem.h)
#return SweeperAgent.astar_search(problem, problem.h)
@ -422,7 +437,7 @@ class SweeperAgent:
There is a subtlety: the line "f = memoize(f, 'f')" means that the f
values will be cached on the nodes as they are computed. So after doing
a best first search you can examine the f values of the path returned."""
# f = memoize(f, 'f')
#f = memoize(f, 'f')
"""TODO"""
# Zaimplementować klasę Node dla Astar
@ -438,16 +453,29 @@ class SweeperAgent:
if problem.goal_test(node.state):
if display:
print(len(explored), "paths have been expanded and", len(frontier), "paths remain in the frontier")
return node
explored.add(node.state)
while(node.parent != None):
history.append(node.action)
node = node.parent
#return child
history.reverse()
print(history)
return history
#return history
#break
#return node
#break
explored.add(copy.deepcopy(node.state))
test_child_chamber = node.expand(problem)
for child in node.expand(problem):
if child.state not in explored and child not in frontier:
frontier.append(child)
frontier.append(copy.deepcopy(child))
elif child in frontier:
if f(child) < frontier[child]:
del frontier[child]
frontier.append(child)
return None
return history
#return None
@ -539,4 +567,24 @@ class Test:
print("goal:")
print(goal)
print("orientation:")
print(orientation)
print(orientation)
def memoize(fn, slot=None, maxsize=32):
    """Memoize fn: make it remember the computed value for any argument list.

    If slot is specified, store the result in that slot of the first
    argument (computed once per object, then read back from the attribute).
    If slot is falsy, use functools.lru_cache for caching the values.

    Args:
        fn: the function to memoize.
        slot: optional attribute name on the first argument used as the cache.
        maxsize: lru_cache bound when no slot is given (unused otherwise).

    Returns:
        A memoized wrapper around fn.
    """
    if slot:
        def memoized_fn(obj, *args):
            # First call computes and stores; later calls hit the attribute.
            if hasattr(obj, slot):
                return getattr(obj, slot)
            else:
                val = fn(obj, *args)
                setattr(obj, slot, val)
                return val
    else:
        # Local import: the enclosing file does not import functools, so the
        # original code raised NameError when this branch was taken.
        import functools

        @functools.lru_cache(maxsize=maxsize)
        def memoized_fn(*args):
            return fn(*args)

    return memoized_fn

7
bfs_maps/goal_map.txt Normal file
View File

@ -0,0 +1,7 @@
.......
###....
.......
.......
...###.
...#^#.
.......

7
bfs_maps/map.txt Normal file
View File

@ -0,0 +1,7 @@
.>.....
###....
.......
.......
...###.
...#.#.
.......

View File

@ -1,7 +1,7 @@
.......
...p...
###.ppp
....p..
....p.p
...###p
...#^#p
......p
...pp.p
..p###.
..p#^#.
..ppp..

View File

@ -241,8 +241,8 @@ class BFS:
@staticmethod
def run():
initial_map = tuple(map(tuple,BFS.loadMap('map.txt')))
goal_map = tuple(map(tuple,BFS.loadMap('goal_map.txt')))
initial_map = tuple(map(tuple,BFS.loadMap('bfs_maps/map.txt')))
goal_map = tuple(map(tuple,BFS.loadMap('bfs_maps/goal_map.txt')))
problem = Problem(initial_map, goal_map)
#BFS.print_node_state(initial_map)

View File

@ -109,9 +109,11 @@ class Game:
self.graph_move(player_moves)
self.wentyl_bezpieczenstwa = 1
if event.key == pg.K_F4 and self.wentyl_bezpieczenstwa == 0:
print("test1")
#print("test1")
agent = SweeperAgent()
SweeperAgent.run(agent)
player_moves = SweeperAgent.run(agent)
self.graph_move(player_moves)
self.wentyl_bezpieczenstwa = 1
# Test.run()

10
map.txt
View File

@ -1,7 +1,7 @@
.>.....
.>.p...
###.ppp
....p..
....p.p
...###p
...#.#p
......p
...pp.p
..p###.
..p#.#.
..ppp..

65
pickle_rick.txt Normal file
View File

@ -0,0 +1,65 @@
████████████████████████████████████████████████████████████████
██ ██ ████████████
██ ██ ██ ████▒▒▒▒▒▒▒▒▒▒▒▒████
██ ██▓▓▓▓▓▓██ ██ ██▓▓ ████ ██ ▓▓▒▒░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▓▓██
██ ██ ▓▓ ██ ██ ██ ██ ██▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ██ ██▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ██ ██▒▒▒▒░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ██ ▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ██ ██▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ██ ██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒██
██ ██████████ ██ ██ ██ ██▒▒▒▒▒▒▒▒░░░░░░░░░░▒▒▒▒▒▒░░░░░░░░░░▒▒▒▒▒▒██
██ ██ ██▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░▒▒▒▒░░░░░░▒▒▒▒▒▒▒▒██
██ ██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░▒▒▒▒░░▒▒▒▒▒▒▒▒██
██ ██████████ ██████████ ████████▓▓ ██ ▓▓ ██ ██████████ ██▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ▓▓ ██ ██ ░░██ ██ ██▒▒▒▒░░ ░░░░░░░░░░░░▒▒▒▒░░░░░░▒▒▒▒▒▒▒▒██
██ ██ ██ ██ ▓▓ ██ ██ ░░██ ██ ██▒▒▒▒ ▒▒░░░░░░░░░░░░░░▒▒▒▒░░▒▒▒▒▒▒▒▒██
██ ██▓▓▓▓▓▓██ ██ ▓▓ ██▓▓░░ ░░██ ██▓▓▓▓▓▓██ ▓▓▒▒▒▒ ▒▒░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ▓▓ ████ ░░██ ██ ██▒▒▒▒ ▓▓ ▒▒░░░░░░ ▒▒░░░░▒▒▒▒▒▒▒▒██
██ ██ ██ ▓▓ ██ ██ ░░██ ██ ██▒▒▒▒ ▒▒░░░░ ▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██ ██ ▓▓ ██ ██ ░░██ ██ ██▒▒▒▒▒▒▒▒ ▒▒░░ ▒▒▒▒▒▒▒▒██
██ ██ ██████████ ██████████ ██ ▓▓░░██████████ ██████████ ▓▓▒▒▒▒▒▒▒▒░░ ▒▒░░░░ ██ ▒▒▒▒▒▒▒▒██
██ ▓▓▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░ ▒▒▒▒▒▒▒▒██
██ ▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░▒▒░░░░░░ ▒▒▒▒▒▒▒▒██
██ ██████████ ██████████ ██████████ ██ ██ ██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒░░░░░░░░ ▒▒▒▒▒▒▒▒██
██ ██ ▓▓ ██ ██ ▓▓ ██ ██▒▒▒▒▒▒▒▒██▓▓ ▒▒░░▒▒░░▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██ ▓▓ ██ ██ ▓▓ ██ ██▒▒▒▒▒▒▒▒▓▓████▓▓ ▒▒▒▒▒▒░░░░▒▒░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██▓▓▓▓▓▓▓▓ ██ ██ ██▓▓ ██▒▒▒▒▒▒▒▒████████▓▓░░ ▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ████ ██ ██ ████ ██▒▒▒▒▒▒▒▒████████████▓▓░░ ▒▒ ██▒▒▒▒▒▒▒▒▒▒▓▓
██ ██ ██ ██ ██ ██ ██ ██▒▒▒▒▒▒▒▒▒▒████████████████████████████▓▓▒▒▒▒▒▒▒▒▓▓
██ ██ ██ ██ ██ ▓▓ ██ ██▒▒▒▒▒▒▒▒▒▒▒▒ ██████▒▒▒▒██████████████▓▓▒▒▒▒▒▒▒▒▓▓
██ ██ ▒▒ ██████▓▓██ ██▓▓▓▓▓▓██ ▓▓ ▓▓ ██▒▒▒▒▒▒▒▒▒▒▒▒░░ ██▓▓▒▒▒▒▒▒██████████▓▓▒▒▒▒▒▒▒▒▓▓
██ ██▒▒▒▒░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒ ▓▓▒▒▒▒▒▒████████▒▒▒▒▒▒▒▒██
██ ██ ██▒▒░░▒▒▒▒▒▒▒▒░░░░░░░░▒▒░░ ▒▒ ██▒▒▒▒▒▒▒▒▒▒▒▒██
████████████████████████████████████████████████████████████ ██ ██▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██ ██▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
██▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
░░▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒
██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
░░▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓██
██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓░░
▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒▒▒▒▒▓▓
▒▒▒▒▒▒▒▒░░▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒▒▒▓▓░░
██▒▒▒▒░░░░▒▒▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒▒▒▒▒██
██▒▒▒▒▒▒░░▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒▒▒██
██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
▒▒▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓░░
░░██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
░░██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓▒▒
░░██▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓
▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓
██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒██
▓▓▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒▒▒██
▓▓▒▒▒▒▒▒░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒▒▒██
▓▓▒▒▒▒▒▒░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓
██░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓
▓▓▒▒▒▒░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▒▒██
░░██░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒░░▓▓██
░░██░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓░░
██▒▒░░░░░░░░░░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▓▓
██▒▒░░░░░░░░░░░░░░▒▒▒▒▒▒████
██████▒▒░░░░░░░░██████
░░░░ ██▓▓▓▓▓▓██░░░░░░