Merge remote-tracking branch 'origin/A_star' into A_star

# Conflicts:
#	bfs.py
#	main.py
#	movement.py
majkellll 2023-05-15 11:50:30 +02:00
commit aacee0e493
4 changed files with 33 additions and 70 deletions

bfs.py

@@ -4,97 +4,67 @@ from city import City
 from gridCellType import GridCellType
 from agentActionType import AgentActionType
 from agentOrientation import AgentOrientation
-from queue import Queue, PriorityQueue
+from queue import Queue
 from turnCar import turn_left_orientation, turn_right_orientation


 class Succ:
     state: AgentState
     action: AgentActionType
-    cost: int
-    predicted_cost: int
+    ##cost: int

-    def __init__(self, state: AgentState, action: AgentActionType, cost: int, predicted_cost: int) -> None:
+    def __init__(self, state: AgentState, action: AgentActionType) -> None:
         self.state = state
         self.action = action
-        self.cost = cost
-        self.predicted_cost = cost
+        ##self.cost = cost


-class SuccList:
-    succ_list: list[Succ]
-
-    def __init__(self, succ_list: list[Succ]) -> None:
-        self.succ_list = succ_list
-
-    def __lt__(self, other):
-        return self.succ_list[-1].predicted_cost < other.succ_list[-1].predicted_cost
-
-    def __gt__(self, other):
-        return self.succ_list[-1].predicted_cost > other.succ_list[-1].predicted_cost
-
-
-def find_path_to_nearest_can(startState: AgentState, grid: Dict[Tuple[int, int], GridCellType], city: City) -> list[
-    AgentActionType]:
-    q: PriorityQueue[SuccList] = PriorityQueue()
+def find_path_to_nearest_can(startState: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> list[AgentActionType]:
+    q: Queue[list[Succ]] = Queue()
     visited: list[AgentState] = []
-    startStates: SuccList = SuccList(
-        [Succ(startState, AgentActionType.UNKNOWN, 0, _heuristics(startState.position, city))])
+    startStates: list[Succ] = [Succ(startState, AgentActionType.UNKNOWN)]
     q.put(startStates)
     while not q.empty():
         currently_checked = q.get()
-        visited.append(currently_checked.succ_list[-1].state)
-        if is_state_success(currently_checked.succ_list[-1].state, grid):
+        visited.append(currently_checked[-1].state)
+        if is_state_success(currently_checked[-1].state, grid):
             return extract_actions(currently_checked)
-        successors = succ(currently_checked.succ_list[-1], grid, city)
+        successors = succ(currently_checked[-1].state)
         for s in successors:
             already_visited = False
             for v in visited:
-                if v.position[0] == s.state.position[0] and v.position[1] == s.state.position[
-                    1] and s.state.orientation == v.orientation:
+                if v.position[0] == s.state.position[0] and v.position[1] == s.state.position[1] and s.state.orientation == v.orientation:
                     already_visited = True
                     break
             if already_visited:
                 continue
             if is_state_valid(s.state, grid):
-                new_list = currently_checked.succ_list.copy()
+                new_list = currently_checked.copy()
                 new_list.append(s)
-                q.put(SuccList(new_list))
+                q.put(new_list)
     return []


-def extract_actions(successors: SuccList) -> list[AgentActionType]:
+def extract_actions(successors: list[Succ]) -> list[AgentActionType]:
     output: list[AgentActionType] = []
-    for s in successors.succ_list:
+    for s in successors:
         if s.action != AgentActionType.UNKNOWN:
             output.append(s.action)
     return output


-def succ(succ: Succ, grid: Dict[Tuple[int, int], GridCellType], city: City) -> list[Succ]:
+def succ(state: AgentState) -> list[Succ]:
     result: list[Succ] = []
-    turn_left_cost = 1 + succ.cost
-    result.append(
-        Succ(AgentState(succ.state.position, turn_left_orientation(succ.state.orientation)), AgentActionType.TURN_LEFT,
-             turn_left_cost, turn_left_cost + _heuristics(succ.state.position, city)))
-    turn_right_cost = 1 + succ.cost
-    result.append(Succ(AgentState(succ.state.position, turn_right_orientation(succ.state.orientation)),
-                       AgentActionType.TURN_RIGHT, turn_right_cost,
-                       turn_right_cost + _heuristics(succ.state.position, city)))
-    state_succ = move_forward_succ(succ, city, grid)
+    result.append(Succ(AgentState(state.position, turn_left_orientation(state.orientation)), AgentActionType.TURN_LEFT))
+    result.append(Succ(AgentState(state.position, turn_right_orientation(state.orientation)), AgentActionType.TURN_RIGHT))
+    state_succ = move_forward_succ(state)
     if state_succ != None:
-        result.append(state_succ)
+        result.append(move_forward_succ(state))
     return result


-def move_forward_succ(succ: Succ, city: City, grid: Dict[Tuple[int, int], GridCellType]) -> Succ:
-    position = get_next_cell(succ.state)
+def move_forward_succ(state: AgentState) -> Succ:
+    position = get_next_cell(state)
     if position == None:
         return None
-    cost = get_cost_for_action(AgentActionType.MOVE_FORWARD, grid[position]) + succ.cost
-    return Succ(AgentState(position, succ.state.orientation), AgentActionType.MOVE_FORWARD, cost,
-                cost + _heuristics(position, city))
+    return Succ(AgentState(position, state.orientation), AgentActionType.MOVE_FORWARD)


 def get_next_cell(state: AgentState) -> Tuple[int, int]:
@@ -114,7 +84,6 @@ def get_next_cell(state: AgentState) -> Tuple[int, int]:
            return None
        return (state.position[0] + 1, state.position[1])

 def is_state_success(state: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> bool:
     next_cell = get_next_cell(state)
     try:
@@ -122,7 +91,6 @@ def is_state_success(state: AgentState, grid: Dict[Tuple[int, int], GridCellType
     except:
         return False

 def get_cost_for_action(action: AgentActionType, cell_type: GridCellType) -> int:
     if action == AgentActionType.TURN_LEFT or action == AgentActionType.TURN_RIGHT:
         return 1
@@ -134,13 +102,11 @@ def get_cost_for_action(action: AgentActionType, cell_type: GridCellType) -> int
 def is_state_valid(state: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> bool:
     try:
-        return grid[state.position] == GridCellType.STREET_HORIZONTAL or grid[
-            state.position] == GridCellType.STREET_VERTICAL or grid[state.position] == GridCellType.SPEED_BUMP
+        return grid[state.position] == GridCellType.STREET_HORIZONTAL or grid[state.position] == GridCellType.STREET_VERTICAL or grid[state.position] == GridCellType.SPEED_BUMP
     except:
         return False

 def _heuristics(position: Tuple[int, int], city: City):
     min_distance: int = 300
     found_nonvisited: bool = False
@@ -153,4 +119,5 @@ def _heuristics(position: Tuple[int, int], city: City):
                min_distance = distance
     if found_nonvisited:
         return min_distance
     return -1
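Note: the pre-merge (removed) version above expanded paths from a PriorityQueue ordered by accumulated cost plus _heuristics, i.e. A*, while the merged result keeps a plain FIFO Queue, i.e. uninformed BFS. For reference only, a minimal self-contained sketch of that priority-queue ordering on a toy grid (hypothetical names and types, not the project's classes) could look like this:

import heapq
from typing import Dict, List, Tuple

State = Tuple[int, int]          # toy state: just a grid position
Grid = Dict[State, int]          # cell -> cost of entering it (e.g. 1 = street, 3 = speed bump)


def manhattan(a: State, b: State) -> int:
    return abs(a[0] - b[0]) + abs(a[1] - b[1])


def best_first_path(start: State, goal: State, grid: Grid) -> List[State]:
    # Each frontier entry is (cost_so_far + heuristic, cost_so_far, path); heapq
    # pops the smallest predicted total first, which is what SuccList.__lt__ on
    # predicted_cost achieved for the PriorityQueue in the removed code.
    frontier: List[Tuple[int, int, List[State]]] = [(manhattan(start, goal), 0, [start])]
    visited: set = set()
    while frontier:
        _, cost, path = heapq.heappop(frontier)
        current = path[-1]
        if current == goal:
            return path
        if current in visited:
            continue
        visited.add(current)
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (current[0] + dx, current[1] + dy)
            if nxt in grid and nxt not in visited:
                new_cost = cost + grid[nxt]
                heapq.heappush(frontier, (new_cost + manhattan(nxt, goal), new_cost, path + [nxt]))
    return []


if __name__ == "__main__":
    toy_grid = {(x, y): 1 for x in range(3) for y in range(3)}   # 3x3 grid, uniform cost
    print(best_first_path((0, 0), (2, 2), toy_grid))

The ordering is the same idea the removed SuccList.__lt__ expressed: the partial path with the smallest predicted total cost is expanded first; dropping it makes the expansion order purely first-in, first-out.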

city.py

@@ -41,4 +41,4 @@ class City:
     def _render_bumps(self, game_context: GameContext) -> None:
         for bump in self.bumps:
             bump.render(game_context)

main.py

@@ -1,6 +1,4 @@
 import pygame
-from city import City
 from gameEventHandler import handle_game_event
 from gameContext import GameContext
 from startup import startup
@@ -19,7 +17,6 @@ game_context = GameContext()
 game_context.dust_car_pil = dust_car_pil
 game_context.dust_car_pygame = pygame.image.frombuffer(dust_car_pil.tobytes(), dust_car_pil.size, 'RGB')
 game_context.canvas = canvas

 startup(game_context)
 collect_garbage(game_context)

movement.py

@@ -10,11 +10,10 @@ import pygame
 from bfs import find_path_to_nearest_can
 from agentState import AgentState

 def collect_garbage(game_context: GameContext) -> None:
     while True:
         start_agent_state = AgentState(game_context.dust_car.position, game_context.dust_car.orientation)
-        path = find_path_to_nearest_can(start_agent_state, game_context.grid, game_context.city)
+        path = find_path_to_nearest_can(start_agent_state, game_context.grid)
         if path == None or len(path) == 0:
             break
         move_dust_car(path, game_context)
@@ -23,7 +22,6 @@ def collect_garbage(game_context: GameContext) -> None:
         game_context.city.cans_dict[next_position].is_visited = True
     pass

 def move_dust_car(actions: list[AgentActionType], game_context: GameContext) -> None:
     for action in actions:
         street_position = game_context.dust_car.position
@@ -46,6 +44,7 @@ def move_dust_car(actions: list[AgentActionType], game_context: GameContext) ->
         pygame.display.update()
         time.sleep(0.15)

 def calculate_next_position(car: GarbageTruck) -> Tuple[int, int]:
     if car.orientation == AgentOrientation.UP:
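The last hunk of movement.py is cut off above. For orientation, a standalone sketch of the orientation-to-offset step that get_next_cell and calculate_next_position perform is shown below; the toy enum and the axis convention (UP decreases y, origin at the top-left) are assumptions for illustration, not taken from the repository:

from enum import Enum
from typing import Optional, Tuple


class AgentOrientation(Enum):
    # Toy stand-in for the project's AgentOrientation, defined here only so the
    # sketch is self-contained.
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3


_OFFSETS = {
    AgentOrientation.UP: (0, -1),    # assumed convention: UP means a smaller y
    AgentOrientation.DOWN: (0, 1),
    AgentOrientation.LEFT: (-1, 0),
    AgentOrientation.RIGHT: (1, 0),  # matches the shown "position[0] + 1" branch in bfs.py
}


def next_cell(position: Tuple[int, int], orientation: AgentOrientation,
              grid_size: Tuple[int, int]) -> Optional[Tuple[int, int]]:
    # Step one cell in the direction the truck is facing; return None when the
    # step would leave the grid, as the original functions return None at the edge.
    dx, dy = _OFFSETS[orientation]
    x, y = position[0] + dx, position[1] + dy
    if 0 <= x < grid_size[0] and 0 <= y < grid_size[1]:
        return (x, y)
    return None


print(next_cell((2, 3), AgentOrientation.RIGHT, (10, 10)))  # -> (3, 3)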