Merge remote-tracking branch 'origin/A_star' into A_star

# Conflicts:
#	bfs.py
#	main.py
#	movement.py
majkellll 2023-05-15 11:50:30 +02:00
commit aacee0e493
4 changed files with 33 additions and 70 deletions
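The conflict in bfs.py comes down to two frontier designs: the A* side keeps a PriorityQueue of candidate paths ordered by accumulated cost plus a heuristic (cost, predicted_cost, SuccList, _heuristics), while the merged result falls back to a plain FIFO Queue of Succ lists with no costs at all. The sketch below contrasts the two disciplines in isolation; it is not code from this repository, and start, neighbors, is_goal, heuristic and the step costs are illustrative stand-ins.

```python
from itertools import count
from queue import Queue, PriorityQueue

def bfs_path(start, neighbors, is_goal):
    """FIFO frontier: paths are expanded in the order they were generated,
    which is what the merged bfs.py does."""
    frontier = Queue()
    frontier.put([start])
    visited = {start}
    while not frontier.empty():
        path = frontier.get()
        if is_goal(path[-1]):
            return path
        for nxt in neighbors(path[-1]):          # neighbors(node) -> iterable of nodes
            if nxt not in visited:
                visited.add(nxt)
                frontier.put(path + [nxt])
    return []

def a_star_path(start, neighbors, is_goal, heuristic):
    """Priority frontier: the cheapest path by g(n) + h(n) is expanded first,
    which is what the removed PriorityQueue/SuccList code was aiming for."""
    frontier = PriorityQueue()
    tie = count()                                # tie-breaker so paths themselves are never compared
    frontier.put((heuristic(start), next(tie), 0, [start]))
    best_g = {start: 0}
    while not frontier.empty():
        _, _, g, path = frontier.get()
        if is_goal(path[-1]):
            return path
        for nxt, step_cost in neighbors(path[-1]):   # neighbors(node) -> (node, cost) pairs
            new_g = g + step_cost
            if new_g < best_g.get(nxt, float("inf")):
                best_g[nxt] = new_g
                frontier.put((new_g + heuristic(nxt), next(tie), new_g, path + [nxt]))
    return []
```

The post-merge find_path_to_nearest_can follows the first pattern; the deleted PriorityQueue, cost and _heuristics machinery was the scaffolding for the second.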

bfs.py (93 changed lines)

@@ -4,97 +4,67 @@ from city import City
from gridCellType import GridCellType
from agentActionType import AgentActionType
from agentOrientation import AgentOrientation
from queue import Queue, PriorityQueue
from queue import Queue
from turnCar import turn_left_orientation, turn_right_orientation
class Succ:
state: AgentState
action: AgentActionType
cost: int
predicted_cost: int
##cost: int
def __init__(self, state: AgentState, action: AgentActionType, cost: int, predicted_cost: int) -> None:
def __init__(self, state: AgentState, action: AgentActionType) -> None:
self.state = state
self.action = action
self.cost = cost
self.predicted_cost = cost
##self.cost = cost
class SuccList:
succ_list: list[Succ]
def __init__(self, succ_list: list[Succ]) -> None:
self.succ_list = succ_list
def __lt__(self, other):
return self.succ_list[-1].predicted_cost < other.succ_list[-1].predicted_cost
def __gt__(self, other):
return self.succ_list[-1].predicted_cost > other.succ_list[-1].predicted_cost
def find_path_to_nearest_can(startState: AgentState, grid: Dict[Tuple[int, int], GridCellType], city: City) -> list[
AgentActionType]:
q: PriorityQueue[SuccList] = PriorityQueue()
def find_path_to_nearest_can(startState: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> list[AgentActionType]:
q: Queue[list[Succ]] = Queue()
visited: list[AgentState] = []
startStates: SuccList = SuccList(
[Succ(startState, AgentActionType.UNKNOWN, 0, _heuristics(startState.position, city))])
startStates: list[Succ] = [Succ(startState, AgentActionType.UNKNOWN)]
q.put(startStates)
while not q.empty():
currently_checked = q.get()
visited.append(currently_checked.succ_list[-1].state)
if is_state_success(currently_checked.succ_list[-1].state, grid):
visited.append(currently_checked[-1].state)
if is_state_success(currently_checked[-1].state, grid):
return extract_actions(currently_checked)
successors = succ(currently_checked.succ_list[-1], grid, city)
successors = succ(currently_checked[-1].state)
for s in successors:
already_visited = False
for v in visited:
if v.position[0] == s.state.position[0] and v.position[1] == s.state.position[
1] and s.state.orientation == v.orientation:
if v.position[0] == s.state.position[0] and v.position[1] == s.state.position[1] and s.state.orientation == v.orientation:
already_visited = True
break
if already_visited:
continue
if is_state_valid(s.state, grid):
new_list = currently_checked.succ_list.copy()
new_list = currently_checked.copy()
new_list.append(s)
q.put(SuccList(new_list))
q.put(new_list)
return []
def extract_actions(successors: SuccList) -> list[AgentActionType]:
def extract_actions(successors: list[Succ]) -> list[AgentActionType]:
output: list[AgentActionType] = []
for s in successors.succ_list:
for s in successors:
if s.action != AgentActionType.UNKNOWN:
output.append(s.action)
return output
def succ(succ: Succ, grid: Dict[Tuple[int, int], GridCellType], city: City) -> list[Succ]:
def succ(state: AgentState) -> list[Succ]:
result: list[Succ] = []
turn_left_cost = 1 + succ.cost
result.append(
Succ(AgentState(succ.state.position, turn_left_orientation(succ.state.orientation)), AgentActionType.TURN_LEFT,
turn_left_cost, turn_left_cost + _heuristics(succ.state.position, city)))
turn_right_cost = 1 + succ.cost
result.append(Succ(AgentState(succ.state.position, turn_right_orientation(succ.state.orientation)),
AgentActionType.TURN_RIGHT, turn_right_cost,
turn_right_cost + _heuristics(succ.state.position, city)))
state_succ = move_forward_succ(succ, city, grid)
result.append(Succ(AgentState(state.position, turn_left_orientation(state.orientation)), AgentActionType.TURN_LEFT))
result.append(Succ(AgentState(state.position, turn_right_orientation(state.orientation)), AgentActionType.TURN_RIGHT))
state_succ = move_forward_succ(state)
if state_succ != None:
result.append(state_succ)
result.append(move_forward_succ(state))
return result
def move_forward_succ(succ: Succ, city: City, grid: Dict[Tuple[int, int], GridCellType]) -> Succ:
position = get_next_cell(succ.state)
def move_forward_succ(state: AgentState) -> Succ:
position = get_next_cell(state)
if position == None:
return None
cost = get_cost_for_action(AgentActionType.MOVE_FORWARD, grid[position]) + succ.cost
return Succ(AgentState(position, succ.state.orientation), AgentActionType.MOVE_FORWARD, cost,
cost + _heuristics(position, city))
return Succ(AgentState(position, state.orientation), AgentActionType.MOVE_FORWARD)
def get_next_cell(state: AgentState) -> Tuple[int, int]:
@@ -114,7 +84,6 @@ def get_next_cell(state: AgentState) -> Tuple[int, int]:
return None
return (state.position[0] + 1, state.position[1])
def is_state_success(state: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> bool:
next_cell = get_next_cell(state)
try:
@@ -122,7 +91,6 @@ def is_state_success(state: AgentState, grid: Dict[Tuple[int, int], GridCellType
except:
return False
def get_cost_for_action(action: AgentActionType, cell_type: GridCellType) -> int:
if action == AgentActionType.TURN_LEFT or action == AgentActionType.TURN_RIGHT:
return 1
@@ -134,13 +102,11 @@ def get_cost_for_action(action: AgentActionType, cell_type: GridCellType) -> int
def is_state_valid(state: AgentState, grid: Dict[Tuple[int, int], GridCellType]) -> bool:
try:
return grid[state.position] == GridCellType.STREET_HORIZONTAL or grid[
state.position] == GridCellType.STREET_VERTICAL or grid[state.position] == GridCellType.SPEED_BUMP
try:
return grid[state.position] == GridCellType.STREET_HORIZONTAL or grid[state.position] == GridCellType.STREET_VERTICAL or grid[state.position] == GridCellType.SPEED_BUMP
except:
return False
def _heuristics(position: Tuple[int, int], city: City):
min_distance: int = 300
found_nonvisited: bool = False
@@ -153,4 +119,5 @@ def _heuristics(position: Tuple[int, int], city: City):
min_distance = distance
if found_nonvisited:
return min_distance
return -1
return -1
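The deleted SuccList wrapper existed only to make a whole candidate path usable as a PriorityQueue entry: heapq orders entries with "<", so the wrapper compares paths by the predicted_cost of their last step. A minimal sketch of that idea, assuming nothing from this repository beyond the predicted_cost field name (Step and Path are hypothetical stand-ins for Succ and SuccList):

```python
from dataclasses import dataclass, field
from queue import PriorityQueue

@dataclass
class Step:
    predicted_cost: int                     # g(n) + h(n) for reaching this step

@dataclass
class Path:
    steps: list[Step] = field(default_factory=list)

    def __lt__(self, other: "Path") -> bool:
        # PriorityQueue/heapq only needs "<": order whole paths by the
        # predicted cost of their final step, as the removed SuccList did.
        return self.steps[-1].predicted_cost < other.steps[-1].predicted_cost

frontier: PriorityQueue[Path] = PriorityQueue()
frontier.put(Path([Step(predicted_cost=7)]))
frontier.put(Path([Step(predicted_cost=3)]))
print(frontier.get().steps[-1].predicted_cost)   # -> 3: cheapest candidate first
```

In the merged result the wrapper disappears, since a FIFO Queue never compares its items and plain list[Succ] values can be enqueued directly.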

city.py

@@ -41,4 +41,4 @@ class City:
def _render_bumps(self, game_context: GameContext) -> None:
for bump in self.bumps:
bump.render(game_context)
bump.render(game_context)

main.py

@@ -1,6 +1,4 @@
import pygame
from city import City
from gameEventHandler import handle_game_event
from gameContext import GameContext
from startup import startup
@@ -19,7 +17,6 @@ game_context = GameContext()
game_context.dust_car_pil = dust_car_pil
game_context.dust_car_pygame = pygame.image.frombuffer(dust_car_pil.tobytes(), dust_car_pil.size, 'RGB')
game_context.canvas = canvas
startup(game_context)
collect_garbage(game_context)

movement.py

@@ -10,11 +10,10 @@ import pygame
from bfs import find_path_to_nearest_can
from agentState import AgentState
def collect_garbage(game_context: GameContext) -> None:
while True:
start_agent_state = AgentState(game_context.dust_car.position, game_context.dust_car.orientation)
path = find_path_to_nearest_can(start_agent_state, game_context.grid, game_context.city)
path = find_path_to_nearest_can(start_agent_state, game_context.grid)
if path == None or len(path) == 0:
break
move_dust_car(path, game_context)
@@ -23,7 +22,6 @@ def collect_garbage(game_context: GameContext) -> None:
game_context.city.cans_dict[next_position].is_visited = True
pass
def move_dust_car(actions: list[AgentActionType], game_context: GameContext) -> None:
for action in actions:
street_position = game_context.dust_car.position
@@ -46,6 +44,7 @@ def move_dust_car(actions: list[AgentActionType], game_context: GameContext) ->
pygame.display.update()
time.sleep(0.15)
def calculate_next_position(car: GarbageTruck) -> Tuple[int, int]:
if car.orientation == AgentOrientation.UP: