Projekt_Sztuczna_Inteligencja/searching_algorithms/bfs.py

from __future__ import annotations
from typing import List
import ctypes
from project_constants import Direction, Action
from minefield import Minefield

# temporary goal for testing
GOAL = (9, 9)


class State:
    """Agent state: position (row, column) on the minefield plus facing direction."""

    def __init__(self, row, column, direction: Direction):
        self.row = row
        self.column = column
        self.direction = direction


class Node:
    """Search-tree node: a state, its parent node and the action that produced it."""

    def __init__(self, state: State, parent: Node = None, action: Action = None):
        self.state = state
        self.parent = parent
        self.action = action


def goal_test(state: State):
    """Check whether the state's position matches the GOAL coordinates."""
    return (state.row, state.column) == GOAL


def get_successors(state: State, minefield: Minefield):
    """Return a list of (Action, State) pairs reachable from the given state."""
    successors = list()

    # rotating in place is always possible
    state_left = State(state.row, state.column, state.direction.previous())
    successors.append((Action.ROTATE_LEFT, state_left))
    state_right = State(state.row, state.column, state.direction.next())
    successors.append((Action.ROTATE_RIGHT, state_right))

    # moving forward is possible only onto a valid field
    target = go(state.row, state.column, state.direction)
    if minefield.is_valid_move(target[0], target[1]):
        state_go = State(target[0], target[1], state.direction)
        successors.append((Action.GO, state_go))

    return successors
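
# Note: get_successors() relies on Direction.previous() / Direction.next(),
# which are assumed here to rotate the agent 90 degrees counter-clockwise /
# clockwise; their actual definition lives in project_constants and is not
# shown in this file. The returned list has the illustrative shape
#   [(Action.ROTATE_LEFT, <left-rotated state>),
#    (Action.ROTATE_RIGHT, <right-rotated state>),
#    (Action.GO, <forward state>)]    # GO present only when the move is valid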


def graphsearch(initial_state: State, minefield: Minefield,
                fringe: List[Node] = None, explored: List[Node] = None,
                tox: int = None, toy: int = None):
    """Breadth-first search over (row, column, direction) states.

    Returns the list of Actions leading from initial_state to GOAL
    (overridden by (tox, toy) when both are given), or [] if no solution exists.
    """
    # fringe and explored initialization
    global GOAL
    if fringe is None:
        fringe = list()
    if explored is None:
        explored = list()
    if tox is not None and toy is not None:
        GOAL = (tox, toy)

    # state tuples mirrored in sets for fast membership tests
    explored_states = set()
    fringe_states = set()

    # root Node
    fringe.append(Node(initial_state))
    fringe_states.add((initial_state.row, initial_state.column, initial_state.direction))

    while True:
        # fringe empty -> solution not found
        if not fringe:
            # Windows-only message box; "Brak rozwiązania" means "No solution"
            ctypes.windll.user32.MessageBoxW(0, "Brak rozwiązania", "GAME OVER", 1)
            return []

        # get first element from fringe (FIFO order is what makes this BFS)
        element = fringe.pop(0)
        fringe_states.remove((element.state.row, element.state.column, element.state.direction))

        # if solution was found, prepare and return actions sequence
        if goal_test(element.state):
            # root's action is None, don't add it
            actions_sequence = [element.action] if element.action is not None else []
            parent = element.parent
            while parent is not None:
                if parent.action is not None:
                    actions_sequence.append(parent.action)
                parent = parent.parent
            actions_sequence.reverse()
            return actions_sequence

        # add current node to explored (prevents infinite cycles)
        explored.append(element)
        explored_states.add((element.state.row, element.state.column, element.state.direction))

        # loop through every possible next action
        for action, new_state in get_successors(element.state, minefield):
            # make sure not to fall into a cycle
            successor_state = (new_state.row, new_state.column, new_state.direction)
            if successor_state not in fringe_states and successor_state not in explored_states:
                # create new Node and add it at the end of fringe
                new_node = Node(state=new_state, parent=element, action=action)
                fringe.append(new_node)
                fringe_states.add((new_node.state.row, new_node.state.column, new_node.state.direction))


# TEMPORARY METHOD
def go(row, column, direction):
    """Return the (row, column) directly in front of the given position and direction."""
    target = tuple()
    if direction == Direction.RIGHT:
        target = row, column + 1
    elif direction == Direction.LEFT:
        target = row, column - 1
    elif direction == Direction.UP:
        target = row - 1, column
    elif direction == Direction.DOWN:
        target = row + 1, column
    return target
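

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): how
# graphsearch() might be called. The Minefield construction and the starting
# Direction below are assumptions; adapt them to the actual project API.
#
#     minefield = Minefield(...)                        # hypothetical setup
#     start = State(0, 0, Direction.RIGHT)
#     actions = graphsearch(start, minefield, tox=9, toy=9)
#     # 'actions' is a list such as [Action.ROTATE_RIGHT, Action.GO, ...],
#     # or [] when no path to the goal exists.
# ---------------------------------------------------------------------------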