Astar z generycznym
This commit is contained in:
parent
7c5d9fa852
commit
d7ae68a8c7
BIN
__pycache__/generate_board.cpython-312.pyc
Normal file
BIN
__pycache__/generate_board.cpython-312.pyc
Normal file
Binary file not shown.
BIN
__pycache__/newastar.cpython-312.pyc
Normal file
BIN
__pycache__/newastar.cpython-312.pyc
Normal file
Binary file not shown.
157
newastar.py
Normal file
157
newastar.py
Normal file
@ -0,0 +1,157 @@
|
||||
import pygame
|
||||
from board import Board
|
||||
from constant import width, height, rows, cols
|
||||
from tractor import Tractor
|
||||
from kolejka import Stan, Odwiedzone
|
||||
from queue import Queue
|
||||
from neuralnetwork import load_model
|
||||
import pandas as pd
|
||||
import heapq
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
# Training data for the decision-tree model (read at import time).
data = pd.read_csv('dane.csv')

from decisiontree import train_decision_tree

# Pre-trained neural-network model loaded from disk.
model_path = 'model.pth'
neuralnetwork_model = load_model(model_path)

# Decision tree fitted on the CSV data; feature_columns lists its input columns.
model, feature_columns = train_decision_tree(data)

fps = 5  # animation frame rate for the main loop
WIN = pygame.display.set_mode((width, height))
pygame.display.set_caption('Inteligentny Traktor')
|
||||
|
||||
def heuristic(current_state, goal_state):
    """Heuristic for A*: Manhattan distance between the two grid states."""
    row_delta = abs(current_state.row - goal_state.row)
    col_delta = abs(current_state.col - goal_state.col)
    return row_delta + col_delta
|
||||
|
||||
def goal_test(elem, board):
    """A state is a goal when the board cell it occupies contains dirt."""
    row, col = elem.row, elem.col
    return board.is_dirt(row, col)
|
||||
|
||||
def cost(next_state, board):
    """Step cost of entering *next_state*.

    Cell codes 0 and 1 are expensive (100); every other cell costs 1.
    Also logs the cell's crop name and the chosen cost.
    """
    cell_code = board.board[next_state.row][next_state.col]
    this_cost = 100 if cell_code in (0, 1) else 1
    print(board.vegetable_names[next_state.row][next_state.col], " --->", this_cost)
    return this_cost
|
||||
|
||||
def actions(elem, istate):
    """Recover the action sequence that leads from *istate* to *elem*.

    Walks the parent links (`p`) backwards from the goal node, collecting
    each node's recorded action (`a`), and returns them in forward order.
    """
    path = []
    node = elem
    while not (node.row == istate.row
               and node.col == istate.col
               and node.direction == istate.direction):
        path.append(node.a)
        node = node.p[0]
    path.reverse()
    return path
|
||||
|
||||
def find_next_goal(board, visited):
    """Scan the board row-major and return a `Stan` for the first dirt cell
    not yet in *visited*; return None when no such cell exists."""
    for r in range(rows):
        for c in range(cols):
            if board.is_dirt(r, c) and (r, c) not in visited:
                return Stan(r, c, "down")
    return None
|
||||
|
||||
@dataclass(order=True)
class PrioritizedItem:
    """Heap entry for the A* fringe: ordered by `priority` alone.

    `item` (the wrapped state) is excluded from comparisons so that heapq
    never tries to compare two states when priorities tie.
    """
    priority: int
    item: object = field(compare=False)
|
||||
|
||||
def astar(istate, goalstate, board):
    """A* search from *istate* toward *goalstate* on *board*.

    Returns (action list, goal state) on success, or ([], None) when the
    fringe empties without reaching a goal.
    """
    closed = Odwiedzone()
    open_heap = []
    heapq.heappush(open_heap, PrioritizedItem(heuristic(istate, goalstate), istate))
    possible_moves = ["up", "left", "right"]

    while open_heap:
        node = heapq.heappop(open_heap).item
        if goal_test(node, board):
            return actions(node, istate), node
        closed.dodaj_stan(node)

        for move in possible_moves:
            successor = node.succ(move, board)
            if successor is None:
                continue
            # f = g + h; note cost() also logs the crop and step cost.
            g_new = node.cost + cost(successor, board)
            f_score = g_new + heuristic(successor, goalstate)
            if not fringe_check(open_heap, successor) and not closed.check(successor):
                successor.parrent(node, move)
                heapq.heappush(open_heap, PrioritizedItem(f_score, successor))

    return [], None
|
||||
|
||||
def fringe_check(fringe, stan):
    """Return True when a state equivalent to *stan* (same row, col and
    direction) is already queued on the fringe."""
    return any(
        entry.item.direction == stan.direction
        and entry.item.col == stan.col
        and entry.item.row == stan.row
        for entry in fringe
    )
|
||||
|
||||
|
||||
def main():
    """Game loop: repeatedly A*-plan to the nearest dirt cell and animate
    the tractor executing the plan, until the board is clean or the
    window is closed."""
    initial_state = Stan(9, 1, "down")
    run = True
    clock = pygame.time.Clock()

    board = Board(load_from_file=True, filename='generated_board.npy')

    tractor = Tractor(9, 1, model, feature_columns, neuralnetwork_model)
    visited = set()  # (row, col) cells already cleaned this session

    while run:
        clock.tick(fps)

        # Stop when no dirt remains anywhere on the board.
        if all(not board.is_dirt(row, col) for row in range(rows) for col in range(cols)):
            print("Traktor odwiedził wszystkie pola.")
            break

        goal_state = find_next_goal(board, visited)
        if not goal_state:
            print("Wszystkie pola zostały odwiedzone.")
            break

        akcje, nowy_stan = astar(initial_state, goal_state, board)

        if not akcje:
            # No path found: reload the board and restart the tractor at
            # (0, 1), re-rolling the board until that start cell is not a rock.
            print("Nie znaleziono ścieżki do najbliższego pola dirt.")
            board = Board(load_from_file=True, filename='generated_board.npy')
            initial_state = Stan(0, 1, "down")
            tractor = Tractor(0, 1, model, feature_columns, neuralnetwork_model)
            while board.is_rock(initial_state.row, initial_state.col):
                board = Board(load_from_file=True, filename='generated_board.npy')
            continue

        print("akcje: >", akcje)

        # Replay the planned actions, redrawing the scene after each one.
        # NOTE(review): a QUIT event here only clears `run`; the current plan
        # still finishes before the outer loop notices.
        while akcje:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False

            akcja = akcje.pop(0)
            if akcja == "left":
                tractor.turn_left()
            elif akcja == "right":
                tractor.turn_right()
            elif akcja == "up":
                tractor.move_forward(board)

            board.draw_cubes(WIN)
            tractor.draw(WIN)
            pygame.display.update()

        # Continue planning from the state the tractor ended in, keeping the
        # direction it is actually facing.
        visited.add((nowy_stan.row, nowy_stan.col))
        initial_state = nowy_stan
        initial_state.direction = tractor.direction

    # Keep the window open until the user closes it.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                return

    # NOTE(review): unreachable — the loop above only exits via `return`.
    pygame.quit()
|
||||
|
||||
# Guard the entry point so the module can be imported (e.g. for testing)
# without immediately launching the game loop.
if __name__ == "__main__":
    main()
|
Loading…
Reference in New Issue
Block a user