add: astar in agent.py

parent d7c3f50322
commit d130f2360b
app.py (23 changed lines)
@@ -9,7 +9,6 @@ from classes.agent import Agent
 from collections import deque
 import threading
 import time
-from astar import AStar
 
 pygame.init()
 window = pygame.display.set_mode((prefs.WIDTH, prefs.HEIGHT))
@@ -22,6 +21,16 @@ def initBoard():
         row = []
         for j in range(prefs.GRID_SIZE):
             cell = Cell(i, j, 1)
+            # Choose a color for the tile based on its position
+            if i == 0 or i == prefs.GRID_SIZE - 1 or j == 0 or j == prefs.GRID_SIZE - 1:
+                color = (100, 20, 20)
+            elif i == 1 or i == prefs.GRID_SIZE - 2 or j == 1 or j == prefs.GRID_SIZE - 2:
+                color = (20, 100, 20)
+            elif i == 2 or i == prefs.GRID_SIZE - 3 or j == 2 or j == prefs.GRID_SIZE - 3:
+                color = (20, 20, 100)
+            else:
+                color = (150, 200, 200)
+            cell.color = color
             row.append(cell)
         cells.append(row)
 
@@ -50,7 +59,9 @@ def initBoard():
 def draw_grid(window, cells, agent):
     for i in range(prefs.GRID_SIZE):
         for j in range(prefs.GRID_SIZE):
-            cells[i][j].update(window)
+            cell = cells[i][j]
+            color = cell.color
+            pygame.draw.rect(window, cell.color, (i*prefs.CELL_SIZE, j*prefs.CELL_SIZE, prefs.CELL_SIZE, prefs.CELL_SIZE))
             if(cells[i][j].interactableItem):
                 cells[i][j].interactableItem.update(window)
             cells[i][j].blit_text(cells[i][j].waga, i*50+6, j*52+6, 12, window)
@@ -115,8 +126,14 @@ while running:
             watek.start()
 
         if keys[K_g]:
-            path = AStar(cells).astar((agent.current_cell.X, agent.current_cell.Y), (target_x, target_y))
+            path, cost = agent.astar((target_x, target_y), start_cost=0)
+            print("Shortest path:", path)
+            print("Total cost:", cost)
+            watek = threading.Thread(target=watekDlaSciezkiAgenta)
+            watek.daemon = True
+            watek.start()
+
 
 
         if pygame.key.get_pressed()[pygame.K_e]:
            if agent.current_cell.interactableItem and pygame.time.get_ticks() - agent.last_interact_time > 500:
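
The color branches added to initBoard() (and the matching cost tiers in agent.py below) select cells by which concentric ring of the board they fall in. A minimal sketch of the same test, written as distance to the nearest edge; the ring() helper and the 16x16 board size are only illustrative and not part of this commit (the real size is prefs.GRID_SIZE):

def ring(i, j, n):
    # Distance from (i, j) to the nearest board edge; the chained
    # if/elif tests in initBoard() enumerate rings 0, 1, 2 case by case.
    return min(i, j, n - 1 - i, n - 1 - j)

assert ring(0, 5, 16) == 0   # outer border  -> (100, 20, 20)
assert ring(1, 5, 16) == 1   # second ring   -> (20, 100, 20)
assert ring(2, 5, 16) == 2   # third ring    -> (20, 20, 100)
assert ring(7, 8, 16) > 2    # interior      -> (150, 200, 200)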
astar.py (file deleted, 49 lines)
@@ -1,49 +0,0 @@
-from collections import deque
-import heapq
-
-class AStar:
-    def __init__(self, cells):
-        self.cells = cells
-
-    def heuristic(self, current, target):
-        # Manhattan distance heuristic
-        dx = abs(current[0] - target[0])
-        dy = abs(current[1] - target[1])
-        return dx + dy
-
-    def get_neighbors(self, cell):
-        neighbors = []
-        x, y = cell[0], cell[1]
-        if x > 0 and not self.cells[x - 1][y].blocking_movement:
-            neighbors.append((x - 1, y))
-        if x < len(self.cells) - 1 and not self.cells[x + 1][y].blocking_movement:
-            neighbors.append((x + 1, y))
-        if y > 0 and not self.cells[x][y - 1].blocking_movement:
-            neighbors.append((x, y - 1))
-        if y < len(self.cells[x]) - 1 and not self.cells[x][y + 1].blocking_movement:
-            neighbors.append((x, y + 1))
-        return neighbors
-
-    def astar(self, start, target):
-        open_list = [(0, start)]
-        came_from = {}
-        g_score = {start: 0}
-
-        while open_list:
-            _, current = heapq.heappop(open_list)
-            if current == target:
-                path = []
-                while current in came_from:
-                    path.append(current)
-                    current = came_from[current]
-                return path[::-1]
-
-            for neighbor in self.get_neighbors(current):
-                tentative_g_score = g_score[current] + 1
-                if tentative_g_score < g_score.get(neighbor, float('inf')):
-                    came_from[neighbor] = current
-                    g_score[neighbor] = tentative_g_score
-                    f_score = tentative_g_score + self.heuristic(neighbor, target)
-                    heapq.heappush(open_list, (f_score, neighbor))
-
-        return []
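
A note on the deleted class, which also applies to its replacement in agent.py below: the heap entries are (f_score, state) tuples, so when two f-scores tie, heapq falls back to comparing the state tuples themselves, which is well defined here because they contain only ints (the (x, y, direction) states below work the same way as long as the direction values are mutually comparable). A quick illustration:

import heapq

frontier = []
heapq.heappush(frontier, (3, (2, 1)))
heapq.heappush(frontier, (3, (0, 5)))
# On equal f_score the coordinate tuples break the tie.
print(heapq.heappop(frontier))  # (3, (0, 5))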
classes/agent.py

@@ -1,6 +1,9 @@
 import pygame
+from collections import deque
 from classes.cell import Cell
 import prefs
+import heapq
+
 class Agent:
     def __init__(self, x, y, cells, baseScore=0):
         self.sprite = pygame.image.load("sprites/BartenderNew64.png").convert_alpha()
@@ -258,6 +261,71 @@ class Agent:
 
         return []
 
+    # A* algorithm
+    def get_cost(self, cell):
+        x, y = cell[0], cell[1]
+        if x == 0 or x == len(self.cells) - 1 or y == 0 or y == len(self.cells[0]) - 1:
+            return 15  # Cost for cells on the outer edge
+        elif x == 1 or x == len(self.cells) - 2 or y == 1 or y == len(self.cells[0]) - 2:
+            return 10  # Cost for cells in the second row/column from each edge
+        elif x == 2 or x == len(self.cells) - 3 or y == 2 or y == len(self.cells[0]) - 3:
+            return 5  # Cost for cells in the third row/column from each edge
+        else:
+            return 1
+
+    def heuristic(self, current, target):
+        # Manhattan distance heuristic
+        dx = abs(current[0] - target[0])
+        dy = abs(current[1] - target[1])
+        return dx + dy
+
+    def priority(self, state, target):
+        # Computes the priority (f-score) for a given state
+        g_score = self.g_score[state]
+        h_score = self.heuristic(state, target)
+        return g_score + h_score
+
+    def astar(self, target, start_cost=0):
+        if not isinstance(target, tuple) or len(target) != 2:
+            raise ValueError("Target must be a tuple of two elements (x, y).")
+
+        open_list = [(start_cost, (self.current_cell.X, self.current_cell.Y, self.directionPOM))]
+        came_from = {}
+        g_score = {(self.current_cell.X, self.current_cell.Y, self.directionPOM): start_cost}
+
+        while open_list:
+            _, current = heapq.heappop(open_list)
+            if isinstance(current, int):
+                raise ValueError("Current must be a tuple of three elements (x, y, direction).")
+
+            x, y, _ = current  # Unpack the current tuple
+            if (x, y) == target:  # Check if the current cell's coordinates match the target
+                path = []
+                while current in came_from:
+                    path.append((current[0], current[1]))  # Append only coordinates (x, y) to the path
+                    current = came_from[current]
+                path = path[::-1]  # Reverse the path
+                cost = g_score[(x, y, self.directionPOM)]  # Retrieve the cost from the g_score dictionary
+                return path, cost
+
+            for neighbor in self.get_neighbors(self.cells[x][y], self.cells):
+                neighbor_coords = (neighbor.X, neighbor.Y, self.directionPOM)  # Convert the neighbor cell to a tuple
+                tentative_g_score = g_score[current] + self.get_cost(neighbor_coords)
+                if tentative_g_score < g_score.get(neighbor_coords, float('inf')):
+                    came_from[neighbor_coords] = current
+                    g_score[neighbor_coords] = tentative_g_score
+                    f_score = tentative_g_score + self.heuristic(neighbor_coords, target)
+                    heapq.heappush(open_list, (f_score, neighbor_coords))
+
+        return [], float('inf')  # If no path is found, return an empty path and infinite cost
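
For reference, the K_g handler added in app.py drives the new method as sketched below (agent, target_x, and target_y come from the surrounding game loop). Because get_cost() never returns less than 1 and every step covers exactly one unit of Manhattan distance, heuristic() never overestimates the remaining cost, so the returned path is optimal for these tiered weights.

# Plan from the agent's current cell to the goal using the tiered costs.
path, cost = agent.astar((target_x, target_y), start_cost=0)
print("Shortest path:", path)  # list of (x, y) cells; the start cell is not included
print("Total cost:", cost)     # accumulated get_cost() along the path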