# rpg-szi/hero.py

import random
import heapq
from mesa import Agent
from othercharacters import dice, Box, Creature, Armor, Weapon
from actions import actions, actionsInterpreter
from state import AgentState
from direction import Direction
from node import Node


class Player(Creature):
    def __init__(self, unique_id, model, n, s, a, w, maxhp, hp, weap, arm, g, w2, w3, listOfChests):
        super().__init__(unique_id, model, n, s, a, w, maxhp, hp, weap, arm, g)
        self.name = n
        self.strength = s
        self.agility = a
        self.wisdom = w
        self.maxHealth = maxhp
        self.health = hp
        self.gold = g
        self.weapon1 = weap
        self.weapon2 = w2
        self.weapon3 = w3
        self.armor = arm
        self.isBox = False
        self.isCreature = False
        self.directions = {
            Direction.N: [0, 1],
            Direction.E: [1, 0],
            Direction.S: [0, -1],
            Direction.W: [-1, 0]
        }
        self.direction = Direction.N
        self.queue = []
        self.hasgoalchest = False
        self.openedchests = 0
        self.__listOfChests = listOfChests
        self.__actionsCollection = []

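    # Combat helpers: each attack rolls the relevant stat plus a d6 against the
    # opponent's matching stat; a positive margin deals margin + weapon damage,
    # with armor adding to melee defense and reducing ranged/magic damage.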
    def meleeAttack(self, opponent):
        attackValue = self.strength + dice(6)
        defenseValue = opponent.strength + opponent.armor.defence
        damage = attackValue - defenseValue
        if damage > 0:
            opponent.health = opponent.health - (damage + self.weapon1.damage)

    def rangeAttack(self, opponent):
        attackValue = self.agility + dice(6)
        defenseValue = opponent.agility
        damage = attackValue - defenseValue
        if (damage > 0) and (damage + self.weapon2.damage - opponent.armor.defence > 0):
            opponent.health = opponent.health - (damage + self.weapon2.damage - opponent.armor.defence)

    def magicAttack(self, opponent):
        attackValue = self.wisdom + dice(6)
        defenseValue = opponent.wisdom
        damage = attackValue - defenseValue
        if (damage > 0) and (damage + self.weapon3.damage - opponent.armor.mag_protection > 0):
            opponent.health = opponent.health - (damage + self.weapon3.damage - opponent.armor.mag_protection)

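    # Resolve an encounter: a d4 chooses between fleeing (1) or a melee, ranged
    # or magic attack (2-4); the loop runs until the player escapes, dies, or
    # the opponent dies and drops its gold.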
    def fightOrFlight(self, opponent):
        combat = True
        while combat:
            choice = dice(4)
            print("dice rolled:", choice)
            if choice == 1:
                running_speed = self.agility + dice(6)
                opponent_speed = opponent.agility + dice(6)
                if running_speed > opponent_speed:
                    combat = False
                    print("Player ran away")
                    self.step()
                else:
                    opponent.defaultAttack(self)
                    if self.health <= 0:
                        combat = False
                        print("Player died :/")
            elif choice == 2:
                self.meleeAttack(opponent)
                if opponent.health > 0:
                    opponent.defaultAttack(self)
                    if self.health <= 0:
                        combat = False
                        print("Player died :/")
                else:
                    combat = False
                    self.gold = self.gold + opponent.gold
                    opponent.gold = 0
                    opponent.model.grid.remove_agent(opponent)
                    print("Fight won")
            elif choice == 3:
                self.rangeAttack(opponent)
                if opponent.health > 0:
                    opponent.defaultAttack(self)
                    if self.health <= 0:
                        combat = False
                        print("Player died :/")
                else:
                    combat = False
                    self.gold = self.gold + opponent.gold
                    opponent.gold = 0
                    opponent.model.grid.remove_agent(opponent)
                    print("Fight won")
            else:
                self.magicAttack(opponent)
                if opponent.health > 0:
                    opponent.defaultAttack(self)
                    if self.health <= 0:
                        combat = False
                        print("Player died :/")
                else:
                    combat = False
                    self.gold = self.gold + opponent.gold
                    opponent.gold = 0
                    opponent.model.grid.remove_agent(opponent)
                    print("Fight won")

    def openChest(self, chest):
        self.gold = self.gold + chest.gold
        print("------Chest opened. Gold inside:", chest.gold, "-----")
        chest.gold = 0
        self.openedchests += 1
        self.hasgoalchest = False
        chest.model.grid.remove_agent(chest)
        # self.direction = 0  # after reaching one goal the agent 'restarts' and searches for a path to the next one -- DO NOT reset the rotation, it breaks goalState in bfs!!!
        # if isinstance(chest.loot, Armor):
        #     buffer = self.armor
        #     self.armor = chest.loot
        #     chest.loot = buffer
        # if isinstance(chest.loot, Weapon):
        #     if chest.loot.type == "Melee":
        #         buffer = self.weapon1
        #         self.weapon1 = chest.loot
        #         chest.loot = buffer
        #     elif chest.loot.type == "Range":
        #         buffer = self.weapon2
        #         self.weapon2 = chest.loot
        #         chest.loot = buffer
        #     elif chest.loot.type == "Magic":
        #         buffer = self.weapon3
        #         self.weapon3 = chest.loot
        #         chest.loot = buffer

    # - - - - bfs & successor - - - - #
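    # Expand a state into its possible successors: rotate left, move one cell
    # forward in the facing direction (None if that would leave the 10x10 grid),
    # or rotate right. Each successor is returned as an [action, state] pair.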
    def successor(self, append):
        rotateLeft = AgentState(
            append.get_x(),
            append.get_y(),
            append.get_direction().counterClockwise()
        )
        rotateRight = AgentState(
            append.get_x(),
            append.get_y(),
            append.get_direction().clockwise()
        )
        move_x = 0
        move_y = 0
        if append.get_direction() == Direction.N:
            move_y = 1
        elif append.get_direction() == Direction.E:
            move_x = 1
        elif append.get_direction() == Direction.S:
            move_y = -1
        elif append.get_direction() == Direction.W:
            move_x = -1
        if 0 <= append.get_x() + move_x < 10 and 0 <= append.get_y() + move_y < 10:
            moveForward = AgentState(
                append.get_x() + move_x,
                append.get_y() + move_y,
                append.get_direction()
            )
        else:
            moveForward = None
        return [
            [actions["rotateLeft"], rotateLeft],
            [actions["moveForward"], moveForward],
            [actions["rotateRight"], rotateRight]
        ]

    def heuristics(self, state, target_state):
        # cost estimate is the Manhattan distance to the target
        return abs(state.get_x() - target_state.get_x()) + abs(state.get_y() - target_state.get_y())

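    # A*-style graph search over agent states (x, y, direction): the priority is
    # path cost plus the Manhattan heuristic, and entering a cell occupied by a
    # creature adds an extra cost of 10 so planned paths tend to avoid monsters.
    # Returns the list of actions from istate to goalState, or None if no path exists.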
    def graphsearch(self, fringe, explored, istate, succesorFunction, goalState):
        finalActionList = []
        init_state = [None, istate]
        root = Node(None, init_state, 0)
        heapq.heappush(fringe, (0, root))  # at the beginning do nothing
        while len(fringe) != 0:
            _flag = True
            if len(fringe) == 0:
                return False
            tmpNode = (heapq.heappop(fringe))[1]  # node
            # build dictionary
            # parent = tmpNode.get_predecessor()  # fetch parent state
            # tmpNode.set_predecessor(None)  # clear predecessor - don't build a tree chain
            # if parent is None:
            #     finalActionList.append([parent, tmpNode])
            # else:
            #     finalActionList.append(
            #         [parent[1], tmpNode])  # pair(key, value) - key: parent state, value: current state + action
            if tmpNode._state.get_x() == goalState.get_x() and tmpNode._state.get_y() == goalState.get_y():
                # goal reached - walk back up the parent chain to collect the actions
                while tmpNode._parent is not None:
                    finalActionList.append(tmpNode._action)
                    tmpNode = tmpNode._parent
                finalActionList = list(reversed(finalActionList))
                return finalActionList  # TODO change step!
            explored.append(tmpNode)
            tmpList = succesorFunction(tmpNode._state)
            for newState in tmpList:
                _flag = True
                _flagFringe = True
                _flagExplored = True
                if newState[1] is None:
                    continue
                # calculating priority
                monster = 0
                if any([thing.isCreature for thing in self.model.grid.get_cell_list_contents([(newState[1].get_x(), newState[1].get_y())])]):
                    if newState[0] == 0:
                        monster = 10
                p = self.heuristics(newState[1], goalState) + tmpNode._cost + monster + 1
                r = 0
                counter = 0
                pos = 0
                for fringeNode in fringe:
                    if fringeNode[1]._state.get_x() == newState[1].get_x() and fringeNode[1]._state.get_y() == newState[1].get_y() and fringeNode[1]._state.get_direction() == newState[1].get_direction():
                        _flagFringe = False
                        _flag = False
                        r = fringeNode[0]
                        pos = counter
                    counter = counter + 1
                for exploredNode in explored:
                    if exploredNode._state.get_x() == newState[1].get_x() and exploredNode._state.get_y() == newState[1].get_y() and exploredNode._state.get_direction() == newState[1].get_direction():
                        _flagExplored = False
                        _flag = False
                # if _flag:
                #     newState[1].set_predecessor(tmpNode)
                if _flagFringe and _flagExplored:
                    newNode = Node(tmpNode, newState, tmpNode._cost + 1 + monster)
                    heapq.heappush(fringe, (p, newNode))
                elif not _flagFringe and (p < r):
                    # found a cheaper path to a state already on the fringe - replace that
                    # entry (heap entries are tuples, so rebuild it and restore the heap order)
                    newNode = Node(tmpNode, newState, tmpNode._cost + 1 + monster)
                    fringe[pos] = (p, newNode)
                    heapq.heapify(fringe)
        return None

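    # One simulation tick: if no goal is set, pick a random chest and plan a
    # path to it; otherwise execute the next planned action and resolve whatever
    # occupies the new cell (open a chest or fight a creature).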
    def step(self):
        if self.health > 0:
            print("position: ", self.pos)
            # print("direction: ", self.direction)
            if not self.hasgoalchest:  # no target chest assigned yet - run the search (bfs) to pick one
                # self.path = self.findShortestPathToTarget()
                if len(self.__listOfChests) != 0:
                    # select and remove an element from the list
                    randomChest = random.choice(self.__listOfChests)
                    self.__listOfChests.remove(randomChest)
                    self.hasgoalchest = True
                    currentState = AgentState(self.pos[0], self.pos[1], self.direction)
                    goalState = AgentState(randomChest[1][0], randomChest[1][1], self.direction)
                    # find a way to the goal state
                    self.__actionsCollection = self.graphsearch([],
                                                                [],
                                                                currentState,
                                                                self.successor,
                                                                goalState)
                    if self.__actionsCollection is None:
                        raise Exception("CRITICAL ERROR - Algorithm error - Path doesn't exist!!! ://")
                    else:
                        self.__actionsCollection = [action for action in self.__actionsCollection if action is not None]  # remove the leading None action
                else:
                    raise Exception("WIN!!! :D")
            elif len(self.__actionsCollection) == 0:  # a target chest is assigned and no actions are left - goal reached
                self.hasgoalchest = False
            elif len(self.__actionsCollection) != 0:  # a target chest is assigned and actions remain - execute them
                actionIndex = self.__actionsCollection[0]  # ignore -1 because it's None
                self.__actionsCollection.remove(actionIndex)
                newState = actionsInterpreter(actionIndex, AgentState(self.pos[0], self.pos[1], self.direction), self.directions)
                self.model.grid.move_agent(self, (newState.get_x(), newState.get_y()))
                self.direction = newState.get_direction()
                print("moved to - ", [newState.get_x(), newState.get_y()])
                cellmates = self.model.grid.get_cell_list_contents([self.pos])
                if len(cellmates) > 1:
                    if isinstance(cellmates[0], Box):
                        self.openChest(cellmates[0])
                    else:
                        opponent = cellmates[0]
                        print("Fighting")
                        self.fightOrFlight(opponent)
            # print("HP: " + str(self.health) + " / " + str(self.maxHealth))
            print("Gold: " + str(self.gold))
        else:
            print("HP: 0 / " + str(self.maxHealth))