Merge pull request 'development' (#12) from development into master

Reviewed-on: #12
This commit is contained in:
Jakub Klupieć 2021-06-21 17:12:28 +02:00
commit 3dd86ea96b
45 changed files with 50733 additions and 229 deletions

View File

@@ -1,14 +1,15 @@
 import pygame

-from settings import SCREEN_WIDTH, SCREEN_HEIGHT
-from survival.camera import Camera
+from survival.ai.genetic_algorithm import GeneticAlgorithm
 from survival.components.inventory_component import InventoryComponent
-from survival.game_map import GameMap
+from survival.game.game_map import GameMap
 from survival.generators.building_generator import BuildingGenerator
 from survival.generators.player_generator import PlayerGenerator
 from survival.generators.resource_generator import ResourceGenerator
 from survival.generators.world_generator import WorldGenerator
+from survival.settings import SCREEN_WIDTH, SCREEN_HEIGHT, MUTATE_NETWORKS, LEARN
 from survival.systems.draw_system import DrawSystem
+from survival.systems.neural_system import NeuralSystem


 class Game:
@@ -16,10 +17,17 @@ class Game:
         self.world_generator = WorldGenerator(win, self.reset)
         self.game_map, self.world, self.camera = self.world_generator.create_world()
         self.run = True
+        if LEARN and MUTATE_NETWORKS:
+            self.genetic_algorithm = GeneticAlgorithm(self.world.get_processor(NeuralSystem), self.finish_training)

     def reset(self):
+        if LEARN and MUTATE_NETWORKS:
+            self.genetic_algorithm.train()
         self.world_generator.reset_world()

+    def finish_training(self):
+        self.run = False
+
     def update(self, ms):
         events = pygame.event.get()
@@ -47,4 +55,4 @@ if __name__ == '__main__':
     game = Game()

     while game.run:
-        game.update(clock.tick(60))
+        game.update(clock.tick(500))
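Note (illustrative, not part of this commit): the loop above now caps the frame rate at 500 instead of 60 so that training games run much faster. A condensed sketch of the flow this diff sets up:

game = Game()
while game.run:                     # finish_training() flips run to False after the last generation
    ms = clock.tick(500)            # up to 500 updates per second (previously 60); returns elapsed ms
    game.update(ms)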

Binary file not shown.

View File

@@ -0,0 +1,60 @@
import json
from joblib import dump, load
from matplotlib import pyplot as plt
from sklearn import tree
from sklearn.feature_extraction import DictVectorizer
class DecisionTree:
def __init__(self):
self.clf = None
self.vec = None
def build(self, depth: int):
path = "tree_data.json"
samples = []
results = []
with open(path, "r") as training_file:
for sample in training_file:
sample, result = self.process_input(sample)
samples.append(sample)
results.append(result)
self.vec = DictVectorizer()
self.clf = tree.DecisionTreeClassifier(max_depth=depth)
self.clf = self.clf.fit(self.vec.fit_transform(samples).toarray(), results)
def save_model(self, clf_file, vec_file):
dump(self.clf, clf_file)
dump(self.vec, vec_file)
def load_model(self, clf_file, vec_file):
self.clf = load(clf_file)
self.vec = load(vec_file)
def predict_answer(self, params):
return self.clf.predict(self.vec.transform(params).toarray())
def plot_tree(self):
print('Plotting tree...')
fig = plt.figure(figsize=(36, 27))
_ = tree.plot_tree(self.clf,
feature_names=self.vec.get_feature_names(),
filled=True)
fig.savefig("decision_tree.png")
print('Success!')
@staticmethod
def process_input(line):
data = json.loads(line.strip())
result = data['result']
del data['result']
del data['food_result']
del data['water_result']
del data['wood_result']
sample = data
return sample, result
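A minimal usage sketch of the class above (not part of this commit; it assumes a tree_data.json produced by the generator in the next file, and the depth and file names are illustrative):

from survival.ai.decision_tree.decision_tree import DecisionTree

# Build a classifier from the generated samples and persist it.
dt = DecisionTree()
dt.build(depth=7)                                    # illustrative depth
dt.save_model('classifier.joblib', 'vectorizer.joblib')

# Reload it later and query it with the same feature keys the generator writes.
dt2 = DecisionTree()
dt2.load_model('classifier.joblib', 'vectorizer.joblib')
print(dt2.predict_answer({'food_inv': 40, 'water_inv': 10, 'wood_inv': 20,
                          'food_visible': 'True', 'water_visible': 'False', 'wood_visible': 'True',
                          'food_distance': 3, 'water_distance': 0, 'wood_distance': 5,
                          'food_count': 1, 'water_count': 0, 'wood_count': 2}))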

View File

@@ -0,0 +1,124 @@
import random
from typing import Dict
from survival.ai.decision_tree.decision_tree import DecisionTree
from survival.generators.resource_type import ResourceType
class TreeDataGenerator:
INV_RANGE = (1, 100)
VISIBLE = (True, False)
DISTANCE_RANGE = (3, 7)
DISTANCE_FACTOR = 0.2
COUNT = (1, 2, 3)
def generate(self, count=1000):
full_data = []
self.process(count, full_data)
self.write_data_to_file(full_data)
return full_data
def process(self, count, full_data):
for i in range(count):
# if i % 10000 == 0:
# print(i)
package = {}
# Create resource data for each resource type.
for resource in ResourceType:
package[resource] = self.create_resource_data()
# Get the resource with highest result among all generated resource types.
best_resource = self.get_best_resource(package)
# Unpack packaged resources.
(food, water, wood) = (
package[ResourceType.FOOD], package[ResourceType.WATER], package[ResourceType.WOOD])
# Create dictionary filled with data.
data = {"food_inv": food[0], 'food_visible': str(food[1]), 'food_distance': food[2],
'food_count': food[3], 'food_result': food[4],
'water_inv': water[0], 'water_visible': str(water[1]), 'water_distance': water[2],
'water_count': water[3], 'water_result': water[4],
'wood_inv': wood[0], 'wood_visible': str(wood[1]), 'wood_distance': wood[2],
'wood_count': wood[3], 'wood_result': wood[4],
'result': best_resource.name.lower()}
full_data.append(data)
@staticmethod
def write_data_to_file(full_data):
print("Writing to file...")
# Open the target file to which the data will be saved and write all the data to it.
with open('tree_data.json', 'w') as f:
for data in full_data:
data_str = str(data).replace("'", '"').replace('"False"', 'false').replace('"True"', 'true')
f.write(data_str)
f.write('\n')
print("Success!")
def create_resource_data(self):
is_visible = random.choice(self.VISIBLE)
inventory = random.randint(min(self.INV_RANGE), max(self.INV_RANGE))
if is_visible:
cnt = random.choice(self.COUNT)
distance = random.randint(min(self.DISTANCE_RANGE), max(self.DISTANCE_RANGE))
else:
cnt = 0
distance = 0
# Equation determining the results processed by decision tree.
result = (self.INV_RANGE[1] / inventory) * (1 * cnt if is_visible else 0.9) + (
max(self.DISTANCE_RANGE) / distance if is_visible else 0.5) * self.DISTANCE_FACTOR
return [inventory, is_visible, distance, cnt, result]
@staticmethod
def get_best_resource(package: Dict) -> ResourceType:
best_resource = None
for resource, data in package.items():
if best_resource is None or data[-1] > package[best_resource][-1]:  # compare by the computed result (last element)
best_resource = resource
return best_resource
@staticmethod
def print_data(full_data):
for data in full_data:
print(TreeDataGenerator.format_words(["Data", "Apple", "Water", "Wood"]))
print(TreeDataGenerator.format_words(["Inventory", data["food_inv"], data["water_inv"], data["wood_inv"]]))
print(TreeDataGenerator.format_words(
["Visible", data["food_visible"], data["water_visible"], data["wood_visible"]]))
print(TreeDataGenerator.format_words(
["Distance", data["food_distance"], data["water_distance"], data["wood_distance"]]))
print(
TreeDataGenerator.format_words(["Count", data["food_count"], data["water_count"], data["wood_count"]]))
print(TreeDataGenerator.format_words(
["Result", round(data["food_result"], 3), round(data["water_result"], 3),
round(data["wood_result"], 3)]))
print(f'Best resource: {data["result"]}')
print('--------------------------------------------------------------')
@staticmethod
def format_words(words):
return '{:>12} {:>12} {:>12} {:>12}'.format(words[0], words[1], words[2], words[3])
# Train tree
generator = TreeDataGenerator()
data = generator.generate(50000)
generator.print_data(data)
tree = DecisionTree()
tree.build(1000)
tree.plot_tree()
tree.save_model('classifier.joblib', 'vectorizer.joblib')
# ----------------------------------------------------------- #
# Use trained tree
# tree = DecisionTree()
# tree.load_model('classifier.joblib', 'vectorizer.joblib')
#
# answ = tree.predict_answer({'food_inv': 40, 'water_inv': 10, 'wood_inv': 20,
# 'food_distance': 2, 'water_distance': -1, 'wood_distance': 4,
# 'food_visible': True, 'water_visible': False, 'wood_visible': True,
# 'food_count': 1, 'water_count': 1, 'wood_count': 1})
# print(answ)
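To make the scoring equation in create_resource_data concrete, here is a worked example with hypothetical values (inventory=10, visible resource, count=2, distance=5; the constants INV_RANGE=(1, 100), DISTANCE_RANGE=(3, 7) and DISTANCE_FACTOR=0.2 come from the class above, the rest is illustrative):

# result = (INV_max / inventory) * (count if visible else 0.9)
#          + (DIST_max / distance if visible else 0.5) * DISTANCE_FACTOR
inventory, cnt, distance = 10, 2, 5
result = (100 / inventory) * (1 * cnt) + (7 / distance) * 0.2
print(result)   # 10 * 2 + 1.4 * 0.2 = 20.28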

Binary file not shown.

Binary image added (722 KiB).

File diff suppressed because it is too large.

Binary file not shown.

View File

@@ -0,0 +1,84 @@
import sys
from survival.ai.model import LinearQNetwork
from survival.ai.optimizer import Optimizer
class GeneticAlgorithm:
GAMES_PER_NETWORK = 40
PLOTS_COUNTER = 0
CURRENT_GENERATION = 1
def __init__(self, neural_system, callback):
self.callback = callback
self.logs_file = open('genetic_logs.txt', 'w')
self.original_stdout = sys.stdout
sys.stdout = self.logs_file
self.neural_system = neural_system
self.generations = 20
self.population = 10 # Minimum 5 needed to allow breeding
self.nn_params = {
'neurons': [128, 192, 256, 384, 512],
'layers': [0, 1, 2, 3],
'activation': ['relu', 'elu', 'tanh'],
'ratio': [0.0007, 0.0009, 0.0011, 0.0013, 0.0015],
'optimizer': ['RMSprop', 'Adam', 'SGD', 'Adagrad', 'Adadelta'],
}
self.optimizer = Optimizer(self.nn_params)
self.networks: list[LinearQNetwork] = self.optimizer.create_population(self.population)
self.finished = False
self.trained_counter = 0
self.iterations = 0
self.trained_generations = 0
print('Started generation 1...')
self.change_network(self.networks[0])
def train(self):
if self.iterations < GeneticAlgorithm.GAMES_PER_NETWORK - 1:
self.iterations += 1
return
self.iterations = 0
print(f'Network score: {self.optimizer.fitness(self.networks[self.trained_counter])}')
self.trained_counter += 1
# If all networks in current population were trained
if self.trained_counter >= self.population:
# Get average score in current population
avg_score = self.calculate_average_score(self.networks)
print(f'Average population score: {avg_score}.')
results_file = open('genetic_results.txt', 'w')
for network in self.networks:
results_file.write(
f'Network {network.id} params {network.network_params}. Avg score = {sum(network.scores) / len(network.scores)}\n')
results_file.close()
if self.trained_generations >= self.generations - 1:
# Sort the final population
self.networks = sorted(self.networks, key=lambda x: sum(x.scores) / len(x.scores), reverse=True)
self.finished = True
self.logs_file.close()
sys.stdout = self.original_stdout
self.callback()
return
self.trained_generations += 1
GeneticAlgorithm.CURRENT_GENERATION = self.trained_generations + 1
print(f'Started generation {GeneticAlgorithm.CURRENT_GENERATION}...')
self.networks = self.optimizer.evolve(self.networks)
self.trained_counter = 0
self.change_network(self.networks[self.trained_counter])
def calculate_average_score(self, networks):
sums = 0
lengths = 0
for network in networks:
sums += self.optimizer.fitness(network)
lengths += len(network.scores)
return sums / lengths
def change_network(self, net):
GeneticAlgorithm.PLOTS_COUNTER += 1
print(f"Changed network to {GeneticAlgorithm.PLOTS_COUNTER} {net.network_params}")
self.logs_file.flush()
net.id = GeneticAlgorithm.PLOTS_COUNTER
self.neural_system.load_model(net)
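For scale, the constants above imply the following training budget (a back-of-the-envelope sketch using only the values from the class; nothing here is part of the commit):

GAMES_PER_NETWORK = 40
POPULATION = 10      # self.population
GENERATIONS = 20     # self.generations

games_per_generation = GAMES_PER_NETWORK * POPULATION   # 400 game resets per generation
total_games = games_per_generation * GENERATIONS         # 8000 games for a full run
print(games_per_generation, total_games)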

View File

@@ -6,8 +6,9 @@ from survival.components.direction_component import DirectionChangeComponent
 from survival.components.moving_component import MovingComponent
 from survival.components.position_component import PositionComponent
 from survival.components.resource_component import ResourceComponent
-from survival.enums import Direction
+from survival.game.enums import Direction
 from survival.esper import World
+from survival.systems.consumption_system import ConsumeComponent


 class Action(Enum):
@@ -35,9 +36,11 @@ class Action(Enum):
         if action == Action.ROTATE_LEFT:
             world.add_component(entity, DirectionChangeComponent(
                 Direction.rotate_left(world.component_for_entity(entity, PositionComponent).direction)))
+            world.add_component(entity, ConsumeComponent(0.2))
         elif action == Action.ROTATE_RIGHT:
             world.add_component(entity, DirectionChangeComponent(
                 Direction.rotate_right(world.component_for_entity(entity, PositionComponent).direction)))
+            world.add_component(entity, ConsumeComponent(0.2))
         else:
             world.add_component(entity, MovingComponent())
         return action

View File

@@ -2,10 +2,12 @@ import numpy as np
 from IPython import display
 from matplotlib import pyplot as plt

+from survival.settings import MUTATE_NETWORKS
+from survival.ai.genetic_algorithm import GeneticAlgorithm
 from survival.components.learning_component import LearningComponent
 from survival.components.position_component import PositionComponent
-from survival.enums import Direction
-from survival.graph_search import Action
+from survival.game.enums import Direction
+from survival.ai.graph_search import Action


 class LearningUtils:
@@ -23,8 +25,8 @@ class LearningUtils:
         self.plot_mean_scores.append(mean_score)

     def plot(self):
-        display.clear_output(wait=True)
-        display.display(plt.gcf())
+        # display.clear_output(wait=True)
+        # display.display(plt.gcf())
         plt.clf()
         plt.title('Results')
         plt.xlabel('Number of Games')
@@ -35,9 +37,12 @@ class LearningUtils:
         plt.text(len(self.plot_scores) - 1, self.plot_scores[-1], str(self.plot_scores[-1]))
         plt.text(len(self.plot_mean_scores) - 1, self.plot_mean_scores[-1], str(self.plot_mean_scores[-1]))
         self.plots += 1
-        plt.savefig(f'model/plots/{self.plots}.png')
+        if MUTATE_NETWORKS:
+            plt.savefig(f'model/plots/{GeneticAlgorithm.PLOTS_COUNTER}_{self.plots}.png')
+        else:
+            plt.savefig(f'model/plots/{self.plots}.png')
         plt.show(block=False)
-        plt.pause(.1)
+        # plt.pause(.1)

     def append_action(self, action: Action, pos: PositionComponent):
         self.last_actions.append([action, pos.grid_position])

View File

@@ -1,4 +1,5 @@
 import os
+import random

 import torch
 from torch import nn, optim
@@ -6,16 +7,46 @@ import torch.nn.functional as functional


 class LinearQNetwork(nn.Module):
-    def __init__(self, input_size, hidden_size, output_size, pretrained=False):
+    TORCH_ACTiVATIONS = 'tanh'
+
+    def __init__(self, nn_params, input_size, output_size, randomize=True, params=None):
         super().__init__()
-        self.linear_one = nn.Linear(input_size, hidden_size)
-        self.linear_two = nn.Linear(hidden_size, output_size)
-        self.pretrained = pretrained
+        self.id = 0
+        if params is None:
+            params = {}
+        self.params_choice = nn_params
+        self.scores = []
+        self.network_params = params
+        if randomize:
+            self.randomize()
+        self.layers = nn.ModuleList()
+        if self.network_params['layers'] == 0:
+            self.layers.append(nn.Linear(input_size, output_size))
+        else:
+            self.layers.append(nn.Linear(input_size, self.network_params['neurons']))
+            for i in range(self.network_params['layers'] - 1):
+                self.layers.append(nn.Linear(self.network_params['neurons'], self.network_params['neurons']))
+        if self.network_params['layers'] > 0:
+            self.ending_linear = nn.Linear(self.network_params['neurons'], output_size)
+            self.layers.append(self.ending_linear)
+        if self.network_params['activation'] in self.TORCH_ACTiVATIONS:
+            self.forward_func = getattr(torch, self.network_params['activation'])
+        else:
+            self.forward_func = getattr(functional, self.network_params['activation'])
+
+    def randomize(self):
+        """
+        Sets random parameters for the network.
+        """
+        for key in self.params_choice:
+            self.network_params[key] = random.choice(self.params_choice[key])

     def forward(self, x):
-        x = functional.relu(self.linear_one(x))
-        x = self.linear_two(x)
+        for i in range(len(self.layers) - 1):
+            x = self.forward_func(self.layers[i](x))
+        x = self.layers[-1](x)
         return x

     def save(self, file_name='model.pth'):
@@ -27,24 +58,25 @@ class LinearQNetwork(nn.Module):
         torch.save(self.state_dict(), file_path)

     @staticmethod
-    def load(input_size, hidden_size, output_size, file_name='model.pth'):
+    def load(params, input_size, output_size, file_name='model.pth'):
         model_directory = 'model'
         file_path = os.path.join(model_directory, file_name)
         if os.path.isfile(file_path):
-            model = LinearQNetwork(input_size, hidden_size, output_size, True)
+            model = LinearQNetwork(params, input_size, output_size, True)
             model.load_state_dict(torch.load(file_path))
             model.eval()
             return model
-        return LinearQNetwork(11, 256, 3)
+        raise Exception(f'Could not find file {file_path}.')


 class QTrainer:
-    def __init__(self, model, lr, gamma):
+    def __init__(self, model, lr, gamma, optimizer):
         self.model = model
         self.lr = lr
         self.gamma = gamma
-        self.optimizer = optim.Adam(model.parameters(), lr=self.lr)
-        self.criterion = nn.MSELoss()  # Mean squared error
+        self.optimizer = getattr(optim, optimizer)(model.parameters(), lr=self.lr)
+        # self.optimizer = optim.Adam(model.parameters(), lr=self.lr)
+        self.criterion = nn.MSELoss()  # Mean squared error

     def train_step(self, state, action, reward, next_state, done):
         state = torch.tensor(state, dtype=torch.float)
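As a concrete illustration of the new constructor (a hypothetical call, not taken from the commit): with layers=1, neurons=256 and activation='relu', plus the input/output sizes from settings.py, the module list comes out as Linear(11, 256) -> relu -> Linear(256, 3).

import torch

from survival.ai.model import LinearQNetwork

params = {'neurons': 256, 'layers': 1, 'activation': 'relu',
          'ratio': 0.001, 'optimizer': 'Adam'}
# randomize=False keeps the supplied params instead of sampling from params_choice.
net = LinearQNetwork(nn_params={}, input_size=11, output_size=3,
                     randomize=False, params=params)
print(net(torch.zeros(11)))   # three Q-values, one per action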

survival/ai/optimizer.py (new file, 144 lines)
View File

@@ -0,0 +1,144 @@
from functools import reduce
from operator import add
import random
from typing import List
from survival.ai.model import LinearQNetwork
from survival.settings import NEURAL_INPUT_SIZE, NEURAL_OUTPUT_SIZE
class Optimizer:
def __init__(self, params, retain=0.4,
random_select=0.1, mutation_chance=0.2):
self.mutation_chance = mutation_chance
self.random_select = random_select
self.retain = retain
self.nn_params = params
def create_population(self, count: int):
"""
Creates 'count' networks from random parameters.
:param count:
:return:
"""
pop = []
for _ in range(0, count):
# Create a random network.
network = LinearQNetwork(self.nn_params, NEURAL_INPUT_SIZE, NEURAL_OUTPUT_SIZE)
# Add network to the population.
pop.append(network)
return pop
@staticmethod
def fitness(network: LinearQNetwork):
return sum(network.scores) / len(network.scores)
def grade(self, pop: List[LinearQNetwork]) -> float:
"""
Finds average fitness for given population.
"""
summed = reduce(add, (self.fitness(network) for network in pop))
return summed / float((len(pop)))
def breed(self, parent_one, parent_two):
"""
Creates a new network from given parents.
:param parent_one:
:param parent_two:
:return:
"""
children = []
for _ in range(2):
child = {}
# Loop through the parameters and pick params for the kid.
for param in self.nn_params:
child[param] = random.choice(
[parent_one.network_params[param], parent_two.network_params[param]]
)
# Create new network object.
network = LinearQNetwork(self.nn_params, NEURAL_INPUT_SIZE, NEURAL_OUTPUT_SIZE)
network.network_params = child
children.append(network)
return children
def mutate(self, network: LinearQNetwork):
"""
Randomly mutates one parameter of the given network.
:param network:
:return:
"""
mutation = random.choice(list(self.nn_params.keys()))
# Mutate one of the params.
network.network_params[mutation] = random.choice(self.nn_params[mutation])
return network
def evolve(self, pop):
"""
Evolves a population of networks.
"""
# Get scores for each network.
scores = [(self.fitness(network), network) for network in pop]
# Sort the scores.
scores = [x[1] for x in sorted(scores, key=lambda x: x[0], reverse=True)]
# Get the number we want to keep for the next gen.
retain_length = int(len(scores) * self.retain)
# Keep the best networks as parents for next generation.
parents = scores[:retain_length]
# Keep some other networks
for network in scores[retain_length:]:
if self.random_select > random.random():
parents.append(network)
# Reset kept networks
reseted_networks = []
for network in parents:
net = LinearQNetwork(self.nn_params, NEURAL_INPUT_SIZE, NEURAL_OUTPUT_SIZE)
net.network_params = network.network_params
reseted_networks.append(net)
parents = reseted_networks
# Randomly mutate some of the networks.
for parent in parents:
if self.mutation_chance > random.random():
parent = self.mutate(parent)
# Determine the number of freed spots for the next generation.
parents_length = len(parents)
desired_length = len(pop) - parents_length
children = []
# Fill missing spots with new children.
while len(children) < desired_length:
# Get random parents.
p1 = random.randint(0, parents_length - 1)
p2 = random.randint(0, parents_length - 1)
# Ensure they are not the same network.
if p1 != p2:
p1 = parents[p1]
p2 = parents[p2]
# Breed networks.
babies = self.breed(p1, p2)
# Add children one at a time.
for baby in babies:
# Don't grow larger than the desired length.
if len(children) < desired_length:
children.append(baby)
parents.extend(children)
return parents
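A rough sketch of how evolve's selection parameters play out for the population of 10 used in genetic_algorithm.py (defaults retain=0.4, random_select=0.1, mutation_chance=0.2; purely illustrative, not part of the commit):

population = 10
retain = 0.4

retain_length = int(population * retain)   # the 4 best networks survive as parents
# Each remaining network still has a 10% chance of being kept (random_select),
# each kept parent has a 20% chance of one randomly mutated parameter,
# and breed() fills the remaining ~6 slots with children of random parent pairs.
print(retain_length)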

survival/ai/test.py (new file, 93 lines)
View File

@@ -0,0 +1,93 @@
import torch
import pygad
from pygad.torchga import torchga
def fitness_func(solution, sol_idx):
global data_inputs, data_outputs, torch_ga, model, loss_function
model_weights_dict = torchga.model_weights_as_dict(model=model,
weights_vector=solution)
# Use the current solution as the model parameters.
model.load_state_dict(model_weights_dict)
predictions = model(data_inputs)
abs_error = loss_function(predictions, data_outputs).detach().numpy() + 0.00000001
solution_fitness = 1.0 / abs_error
return solution_fitness
def callback_generation(ga_instance):
print("Generation = {generation}".format(generation=ga_instance.generations_completed))
print("Fitness = {fitness}".format(fitness=ga_instance.best_solution()[1]))
# Create the PyTorch model.
input_layer = torch.nn.Linear(3, 2)
relu_layer = torch.nn.ReLU()
output_layer = torch.nn.Linear(2, 1)
model = torch.nn.Sequential(input_layer,
relu_layer,
output_layer)
# print(model)
# Create an instance of the pygad.torchga.TorchGA class to build the initial population.
torch_ga = torchga.TorchGA(model=model,
num_solutions=10)
loss_function = torch.nn.L1Loss()
# Data inputs
data_inputs = torch.tensor([[0.02, 0.1, 0.15],
[0.7, 0.6, 0.8],
[1.5, 1.2, 1.7],
[3.2, 2.9, 3.1]])
# Data outputs
data_outputs = torch.tensor([[0.1],
[0.6],
[1.3],
[2.5]])
# Prepare the PyGAD parameters. Check the documentation for more information: https://pygad.readthedocs.io/en/latest/README_pygad_ReadTheDocs.html#pygad-ga-class
num_generations = 250 # Number of generations.
num_parents_mating = 5 # Number of solutions to be selected as parents in the mating pool.
initial_population = torch_ga.population_weights # Initial population of network weights
parent_selection_type = "sss" # Type of parent selection.
crossover_type = "single_point" # Type of the crossover operator.
mutation_type = "random" # Type of the mutation operator.
mutation_percent_genes = 10 # Percentage of genes to mutate. This parameter has no action if the parameter mutation_num_genes exists.
keep_parents = -1 # Number of parents to keep in the next population. -1 means keep all parents and 0 means keep nothing.
ga_instance = pygad.GA(num_generations=num_generations,
num_parents_mating=num_parents_mating,
initial_population=initial_population,
fitness_func=fitness_func,
parent_selection_type=parent_selection_type,
crossover_type=crossover_type,
mutation_type=mutation_type,
mutation_percent_genes=mutation_percent_genes,
keep_parents=keep_parents,
on_generation=callback_generation)
ga_instance.run()
# After the generations complete, some plots are showed that summarize how the outputs/fitness values evolve over generations.
ga_instance.plot_result(title="PyGAD & PyTorch - Iteration vs. Fitness", linewidth=4)
# Returning the details of the best solution.
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print("Fitness value of the best solution = {solution_fitness}".format(solution_fitness=solution_fitness))
print("Index of the best solution : {solution_idx}".format(solution_idx=solution_idx))
# Fetch the parameters of the best solution.
best_solution_weights = torchga.model_weights_as_dict(model=model,
weights_vector=solution)
model.load_state_dict(best_solution_weights)
predictions = model(data_inputs)
print("Predictions : \n", predictions.detach().numpy())
abs_error = loss_function(predictions, data_outputs)
print("Absolute Error : ", abs_error.detach().numpy())

View File

@@ -1,5 +1,6 @@
+from survival.generators.resource_type import ResourceType
+
+
 class ConsumptionComponent:
-    def __init__(self, inventory_state=0):
-        self.timer_value: float = 2000
-        self.timer: float = self.timer_value
-        self.last_inventory_state = inventory_state
+    def __init__(self):
+        self.status = {ResourceType.FOOD: 1, ResourceType.WOOD: 1, ResourceType.WATER: 1}

View File

@@ -1,4 +1,4 @@
-from survival.enums import Direction
+from survival.game.enums import Direction


 class DirectionChangeComponent:

View File

@@ -17,6 +17,9 @@ class InventoryComponent:
         if self.items[item] < 0:
             self.items[item] = 0

+    def count(self, item):
+        return self.items[item]
+
     def has_item(self, item):
         return item in self.items and self.items[item] != 0

View File

@@ -7,9 +7,9 @@ class OnCollisionComponent:
             callbacks = []
         self.callbacks = callbacks

-    def callAll(self):
+    def call_all(self):
         for func in self.callbacks:
             func()

-    def addCallback(self, fn, **kwargs):
+    def add_callback(self, fn, **kwargs):
         self.callbacks.append(partial(fn, **kwargs))

View File

@@ -1,4 +1,4 @@
-from survival.enums import Direction
+from survival.game.enums import Direction


 class PositionComponent:

View File

@@ -1,4 +1,4 @@
-from survival.image import Image
+from survival.game.image import Image


 class SpriteComponent:

View File

@@ -1,48 +0,0 @@
import json
import os
from sklearn import tree
from sklearn.feature_extraction import DictVectorizer
from survival.components.resource_component import ResourceComponent
class DecisionTree:
def __init__(self):
self.clf = None
self.vec = None
def build(self, depth: int):
path = os.path.join("..", "data.txt")
samples = list()
results = list()
with open(path, "r") as training_file:
for sample in training_file:
sample, result = self.process_input(sample)
samples.append(sample)
results.append(result)
self.vec = DictVectorizer()
self.clf = tree.DecisionTreeClassifier(max_depth=depth)
self.clf = self.clf.fit(self.vec.fit_transform(samples).toarray(), results)
# print(tree.export_text(self.clf, feature_names=self.vec.get_feature_names()))
def predict_answer(self, resource: ResourceComponent):
params = {
"weight": resource.weight,
"eatable": resource.eatable,
"toughness": resource.toughness
}
return self.clf.predict(self.vec.transform(params).toarray())
@staticmethod
def process_input(line):
data = json.loads(line.strip())
result = data['resource']
del data['resource']
sample = data
return sample, result

View File

@@ -1,4 +1,4 @@
-from survival.biomes.biome_preset import BiomePreset
+from survival.game.biomes.biome_preset import BiomePreset


 class BiomeData:

View File

@@ -1,7 +1,7 @@
 import random
 from typing import List

-from survival.tile import Tile
+from survival.game.tile import Tile


 class BiomePreset:

View File

@@ -1,6 +1,6 @@
 from pygame.rect import Rect

-from survival import SCREEN_WIDTH, SCREEN_HEIGHT
+from survival.settings import SCREEN_WIDTH, SCREEN_HEIGHT


 class Camera:

View File

@@ -1,10 +1,10 @@
 from survival.components.position_component import PositionComponent
 from survival.components.resource_component import ResourceComponent
-from survival.entity_layer import EntityLayer
+from survival.game.entity_layer import EntityLayer
 from survival.esper import World
-from survival.graph_search import graph_search
+from survival.ai.graph_search import graph_search
 from survival.settings import AGENT_VISION_RANGE
-from survival.tile_layer import TileLayer
+from survival.game.tile_layer import TileLayer


 class GameMap:

View File

@@ -6,7 +6,7 @@ import pygame
 class Image:
     def __init__(self, filename='', pos=(0, 0), scale=1, surface=None):
         if surface is None:
-            self.texture = pygame.image.load(os.path.join('..', 'assets', filename)).convert_alpha()
+            self.texture = pygame.image.load(os.path.join('../', 'assets', filename)).convert_alpha()
         else:
             self.texture = surface
         self.image = self.texture

View File

@@ -1,6 +1,6 @@
 from survival.generators.tile_generator import TileGenerator
-from survival.image import Image
-from survival.tile import Tile
+from survival.game.image import Image
+from survival.game.tile import Tile


 class TileLayer:
@@ -8,7 +8,6 @@ class TileLayer:
         self.width = width
         self.height = height
         self.tiles: list[list[Tile]] = TileGenerator.generate_biome_tiles(width, height)
-        # self.tiles: list[list[Tile]] = TileGenerator.generate_random_tiles(width, height)
         self.image = Image('atlas.png')

     def draw(self, camera, visible_area):

View File

@@ -1,15 +1,16 @@
 import pygame.font

-from survival import settings
+from survival.ai.genetic_algorithm import GeneticAlgorithm
+from survival.settings import MUTATE_NETWORKS, SCREEN_HEIGHT, SCREEN_WIDTH
 from survival.components.inventory_component import InventoryComponent
 from survival.generators.resource_type import ResourceType
-from survival.image import Image
+from survival.game.image import Image


 class UserInterface:
     def __init__(self, window):
-        self.width = settings.SCREEN_WIDTH
-        self.height = settings.SCREEN_HEIGHT
+        self.width = SCREEN_WIDTH
+        self.height = SCREEN_HEIGHT
         self.window = window
         self.pos = (self.width - 240, 50)
         self.scale = 2
@@ -25,9 +26,11 @@ class UserInterface:
             i += 1
         self.slot_image = Image('ui.png', self.pos, scale=2)
         self.font = pygame.font.SysFont('Comic Sans MS', 20)
+        self.initialized = False

     def load_inventory(self, inventory: InventoryComponent):
         self.inventory = inventory
+        self.initialized = True

     def update(self):
         pass
@@ -42,4 +45,3 @@ class UserInterface:
         textsurface = self.font.render(str(items_count), False, (255, 255, 255))
         self.window.blit(textsurface, (image.pos[0] + 48, image.pos[1] + 36))

View File

@@ -1,4 +1,4 @@
-from survival.components.OnCollisionComponent import OnCollisionComponent
+from survival.components.on_collision_component import OnCollisionComponent
 from survival.components.camera_target_component import CameraTargetComponent
 from survival.components.consumption_component import ConsumptionComponent
 from survival.components.input_component import InputComponent
@@ -25,7 +25,7 @@ class PlayerGenerator:
         inv = InventoryComponent()
         for resource in ResourceType:
             inv.add_item(resource, STARTING_RESOURCES_AMOUNT)
-        world.add_component(player, ConsumptionComponent(inv.total_items_count()))
+        world.add_component(player, ConsumptionComponent())
         world.add_component(player, inv)
         camera_target = CameraTargetComponent(pos)
         world.add_component(player, camera_target)

View File

@@ -1,7 +1,7 @@
 import random

 from survival import GameMap
-from survival.components.OnCollisionComponent import OnCollisionComponent
+from survival.components.on_collision_component import OnCollisionComponent
 from survival.components.inventory_component import InventoryComponent
 from survival.components.learning_component import LearningComponent
 from survival.components.position_component import PositionComponent
@@ -35,7 +35,7 @@ class ResourceGenerator:
             resource_type = random.choice(list(ResourceType))
             sprite = SpriteComponent(sprites[resource_type])
             col = OnCollisionComponent()
-            col.addCallback(self.remove_resource, world=self.world, game_map=self.map, resource_ent=obj, player=player)
+            col.add_callback(self.remove_resource, world=self.world, game_map=self.map, resource_ent=obj, player=player)
             self.world.add_component(obj, pos)
             self.world.add_component(obj, sprite)
             self.world.add_component(obj, col)

View File

@@ -3,10 +3,10 @@ import random
 from pathlib import Path
 from typing import List

-from survival.biomes.biome_data import BiomeData
-from survival.biomes.biome_preset import BiomePreset
-from survival.biomes.noise import generate_noise
-from survival.tile import Tile
+from survival.game.biomes.biome_data import BiomeData
+from survival.game.biomes.biome_preset import BiomePreset
+from survival.game.biomes.noise import generate_noise
+from survival.game.tile import Tile


 class TileGenerator:
@@ -46,7 +46,8 @@ class TileGenerator:
     @staticmethod
     def generate_biome_tiles(width: int, height: int):
-        seed = random.randint(1, 10)
+        # Use a static seed to allow smooth learning of the genetic algorithm
+        seed = 1
         octaves = 10
         file_name = f'seeds/{seed}.bin'
         biomes_file = Path(file_name)

View File

@@ -1,5 +1,7 @@
-from survival import esper, PlayerGenerator, ResourceGenerator, SCREEN_WIDTH, SCREEN_HEIGHT, GameMap, \
-    Camera
+from pathlib import Path
+
+from survival import esper, ResourceGenerator, PlayerGenerator
+from survival.ai.model import LinearQNetwork
 from survival.components.consumption_component import ConsumptionComponent
 from survival.components.direction_component import DirectionChangeComponent
 from survival.components.inventory_component import InventoryComponent
@@ -10,8 +12,11 @@ from survival.components.position_component import PositionComponent
 from survival.components.resource_component import ResourceComponent
 from survival.components.time_component import TimeComponent
 from survival.esper import World
+from survival.game.camera import Camera
+from survival.game.game_map import GameMap
 from survival.generators.resource_type import ResourceType
-from survival.settings import PLAYER_START_POSITION, STARTING_RESOURCES_AMOUNT
+from survival.settings import PLAYER_START_POSITION, STARTING_RESOURCES_AMOUNT, SCREEN_WIDTH, SCREEN_HEIGHT, \
+    MUTATE_NETWORKS, NETWORK_PARAMS, NEURAL_OUTPUT_SIZE, NEURAL_INPUT_SIZE
 from survival.systems.automation_system import AutomationSystem
 from survival.systems.camera_system import CameraSystem
 from survival.systems.collision_system import CollisionSystem
@@ -29,7 +34,7 @@ class WorldGenerator:
     def __init__(self, win, callback):
         self.win = win
         self.callback = callback
-        self.world: World = esper.World()
+        self.world: World = esper.World(timed=True)
         self.game_map: GameMap = GameMap(int(SCREEN_WIDTH / 32) * 2, 2 * int(SCREEN_HEIGHT / 32) + 1)
         self.camera = Camera(self.game_map.width * 32, self.game_map.height * 32, self.win)
         self.resource_generator: ResourceGenerator = ResourceGenerator(self.world, self.game_map)
@@ -41,6 +46,14 @@ class WorldGenerator:
         self.world.add_processor(MovementSystem(self.game_map), priority=20)
         self.world.add_processor(CollisionSystem(self.game_map), priority=30)
         self.world.add_processor(NeuralSystem(self.game_map, self.callback), priority=50)
+        if not MUTATE_NETWORKS:
+            model_path = Path("/model/model.pth")
+            if model_path.is_file():
+                self.world.get_processor(NeuralSystem).load_model(
+                    LinearQNetwork.load(NETWORK_PARAMS, NEURAL_INPUT_SIZE, NEURAL_OUTPUT_SIZE))
+            else:
+                self.world.get_processor(NeuralSystem).load_model(
+                    LinearQNetwork(NETWORK_PARAMS, NEURAL_INPUT_SIZE, NEURAL_OUTPUT_SIZE, False, NETWORK_PARAMS))
         self.world.add_processor(DrawSystem(self.camera))
         self.world.add_processor(TimeSystem())
         self.world.add_processor(AutomationSystem(self.game_map))
@@ -83,7 +96,7 @@ class WorldGenerator:
         if self.world.has_component(self.player, ConsumptionComponent):
             self.world.remove_component(self.player, ConsumptionComponent)
-        self.world.add_component(self.player, ConsumptionComponent(inv.total_items_count()))
+        self.world.add_component(self.player, ConsumptionComponent())

         pos = self.world.component_for_entity(self.player, PositionComponent)
         old_pos = pos.grid_position

View File

@@ -1,78 +0,0 @@
from random import randint
import pygame
class Player:
def __init__(self):
# self.pos = [1024, 512]
# self.velocity = [0, 0]
# self.image = Image('stevenson.png')
# self.image.set_scale(2)
# self.speed = 30
# self.movement_target = [self.pos[0], self.pos[1]]
# self.timer = 0
pass
def draw(self, camera):
self.image.pos = self.pos
camera.draw(self.image)
def is_moving(self):
return self.pos != self.movement_target
def move_in_random_direction(self):
value = randint(0, 3)
random_movement = {
0: self.move_up,
1: self.move_down,
2: self.move_left,
3: self.move_right
}
random_movement[value]()
def update(self, delta, pressed_keys):
if self.is_moving():
if self.velocity[0] != 0:
self.pos[0] += self.velocity[0] * self.speed * delta / 100
if abs(self.movement_target[0] - self.pos[0]) < 0.1 * self.speed:
self.velocity = [0, 0]
self.pos = self.movement_target
else:
self.pos[1] += self.velocity[1] * self.speed * delta / 100
if abs(self.pos[1] - self.movement_target[1]) < 0.1 * self.speed:
self.velocity = [0, 0]
self.pos = self.movement_target
return
self.timer += delta
if self.timer > 1000:
self.move_in_random_direction()
self.timer = 0
if pressed_keys[pygame.K_LEFT]:
self.move_left()
elif pressed_keys[pygame.K_RIGHT]:
self.move_right()
elif pressed_keys[pygame.K_DOWN]:
self.move_down()
elif pressed_keys[pygame.K_UP]:
self.move_up()
def move_left(self):
self.velocity = [-1, 0]
self.movement_target = [self.pos[0] - 32, self.pos[1]]
def move_right(self):
self.velocity = [1, 0]
self.movement_target = [self.pos[0] + 32, self.pos[1]]
def move_up(self):
self.velocity = [0, -1]
self.movement_target = [self.pos[0], self.pos[1] - 32]
def move_down(self):
self.velocity = [0, 1]
self.movement_target = [self.pos[0], self.pos[1] + 32]

View File

@@ -1,7 +1,18 @@
 SCREEN_WIDTH = 1000
 SCREEN_HEIGHT = 600
-RESOURCES_AMOUNT = 100
+RESOURCES_AMOUNT = 175
 DIRECTION_CHANGE_DELAY = 5
 PLAYER_START_POSITION = [20, 10]
-STARTING_RESOURCES_AMOUNT = 10
+STARTING_RESOURCES_AMOUNT = 5
 AGENT_VISION_RANGE = 5
+NEURAL_INPUT_SIZE = 11
+NEURAL_OUTPUT_SIZE = 3
+LEARN = True
+MUTATE_NETWORKS = True
+NETWORK_PARAMS = {
+    "neurons": 256,
+    "layers": 1,
+    "activation": 'relu',
+    "ratio": 0.001,
+    "optimizer": 'Adam'
+}

View File

@@ -7,8 +7,6 @@ from survival.components.resource_component import ResourceComponent
 class AutomationComponent:
     pass
-    # def __init__(self):
-    #     self.resources = []


 class AutomationSystem(esper.Processor):

View File

@@ -1,11 +1,11 @@
 import operator

 from survival import esper
-from survival.components.OnCollisionComponent import OnCollisionComponent
+from survival.components.on_collision_component import OnCollisionComponent
 from survival.components.moving_component import MovingComponent
 from survival.components.position_component import PositionComponent
-from survival.components.learning_component import LearningComponent
-from survival.enums import Direction
+from survival.game.enums import Direction
+from survival.systems.consumption_system import ConsumeComponent


 class CollisionSystem(esper.Processor):
@@ -23,18 +23,20 @@ class CollisionSystem(esper.Processor):
             moving.target = tuple(map(operator.add, vector, pos.grid_position))
             moving.direction_vector = vector

             if self.check_collision(moving.target):
+                self.world.add_component(ent, ConsumeComponent(0.05))
                 self.world.remove_component(ent, MovingComponent)
-                onCol.callAll()
+                onCol.call_all()

                 colliding_object: int = self.map.get_entity(moving.target)
                 if colliding_object is None or not self.world.entity_exists(colliding_object):
                     continue

                 if self.world.has_component(colliding_object, OnCollisionComponent):
-                    self.world.component_for_entity(colliding_object, OnCollisionComponent).callAll()
+                    self.world.component_for_entity(colliding_object, OnCollisionComponent).call_all()
             else:
                 self.map.move_entity(pos.grid_position, moving.target)
+                self.world.add_component(ent, ConsumeComponent(self.map.get_cost(moving.target)))
                 pos.grid_position = moving.target

     def check_collision(self, pos):

View File

@@ -1,30 +1,64 @@
+import random
+
 from survival import esper
 from survival.components.consumption_component import ConsumptionComponent
 from survival.components.inventory_component import InventoryComponent
 from survival.components.learning_component import LearningComponent
-from survival.components.moving_component import MovingComponent
 from survival.generators.resource_type import ResourceType


+class ConsumeComponent:
+    def __init__(self, cost):
+        self.cost = cost
+
+
 class ConsumptionSystem(esper.Processor):
+    CONSUMPTION_FACTOR = 0.05
+    CONSUMPTION_RANGE = 0.07
+
     def __init__(self, callback):
         self.callback = callback

     def process(self, dt):
-        for ent, (cons, inventory) in self.world.get_components(ConsumptionComponent, InventoryComponent):
-            cons.timer -= dt
-            if cons.timer > 0:
-                continue
-            cons.timer = cons.timer_value
+        cons: ConsumptionComponent
+        inventory: InventoryComponent
+        c: ConsumeComponent
+        for ent, (cons, inventory, c) in self.world.get_components(ConsumptionComponent, InventoryComponent,
+                                                                   ConsumeComponent):
+            for resource in cons.status.keys():
+                cons.status[resource] -= c.cost * self.CONSUMPTION_FACTOR + random.uniform(-self.CONSUMPTION_RANGE,
+                                                                                           self.CONSUMPTION_RANGE)
+                if cons.status[resource] < 0:
+                    inventory.items[resource] -= 1
+                    cons.status[resource] = 1

             if self.world.has_component(ent, LearningComponent):
-                # If no item was picked up
-                if cons.last_inventory_state == inventory.total_items_count():
-                    learning: LearningComponent = self.world.component_for_entity(ent, LearningComponent)
-                    learning.reward += -10
-                    learning.done = True
-                cons.last_inventory_state = inventory.total_items_count()
+                for resource in cons.status.keys():
+                    if inventory.items[resource] <= 0 and self.world.has_component(ent, LearningComponent):
+                        # If entity has run out of items
+                        learning: LearningComponent = self.world.component_for_entity(ent, LearningComponent)
+                        learning.reward -= 1
+                        learning.done = True
+                        break
             else:
-                if inventory.has_item(ResourceType.FOOD):
-                    inventory.remove_item(ResourceType.FOOD, 1)
-                else:
-                    self.callback()
+                self.callback(ent)
+
+            self.world.remove_component(ent, ConsumeComponent)
+            # cons.timer -= dt
+            # if cons.timer > 0:
+            #     continue
+            # cons.timer = cons.timer_value
+            #
+            # if self.world.has_component(ent, LearningComponent):
+            #     # If no item was picked up
+            #     if cons.last_inventory_state == inventory.total_items_count():
+            #         learning: LearningComponent = self.world.component_for_entity(ent, LearningComponent)
+            #         learning.reward += -10
+            #         learning.done = True
+            #     cons.last_inventory_state = inventory.total_items_count()
+            # else:
+            #     if inventory.has_item(ResourceType.FOOD):
+            #         inventory.remove_item(ResourceType.FOOD, 1)
+            #     else:
+            #         self.callback()
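A numeric sketch of the new consumption model (hypothetical numbers, not from the commit): on each processed tick a resource gauge drops by cost * 0.05 plus noise in [-0.07, 0.07]; once it dips below 0 it resets to 1 and one item of that resource is removed from the inventory.

import random

CONSUMPTION_FACTOR = 0.05
CONSUMPTION_RANGE = 0.07

status, items = 1.0, 5   # one resource gauge and its inventory count
cost = 1.0               # hypothetical ConsumeComponent cost
while items > 0:
    status -= cost * CONSUMPTION_FACTOR + random.uniform(-CONSUMPTION_RANGE, CONSUMPTION_RANGE)
    if status < 0:
        items -= 1
        status = 1
print('inventory exhausted')   # reached after roughly 100 ticks on average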

View File

@@ -1,7 +1,7 @@
 from survival import esper
 from survival.components.position_component import PositionComponent
 from survival.components.sprite_component import SpriteComponent
-from survival.user_interface import UserInterface
+from survival.game.user_interface import UserInterface


 class DrawSystem(esper.Processor):
@@ -17,5 +17,6 @@ class DrawSystem(esper.Processor):
             sprite.image.pos = pos.position
             sprite.image.origin = (32 * pos.direction.value, 0)
             self.camera.draw(sprite.image)
-        self.ui.update()
-        self.ui.draw()
+        if self.ui.initialized:
+            self.ui.update()
+            self.ui.draw()

View File

@@ -4,20 +4,20 @@ from collections import deque
 import torch

 from survival import esper, GameMap
+from survival.ai.genetic_algorithm import GeneticAlgorithm
 from survival.components.direction_component import DirectionChangeComponent
 from survival.components.inventory_component import InventoryComponent
 from survival.components.moving_component import MovingComponent
 from survival.components.position_component import PositionComponent
 from survival.components.learning_component import LearningComponent
 from survival.components.time_component import TimeComponent
-from survival.graph_search import Action
-from survival.learning_utils import get_state, LearningUtils
-from survival.model import LinearQNetwork, QTrainer
+from survival.ai.graph_search import Action
+from survival.ai.learning_utils import get_state, LearningUtils
+from survival.ai.model import LinearQNetwork, QTrainer
+from survival.settings import LEARN, MUTATE_NETWORKS

 MAX_MEMORY = 100_000
 BATCH_SIZE = 1000
-LR = 0.001
-LEARN = False


 class NeuralSystem(esper.Processor):
@@ -25,17 +25,27 @@ class NeuralSystem(esper.Processor):
         self.game_map = game_map
         self.reset_game = callback
         self.n_games = 0  # number of games played
-        self.starting_epsilon = 100
+        if MUTATE_NETWORKS:
+            self.starting_epsilon = GeneticAlgorithm.GAMES_PER_NETWORK / 2
+        else:
+            self.starting_epsilon = 100
         self.epsilon = 0  # controls the randomness
         self.gamma = 0.9  # discount rate
         self.memory = deque(maxlen=MAX_MEMORY)  # exceeding memory removes the left elements to make more space
-        self.model = LinearQNetwork.load(11, 256, 3)
-        if self.model.pretrained:
-            self.starting_epsilon = -1
-        self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
+        self.model = None  # self.model = LinearQNetwork.load(11, 256, 3)
+        self.trainer = None  # QTrainer(self.model, lr=LR, gamma=self.gamma)
         self.utils = LearningUtils()
         self.best_action = None

+    def load_model(self, model: LinearQNetwork):
+        self.model = model
+        self.trainer = QTrainer(self.model, self.model.network_params['ratio'], self.gamma,
+                                self.model.network_params['optimizer'])
+        self.utils = LearningUtils()
+        self.memory = deque(maxlen=MAX_MEMORY)
+        self.starting_epsilon = GeneticAlgorithm.GAMES_PER_NETWORK / 2
+        self.n_games = 0
+
     def remember(self, state, action, reward, next_state, done):
         self.memory.append((state, action, reward, next_state, done))
@@ -119,11 +129,13 @@ class NeuralSystem(esper.Processor):
             self.train_long_memory()
             if learning.score > learning.record:
                 learning.record = learning.score
-                if LEARN:
+                if LEARN and not MUTATE_NETWORKS:
                     self.model.save()

-            print('Game', self.n_games, 'Score', learning.score, 'Record', learning.record)
+            # print('Game', self.n_games, 'Score', learning.score, 'Record', learning.record)
             self.utils.add_scores(learning, self.n_games)
+            self.model.scores.append(learning.score)
             learning.score = 0
             self.utils.plot()

View File

@@ -3,8 +3,8 @@ from survival.components.direction_component import DirectionChangeComponent
 from survival.components.movement_component import MovementComponent
 from survival.components.moving_component import MovingComponent
 from survival.components.position_component import PositionComponent
-from survival.enums import Direction
-from survival.graph_search import graph_search, Action
+from survival.game.enums import Direction
+from survival.ai.graph_search import graph_search, Action
 from survival.systems.input_system import PathfindingComponent