Compare commits
3 Commits

Author | SHA1 | Date
---|---|---
 | cf203978c6 |
 | 74e6baecfa |
 | 08def183f7 |

AI_projecty.py (354 lines)
@@ -1,354 +0,0 @@
import pandas as pd
|
||||
import pygame
|
||||
from sklearn.model_selection import train_test_split
|
||||
from sklearn.preprocessing import LabelEncoder
|
||||
from sklearn.tree import DecisionTreeClassifier
|
||||
from sklearn.metrics import accuracy_score
|
||||
from sklearn.metrics import confusion_matrix
|
||||
from pygame.locals import *
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from sklearn import tree
|
||||
import random
|
||||
from textpygame import get_key, ask, display_box
|
||||
|
||||
|
||||
|
||||
#read csv file
|
||||
dataset= pd.read_csv("veganism.csv")
|
||||
|
||||
#show shape of dataset
|
||||
print(dataset.shape)
|
||||
|
||||
#create a new dataset
|
||||
newdataset = pd.DataFrame(dataset, columns=['ethnicity', 'gender', 'appearence', 'vegan'])
|
||||
|
||||
# creating instance of labelencoder
|
||||
labelencoder = LabelEncoder()
|
||||
# Assigning numerical values and storing in another column
|
||||
newdataset['ethnicity_no'] = labelencoder.fit_transform(newdataset['ethnicity'])
|
||||
newdataset['gender_no']= labelencoder.fit_transform(newdataset['gender'])
|
||||
newdataset['appearence_no']= labelencoder.fit_transform(newdataset['appearence'])
|
||||
print(newdataset)
|
||||
|
||||
# for x values drop unimportant columns, axis=1 specifies that we want the columns not rows
|
||||
Y=newdataset['vegan']
|
||||
X=newdataset.drop(newdataset.columns[0:4], axis=1)
|
||||
print(X,Y)
|
||||
|
||||
#test 15% of the data
|
||||
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
|
||||
classifier = DecisionTreeClassifier()
|
||||
classifier.fit(X_train, Y_train)
|
||||
y_pred = classifier.predict(X_test)
|
||||
|
||||
# the classifier tends to ignore minority groups, which results in predicting 0 (not vegan) for them; to solve this I would have to use something else
|
||||
print(pd.DataFrame(confusion_matrix(Y_test, y_pred),columns=['Predicted not vegan', 'Predicted vegan'], index=['Actually not vegan', 'Actually vegan']))
|
||||
|
||||
#accuracy score is high as expected
|
||||
print(accuracy_score(Y_test, y_pred))
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
fn=['ethnicity','gender','appearence']
|
||||
cn = ['no', 'yes']  # class_names follow sorted label order: class 0 = not vegan, class 1 = vegan
|
||||
|
||||
# Setting dpi = 300 to make image clearer than default
|
||||
fig, axes = plt.subplots(nrows = 1,ncols = 1,figsize = (4,4), dpi=300)
|
||||
|
||||
tree.plot_tree(classifier,
|
||||
feature_names = fn,
|
||||
class_names=cn,
|
||||
filled = True);
|
||||
|
||||
fig.savefig('imagenamenew.png')
|
||||
|
||||
|
||||
|
||||
#using the my_grid3 program
|
||||
|
||||
# Colors:
|
||||
# Define some colors
|
||||
BLACK = (0, 0, 0)
|
||||
WHITE = (255, 255, 255)
|
||||
GREEN = (0, 255, 0)
|
||||
RED = (255, 0, 0)
|
||||
BLUE = (0, 0, 240)
|
||||
MAGENTA=(255, 0, 255)
|
||||
# Width and Height of each square:
|
||||
WIDTH = 20
|
||||
HEIGHT = 20
|
||||
|
||||
# Margin:
|
||||
MARGIN = 5
|
||||
grid = [[0 for x in range(16)] for y in range(16)]
|
||||
|
||||
|
||||
def change_value(i, j, width, n):
|
||||
for r in range(i, i + width):
|
||||
for c in range(j, j + width):
|
||||
grid[r][c] = n
|
||||
if grid[r][c]==5:
|
||||
break
|
||||
|
||||
|
||||
class Table:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j, 2, 1)
|
||||
|
||||
|
||||
|
||||
|
||||
class Kitchen:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j, 3, 2)
|
||||
|
||||
|
||||
class Agent:
|
||||
def __init__(self, orig_coordinate_i, orig_coordinate_j):
|
||||
self.orig_coordinate_i = orig_coordinate_i
|
||||
self.orig_coordinate_j = orig_coordinate_j
|
||||
self.state = np.array([1, 2])
|
||||
change_value(orig_coordinate_i, orig_coordinate_j, 1, 3)  # mark the agent's starting cell
|
||||
self.state_update(orig_coordinate_i, orig_coordinate_j)
|
||||
|
||||
def state_update(self, c1, c2):
|
||||
self.state[0] = c1
|
||||
self.state[1] = c2
|
||||
print(self.state)
|
||||
|
||||
def leave(self):
|
||||
change_value(self.state[0], self.state[1], 1, 0)
|
||||
|
||||
def move_up(self):
|
||||
self.leave()
|
||||
self.state_update(x - 1, y)
|
||||
change_value(self.state[0], self.state[1], 1, 3)
|
||||
|
||||
def move_down(self):
|
||||
self.leave()
|
||||
change_value(self.state[0], self.state[1], 1, 0)
|
||||
self.state_update(x + 1, y)
|
||||
change_value(self.state[0], self.state[1], 1, 3)
|
||||
|
||||
def move_right(self):
|
||||
self.leave()
|
||||
self.state_update(x, y + 1)
|
||||
change_value(self.state[0], self.state[1], 1, 3)
|
||||
|
||||
def move_left(self):
|
||||
self.leave()
|
||||
self.state_update(x, y - 1)
|
||||
change_value(self.state[0], self.state[1], 1, 3)
|
||||
class Customer:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j,1 , 4)
|
||||
class CustomerPlace:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j,1 , 5)
|
||||
|
||||
|
||||
## default positions of the agent:
|
||||
x = 11
|
||||
y = 11
|
||||
agent = Agent(x, y)
|
||||
|
||||
table1 = Table(2, 2)
|
||||
table2 = Table(2, 7)
|
||||
table3 = Table(2, 12)
|
||||
table4 = Table(7, 2)
|
||||
table5 = Table(7, 7)
|
||||
table6 = Table(7, 12)
|
||||
table7 = Table(12, 2)
|
||||
table8 = Table(12, 7)
|
||||
|
||||
|
||||
pygame.init()
|
||||
# create a font object.
|
||||
# 1st parameter is the font file
|
||||
# which is present in pygame.
|
||||
# 2nd parameter is size of the font
|
||||
font = pygame.font.Font('freesansbold.ttf', 14)
|
||||
X = 400
|
||||
Y = 400
|
||||
# create a text suface object,
|
||||
# on which text is drawn on it.
|
||||
text = font.render('waiter: hello, let me help you with your order.', True, WHITE, BLACK)
|
||||
userText=font.render('user: ', True, BLUE, BLACK)
|
||||
# create a rectangular object for the
|
||||
# text surface object
|
||||
textRect = text.get_rect()
|
||||
inputRect = userText.get_rect()
|
||||
# set the center of the rectangular object.
|
||||
textRect.center= (200, 340)
|
||||
inputRect.center=(200,370)
|
||||
# class Kitchen:
|
||||
kitchen = Kitchen(13, 13)
|
||||
|
||||
x=[2,7,12]
|
||||
y=[2,7]
|
||||
|
||||
random_customer_seat_x=random.choice(x)
|
||||
random_customer_seat_y=random.choice(y)
|
||||
print(random_customer_seat_x,random_customer_seat_y)
|
||||
seat=Customer(random_customer_seat_x,random_customer_seat_y)
|
||||
|
||||
next_to=CustomerPlace(random_customer_seat_x,random_customer_seat_y-1)
|
||||
|
||||
WINDOW_SIZE = [405, 405]
|
||||
screen = pygame.display.set_mode(WINDOW_SIZE)
|
||||
|
||||
pygame.display.set_caption("Waiter_Grid3")
|
||||
|
||||
done = False
|
||||
print(random_customer_seat_x,random_customer_seat_y-1)
|
||||
clock = pygame.time.Clock()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#updating the drawing
|
||||
def updateDraw():
|
||||
x = agent.state[0]
|
||||
y = agent.state[1]
|
||||
screen.fill(BLACK) # Background color
|
||||
for row in range(16): # Drawing the grid
|
||||
for column in range(16):
|
||||
color = WHITE
|
||||
if grid[row][column] == 1:
|
||||
color = GREEN
|
||||
if grid[row][column] == 2:
|
||||
color = RED
|
||||
if grid[row][column] == 3:
|
||||
color = BLUE
|
||||
if grid[row][column] == 4:
|
||||
color = MAGENTA
|
||||
surface = pygame.draw.rect(screen,
|
||||
color,
|
||||
|
||||
[(MARGIN + WIDTH) * column + MARGIN,
|
||||
(MARGIN + HEIGHT) * row + MARGIN,
|
||||
WIDTH,
|
||||
HEIGHT])
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def customer():
|
||||
if x == random_customer_seat_x and y == random_customer_seat_y - 1:
|
||||
screen.blit(text, textRect)
|
||||
ethnicity3= ask(screen, question="ethnicity ")
|
||||
gender3= ask(screen,question="gender ")
|
||||
appearence3=ask(screen, question="appearence ")
|
||||
if ethnicity3 == "black":
|
||||
ethnicity3 = 1
|
||||
elif ethnicity3 == "asian":
|
||||
ethnicity3 = 0
|
||||
else:
|
||||
ethnicity3 = 2
|
||||
|
||||
if gender3 == "male":
|
||||
gender3 = 0
|
||||
else:
|
||||
gender3 = 1
|
||||
|
||||
if appearence3 == "hippie":
|
||||
appearence3= 0
|
||||
else:
|
||||
appearence3 = 1
|
||||
prediction = classifier.predict([[ethnicity3, gender3, appearence3]])
|
||||
pygame.quit()
|
||||
|
||||
if prediction == [0]:
|
||||
print("You're probably not vegan. Would you like a regular menu?")
|
||||
else:
|
||||
print("It seems like youre vegan. Would you like a vegan menu?")
|
||||
exit()
|
||||
|
||||
|
||||
|
||||
running=True
|
||||
|
||||
# pick a font you have and set its size
|
||||
text2 = 'this text is editable'
|
||||
font2 = pygame.font.SysFont(None, 48)
|
||||
img2 = font.render(text2, True, RED)
|
||||
|
||||
rect2 = img2.get_rect()
|
||||
rect2.topleft = (20, 20)
|
||||
|
||||
|
||||
background = BLACK
|
||||
# put the label object on the screen at point x=100, y=100
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
cursor = Rect(rect2.topright, (3, rect2.height))
|
||||
while running:
|
||||
|
||||
x = agent.state[0]
|
||||
y = agent.state[1]
|
||||
pygame.time.delay(100)
|
||||
for event in pygame.event.get(): # Checking for the event
|
||||
if event.type == pygame.QUIT: # If the program is closed:
|
||||
running = False  # To exit the loop (was: done = True, which never ended the 'while running' loop)
|
||||
|
||||
keys = pygame.key.get_pressed() # Moving the agent:
|
||||
|
||||
if keys[pygame.K_LEFT]: # Left:
|
||||
# Checking if not a table, a kitchen, or a wall:
|
||||
if y - 1 < 0 or grid[x][y - 1] == 1:
|
||||
continue
|
||||
else:
|
||||
agent.move_left() # If okay, the move
|
||||
if keys[pygame.K_RIGHT]: # The same procedure with right:
|
||||
if y + 1 > 15 or grid[x][y + 1] == 1 or grid[x][y + 1] == 2:
|
||||
continue
|
||||
else:
|
||||
agent.move_right()
|
||||
if keys[pygame.K_UP]: # The same procedure with up:
|
||||
if x - 1 < 0 or grid[x - 1][y] == 1 or grid[x - 1][y] == 2:
|
||||
continue
|
||||
|
||||
else:
|
||||
agent.move_up()
|
||||
if keys[pygame.K_DOWN]: # The same procedure with down
|
||||
if x + 1 > 15 or grid[x + 1][y] == 1 or grid[x + 1][y] == 2:
|
||||
continue
|
||||
else:
|
||||
agent.move_down()
|
||||
|
||||
updateDraw()
|
||||
customer()
|
||||
clock.tick(60) # Limit to 60 frames per second
|
||||
pygame.display.flip() # Updating the screen
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@@ -1,235 +0,0 @@
|
||||
# Reinforcement learning for route planning in restaurant
|
||||
##### Tao-Sen Chang s442720
|
||||
|
||||
###### In the last task we did the route planning with a hand-written algorithm. In this machine learning sub-project I show a different approach that lets the agent traverse multiple destinations on the grid system and, of course, find the shortest route. The method I use is called reinforcement learning.
|
||||
|
||||
## What is reinforcement learning?
|
||||
###### Reinforcement learning is concerned with how software agents ought to take actions in an environment in order to maximize a cumulative reward. The agent makes a sequence of decisions and learns to perform the best action at every step. For example, in my project there is a waiter on the grid who has to reach many tables to serve meals, so he must learn the shortest path to each table.
|
||||
|
||||
## How to do that?
|
||||
###### Implementing reinforcement learning from scratch is not easy, but there is a well-known worked example - a rat in a maze. Instead of writing the whole algorithm myself, I start from that existing code and adjust some of its parameters to train the agent on our 16x16 grid.
|
||||
https://www.samyzaf.com/ML/rl/qmaze.html
|
||||
###### I train the agent (the waiter) with rewards and penalties: the waiter gets a small penalty for every legal move, because we want it to reach the target table along the shortest possible path. However, the shortest path to the target table is sometimes long and winding, and the waiter may have to endure many errors before he reaches the table.
|
||||
###### For example, the reward values used during training are:
|
||||
```
|
||||
if rat_row == win_target_x and rat_col == win_target_y: # if reach the final target
|
||||
return 1.0
|
||||
if mode == 'blocked': # move to the block in the grid (blocks are tables or kitchen in our grid)
|
||||
return -1.0
|
||||
if (rat_row, rat_col) in self.visited: # when get to the visited grid point
|
||||
return -0.5
|
||||
if mode == 'invalid': # when move to the boundary
|
||||
return -0.75
|
||||
if mode == 'valid': # to make the route shorter, we give a penalty by moving to valid grid point
|
||||
return -0.04
|
||||
if (rat_row, rat_col) in self.curr_win_targets: # if reach any table
|
||||
return 1.0
|
||||
```
|
||||
```
|
||||
self.min_reward = -0.5 * self.maze.size
|
||||
```
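###### To give a concrete sense of this threshold: for the 16x16 grid used here, maze.size is 256, so a game counts as lost as soon as the cumulative reward drops below -0.5 * 256 = -128.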
|
||||
## Q-learning
|
||||
###### We want to choose, in each state, the action that yields the maximum reward. The policy π maps a state to an action: a = π(s).
|
||||
###### Q(s,a) is the maximum total reward we can obtain by choosing action a in state s. The optimal policy is therefore π(s) = argmax_a Q(s,a). Now the question is how to compute Q(s,a).
|
||||
###### The answer is Bellman's equation: Q(s,a) = R(s,a) + max_a′ Q(s′,a′).
|
||||
###### R(s,a) is the immediate reward for taking action a in the current state s, and s′ is the next state, so max_a′ Q(s′,a′) is the best value among the four actions available from that next state. In the code, the Experience class memorizes each "episode"; its memory is limited, so once max_memory is reached the oldest episode, which has the least influence on the current one, is deleted.
|
||||
###### There is also a coefficient called the discount factor, usually denoted γ, which the Bellman equation needs for stochastic environments. The equation then becomes Q(s,a) = R(s,a) + γ * max_a′ Q(s′,a′). The discount factor diminishes the influence of rewards that are far from the current state.
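###### As a minimal, self-contained illustration of this update rule - a plain tabular version, separate from the neural-network approximation used in the code below, with made-up state/action indices and a learning rate alpha that the derivation above does not need - a single Q-learning step could look like this:
```
import numpy as np

n_states, n_actions = 256, 4          # 16x16 grid, actions: LEFT, UP, RIGHT, DOWN
Q = np.zeros((n_states, n_actions))   # tabular Q-function
gamma = 0.95                          # discount factor

def q_update(state, action, reward, next_state, alpha=0.1):
    # Bellman backup: move Q(s,a) toward R(s,a) + gamma * max_a' Q(s',a')
    target = reward + gamma * np.max(Q[next_state])
    Q[state, action] += alpha * (target - Q[state, action])

# Example transition: a valid move costs -0.04, as in the reward table above
q_update(state=17, action=2, reward=-0.04, next_state=18)
```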
|
||||
```
|
||||
class Experience(object):
|
||||
def __init__(self, model, max_memory=100, discount=0.95):
|
||||
self.model = model
|
||||
self.max_memory = max_memory
|
||||
self.discount = discount
|
||||
self.memory = list()
|
||||
self.num_actions = model.output_shape[-1]
|
||||
|
||||
def remember(self, episode):
|
||||
# episode = [envstate, action, reward, envstate_next, game_over]
|
||||
# memory[i] = episode
|
||||
# envstate == flattened 1d maze cells info, including rat cell (see method: observe)
|
||||
self.memory.append(episode)
|
||||
if len(self.memory) > self.max_memory:
|
||||
del self.memory[0]
|
||||
|
||||
def predict(self, envstate):
|
||||
return self.model.predict(envstate)[0]
|
||||
|
||||
def get_data(self, data_size=10):
|
||||
env_size = self.memory[0][0].shape[1] # envstate 1d size (1st element of episode)
|
||||
mem_size = len(self.memory)
|
||||
data_size = min(mem_size, data_size)
|
||||
inputs = np.zeros((data_size, env_size))
|
||||
targets = np.zeros((data_size, self.num_actions))
|
||||
for i, j in enumerate(np.random.choice(range(mem_size), data_size, replace=False)):
|
||||
envstate, action, reward, envstate_next, game_over = self.memory[j]
|
||||
inputs[i] = envstate
|
||||
# There should be no target values for actions not taken.
|
||||
targets[i] = self.predict(envstate)
|
||||
# Q_sa = derived policy = max quality env/action = max_a' Q(s', a')
|
||||
Q_sa = np.max(self.predict(envstate_next))
|
||||
if game_over:
|
||||
targets[i, action] = reward
|
||||
else:
|
||||
# reward + gamma * max_a' Q(s', a')
|
||||
targets[i, action] = reward + self.discount * Q_sa
|
||||
return inputs, targets
|
||||
```
|
||||
|
||||
## Training
|
||||
###### The following is the algorithm for training the neural network model. One epoch is one loop of training, and in each epoch the agent eventually either "wins" or "loses".
|
||||
###### Another coefficient "epsilon" is exploration factor which decides the probability of whether the agent will perform new actions instead of following the previous experiences (which is called exploitation). By this way the agent could not only collect better rewards from previous experiences, but also have the chances to explore unknown area where might get more rewards. If one of the strategies is determined, then let's start training it by neural network. (inputs: size equals to the maze size, targets: size is the same as the number of actions (4 in our case)).
|
||||
```
|
||||
# Exploration factor
|
||||
epsilon = 0.1
|
||||
def qtrain(model, maze, **opt):
|
||||
global epsilon
|
||||
n_epoch = opt.get('n_epoch', 15000)
|
||||
max_memory = opt.get('max_memory', 1000)
|
||||
data_size = opt.get('data_size', 50)
|
||||
weights_file = opt.get('weights_file', "")
|
||||
name = opt.get('name', 'model')
|
||||
start_time = datetime.datetime.now()
|
||||
|
||||
# If you want to continue training from a previous model,
|
||||
# just supply the h5 file name to weights_file option
|
||||
if weights_file:
|
||||
print("loading weights from file: %s" % (weights_file,))
|
||||
model.load_weights(weights_file)
|
||||
|
||||
# Construct environment/game from numpy array: maze (see above)
|
||||
qmaze = Qmaze(maze)
|
||||
|
||||
# Initialize experience replay object
|
||||
experience = Experience(model, max_memory=max_memory)
|
||||
|
||||
win_history = [] # history of win/lose game
|
||||
n_free_cells = len(qmaze.free_cells)
|
||||
hsize = qmaze.maze.size//2 # history window size
|
||||
win_rate = 0.0
|
||||
imctr = 1
|
||||
pre_episodes = 2**31 - 1
|
||||
|
||||
for epoch in range(n_epoch):
|
||||
loss = 0.0
|
||||
#rat_cell = random.choice(qmaze.free_cells)
|
||||
#rat_cell = (0, 0)
|
||||
rat_cell = (12, 12)
|
||||
|
||||
qmaze.reset(rat_cell)
|
||||
game_over = False
|
||||
|
||||
# get initial envstate (1d flattened canvas)
|
||||
envstate = qmaze.observe()
|
||||
|
||||
n_episodes = 0
|
||||
while not game_over:
|
||||
valid_actions = qmaze.valid_actions()
|
||||
if not valid_actions: break
|
||||
prev_envstate = envstate
|
||||
# Get next action
|
||||
if np.random.rand() < epsilon:
|
||||
action = random.choice(valid_actions)
|
||||
else:
|
||||
action = np.argmax(experience.predict(prev_envstate))
|
||||
|
||||
# Apply action, get reward and new envstate
|
||||
envstate, reward, game_status = qmaze.act(action)
|
||||
if game_status == 'win':
|
||||
print("win")
|
||||
win_history.append(1)
|
||||
game_over = True
|
||||
# save_pic(qmaze)
|
||||
if n_episodes <= pre_episodes:
|
||||
# output_route(qmaze)
|
||||
print(qmaze.visited)
|
||||
with open('res.data', 'wb') as filehandle:
|
||||
pickle.dump(qmaze.visited, filehandle)
|
||||
pre_episodes = n_episodes
|
||||
|
||||
elif game_status == 'lose':
|
||||
print("lose")
|
||||
win_history.append(0)
|
||||
game_over = True
|
||||
# save_pic(qmaze)
|
||||
else:
|
||||
game_over = False
|
||||
|
||||
# Store episode (experience)
|
||||
episode = [prev_envstate, action, reward, envstate, game_over]
|
||||
experience.remember(episode)
|
||||
n_episodes += 1
|
||||
|
||||
# Train neural network model
|
||||
inputs, targets = experience.get_data(data_size=data_size)
|
||||
h = model.fit(
|
||||
inputs,
|
||||
targets,
|
||||
epochs=8,
|
||||
batch_size=16,
|
||||
verbose=0,
|
||||
)
|
||||
loss = model.evaluate(inputs, targets, verbose=0)
|
||||
|
||||
|
||||
if len(win_history) > hsize:
|
||||
win_rate = sum(win_history[-hsize:]) / hsize
|
||||
|
||||
dt = datetime.datetime.now() - start_time
|
||||
t = format_time(dt.total_seconds())
|
||||
|
||||
template = "Epoch: {:03d}/{:d} | Loss: {:.4f} | Episodes: {:d} | Win count: {:d} | Win rate: {:.3f} | time: {}"
|
||||
print(template.format(epoch, n_epoch-1, loss, n_episodes, sum(win_history), win_rate, t))
|
||||
```
|
||||
|
||||
## Testing
|
||||
###### We apply this algorithm to our 16x16 grid and train it.
|
||||
```
|
||||
grid = [[1 for x in range(16)] for y in range(16)]
|
||||
table1 = Table(2, 2)
|
||||
table2 = Table (2,7)
|
||||
table3 = Table(2, 12)
|
||||
table4 = Table(7, 2)
|
||||
table5 = Table(7, 7)
|
||||
table6 = Table(7, 12)
|
||||
table7 = Table(12, 2)
|
||||
table8 = Table(12, 7)
|
||||
|
||||
kitchen = Kitchen(13, 13)
|
||||
maze = np.array(grid)
|
||||
model = build_model(maze)
|
||||
qtrain(model, maze, epochs=1000, max_memory=8*maze.size, data_size=32)  # note: qtrain reads the 'n_epoch' option, so 'epochs' here is ignored and the default of 15000 epochs is used
|
||||
```
|
||||
###### I also create a list called win_targets holding the positions of the tables in the grid.
|
||||
```
|
||||
win_targets = [(4, 4),(4, 9),(4, 14),(9, 4),(9, 9),(9, 14),(14, 4),(14, 9)]
|
||||
```
|
||||
###### After a lot of training, I realized it is not easy to obtain the shortest route in every run - most training runs fail - especially when win_targets contains more targets. For example, part of the result of training with 8 targets looks like this:
|
||||
```
|
||||
...
|
||||
Epoch: 167/14999 | Loss: 0.0299 | Episodes: 407 | Win count: 63 | Win rate: 0.422 | time: 2.44 hours
|
||||
Epoch: 168/14999 | Loss: 0.0112 | Episodes: 650 | Win count: 63 | Win rate: 0.414 | time: 2.46 hours
|
||||
Epoch: 169/14999 | Loss: 0.0147 | Episodes: 392 | Win count: 64 | Win rate: 0.422 | time: 2.47 hours
|
||||
Epoch: 170/14999 | Loss: 0.0112 | Episodes: 668 | Win count: 65 | Win rate: 0.422 | time: 2.48 hours
|
||||
Epoch: 171/14999 | Loss: 0.0101 | Episodes: 487 | Win count: 66 | Win rate: 0.430 | time: 2.50 hours
|
||||
Epoch: 172/14999 | Loss: 0.0121 | Episodes: 362 | Win count: 67 | Win rate: 0.438 | time: 2.51 hours
|
||||
Epoch: 173/14999 | Loss: 0.0101 | Episodes: 484 | Win count: 68 | Win rate: 0.445 | time: 2.52 hours
|
||||
...
|
||||
```
|
||||
###### The only successful run used 4 targets (win_targets = [(4, 4), (4, 9), (4, 14), (9, 4)]):
|
||||
```
|
||||
...
|
||||
Epoch: 223/14999 | Loss: 0.0228 | Episodes: 30 | Win count: 165 | Win rate: 0.906 | time: 64.02 minutes
|
||||
Epoch: 224/14999 | Loss: 0.0160 | Episodes: 52 | Win count: 166 | Win rate: 0.906 | time: 64.09 minutes
|
||||
Epoch: 225/14999 | Loss: 0.0702 | Episodes: 34 | Win count: 167 | Win rate: 0.914 | time: 64.14 minutes
|
||||
Epoch: 226/14999 | Loss: 0.0175 | Episodes: 40 | Win count: 168 | Win rate: 0.922 | time: 64.19 minutes
|
||||
Epoch: 227/14999 | Loss: 0.0271 | Episodes: 46 | Win count: 169 | Win rate: 0.930 | time: 64.25 minutes
|
||||
Epoch: 228/14999 | Loss: 0.0194 | Episodes: 40 | Win count: 170 | Win rate: 0.938 | time: 64.30 minutes
|
||||
...
|
||||
Epoch: 460/14999 | Loss: 0.0236 | Episodes: 60 | Win count: 401 | Win rate: 1.000 | time: 1.48 hours
|
||||
Reached 100% win rate at epoch: 460
|
||||
n_epoch: 460, max_mem: 2048, data: 32, time: 1.48 hours
|
||||
```
|
||||
###### In my opinion, there are 3 reasons for such poor results.
|
||||
###### 1. The parameters of the algorithm - the rewards, the exploration rate, and the discount factor - are not optimal. Adjusting and validating them costs a lot of time, and the most intuitive choice is rarely the best one. For example, the parameters that work for 4 targets are fine, but when the number of targets grows to 8 the right parameters cannot simply be scaled from the original ones.
|
||||
###### 2. Because of the exploration rate, the same training and testing setup can give a different result every time, which makes it harder to verify our results. The only way to check whether a set of parameters yields good results is to keep training until we have collected sufficient data.
|
||||
###### 3. The algorithm was originally written for a rat in a maze with a single target. When we apply it to multiple targets it may be inadequate for that reason. Moreover, the original maze size is 7x7, so it is possible that the 16x16 grid is simply too large for this algorithm.
|
@@ -1,29 +0,0 @@
|
||||
# Final Evaluation - Waiter Project
|
||||
Authors: Tao Sen Chang, Martyna Druminska, Weronika Skowronska.
|
||||
|
||||
The project's aim is to simulate a waiter's behaviour in a restaurant.
|
||||
# Features
|
||||
|
||||
The waiter is able to move from table to table, choosing the optimal route. It can decide whether the customer wants a vegan menu or not, and it can evaluate the customer's plate as empty (waiting to order), full (eating), or dirty (waiting for the receipt).
|
||||
|
||||
# Route choosing
|
||||
Author: Tao Sen Chang
|
||||
|
||||
At the beginning of our project, we used reinforcement learning to find the shortest route for our agent to traverse specific tables. Reinforcement learning is concerned with how a software agent ought to take actions in an environment in order to maximize a cumulative reward. The agent makes a sequence of decisions and learns to perform the best action at every step. After many training loops, the agent obtains a strategy that collects close to the maximal reward. In the case of our program, the "maximal reward" corresponds to finding the minimum distance to the customer, which in turn saves time and effort.
|
||||
|
||||
# Plate evaluation
|
||||
Author: Weronika Skowronska
|
||||
|
||||
For this part, we used a CNN to classify the plate on the table as empty, full, or dirty. A CNN is a kind of neural network designed for working with images - it is good at extracting features from a photo, and it therefore requires less computation than fully connected layers used for the same task.
|
||||
As it is often hard to tell whether the client has finished eating, the waiter asks when he is not sure.
|
||||
The state of the table is then passed to the table object, and is available for further actions.
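The CNN code itself is not included in this diff, so the following is only a rough, hypothetical sketch in Keras (which the project already uses for Q-learning); the layer sizes, input resolution, and class order are assumptions, not the project's actual architecture:

```
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# Assumed input: 64x64 RGB crops of the plate; assumed labels: 0=empty, 1=full, 2=dirty
model = Sequential([
    Conv2D(16, (3, 3), activation='relu', input_shape=(64, 64, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(3, activation='softmax'),  # one output per plate state
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
```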
|
||||
|
||||
# Client evaluation
|
||||
Author: Martyna Druminska
|
||||
|
||||
If the plate is classified as empty - which means that the client hasn't ordered yet - we want to offer them a menu. We used a decision tree to predict whether the person would like a vegan menu or not, based on the customer's appearance (entered as answers in the console). The decision tree algorithm learns from a dataset with a diverse set of variables to make the best prediction. It builds a flowchart-like structure that represents the decisions, where each internal node is a "test" on an attribute, and the tree is complete when only leaf nodes remain. A condensed sketch of this pipeline follows below.
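The sketch below condenses the steps already taken in AI_projecty.py (the column names come from veganism.csv); the encoded example customer at the end is made up for illustration:

```
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

data = pd.read_csv("veganism.csv")  # columns: ethnicity, gender, appearence, vegan

# Encode the categorical features as integers
le = LabelEncoder()
for col in ['ethnicity', 'gender', 'appearence']:
    data[col + '_no'] = le.fit_transform(data[col])

X = data[['ethnicity_no', 'gender_no', 'appearence_no']]
Y = data['vegan']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)

clf = DecisionTreeClassifier().fit(X_train, Y_train)
# Hypothetical new customer, encoded as (ethnicity, gender, appearance)
print(clf.predict([[1, 0, 1]]))
```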
|
||||
|
||||
# Conclusions
|
||||
|
||||
We tried to simulate a real-life situation as closely as possible. In our opinion, we did pretty well - we managed to create a program that runs efficiently and cohesively to satisfy the needs of the customer.
|
||||
One remark is that our program does little about the clients' orders and the paying process - we think one more subproject could address this.
|
main_training.py (489 lines)
@@ -1,489 +0,0 @@
|
||||
from __future__ import print_function
|
||||
import os, sys, time, datetime, json, random
|
||||
import numpy as np
|
||||
from keras.models import Sequential
|
||||
from keras.layers.core import Dense, Activation
|
||||
from keras.optimizers import SGD , Adam, RMSprop
|
||||
from keras.layers.advanced_activations import PReLU
|
||||
import matplotlib.pyplot as plt
|
||||
import pickle
|
||||
|
||||
visited_mark = 0.8 # Cells visited by the rat will be painted by gray 0.8
|
||||
rat_mark = 0.5      # The current rat cell will be painted by gray 0.5
|
||||
LEFT = 0
|
||||
UP = 1
|
||||
RIGHT = 2
|
||||
DOWN = 3
|
||||
|
||||
# Actions dictionary
|
||||
actions_dict = {
|
||||
LEFT: 'left',
|
||||
UP: 'up',
|
||||
RIGHT: 'right',
|
||||
DOWN: 'down',
|
||||
}
|
||||
|
||||
num_actions = len(actions_dict)
|
||||
|
||||
# Exploration factor
|
||||
epsilon = 0.1
|
||||
file_name_num = 1
|
||||
win_targets = [(4, 4),(4, 9),(4, 14),(9, 4)]
|
||||
|
||||
class Qmaze(object):
|
||||
def __init__(self, maze, rat=(12,12)):
|
||||
global win_targets
|
||||
self._maze = np.array(maze)
|
||||
nrows, ncols = self._maze.shape
|
||||
#self.target = (nrows-1, ncols-1) # target cell where the "cheese" is
|
||||
self.target = win_targets[0]
|
||||
self.free_cells = [(r,c) for r in range(nrows) for c in range(ncols) if self._maze[r,c] == 1.0]
|
||||
self.free_cells.remove(win_targets[-1])
|
||||
if self._maze[self.target] == 0.0:
|
||||
raise Exception("Invalid maze: target cell cannot be blocked!")
|
||||
if not rat in self.free_cells:
|
||||
raise Exception("Invalid Rat Location: must sit on a free cell")
|
||||
self.reset(rat)
|
||||
|
||||
def reset(self, rat):
|
||||
global win_targets
|
||||
self.rat = rat
|
||||
self.maze = np.copy(self._maze)
|
||||
nrows, ncols = self.maze.shape
|
||||
row, col = rat
|
||||
self.maze[row, col] = rat_mark
|
||||
self.state = (row, col, 'start')
|
||||
self.min_reward = -0.5 * self.maze.size
|
||||
self.total_reward = 0
|
||||
self.visited = list()
|
||||
self.curr_win_targets = win_targets[:]
|
||||
|
||||
def update_state(self, action):
|
||||
nrows, ncols = self.maze.shape
|
||||
nrow, ncol, nmode = rat_row, rat_col, mode = self.state
|
||||
|
||||
if self.maze[rat_row, rat_col] > 0.0:
|
||||
self.visited.append((rat_row, rat_col)) # mark visited cell
|
||||
|
||||
valid_actions = self.valid_actions()
|
||||
|
||||
if not valid_actions:
|
||||
nmode = 'blocked'
|
||||
elif action in valid_actions:
|
||||
nmode = 'valid'
|
||||
if action == LEFT:
|
||||
ncol -= 1
|
||||
elif action == UP:
|
||||
nrow -= 1
|
||||
if action == RIGHT:
|
||||
ncol += 1
|
||||
elif action == DOWN:
|
||||
nrow += 1
|
||||
else: # invalid action, no change in rat position
|
||||
nmode = 'invalid'  # record the invalid move so get_reward() can penalize it
|
||||
|
||||
# new state
|
||||
self.state = (nrow, ncol, nmode)
|
||||
|
||||
def get_reward(self):
|
||||
win_target_x, win_target_y = self.target
|
||||
rat_row, rat_col, mode = self.state
|
||||
nrows, ncols = self.maze.shape
|
||||
if rat_row == win_target_x and rat_col == win_target_y:
|
||||
return 1.0
|
||||
if mode == 'blocked': # move to the block in the grid
|
||||
return -1.0
|
||||
if (rat_row, rat_col) in self.visited:
|
||||
return -0.5 # default -0.25 -> -0.5
|
||||
if mode == 'invalid':
|
||||
return -0.75 # default -0.75 move to the boundary
|
||||
if mode == 'valid': # default -0.04 -> -0.1
|
||||
return -0.04
|
||||
if (rat_row, rat_col) in self.curr_win_targets:
|
||||
return 1.0
|
||||
|
||||
def act(self, action):
|
||||
self.update_state(action)
|
||||
reward = self.get_reward()
|
||||
self.total_reward += reward
|
||||
status = self.game_status()
|
||||
envstate = self.observe()
|
||||
return envstate, reward, status
|
||||
|
||||
def observe(self):
|
||||
canvas = self.draw_env()
|
||||
envstate = canvas.reshape((1, -1))
|
||||
return envstate
|
||||
|
||||
def draw_env(self):
|
||||
canvas = np.copy(self.maze)
|
||||
nrows, ncols = self.maze.shape
|
||||
# clear all visual marks
|
||||
for r in range(nrows):
|
||||
for c in range(ncols):
|
||||
if canvas[r,c] > 0.0:
|
||||
canvas[r,c] = 1.0
|
||||
# draw the rat
|
||||
row, col, valid = self.state
|
||||
canvas[row, col] = rat_mark
|
||||
return canvas
|
||||
|
||||
def game_status(self):
|
||||
if self.total_reward < self.min_reward:
|
||||
return 'lose'
|
||||
rat_row, rat_col, mode = self.state
|
||||
nrows, ncols = self.maze.shape
|
||||
|
||||
curPos = (rat_row, rat_col)
|
||||
|
||||
if curPos in self.curr_win_targets:
|
||||
self.curr_win_targets.remove(curPos)
|
||||
if len(self.curr_win_targets) == 0:
|
||||
return 'win'
|
||||
else:
|
||||
self.target = self.curr_win_targets[0]
|
||||
|
||||
return 'not_over'
|
||||
|
||||
def valid_actions(self, cell=None):
|
||||
if cell is None:
|
||||
row, col, mode = self.state
|
||||
else:
|
||||
row, col = cell
|
||||
actions = [0, 1, 2, 3]
|
||||
nrows, ncols = self.maze.shape
|
||||
if row == 0:
|
||||
actions.remove(1)
|
||||
elif row == nrows-1:
|
||||
actions.remove(3)
|
||||
|
||||
if col == 0:
|
||||
actions.remove(0)
|
||||
elif col == ncols-1:
|
||||
actions.remove(2)
|
||||
|
||||
if row>0 and self.maze[row-1,col] == 0.0:
|
||||
actions.remove(1)
|
||||
if row<nrows-1 and self.maze[row+1,col] == 0.0:
|
||||
actions.remove(3)
|
||||
|
||||
if col>0 and self.maze[row,col-1] == 0.0:
|
||||
actions.remove(0)
|
||||
if col<ncols-1 and self.maze[row,col+1] == 0.0:
|
||||
actions.remove(2)
|
||||
|
||||
return actions
|
||||
|
||||
def show(qmaze):
|
||||
win_target_row, win_target_col = qmaze.target  # use the maze's current target cell
|
||||
plt.grid('on')
|
||||
nrows, ncols = qmaze.maze.shape
|
||||
ax = plt.gca()
|
||||
ax.set_xticks(np.arange(0.5, nrows, 1))
|
||||
ax.set_yticks(np.arange(0.5, ncols, 1))
|
||||
ax.set_xticklabels([])
|
||||
ax.set_yticklabels([])
|
||||
canvas = np.copy(qmaze.maze)
|
||||
for row,col in qmaze.visited:
|
||||
canvas[row,col] = 0.6
|
||||
rat_row, rat_col, _ = qmaze.state
|
||||
canvas[rat_row, rat_col] = 0.3 # rat cell
|
||||
canvas[win_target_row, win_target_col] = 0.9 # cheese cell
|
||||
img = plt.imshow(canvas, interpolation='none', cmap='gray')
|
||||
return img
|
||||
|
||||
|
||||
def save_pic(qmaze):
|
||||
global file_name_num
|
||||
win_target_row, win_target_col = qmaze.target  # use the maze's current target cell
|
||||
plt.grid('on')
|
||||
nrows, ncols = qmaze.maze.shape
|
||||
ax = plt.gca()
|
||||
ax.set_xticks(np.arange(0.5, nrows, 1))
|
||||
ax.set_yticks(np.arange(0.5, ncols, 1))
|
||||
ax.set_xticklabels([])
|
||||
ax.set_yticklabels([])
|
||||
canvas = np.copy(qmaze.maze)
|
||||
for row,col in qmaze.visited:
|
||||
canvas[row,col] = 0.6
|
||||
rat_row, rat_col, _ = qmaze.state
|
||||
canvas[rat_row, rat_col] = 0.3 # rat cell
|
||||
canvas[win_target_row, win_target_col] = 0.9 # cheese cell
|
||||
plt.imshow(canvas, interpolation='none', cmap='gray')
|
||||
plt.savefig(str(file_name_num) + ".png")
|
||||
file_name_num += 1
|
||||
|
||||
def output_route(qmaze):
|
||||
|
||||
print(qmaze._maze)
|
||||
|
||||
def play_game(model, qmaze, rat_cell):
|
||||
qmaze.reset(rat_cell)
|
||||
envstate = qmaze.observe()
|
||||
while True:
|
||||
prev_envstate = envstate
|
||||
# get next action
|
||||
q = model.predict(prev_envstate)
|
||||
action = np.argmax(q[0])
|
||||
|
||||
# apply action, get rewards and new state
|
||||
envstate, reward, game_status = qmaze.act(action)
|
||||
if game_status == 'win':
|
||||
return True
|
||||
elif game_status == 'lose':
|
||||
return False
|
||||
|
||||
|
||||
def completion_check(model, qmaze):
|
||||
for cell in qmaze.free_cells:
|
||||
if not qmaze.valid_actions(cell):
|
||||
return False
|
||||
if not play_game(model, qmaze, cell):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class Experience(object):
|
||||
def __init__(self, model, max_memory=100, discount=0.9):
|
||||
self.model = model
|
||||
self.max_memory = max_memory
|
||||
self.discount = discount
|
||||
self.memory = list()
|
||||
self.num_actions = model.output_shape[-1]
|
||||
|
||||
def remember(self, episode):
|
||||
# episode = [envstate, action, reward, envstate_next, game_over]
|
||||
# memory[i] = episode
|
||||
# envstate == flattened 1d maze cells info, including rat cell (see method: observe)
|
||||
self.memory.append(episode)
|
||||
if len(self.memory) > self.max_memory:
|
||||
del self.memory[0]
|
||||
|
||||
def predict(self, envstate):
|
||||
return self.model.predict(envstate)[0]
|
||||
|
||||
def get_data(self, data_size=10):
|
||||
env_size = self.memory[0][0].shape[1] # envstate 1d size (1st element of episode)
|
||||
mem_size = len(self.memory)
|
||||
data_size = min(mem_size, data_size)
|
||||
inputs = np.zeros((data_size, env_size))
|
||||
targets = np.zeros((data_size, self.num_actions))
|
||||
for i, j in enumerate(np.random.choice(range(mem_size), data_size, replace=False)):
|
||||
envstate, action, reward, envstate_next, game_over = self.memory[j]
|
||||
inputs[i] = envstate
|
||||
# There should be no target values for actions not taken.
|
||||
targets[i] = self.predict(envstate)
|
||||
# Q_sa = derived policy = max quality env/action = max_a' Q(s', a')
|
||||
Q_sa = np.max(self.predict(envstate_next))
|
||||
if game_over:
|
||||
targets[i, action] = reward
|
||||
else:
|
||||
# reward + gamma * max_a' Q(s', a')
|
||||
targets[i, action] = reward + self.discount * Q_sa
|
||||
return inputs, targets
|
||||
|
||||
def qtrain(model, maze, **opt):
|
||||
global epsilon
|
||||
n_epoch = opt.get('n_epoch', 15000)
|
||||
max_memory = opt.get('max_memory', 1000)
|
||||
data_size = opt.get('data_size', 50)
|
||||
weights_file = opt.get('weights_file', "")
|
||||
name = opt.get('name', 'model')
|
||||
start_time = datetime.datetime.now()
|
||||
|
||||
# If you want to continue training from a previous model,
|
||||
# just supply the h5 file name to weights_file option
|
||||
if weights_file:
|
||||
print("loading weights from file: %s" % (weights_file,))
|
||||
model.load_weights(weights_file)
|
||||
|
||||
# Construct environment/game from numpy array: maze (see above)
|
||||
qmaze = Qmaze(maze)
|
||||
|
||||
# Initialize experience replay object
|
||||
experience = Experience(model, max_memory=max_memory)
|
||||
|
||||
win_history = [] # history of win/lose game
|
||||
n_free_cells = len(qmaze.free_cells)
|
||||
hsize = qmaze.maze.size//2 # history window size
|
||||
win_rate = 0.0
|
||||
imctr = 1
|
||||
pre_episodes = 2**31 - 1
|
||||
|
||||
for epoch in range(n_epoch):
|
||||
loss = 0.0
|
||||
#rat_cell = random.choice(qmaze.free_cells)
|
||||
#rat_cell = (0, 0)
|
||||
rat_cell = (12, 12)
|
||||
|
||||
qmaze.reset(rat_cell)
|
||||
game_over = False
|
||||
|
||||
# get initial envstate (1d flattened canvas)
|
||||
envstate = qmaze.observe()
|
||||
|
||||
n_episodes = 0
|
||||
while not game_over:
|
||||
valid_actions = qmaze.valid_actions()
|
||||
if not valid_actions: break
|
||||
prev_envstate = envstate
|
||||
# Get next action
|
||||
if np.random.rand() < epsilon:
|
||||
action = random.choice(valid_actions)
|
||||
else:
|
||||
action = np.argmax(experience.predict(prev_envstate))
|
||||
|
||||
# Apply action, get reward and new envstate
|
||||
envstate, reward, game_status = qmaze.act(action)
|
||||
if game_status == 'win':
|
||||
print("win")
|
||||
win_history.append(1)
|
||||
game_over = True
|
||||
# save_pic(qmaze)
|
||||
if n_episodes <= pre_episodes:
|
||||
# output_route(qmaze)
|
||||
print(qmaze.visited)
|
||||
with open('res.data', 'wb') as filehandle:
|
||||
pickle.dump(qmaze.visited, filehandle)
|
||||
pre_episodes = n_episodes
|
||||
|
||||
elif game_status == 'lose':
|
||||
print("lose")
|
||||
win_history.append(0)
|
||||
game_over = True
|
||||
# save_pic(qmaze)
|
||||
else:
|
||||
game_over = False
|
||||
|
||||
# Store episode (experience)
|
||||
episode = [prev_envstate, action, reward, envstate, game_over]
|
||||
experience.remember(episode)
|
||||
n_episodes += 1
|
||||
|
||||
# Train neural network model
|
||||
inputs, targets = experience.get_data(data_size=data_size)
|
||||
h = model.fit(
|
||||
inputs,
|
||||
targets,
|
||||
epochs=8,
|
||||
batch_size=16,
|
||||
verbose=0,
|
||||
)
|
||||
loss = model.evaluate(inputs, targets, verbose=0)
|
||||
|
||||
|
||||
if len(win_history) > hsize:
|
||||
win_rate = sum(win_history[-hsize:]) / hsize
|
||||
|
||||
dt = datetime.datetime.now() - start_time
|
||||
t = format_time(dt.total_seconds())
|
||||
|
||||
template = "Epoch: {:03d}/{:d} | Loss: {:.4f} | Episodes: {:d} | Win count: {:d} | Win rate: {:.3f} | time: {}"
|
||||
print(template.format(epoch, n_epoch-1, loss, n_episodes, sum(win_history), win_rate, t))
|
||||
# we simply check if training has exhausted all free cells and if in all
|
||||
# cases the agent won
|
||||
if win_rate > 0.9 : epsilon = 0.05
|
||||
train_max = 192
|
||||
# print(sum(win_history[-192*1.5:]))
|
||||
# print(192)
|
||||
if sum(win_history[-192:]) >= 192:
|
||||
print("Reached 100%% win rate at epoch: %d" % (epoch,))
|
||||
break
|
||||
|
||||
# Save trained model weights and architecture, this will be used by the visualization code
|
||||
h5file = name + ".h5"
|
||||
json_file = name + ".json"
|
||||
model.save_weights(h5file, overwrite=True)
|
||||
with open(json_file, "w") as outfile:
|
||||
json.dump(model.to_json(), outfile)
|
||||
end_time = datetime.datetime.now()
|
||||
dt = datetime.datetime.now() - start_time
|
||||
seconds = dt.total_seconds()
|
||||
t = format_time(seconds)
|
||||
print('files: %s, %s' % (h5file, json_file))
|
||||
print("n_epoch: %d, max_mem: %d, data: %d, time: %s" % (epoch, max_memory, data_size, t))
|
||||
return seconds
|
||||
|
||||
# This is a small utility for printing readable time strings:
|
||||
def format_time(seconds):
|
||||
if seconds < 400:
|
||||
s = float(seconds)
|
||||
return "%.1f seconds" % (s,)
|
||||
elif seconds < 4000:
|
||||
m = seconds / 60.0
|
||||
return "%.2f minutes" % (m,)
|
||||
else:
|
||||
h = seconds / 3600.0
|
||||
return "%.2f hours" % (h,)
|
||||
|
||||
def build_model(maze, lr=0.001):
|
||||
model = Sequential()
|
||||
model.add(Dense(maze.size, input_shape=(maze.size,)))
|
||||
model.add(PReLU())
|
||||
model.add(Dense(maze.size))
|
||||
model.add(PReLU())
|
||||
model.add(Dense(num_actions))
|
||||
model.compile(optimizer='adam', loss='mse')
|
||||
return model
|
||||
|
||||
|
||||
|
||||
class Table:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j, 2, 0.)
|
||||
def get_destination_coor(self):
|
||||
return [self.coordinate_i, self.coordinate_j-1]
|
||||
|
||||
class Kitchen:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j, 3, 0.)
|
||||
|
||||
if __name__== "__main__":
|
||||
|
||||
def change_value(i, j, width, n):
|
||||
for r in range (i, i+width):
|
||||
for c in range (j, j+width):
|
||||
grid[r][c] = n
|
||||
|
||||
grid = [[1 for x in range(16)] for y in range(16)]
|
||||
table1 = Table(2, 2)
|
||||
table2 = Table (2,7)
|
||||
table3 = Table(2, 12)
|
||||
table4 = Table(7, 2)
|
||||
table5 = Table(7, 7)
|
||||
table6 = Table(7, 12)
|
||||
table7 = Table(12, 2)
|
||||
table8 = Table(12, 7)
|
||||
|
||||
|
||||
kitchen = Kitchen(13, 13)
|
||||
maze = np.array(grid)
|
||||
|
||||
# print(maze)
|
||||
# maze = np.array([
|
||||
# [ 1., 0., 1., 1., 1., 1., 1., 1.],
|
||||
# [ 1., 1., 1., 0., 0., 1., 0., 1.],
|
||||
# [ 1., 1., 1., 1., 1., 1., 0., 1.],
|
||||
# [ 1., 1., 1., 1., 0., 0., 1., 1.],
|
||||
# [ 1., 0., 0., 0., 1., 1., 1., 1.],
|
||||
# [ 1., 0., 1., 1., 1., 1., 1., 1.],
|
||||
# [ 1., 1., 1., 0., 1., 1., 1., 1.]
|
||||
# ])
|
||||
# print(maze)
|
||||
|
||||
|
||||
# qmaze = Qmaze(maze)
|
||||
# show(qmaze)
|
||||
|
||||
model = build_model(maze)
|
||||
qtrain(model, maze, epochs=1000, max_memory=8*maze.size, data_size=32)  # note: qtrain reads the 'n_epoch' option, so 'epochs' here is ignored and the default of 15000 epochs is used
|
||||
|
||||
|
||||
|
Binary file not shown.
@@ -1,151 +0,0 @@
|
||||
import pygame
|
||||
import numpy as np
|
||||
import math
|
||||
import pickle
|
||||
|
||||
# Colors:
|
||||
# Define some colors
|
||||
BLACK = (0, 0, 0)
|
||||
WHITE = (255, 255, 255)
|
||||
GREEN = (0, 255, 0)
|
||||
RED = (255, 0, 0)
|
||||
BLUE = (0, 0, 240)
|
||||
YELLOW = (255, 255, 0)
|
||||
#Width and Height of each square:
|
||||
WIDTH = 20
|
||||
HEIGHT = 20
|
||||
|
||||
#Margin:
|
||||
MARGIN = 5
|
||||
grid = [[0 for x in range(16)] for y in range(16)]
|
||||
|
||||
def change_value(i, j, width, n):
|
||||
for r in range (i, i+width):
|
||||
for c in range (j, j+width):
|
||||
grid[r][c] = n
|
||||
|
||||
class Table:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j, 2, 1)
|
||||
def get_destination_coor(self):
|
||||
return [self.coordinate_i, self.coordinate_j-1]
|
||||
|
||||
class Kitchen:
|
||||
def __init__(self, coordinate_i, coordinate_j):
|
||||
self.coordinate_i = coordinate_i
|
||||
self.coordinate_j = coordinate_j
|
||||
change_value(coordinate_i, coordinate_j, 3, 2)
|
||||
|
||||
class Agent:
|
||||
def __init__(self,orig_coordinate_i, orig_coordinate_j):
|
||||
self.orig_coordinate_i = orig_coordinate_i
|
||||
self.orig_coordinate_j = orig_coordinate_j
|
||||
self.state = np.array([orig_coordinate_i,orig_coordinate_j])
|
||||
change_value(orig_coordinate_i, orig_coordinate_j, 1, 3)  # mark the agent's starting cell
|
||||
self.state_update(orig_coordinate_i, orig_coordinate_j)
|
||||
|
||||
def state_update(self, c1, c2):
|
||||
self.state[0] = c1
|
||||
self.state[1] = c2
|
||||
|
||||
def leave(self):
|
||||
change_value(self.state[0], self.state[1], 1, 0)
|
||||
|
||||
|
||||
def move_to(self, nextPos):
|
||||
self.leave()
|
||||
nextPos_x, nextPos_y = nextPos
|
||||
self.state_update(nextPos_x, nextPos_y)
|
||||
change_value(self.state[0], self.state[1], 1, 3)
|
||||
|
||||
|
||||
def check_done():
|
||||
for event in pygame.event.get(): # Checking for the event
|
||||
if event.type == pygame.QUIT: # If the program is closed:
|
||||
return True # To exit the loop
|
||||
|
||||
def draw_grid(visited):
|
||||
for row in range(16): # Drawing the grid
|
||||
for column in range(16):
|
||||
color = WHITE
|
||||
if grid[row][column] == 1:
|
||||
color = GREEN
|
||||
if grid[row][column] == 2:
|
||||
color = RED
|
||||
if grid[row][column] == 3:
|
||||
color = BLUE
|
||||
if (row, column) in visited or (row, column) in table_targets:
|
||||
color = YELLOW
|
||||
pygame.draw.rect(screen,
|
||||
color,
|
||||
[(MARGIN + WIDTH) * column + MARGIN,
|
||||
(MARGIN + HEIGHT) * row + MARGIN,
|
||||
WIDTH,
|
||||
HEIGHT])
|
||||
|
||||
|
||||
## default positions of the agent:
|
||||
x = 12
|
||||
y = 12
|
||||
agent = Agent(x, y)
|
||||
|
||||
table1 = Table(2, 2)
|
||||
table2 = Table (2,7)
|
||||
table3 = Table(2, 12)
|
||||
table4 = Table(7, 2)
|
||||
table5 = Table(7, 7)
|
||||
table6 = Table(7, 12)
|
||||
table7 = Table(12, 2)
|
||||
table8 = Table(12, 7)
|
||||
|
||||
#class Kitchen:
|
||||
kitchen = Kitchen(13, 13)
|
||||
|
||||
pygame.init()
|
||||
WINDOW_SIZE = [405, 405]
|
||||
screen = pygame.display.set_mode(WINDOW_SIZE)
|
||||
|
||||
pygame.display.set_caption("Waiter_Grid3")
|
||||
|
||||
done = False
|
||||
|
||||
clock = pygame.time.Clock()
|
||||
|
||||
with open('res_targets_4-1.data', 'rb') as filehandle:
|
||||
# read the data as binary data stream
|
||||
trained_route = pickle.load(filehandle)
|
||||
|
||||
print(trained_route)
|
||||
destination = (9, 4)
|
||||
trained_route.append(destination)
|
||||
|
||||
table_targets = [(4, 4),(4, 9),(4, 14),(9, 4)]
|
||||
|
||||
# -------- Main Program Loop -----------
|
||||
while not done:
|
||||
visited = set()
|
||||
screen.fill(BLACK) # Background color
|
||||
draw_grid(visited)
|
||||
done = check_done()
|
||||
new_route = trained_route[:]
|
||||
|
||||
while len(new_route) != 0:
|
||||
x = agent.state[0]
|
||||
y = agent.state[1]
|
||||
|
||||
agent.move_to(new_route[0])
|
||||
new_route = new_route[1:]
|
||||
|
||||
|
||||
pygame.time.delay(150)
|
||||
screen.fill(BLACK)
|
||||
visited.add((x,y))
|
||||
draw_grid(visited)
|
||||
# Drawing the grid
|
||||
clock.tick(100) # Limit to 100 frames per second
|
||||
pygame.display.flip() # Updating the screen
|
||||
|
||||
|
||||
pygame.quit()
|
@@ -1,67 +0,0 @@
|
||||
import pygame
|
||||
import pygame.font
|
||||
import pygame.event
|
||||
import pygame.draw
|
||||
import os
|
||||
import sys
|
||||
from pygame.locals import *
|
||||
|
||||
|
||||
bad_words_file = os.path.dirname(os.path.realpath(sys.argv[0])) \
|
||||
+ '/bad_words.txt'
|
||||
|
||||
def get_key():
|
||||
while 1:
|
||||
event = pygame.event.poll()
|
||||
if event.type == KEYDOWN:
|
||||
return event.key
|
||||
else:
|
||||
pass
|
||||
|
||||
|
||||
def display_box(screen, message):
|
||||
"Print a message in a box in the middle of the screen"
|
||||
fontobject = pygame.font.Font(None, 18)
|
||||
pygame.draw.rect(screen, (0, 0, 0),
|
||||
((screen.get_width() / 2) - 100,
|
||||
(screen.get_height() / 2) - 10,
|
||||
200, 20), 0)
|
||||
pygame.draw.rect(screen, (255, 255, 255),
|
||||
((screen.get_width() / 2) - 102,
|
||||
(screen.get_height() / 2) - 12,
|
||||
204, 24), 1)
|
||||
if len(message) != 0:
|
||||
screen.blit(fontobject.render(message, 1, (255, 255, 255)),
|
||||
((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))
|
||||
pygame.display.flip()
|
||||
|
||||
|
||||
def ask(screen, question):
|
||||
"ask(screen, question) -> answer"
|
||||
pygame.font.init()
|
||||
current_string = []
|
||||
display_box(screen, question + ": " + "".join(current_string))
|
||||
while 1:
|
||||
inkey = get_key()
|
||||
if inkey == K_BACKSPACE:
|
||||
current_string = current_string[0:-1]
|
||||
elif inkey == K_RETURN:
|
||||
file = open(bad_words_file, 'r').readlines()
|
||||
if "".join(current_string) in [thing[:-1] for thing in file]:
|
||||
current_string = []
|
||||
else:
|
||||
break
|
||||
elif inkey == K_MINUS:
|
||||
current_string.append("_")
|
||||
elif inkey <= 127:
|
||||
current_string.append(chr(inkey))
|
||||
display_box(screen, question + ": " + "".join(current_string))
|
||||
return "".join(current_string)
|
||||
|
||||
|
||||
def main():
|
||||
screen = pygame.display.set_mode((320, 240))
|
||||
print(ask(screen, "Name") + " was entered")
|
||||
|
||||
|
||||
if __name__ == '__main__': main()
|
veganism.csv (751 lines)
@@ -1,751 +0,0 @@
|
||||
ethnicity,gender,appearence,vegan
|
||||
asian,male,hippie,0
|
||||
asian,male,hippie,1
|
||||
asian,male,hippie,1
|
||||
asian,male,hippie,0
|
||||
asian,male,hippie,1
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,1
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,male,regular,0
|
||||
asian,female,hippie,1
|
||||
asian,female,hippie,1
|
||||
asian,female,hippie,1
|
||||
asian,female,hippie,1
|
||||
asian,female,hippie,1
|
||||
asian,female,hippie,1
|
||||
asian,female,hippie,1
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
asian,female,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
|
||||
black,male,regular,0
Remaining rows of the deleted veganism.csv hunk (identical rows grouped, count in parentheses):

black,male,regular,0 (19 rows)
black,male,regular,1 (1 row)
black,female,hippie,1 (3 rows)
black,female,regular,0 (46 rows)
black,female,regular,1 (1 row)
white,male,hippie,1 (18 rows)
white,male,hippie,0 (2 rows)
white,male,regular,0 (255 rows)
white,female,hippie,1 (78 rows)
white,female,hippie,0 (12 rows)
white,female,regular,0 (183 rows)
white,female,regular,1 (2 rows)
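The surviving rows make the class imbalance obvious: almost every "regular"-looking guest is labelled non-vegan, while most "hippie" rows are labelled vegan. A minimal sketch, assuming the full file is available locally under the name the scripts use, of how that skew could be quantified with pandas (the column names, including the original spelling "appearence", come from the file's own header):

import pandas as pd

df = pd.read_csv("veganism.csv")
# Overall label balance (vegan = 1, not vegan = 0)
print(df["vegan"].value_counts())
# Share of vegan labels within each ethnicity/gender/appearance group
print(df.groupby(["ethnicity", "gender", "appearence"])["vegan"].mean())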
478
waiter1406.py
478
waiter1406.py
@ -1,478 +0,0 @@
#### MD #######

import pandas as pd
import pygame
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from pygame.locals import *
import numpy as np
import matplotlib.pyplot as plt
from sklearn import tree
import random
from textpygame import get_key, ask, display_box

###### /MD ######

import math
import pickle
########################
### WS ###
########################
# For CNN:

import keras
from keras.preprocessing import image
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense


# Initializing:
cnn_model = Sequential()
# Convolution:
cnn_model.add(Convolution2D(32, (3, 3), input_shape=(256, 256, 3), activation="relu"))
# Pooling:
cnn_model.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a second convolutional layer
cnn_model.add(Convolution2D(32, (3, 3), activation='relu'))
cnn_model.add(MaxPooling2D(pool_size=(2, 2)))

# Flattening:
cnn_model.add(Flatten())

# Fully connected layers:
cnn_model.add(Dense(units=128, activation="relu"))
cnn_model.add(Dense(units=3, activation="softmax"))

# Loading weights:
cnn_model.load_weights('s444523/best_model_weights2.h5')
# Compiling the CNN:
cnn_model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

########################
### /WS ###
########################
#### MD #####

# Read csv file
dataset = pd.read_csv("veganism.csv")

# Create a new dataset
newdataset = pd.DataFrame(dataset, columns=['ethnicity', 'gender', 'appearence', 'vegan'])

# Creating instance of labelencoder
labelencoder = LabelEncoder()
# Assigning numerical values and storing in another column
newdataset['ethnicity_no'] = labelencoder.fit_transform(newdataset['ethnicity'])
newdataset['gender_no'] = labelencoder.fit_transform(newdataset['gender'])
newdataset['appearence_no'] = labelencoder.fit_transform(newdataset['appearence'])
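Because customer() further down re-encodes the user's answers by hand, it matters which numbers the encoder assigned: LabelEncoder sorts the labels it sees alphabetically. A small sketch of how to inspect that mapping for the newdataset built above, using one encoder per column (an assumption made for clarity; the script itself reuses a single instance):

from sklearn.preprocessing import LabelEncoder

for col in ['ethnicity', 'gender', 'appearence']:
    enc = LabelEncoder()
    enc.fit(newdataset[col])
    print(col, dict(zip(enc.classes_, range(len(enc.classes_)))))
# With the labels present in veganism.csv this gives, for example, female=0 and male=1,
# which is why the manual encoding in customer() has to follow the same order.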
# For X values drop the original text columns; axis=1 specifies that we want the columns, not rows
Y = newdataset['vegan']
X = newdataset.drop(newdataset.columns[0:4], axis=1)

# Hold out 15% of the data for testing
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.15)
classifier = DecisionTreeClassifier()
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)


fn = ['ethnicity', 'gender', 'appearence']
cn = ['no', 'yes']  # class_names follow classifier.classes_ order: 0 = not vegan, 1 = vegan

# Setting dpi=300 to make the image clearer than default (for the decision tree visualisation)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(4, 4), dpi=300)

tree.plot_tree(classifier,
               feature_names=fn,
               class_names=cn,
               filled=True)

fig.savefig('imagenamenew.png')
class Customer:
    def __init__(self, coordinate_i, coordinate_j):
        self.coordinate_i = coordinate_i
        self.coordinate_j = coordinate_j
        change_value(coordinate_i, coordinate_j, 1, 4)


class CustomerPlace:
    def __init__(self, coordinate_i, coordinate_j):
        self.coordinate_i = coordinate_i
        self.coordinate_j = coordinate_j
        change_value(coordinate_i, coordinate_j, 1, 5)


###### /MD #######
# Colors:
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 240)
YELLOW = (255, 255, 0)
MAGENTA = (255, 0, 255)  # used by updateDraw() below for customer cells

# Width and Height of each square:
WIDTH = 20
HEIGHT = 20

# Margin:
MARGIN = 5
# Grid cell values: 0 = empty, 1 = table, 2 = kitchen, 3 = waiter (agent), 4 = customer, 5 = customer place
grid = [[0 for x in range(16)] for y in range(16)]


def change_value(i, j, width, n):
    # Fill a width-by-width square starting at (i, j) with the marker value n
    for r in range(i, i + width):
        for c in range(j, j + width):
            grid[r][c] = n
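For reference, a self-contained sketch of what change_value does to the board: it stamps a width-by-width square of the marker value n starting at cell (i, j), which is how tables, the kitchen, the waiter and customers end up on the 16x16 grid.

grid = [[0 for x in range(16)] for y in range(16)]

def change_value(i, j, width, n):
    for r in range(i, i + width):
        for c in range(j, j + width):
            grid[r][c] = n

change_value(2, 2, 2, 1)           # a 2x2 table occupying rows 2-3, columns 2-3
print(grid[2][2], grid[3][3])      # -> 1 1
print(grid[4][4])                  # -> 0 (outside the stamped square)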
class Table:
    def __init__(self, coordinate_i, coordinate_j):
        self.coordinate_i = coordinate_i
        self.coordinate_j = coordinate_j
        change_value(coordinate_i, coordinate_j, 2, 1)

    def get_destination_coor(self):
        return [self.coordinate_i + 2, self.coordinate_j + 2]

    ########################
    ### WS ###
    ########################

    # The function "state_of_meal" chooses a photo of a plate at the given table.
    def state_of_meal(self):
        ## !!!!!!###
        num = np.random.randint(67, 100)
        ## !!!!!!###

        if num <= 67:
            img_name = 'plates/{}.png'.format(num)
        else:
            img_name = 'plates/{}.jpg'.format(num)
        return img_name

    # The function "change_state" changes the value of the state variable.
    # It records whether the clients are waiting for the waiter to take an order
    # (0 - empty plates), are eating (2 - full plates), or are waiting for the
    # waiter to bring the receipt (1 - dirty plates).
    def change_state(self, st):
        self.state = st

    ########################
    ### /WS ###
    ########################
class Kitchen:
    def __init__(self, coordinate_i, coordinate_j):
        self.coordinate_i = coordinate_i
        self.coordinate_j = coordinate_j
        change_value(coordinate_i, coordinate_j, 3, 2)


class Agent:
    def __init__(self, orig_coordinate_i, orig_coordinate_j):
        self.orig_coordinate_i = orig_coordinate_i
        self.orig_coordinate_j = orig_coordinate_j
        self.state = np.array([orig_coordinate_i, orig_coordinate_j])
        change_value(orig_coordinate_i, orig_coordinate_j, 1, 3)
        self.state_update(orig_coordinate_i, orig_coordinate_j)

    def state_update(self, c1, c2):
        self.state[0] = c1
        self.state[1] = c2

    def leave(self):
        change_value(self.state[0], self.state[1], 1, 0)

    def move_to(self, nextPos):
        self.leave()
        nextPos_x, nextPos_y = nextPos
        self.state_update(nextPos_x, nextPos_y)
        change_value(self.state[0], self.state[1], 1, 3)

    ########################
    ### WS ###
    ########################

    # The function "define_table" searches for the appropriate table in the
    # table_dict (to enable the usage of class attributes and methods).
    def define_table(self, t_num):
        t_num = 'table{}'.format(t_num)
        t_num = table_dict[t_num]
        return t_num

    # The function "check_plates" uses the pretrained CNN model to classify
    # the plate on the table as empty(0), full(2) or dirty(1).
    def check_plates(self, table_number):
        table = self.define_table(table_number)
        plate = table.state_of_meal()
        plate = image.load_img(plate, target_size=(256, 256))
        plate = image.img_to_array(plate)
        # plate = plate.reshape((256, 256))
        plate = np.expand_dims(plate, axis=0)

        result = cnn_model.predict(plate)[0]
        print(result)
        if result[1] == 1:
            result[1] = 0
            x = int(input("Excuse me, are you done eating? 1 = Yes, 2 = No \n"))
            result[x] = 1
        for i, x in enumerate(result):
            if result[i] == 1:
                table.change_state(i)
                return i

    ########################
    ### /WS ###
    ########################
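check_plates walks over the softmax output looking for a value equal to 1, which only matches when the network is completely saturated, and it falls back to asking the guest when the "dirty" slot fires. A small sketch, not part of the original file, of the non-interactive part of that decision using argmax instead (index 0 = empty, 1 = dirty, 2 = full, the same codes Table.change_state stores):

import numpy as np

def plate_state_from_prediction(result):
    # result is the 3-way softmax vector returned by cnn_model.predict(...)[0]
    return int(np.argmax(result))  # 0 = empty, 1 = dirty, 2 = full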
def check_done():
    for event in pygame.event.get():  # Checking for the event
        if event.type == pygame.QUIT:  # If the program is closed:
            return True  # To exit the loop


def draw_grid():
    for row in range(16):  # Drawing the grid
        for column in range(16):
            color = WHITE
            if grid[row][column] == 1:
                color = GREEN
            if grid[row][column] == 2:
                color = RED
            if grid[row][column] == 3:
                color = BLUE
            pygame.draw.rect(screen,
                             color,
                             [(MARGIN + WIDTH) * column + MARGIN,
                              (MARGIN + HEIGHT) * row + MARGIN,
                              WIDTH,
                              HEIGHT])
## Default positions of the agent:
x = 12
y = 12
agent = Agent(x, y)
table1 = Table(2, 2)
table2 = Table(2, 7)
table3 = Table(2, 12)
table4 = Table(7, 2)
table5 = Table(7, 7)
table6 = Table(7, 12)
table7 = Table(12, 2)
table8 = Table(12, 7)


################### WS #####################
# I added the dict to loop through tables.
table_dict = {"table1": table1, "table2": table2, "table3": table3, "table4": table4,
              "table5": table5, "table6": table6, "table7": table7, "table8": table8
              }
################### WS #####################
pygame.init()


#### MD ####
'''
# Create a font object.
# 1st parameter is the font file which is present in pygame,
# 2nd parameter is the size of the font.
font = pygame.font.Font('freesansbold.ttf', 14)
X = 400
Y = 400
# Create a text surface object on which the text is drawn.
text = font.render('waiter: hello, let me help you with your order.', True, WHITE, BLACK)
userText = font.render('user: ', True, BLUE, BLACK)
# Create a rectangular object for the text surface object.
textRect = text.get_rect()
inputRect = userText.get_rect()
# Set the center of the rectangular object.
textRect.center = (200, 340)
inputRect.center = (200, 370)
'''
#### /MD ####


# Kitchen:
kitchen = Kitchen(13, 13)

WINDOW_SIZE = [405, 405]
screen = pygame.display.set_mode(WINDOW_SIZE)

pygame.display.set_caption("Waiter_Grid3")

done = False

clock = pygame.time.Clock()
with open('res_targets_4-1.data', 'rb') as filehandle:
    # read the data as binary data stream
    trained_route = pickle.load(filehandle)

destination = (4, 14)
trained_route.append(destination)

table_targets = [(9, 4), (4, 4), (4, 9), (4, 14)]


destination_tables = []
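The route file is only ever read here, never written; from the way each element is later handed to Agent.move_to and unpacked into two coordinates, it has to be a pickled sequence of (row, column) cells. A minimal sketch of how such a file could be produced, with a made-up route and file name (the script itself expects 'res_targets_4-1.data'):

import pickle

example_route = [(12, 11), (11, 10), (10, 9), (9, 4)]  # hypothetical path towards the first table target
with open('res_targets_example.data', 'wb') as filehandle:
    pickle.dump(example_route, filehandle)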
###### MD ######
x = [2, 7, 12]
y = [2, 7]

random_customer_seat_x = random.choice(x)
random_customer_seat_y = random.choice(y)
print(random_customer_seat_x, random_customer_seat_y)
seat = Customer(random_customer_seat_x, random_customer_seat_y)

next_to = CustomerPlace(random_customer_seat_x, random_customer_seat_y - 1)

# (Re-)creates the same window and clock as above
WINDOW_SIZE = [405, 405]
screen = pygame.display.set_mode(WINDOW_SIZE)

pygame.display.set_caption("Waiter_Grid3")

done = False
print(random_customer_seat_x, random_customer_seat_y - 1)
clock = pygame.time.Clock()
# Updating the drawing
def updateDraw():
    x = agent.state[0]
    y = agent.state[1]
    screen.fill(BLACK)  # Background color
    for row in range(16):  # Drawing the grid
        for column in range(16):
            color = WHITE
            if grid[row][column] == 1:
                color = GREEN
            if grid[row][column] == 2:
                color = RED
            if grid[row][column] == 3:
                color = BLUE
            if grid[row][column] == 4:
                color = MAGENTA
            surface = pygame.draw.rect(screen,
                                       color,
                                       [(MARGIN + WIDTH) * column + MARGIN,
                                        (MARGIN + HEIGHT) * row + MARGIN,
                                        WIDTH,
                                        HEIGHT])
def customer():
    # screen.blit(text, textRect)  # 'text'/'textRect' exist only in the commented-out font block above
    ethnicity3 = input("Excuse me, what's your ethnicity? <black>, <asian>, <white>\n")
    gender3 = input("Excuse me, what's your gender? <male>, <female>, <other> \n")
    appearence3 = input("Excuse me, what's your appearance? <hippie>, <other> \n")
    # Encode the answers the same way LabelEncoder encoded the training data
    # (labels are sorted alphabetically: asian=0, black=1, white=2).
    if ethnicity3 == "black":
        ethnicity3 = 1
    elif ethnicity3 == "asian":
        ethnicity3 = 0
    else:
        ethnicity3 = 2

    # female=0, male=1 in the training encoding
    if gender3 == "female":
        gender3 = 0
    else:
        gender3 = 1

    # hippie=0, everything else 1 in the training encoding
    if appearence3 == "hippie":
        appearence3 = 0
    else:
        appearence3 = 1
    prediction = classifier.predict([[ethnicity3, gender3, appearence3]])
    # pygame.quit()

    if prediction[0] == 0:
        print("You're probably not vegan. Would you like a regular menu?")
    else:
        print("It seems like you're vegan. Would you like a vegan menu?")
    # exit()
###### /MD ###
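A short sketch of what the classifier call inside customer() boils down to for one hypothetical guest, using the same feature order as X (ethnicity_no, gender_no, appearence_no) and the alphabetical encoding noted earlier:

# Hypothetical guest: white (2), female (0), hippie (0)
sample = [[2, 0, 0]]
prediction = classifier.predict(sample)
print(prediction[0])  # given the training data above, this is expected to come out as 1 (vegan)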
# -------- Main Program Loop -----------
exit_counter = 0
while not done:
    screen.fill(BLACK)  # Background color
    draw_grid()
    done = check_done()
    new_route = trained_route[:]
    for value in table_dict.values():
        destination_tables.append(value.get_destination_coor())
    num_of_table = 1
    while len(new_route) != 0:
        # Move to the next grid cell
        agent.move_to(new_route[0])

        # Update the grid
        pygame.time.delay(150)
        screen.fill(BLACK)
        draw_grid()
        # Drawing the grid
        clock.tick(100)  # Limit to 100 frames per second
        pygame.display.flip()  # Updating the screen

        # Get the current grid coordinate
        x = agent.state[0]
        y = agent.state[1]
        # If the waiter reached a table, ask to collect the plates
        if [x, y] in destination_tables:
            ########################
            ### WS ###
            ########################
            # pygame.time.delay(100)
            print("I'm at table no. {}".format(num_of_table))
            ## Checking what state the plates are in:
            state_of_table = agent.check_plates(num_of_table)
            num_of_table += 1

            if state_of_table == 0:
                customer()

            # Early stopping (after 10 rounds)
            exit_counter += 1
            print("exit_counter", exit_counter)
            if exit_counter == 10:
                play_again = 1
                play_again = int(input("Exit? 0=No, 1=Yes \n"))
                if play_again:
                    pygame.quit()
                else:
                    exit_counter = 0

            ########################
            ### /WS ###
            ########################
            destination_tables = destination_tables[1:]

        new_route = new_route[1:]

# After each full loop, we can quit the program.



pygame.quit()