import pandas as pd
import numpy as np
from pprint import pprint
import dataset
import random


# Compute the entropy of the given column
def entropy(attribute):
    values, counts = np.unique(attribute, return_counts=True)
    ent = np.sum(
        [(-counts[i] / np.sum(counts)) * np.log2(counts[i] / np.sum(counts)) for i in range(len(values))])
    return ent
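

# Illustrative sketch (hypothetical helper, not called anywhere in the project):
# a column split 50/50 between two values carries exactly 1 bit of entropy.
def _entropy_example():
    sample = pd.Series(['tak', 'tak', 'nie', 'nie'])
    return entropy(sample)  # expected: 1.0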


# Compute the information gain of splitting on the given attribute
def info_gain(data, split_attribute, target):
    # Entropy of the whole set
    total_entropy = entropy(data[target])

    # Extract the individual subsets
    vals, counts = np.unique(data[split_attribute], return_counts=True)

    # Weighted average of the entropy of each subset
    weighted_entropy = np.sum(
        [(counts[i] / np.sum(counts)) * entropy(data.where(data[split_attribute] == vals[i]).dropna()[target])
         for i in range(len(vals))])

    # Information gain
    information_gain = total_entropy - weighted_entropy
    return information_gain
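

# Illustrative sketch (hypothetical data, not part of the agent): when the split
# attribute fully determines the target, the gain equals the entropy of the target,
# i.e. the split removes all remaining uncertainty.
def _info_gain_example():
    frame = pd.DataFrame({'pogoda': ['slonecznie', 'deszcz', 'slonecznie', 'deszcz'],
                          'czy_chce_pracowac': ['tak', 'nie', 'tak', 'nie']})
    return info_gain(frame, 'pogoda', 'czy_chce_pracowac')  # expected: 1.0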


def ID3(data, original_data, attributes, target, parent_node_class=None):

    # If all target values are identical, return a leaf with the first value found
    if len(np.unique(data[target])) <= 1:
        return np.unique(data[target])[0]

    # If the subset is empty, return the majority class of the original data
    elif len(data) == 0:
        return np.unique(original_data[target])[
            np.argmax(np.unique(original_data[target], return_counts=True)[1])]

    # If no attributes are left, return the parent node's majority class
    elif len(attributes) == 0:
        return parent_node_class

    else:
        # Majority class of the current node
        parent_node_class = np.unique(data[target])[
            np.argmax(np.unique(data[target], return_counts=True)[1])]

        # Compute the information gain of every attribute
        item_values = [info_gain(data, i, target) for i in attributes]

        # Pick the best attribute
        best_attribute_index = np.argmax(item_values)
        best_attribute = attributes[best_attribute_index]

        # Tree structure
        tree = {best_attribute: {}}

        # Drop the chosen attribute from the attribute set
        attributes = [i for i in attributes if i != best_attribute]

        # Build a subtree for every value of the chosen attribute
        for value in np.unique(data[best_attribute]):
            sub_data = data.where(data[best_attribute] == value).dropna()
            subtree = ID3(sub_data, data, attributes, target, parent_node_class)
            tree[best_attribute][value] = subtree

        return tree
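

# Illustrative sketch (hypothetical data): the returned tree is a nested dict keyed
# by the chosen attribute and then by its values, with class labels at the leaves,
# here {'pogoda': {'deszcz': 'nie', 'slonecznie': 'tak'}}.
def _id3_example():
    frame = pd.DataFrame({'pogoda': ['slonecznie', 'deszcz', 'slonecznie', 'deszcz'],
                          'czy_chce_pracowac': ['tak', 'nie', 'tak', 'nie']})
    return ID3(frame, frame, frame.columns[:-1], 'czy_chce_pracowac')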


# Testing the tree
def test(data, tree):
    queries = data.iloc[:, :-1].to_dict(orient="records")

    predicted = pd.DataFrame(columns=["predicted"])

    for i in range(len(data)):
        predicted.loc[i, "predicted"] = search(queries[i], tree, 'nie')

    print('Precyzja przewidywań: ', (np.sum(predicted["predicted"] == data['czy_chce_pracowac']) / len(data)) * 100, '%')


# Adapt the data (list to dict) and run the search function on it
def data_to_dict(data, tree):
    queries = pd.DataFrame(data=data, columns=dataset.header)
    predicted = pd.DataFrame(columns=["predicted"])
    records = queries.iloc[:, :-1].to_dict(orient="records")

    for i in range(len(data)):
        predicted.loc[i, "predicted"] = search(records[i], tree, 'nie')

    predicted_list = predicted.values.tolist()
    return predicted_list[0][0]
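

# Illustrative usage (hypothetical values, tree passed in by the caller): a single
# observation laid out in dataset.header order, with an empty string standing in
# for the not-yet-known target column, mirroring how main() below builds its query.
def _data_to_dict_example(tree):
    return data_to_dict([['slonecznie', 'malo', 'duzo', '']], tree)  # -> 'tak' or 'nie'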


# Tree traversal
def search(query, tree, default='nie'):
    for key in list(query.keys()):
        if key in list(tree.keys()):
            try:
                result = tree[key][query[key]]
            except KeyError:
                # Attribute value not seen during training
                return default

            if isinstance(result, dict):
                return search(query, result, default)
            else:
                return result
    # No attribute of the query matches the current node
    return default
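

# Illustrative sketch (hypothetical toy tree and query): search() walks the nested
# dict until it reaches a leaf label, falling back to `default` for unseen values.
def _search_example():
    toy_tree = {'pogoda': {'slonecznie': 'tak', 'deszcz': 'nie'}}
    return search({'pogoda': 'slonecznie'}, toy_tree, 'nie')  # expected: 'tak'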


class main():
    def __init__(self, traktor, field, ui, path):
        self.traktor = traktor
        self.field = field
        self.ui = ui
        self.path = path

    def main(self):
        training_data = pd.DataFrame(data=dataset.training_data, columns=dataset.header)
        testing_data = pd.DataFrame(data=dataset.testing_data, columns=dataset.header)

        # Build the tree
        tree = ID3(training_data, training_data, training_data.columns[:-1], 'czy_chce_pracowac')
        pprint(tree)

        # Test the tree (prints the prediction accuracy)
        test(testing_data, tree)

        # Gather data from the agent
        ocena_burakow = self.ocen_ile_burakow()
        ocena_chwastow = self.ocen_ile_chwastow()
        pogoda = self.pogoda()
        print('chwasty: ' + ocena_chwastow)
        print('buraki: ' + ocena_burakow)
        print('pogoda: ' + pogoda)
        data = [[pogoda, ocena_chwastow, ocena_burakow, '']]

        # Make the decision
        result = data_to_dict(data, tree)
        print('czy traktor chce pracowac: ' + result)

    def licz_chwasty_buraki(self):
        chwasty = 0
        buraki = 0

        for i in self.field.field_matrix:
            for j in i:
                if j == 8:
                    buraki = buraki + 1
                elif j % 2 == 1:
                    chwasty = chwasty + 1
        return chwasty, buraki

    def ocen_ile_burakow(self):
        chwasty, buraki = self.licz_chwasty_buraki()
        if buraki < 5:
            return 'bardzo_malo'
        elif buraki < 10:
            return 'malo'
        elif buraki < 15:
            return 'srednio'
        elif buraki < 20:
            return 'duzo'
        else:
            return 'bardzo_duzo'

    def ocen_ile_chwastow(self):
        chwasty, buraki = self.licz_chwasty_buraki()
        if chwasty < 40:
            return 'bardzo_malo'
        elif chwasty < 42:
            return 'malo'
        elif chwasty < 45:
            return 'srednio'
        elif chwasty < 48:
            return 'duzo'
        else:
            return 'bardzo_duzo'

    def pogoda(self):
        number = random.randrange(0, 4)
        if number == 0:
            return 'slonecznie'
        elif number == 1:
            return 'deszcz'
        elif number == 2:
            return 'grad'
        else:
            return 'zachmurzenie'