Upload files to "ai-wozek"
This commit is contained in:
parent a024da6e00
commit df9fa479c6
@@ -9,6 +9,11 @@ from classes import *
import numpy as np
import pandas as pd
import math
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import category_encoders
from sklearn import metrics
from sklearn import tree

class Node():
    def __init__(self,position,rotation,action,parent,cost):
@@ -335,8 +340,31 @@ def astar(isstate,final):
        heapq.heappush(fringe,(p,successor))

# decision tree
cols=['Height', 'Width', 'Depth', 'Weight', 'Damage', 'Label_State', 'Content', 'Value']
tree_data_base = pd.read_csv('paczki.csv')
x=tree_data_base.drop(columns='Acceptance')
y=tree_data_base['Acceptance']
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.4)
# Create Decision Tree classifier object
encoder = category_encoders.OrdinalEncoder(cols=cols)

x_train = encoder.fit_transform(x_train)

x_test = encoder.transform(x_test)

clf = DecisionTreeClassifier(criterion='entropy')
clf = clf.fit(x_train,y_train)

# Predict the response for the test dataset
y_pred = clf.predict(x_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))

text_tree=tree.export_text(clf)
print(text_tree)
# Train Decision Tree Classifier
#clf = clf.fit(x_train,y_train)

"""def entropy(data):
    labels = data.iloc[:, -1]  # the last column contains the class labels; the first row is skipped because it is the header
    counts = labels.value_counts()  # count every label value here
    probabilities = counts / len(labels)
@@ -351,10 +379,10 @@ def information_gain(data, attribute):
        subset = data[data[attribute] == value]  # assign every row with the given value to subset
        subset_entropy = entropy(subset)
        weighted_entropy += (len(subset) / len(data)) * subset_entropy
    return (total_entropy - weighted_entropy)
    return (total_entropy - weighted_entropy)"""
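
For reference, a minimal runnable sketch of the entropy/information-gain pair that the commented-out block above outlines; it assumes `data` is a pandas DataFrame whose last column holds the class label (as with `tree_data_base`):

import math
import pandas as pd

def entropy(data):
    labels = data.iloc[:, -1]                      # last column = class label
    probabilities = labels.value_counts() / len(labels)
    return -sum(p * math.log2(p) for p in probabilities if p > 0)

def information_gain(data, attribute):
    total_entropy = entropy(data)
    weighted_entropy = 0.0
    for value in data[attribute].unique():
        subset = data[data[attribute] == value]    # rows where attribute == value
        weighted_entropy += (len(subset) / len(data)) * entropy(subset)
    return total_entropy - weighted_entropy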
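
Not part of the commit, but a small usage sketch of the pipeline above: the fitted `encoder` and `clf` could score a single new package as shown below (the attribute values are invented placeholders, not taken from paczki.csv):

# Hypothetical example row; values are illustrative only
sample = pd.DataFrame(
    [['Medium', 'Medium', 'Small', 'Light', 'None', 'Intact', 'Electronics', 'High']],
    columns=cols,
)
sample_encoded = encoder.transform(sample)         # reuse the encoder fitted on x_train
print("Predicted acceptance:", clf.predict(sample_encoded)[0])
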
# Main game loop
def game_loop():