from sklearn import tree

# Training dataset: each row holds 7 binary attributes followed by a
# class label ('A' or 'B') as its final element.
training_data = [
    [1, 0, 0, 1, 0, 1, 1, 'A'],
    [1, 0, 0, 0, 1, 1, 1, 'A'],
    [0, 1, 0, 1, 0, 1, 1, 'B'],
    [0, 0, 0, 1, 0, 0, 1, 'B'],
    [0, 1, 1, 0, 1, 0, 0, 'B'],
    [1, 0, 0, 0, 1, 0, 1, 'A'],
    [0, 0, 0, 1, 0, 0, 0, 'B'],
    [1, 1, 0, 1, 1, 1, 0, 'A'],
    [0, 0, 0, 0, 0, 0, 1, 'B'],
    [0, 0, 1, 0, 0, 1, 0, 'B'],
]

# Split each row into its attribute vector (all but the last element)
# and its label (the last element).
X_train = [row[:-1] for row in training_data]
y_train = [row[-1] for row in training_data]

# Create the decision tree classifier. NOTE: sklearn implements an
# optimized CART, not ID3; criterion='entropy' makes it split on
# information gain, which approximates ID3's behavior.
clf = tree.DecisionTreeClassifier(criterion='entropy')

# Train the decision tree on the training data.
clf.fit(X_train, y_train)

# Classify a new, unseen example. It must carry exactly 7 attributes to
# match the training data's feature count, or predict() will raise.
new_example = [1, 0, 0, 1, 1, 0, 0]
predicted_label = clf.predict([new_example])
print("Predicted Label:", predicted_label[0])