diff --git a/learn_tree.py b/learn_tree.py
new file mode 100644
index 00000000..22443ea8
--- /dev/null
+++ b/learn_tree.py
@@ -0,0 +1,51 @@
from collections import Counter


def tree_learn(examples, attributes, default_class, all_attributes=None):
    # Keep the full attribute list around so column indices stay valid after
    # attributes are removed in recursive calls.
    if all_attributes is None:
        all_attributes = attributes

    # No examples left: fall back to the default class.
    if len(examples) == 0:
        return default_class

    # All examples share the same class: return that class.
    if all(examples[0][-1] == example[-1] for example in examples):
        return examples[0][-1]

    # No attributes left to split on: return the majority class.
    if len(attributes) == 0:
        class_counts = Counter(example[-1] for example in examples)
        majority_class = class_counts.most_common(1)[0][0]
        return majority_class

    # Choose the attribute A as the root of the decision (sub)tree
    A = select_attribute(attributes, examples, all_attributes)

    tree = {A: {}}
    new_attributes = [attr for attr in attributes if attr != A]
    new_default_class = Counter(example[-1] for example in examples).most_common(1)[0][0]

    for value in get_attribute_values(A, all_attributes, examples):
        new_examples = [example for example in examples
                        if example[all_attributes.index(A)] == value]
        subtree = tree_learn(new_examples, new_attributes, new_default_class, all_attributes)
        tree[A][value] = subtree

    return tree


# Helper function: select the best attribute based on a chosen criterion
# (e.g., information gain). This placeholder simply returns the first remaining
# attribute; all_attributes is passed so a real criterion can map an attribute
# name to its column in the examples.
def select_attribute(attributes, examples, all_attributes):
    return attributes[0]


# Helper function: get the possible values of an attribute from the examples.
def get_attribute_values(attribute, all_attributes, examples):
    column = all_attributes.index(attribute)
    return sorted({example[column] for example in examples})


# Example usage with coordinates
examples = [
    [1, 2, 'A'],
    [3, 4, 'A'],
    [5, 6, 'B'],
    [7, 8, 'B'],
]

attributes = ['x', 'y']
default_class = 'unknown'

decision_tree = tree_learn(examples, attributes, default_class)
print(decision_tree)
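The select_attribute placeholder above just returns the first remaining attribute. As an illustration of the information-gain criterion named in its comment, a minimal sketch could look like the following; the entropy and information_gain helpers are additions for this example (not part of the diff) and assume, as above, that the class label is the last element of each example.

import math
from collections import Counter

def entropy(examples):
    # Shannon entropy of the class-label distribution (labels in the last column).
    counts = Counter(example[-1] for example in examples)
    total = sum(counts.values())
    return -sum((c / total) * math.log2(c / total) for c in counts.values())

def information_gain(column, examples):
    # Parent entropy minus the size-weighted entropy of each split on `column`.
    gain = entropy(examples)
    for value in {example[column] for example in examples}:
        subset = [example for example in examples if example[column] == value]
        gain -= (len(subset) / len(examples)) * entropy(subset)
    return gain

def select_attribute(attributes, examples, all_attributes):
    # Pick the remaining attribute whose split gives the highest information gain.
    return max(attributes,
               key=lambda attr: information_gain(all_attributes.index(attr), examples))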
diff --git a/neural_network/nueralnet.py b/neural_network/nueralnet.py
new file mode 100644
index 00000000..d08bb488
--- /dev/null
+++ b/neural_network/nueralnet.py
@@ -0,0 +1,103 @@
# imports
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms


# create fully connected network
class NN(nn.Module):
    def __init__(self, input_size, num_classes):  # one hidden layer; MNIST images are 28x28 = 784 inputs
        super(NN, self).__init__()
        self.fc1 = nn.Linear(input_size, 50)
        self.fc2 = nn.Linear(50, num_classes)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# quick sanity check:
# model = NN(784, 10)
# x = torch.rand(64, 784)
# print(model(x).shape)

# set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# hyperparameters
input_size = 784
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 1

# load data
train_dataset = datasets.MNIST(root='dataset/', train=True, transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='dataset/', train=False, transform=transforms.ToTensor(), download=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)

# initialize network
model = NN(input_size=input_size, num_classes=num_classes).to(device)

# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# train network
for epoch in range(num_epochs):
    for batch_idx, (data, targets) in enumerate(train_loader):
        # get data to CUDA if possible
        data = data.to(device=device)
        targets = targets.to(device=device)

        # flatten images to the shape the network expects: (batch_size, 784)
        data = data.reshape(data.shape[0], -1)

        # forward
        scores = model(data)
        loss = criterion(scores, targets)

        # backward
        optimizer.zero_grad()
        loss.backward()

        # gradient descent or Adam step
        optimizer.step()


# check accuracy on training and test data to see how good our model is
def check_accuracy(loader, model):
    if loader.dataset.train:
        print("Checking accuracy on training data")
    else:
        print("Checking accuracy on test data")

    num_correct = 0
    num_samples = 0
    model.eval()

    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            x = x.reshape(x.shape[0], -1)

            scores = model(x)
            _, predictions = scores.max(1)
            num_correct += (predictions == y).sum()
            num_samples += predictions.size(0)

    acc = float(num_correct) / float(num_samples)
    print(f'Got {num_correct} / {num_samples} with accuracy {acc * 100:.2f}')

    model.train()
    return acc


check_accuracy(train_loader, model)
check_accuracy(test_loader, model)
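As a small follow-up to the accuracy check, here is a minimal inference sketch (an addition, not part of the diff) that classifies a single test image with the trained model; it reuses the model, device, and test_dataset objects defined above and flattens the image the same way the training loop does.

model.eval()
with torch.no_grad():
    image, label = test_dataset[0]           # one MNIST image and its true label
    x = image.to(device).reshape(1, -1)      # flatten 1x28x28 -> (1, 784)
    predicted_digit = model(x).argmax(dim=1).item()
print(f"Predicted digit: {predicted_digit}, true label: {label}")
model.train()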