Added pytorch DL python script

This commit is contained in:
Adam Wojdyla 2022-04-24 09:06:57 +02:00
parent 6a5383850d
commit 80974027c7


@@ -0,0 +1,94 @@
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import torch.nn.functional as F
import pandas as pd
from sklearn import preprocessing

class Model(nn.Module):
    """Feed-forward classifier: two hidden ReLU layers, 23 output classes (one per car mark)."""

    def __init__(self, input_dim):
        super(Model, self).__init__()
        self.layer1 = nn.Linear(input_dim, 160)
        # self.layer2 = nn.Linear(320, 160)
        self.layer2 = nn.Linear(160, 80)
        self.layer3 = nn.Linear(80, 23)

    def forward(self, x):
        x = F.relu(self.layer1(x))
        x = F.relu(self.layer2(x))
        # nn.CrossEntropyLoss already applies log-softmax internally, so returning the raw
        # logits from layer3 would be the more usual choice; the explicit softmax is kept
        # to match the original, with dim=-1 so both batches and single samples work.
        x = F.softmax(self.layer3(x), dim=-1)
        return x

def load_dataset():
    """ Load data from .csv file. """
    cars = pd.read_csv('./Car_Prices_Poland_Kaggle.csv', usecols=[1, 4, 5, 6, 10], sep=',')
    # cars = cars.iloc()
    return cars

def prepare_dataset(dataset):
    """Label-encode the 'mark' column as the target and min-max scale the remaining features."""
    le = preprocessing.LabelEncoder()
    mark_column = np.array(dataset[:]['mark'])
    le.fit(mark_column)
    print(list(le.classes_))
    labels = le.transform(mark_column)

    features = dataset.drop(['mark'], axis=1).to_numpy()
    mm_scaler = preprocessing.MinMaxScaler()
    features = mm_scaler.fit_transform(features)
    return labels, features

# Prepare dataset
dataset = load_dataset()
labels, features = prepare_dataset(dataset)
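# Split into train and test sets (train_test_split's default 25% test share), shuffled with a fixed seed for reproducibility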
features_train, features_test, labels_train, labels_test = train_test_split(features, labels, random_state=42, shuffle=True)
#
# import matplotlib
#
# plt = matplotlib.pyplot.hist(features, 16)
# Training
model = Model(features_train.shape[1])
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
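# CrossEntropyLoss takes class indices as targets and applies log-softmax internally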
loss_fn = nn.CrossEntropyLoss()
epochs = 1000
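# Convert the numpy arrays to tensors (Variable is deprecated in current PyTorch and simply returns the tensor it wraps)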
x_train, y_train = Variable(torch.from_numpy(features_train)).float(), Variable(torch.from_numpy(labels_train)).long()
for epoch in range(1, epochs + 1):
    print("Epoch #", epoch)
    y_pred = model(x_train)                # forward pass on the full training set
    loss = loss_fn(y_pred, y_train)
    print(f"The loss calculated: {loss}")

    optimizer.zero_grad()                  # reset accumulated gradients
    loss.backward()                        # backpropagate
    optimizer.step()                       # update the weights
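
# Evaluation: run the model on the held-out test set and take the argmax over the class scores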
x_test = Variable(torch.from_numpy(features_test)).float()
pred = model(x_test)
pred = pred.detach().numpy()
print(pred)
print("The accuracy is", accuracy_score(labels_test, np.argmax(pred, axis=1)))
# Check the prediction for the first test sample against its true label
print(np.argmax(model(x_test[0]).detach().numpy(), axis=0))
print(labels_test[0])
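
# Save the whole trained model, then reload it and check that it reproduces the prediction above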
torch.save(model, "iris-pytorch.pkl")
saved_model = torch.load("iris-pytorch.pkl")
print(np.argmax(saved_model(x_test[0]).detach().numpy(), axis=0))