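"""Evaluate a saved ANN profit-category classifier on the Sales.csv data.

The script loads the trained model, runs it on a held-out test split, writes
the predicted classes to predictions.txt, and writes accuracy, F1, precision
and recall to metrics.txt.
"""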
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
from sklearn.preprocessing import StandardScaler
import torch.nn.functional as F
class ANN_Model(nn.Module):
    """Feed-forward classifier: two hidden ReLU layers, three output classes."""

    def __init__(self, input_features=82, hidden1=20, hidden2=20, out_features=3):
        super().__init__()
        self.f_connected1 = nn.Linear(input_features, hidden1)
        self.f_connected2 = nn.Linear(hidden1, hidden2)
        self.out = nn.Linear(hidden2, out_features)

    def forward(self, x):
        x = F.relu(self.f_connected1(x))
        x = F.relu(self.f_connected2(x))
        # Raw logits; the predicted class is taken with argmax downstream.
        x = self.out(x)
        return x
# Load the sales data and bin Profit into three categories:
# 0: profit <= 500, 1: 500 < profit <= 1000, 2: profit > 1000.
data = pd.read_csv("./Sales.csv")
data["Profit_Category"] = pd.cut(data["Profit"], bins=[-np.inf, 500, 1000, np.inf], labels=[0, 1, 2])

# Keep the customer/product columns used as features and one-hot encode the categorical ones.
bike = data.loc[:, ['Customer_Age', 'Customer_Gender', 'Country', 'State', 'Product_Category', 'Sub_Category', 'Profit_Category']]
bikes = pd.get_dummies(bike, columns=['Country', 'State', 'Product_Category', 'Sub_Category', 'Customer_Gender'])

X = bikes.drop('Profit_Category', axis=1).values
y = bikes['Profit_Category'].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Standardise the features: fit the scaler on the training split and apply
# the same transform to the test split.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert the test split to tensors: float32 features, int64 class labels.
X_test = torch.FloatTensor(X_test.astype(np.float32))
y_test = torch.LongTensor(y_test.astype(np.int64))
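# Assumption: ANN_Model is defined with input_features=82, so the one-hot
# encoding above is expected to produce exactly 82 feature columns, matching
# the layout the saved checkpoint was trained on.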
# Load the trained classifier and switch it to inference mode.
model = torch.load("classificationn_model.pt")
model.eval()
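# Note (assumption): the checkpoint is treated as a fully pickled nn.Module.
# If it had instead been saved as a state_dict, e.g.
# torch.save(model.state_dict(), "classificationn_model.pt"), it would be
# restored like this:
#
#     model = ANN_Model()
#     model.load_state_dict(torch.load("classificationn_model.pt"))
#     model.eval()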
def calculate_predictions(model, X):
    """Return the predicted class index for every row of X."""
    with torch.no_grad():
        outputs = model(X)
        # torch.max over dim 1 returns (values, indices); the indices are the classes.
        _, predicted = torch.max(outputs, 1)
    return predicted
# Predict on the test split and save the integer class labels.
y_pred = calculate_predictions(model, X_test)
y_pred_np = y_pred.numpy()
np.savetxt("predictions.txt", y_pred_np, fmt='%d')
# Micro-averaged scores over the three profit classes.
accuracy = accuracy_score(y_test.numpy(), y_pred_np)
f1 = f1_score(y_test.numpy(), y_pred_np, average='micro')
precision = precision_score(y_test.numpy(), y_pred_np, average='micro')
recall = recall_score(y_test.numpy(), y_pred_np, average='micro')
with open("metrics.txt", "w") as f:
|
|
|
|
f.write(f"Accuracy: {accuracy}\n")
|
|
|
|
f.write(f"F1 Score: {f1}\n")
|
|
|
|
f.write(f"Precision: {precision}\n")
|
|
|
|
f.write(f"Recall: {recall}\n")
|
2023-05-12 23:28:51 +02:00
|
|
|
|
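# Optional extra (not written to any output file above): scikit-learn's
# classification_report gives a per-class breakdown of the same predictions.
#
#     from sklearn.metrics import classification_report
#     print(classification_report(y_test.numpy(), y_pred_np, digits=3))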