# Import dependencies
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
|
# Load the held-out spambase split from disk.
test_data = pd.read_csv('spambase.data.test.csv')

# Separate features (X) from the binary spam label (Y).
feature_frame = test_data.drop('is_spam', axis=1)
x_test = np.array(feature_frame.values)
y_test = np.array(test_data['is_spam'].values)

# Wrap as float features / integer labels and batch for evaluation.
test = TensorDataset(torch.Tensor(x_test), torch.LongTensor(y_test))
test_loader = DataLoader(test, batch_size=64)
|
# Load the TorchScript-serialized classifier produced by training.
MODEL_PATH = "model.pth"
model = torch.jit.load(MODEL_PATH)
|
# Evaluate
|
|
|
|
def test_loop(dataloader, model, loss_fn):
|
|
open("results.txt", "w").close() # Create empty file
|
|
|
|
model.eval()
|
|
|
|
size = len(dataloader.dataset)
|
|
num_batches = len(dataloader)
|
|
test_loss, correct = 0, 0
|
|
|
|
f = open("results.txt", "a")
|
|
|
|
with torch.no_grad():
|
|
for X, y in dataloader:
|
|
pred = model(X)
|
|
test_loss += loss_fn(pred, y).item()
|
|
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
|
|
|
|
for val in pred:
|
|
f.write(str(torch.argmax(val)) + "\n")
|
|
|
|
test_loss /= num_batches
|
|
correct /= size
|
|
|
|
print(f"Avg loss: {test_loss:>8f}")
|
|
|
|
|
|
# Cross-entropy pairs the model's per-class logits with the integer
# labels stored in y_test.
loss_fn = nn.CrossEntropyLoss()

# Run the evaluation: prints the average loss and writes predictions
# to results.txt.
test_loop(test_loader, model, loss_fn)