Experiments: neural networks for the breast cancer classification problem
# Data manipulation
import numpy as np
import pandas as pd
# Data visualization
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# Data preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Metrics
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import accuracy_score
# Deep learning
import torch
import torch.nn as nn
import torch.optim as optim
Helper functions for visualizing the confusion matrix and classification report
# Plot confusion matrix
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap='Blues', figsize=(10, 6), axis=None):
    """Plot a confusion matrix as an annotated heatmap."""
    if axis is None:
        fig, ax = plt.subplots(figsize=figsize)
    else:
        ax = axis
    sns.heatmap(cm, annot=True, fmt='d', xticklabels=classes, yticklabels=classes, cmap=cmap, ax=ax)
    ax.set_title(title)
    ax.set_xlabel('Predicted label')
    ax.set_ylabel('True label')
    if axis is None:
        plt.show()
# Plot classification report
def plot_classification_report(report, title='Classification report', axis=None):
    """Plot a classification report (as returned with output_dict=True) as a heatmap."""
    if axis is None:
        fig, ax = plt.subplots(figsize=(10, 6))
    else:
        ax = axis
    # Drop the support row before plotting so only precision/recall/f1 are shown
    sns.heatmap(pd.DataFrame(report).iloc[:-1, :].T, annot=True, cmap='Blues', ax=ax)
    ax.set_title(title)
    ax.set_xlabel('Metrics')
    ax.set_ylabel('Classes')
    if axis is None:
        plt.show()
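As a quick smoke test, the two helpers can be called with made-up values; the matrix and report below are illustrative placeholders only, not output from any model in this notebook.
# Illustrative call with made-up numbers (not real results)
example_cm = np.array([[50, 2], [3, 45]])
example_report = {
    'Benign': {'precision': 0.94, 'recall': 0.96, 'f1-score': 0.95, 'support': 52},
    'Malignant': {'precision': 0.96, 'recall': 0.94, 'f1-score': 0.95, 'support': 48},
}
plot_confusion_matrix(example_cm, ['Benign', 'Malignant'])
plot_classification_report(example_report)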
Load data
# Load data
data = pd.read_csv('datasets/data.csv')
# Drop the id column; it is only an identifier and carries no predictive signal
data.drop(['id'], axis=1, inplace=True)
data
 | diagnosis | radius_mean | texture_mean | perimeter_mean | area_mean | smoothness_mean | compactness_mean | concavity_mean | concave points_mean | symmetry_mean | ... | radius_worst | texture_worst | perimeter_worst | area_worst | smoothness_worst | compactness_worst | concavity_worst | concave points_worst | symmetry_worst | fractal_dimension_worst
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 1.0 | 0.521037 | 0.022658 | 0.545989 | 0.363733 | 0.593753 | 0.792037 | 0.703140 | 0.731113 | 0.686364 | ... | 0.620776 | 0.141525 | 0.668310 | 0.450698 | 0.601136 | 0.619292 | 0.568610 | 0.912027 | 0.598462 | 0.418864 |
1 | 1.0 | 0.643144 | 0.272574 | 0.615783 | 0.501591 | 0.289880 | 0.181768 | 0.203608 | 0.348757 | 0.379798 | ... | 0.606901 | 0.303571 | 0.539818 | 0.435214 | 0.347553 | 0.154563 | 0.192971 | 0.639175 | 0.233590 | 0.222878 |
2 | 1.0 | 0.601496 | 0.390260 | 0.595743 | 0.449417 | 0.514309 | 0.431017 | 0.462512 | 0.635686 | 0.509596 | ... | 0.556386 | 0.360075 | 0.508442 | 0.374508 | 0.483590 | 0.385375 | 0.359744 | 0.835052 | 0.403706 | 0.213433 |
3 | 1.0 | 0.210090 | 0.360839 | 0.233501 | 0.102906 | 0.811321 | 0.811361 | 0.565604 | 0.522863 | 0.776263 | ... | 0.248310 | 0.385928 | 0.241347 | 0.094008 | 0.915472 | 0.814012 | 0.548642 | 0.884880 | 1.000000 | 0.773711 |
4 | 1.0 | 0.629893 | 0.156578 | 0.630986 | 0.489290 | 0.430351 | 0.347893 | 0.463918 | 0.518390 | 0.378283 | ... | 0.519744 | 0.123934 | 0.506948 | 0.341575 | 0.437364 | 0.172415 | 0.319489 | 0.558419 | 0.157500 | 0.142595 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
564 | 1.0 | 0.690000 | 0.428813 | 0.678668 | 0.566490 | 0.526948 | 0.296055 | 0.571462 | 0.690358 | 0.336364 | ... | 0.623266 | 0.383262 | 0.576174 | 0.452664 | 0.461137 | 0.178527 | 0.328035 | 0.761512 | 0.097575 | 0.105667 |
565 | 1.0 | 0.622320 | 0.626987 | 0.604036 | 0.474019 | 0.407782 | 0.257714 | 0.337395 | 0.486630 | 0.349495 | ... | 0.560655 | 0.699094 | 0.520892 | 0.379915 | 0.300007 | 0.159997 | 0.256789 | 0.559450 | 0.198502 | 0.074315 |
566 | 1.0 | 0.455251 | 0.621238 | 0.445788 | 0.303118 | 0.288165 | 0.254340 | 0.216753 | 0.263519 | 0.267677 | ... | 0.393099 | 0.589019 | 0.379949 | 0.230731 | 0.282177 | 0.273705 | 0.271805 | 0.487285 | 0.128721 | 0.151909 |
567 | 1.0 | 0.644564 | 0.663510 | 0.665538 | 0.475716 | 0.588336 | 0.790197 | 0.823336 | 0.755467 | 0.675253 | ... | 0.633582 | 0.730277 | 0.668310 | 0.402035 | 0.619626 | 0.815758 | 0.749760 | 0.910653 | 0.497142 | 0.452315 |
568 | 0.0 | 0.036869 | 0.501522 | 0.028540 | 0.015907 | 0.000000 | 0.074351 | 0.000000 | 0.000000 | 0.266162 | ... | 0.054287 | 0.489072 | 0.043578 | 0.020497 | 0.124084 | 0.036043 | 0.000000 | 0.000000 | 0.257441 | 0.100682 |
569 rows × 31 columns
Data preprocessing
# Split data into training and testing sets
X = data.iloc[:, 1:]
y = data.iloc[:, 0]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Standardize the data
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Convert data to PyTorch tensors
X_train = torch.FloatTensor(X_train)
X_test = torch.FloatTensor(X_test)
y_train = torch.FloatTensor(y_train.values).view(-1, 1)
y_test = torch.FloatTensor(y_test.values).view(-1, 1)
# Transfer data to GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
X_train = X_train.to(device)
X_test = X_test.to(device)
y_train = y_train.to(device)
y_test = y_test.to(device)
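A short sanity check (optional, just a sketch) confirms the resulting tensor shapes and the device everything landed on:
# Expected: 455 training and 114 test rows of the 569 total, 30 feature columns each
print(X_train.shape, y_train.shape)  # torch.Size([455, 30]) torch.Size([455, 1])
print(X_test.shape, y_test.shape)    # torch.Size([114, 30]) torch.Size([114, 1])
print(device)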
Neural network architectures
# V1
# Two hidden fully connected layers with ReLU activation
# Sigmoid output layer
class NeuralNetworkV1(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(NeuralNetworkV1, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size // 2)
        self.fc3 = nn.Linear(hidden_size // 2, 1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        out = self.sigmoid(out)
        return out
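A one-batch forward pass on random data (illustrative only) verifies that the network returns one sigmoid probability per sample:
# Smoke test: output shape should be (batch, 1) with values in (0, 1)
test_model = NeuralNetworkV1(input_size=30, hidden_size=128)  # 30 features in this dataset
test_out = test_model(torch.randn(4, 30))
print(test_out.shape)  # torch.Size([4, 1])
print(test_out.min().item(), test_out.max().item())  # both within (0, 1)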
# V2
# Three hidden fully connected layers with ReLU activation, with dropout after the first two
# Sigmoid output layer
class NeuralNetworkV2(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(NeuralNetworkV2, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.dropout1 = nn.Dropout(0.5)
        self.fc2 = nn.Linear(hidden_size, hidden_size // 2)
        self.dropout2 = nn.Dropout(0.5)
        self.fc3 = nn.Linear(hidden_size // 2, hidden_size // 4)
        self.fc4 = nn.Linear(hidden_size // 4, 1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.dropout1(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc3(out)
        out = self.relu(out)
        out = self.fc4(out)
        out = self.sigmoid(out)
        return out
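Because V2 contains dropout, it behaves differently in training and evaluation mode; the small sketch below shows the switch, and the evaluate function defined later calls model.eval() for exactly this reason.
# Dropout is stochastic in train mode and disabled in eval mode
demo_model = NeuralNetworkV2(input_size=30, hidden_size=128)
demo_x = torch.randn(2, 30)
demo_model.train()
print(demo_model(demo_x))  # varies from call to call while dropout is active
demo_model.eval()
print(demo_model(demo_x))  # deterministic: dropout layers act as identity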
# V3
# Three hidden fully connected layers with Leaky ReLU activation
# Sigmoid output layer
class NeuralNetworkV3(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(NeuralNetworkV3, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size // 2)
        self.fc3 = nn.Linear(hidden_size // 2, hidden_size // 4)
        self.fc4 = nn.Linear(hidden_size // 4, 1)
        self.leaky_relu = nn.LeakyReLU(0.1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out = self.fc1(x)
        out = self.leaky_relu(out)
        out = self.fc2(out)
        out = self.leaky_relu(out)
        out = self.fc3(out)
        out = self.leaky_relu(out)
        out = self.fc4(out)
        out = self.sigmoid(out)
        return out
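The only difference from V1 is the activation: LeakyReLU(0.1) passes a small fraction of negative inputs through instead of zeroing them, which keeps gradients alive for negative pre-activations. A minimal illustration:
# LeakyReLU(0.1): f(x) = x for x >= 0, 0.1 * x otherwise
act = nn.LeakyReLU(0.1)
print(act(torch.tensor([-2.0, 0.0, 3.0])))  # tensor([-0.2000, 0.0000, 3.0000])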
# V4
# Two 1D convolutional layers with ReLU activation, each followed by max pooling
# One hidden fully connected layer with ReLU activation
# Sigmoid output layer
class NeuralNetworkV4(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(NeuralNetworkV4, self).__init__()
        self.conv1 = nn.Conv1d(1, 16, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv1d(16, 32, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
        # The convolutions preserve length (kernel 3, padding 1); each pooling halves it
        conv_output_size = input_size // 2        # after the first pooling layer
        conv_output_size = conv_output_size // 2  # after the second pooling layer
        self.fc1 = nn.Linear(32 * conv_output_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = x.unsqueeze(1)  # add the channel dimension: (batch, 1, features)
        out = self.conv1(x)
        out = self.relu(out)
        out = self.pool(out)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.pool(out)
        out = out.view(out.size(0), -1)  # flatten for the fully connected layers
        out = self.fc1(out)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out
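For this dataset (input_size = 30) the shape arithmetic works out as follows: the convolutions preserve length, each pooling halves it with floor division, so 30 → 15 → 7 and the flattened size is 32 × 7 = 224. A throwaway trace (sketch) makes this concrete:
# Trace the intermediate shapes for input_size=30
trace_model = NeuralNetworkV4(input_size=30, hidden_size=128)
h = torch.randn(4, 30).unsqueeze(1)  # (batch, 1, 30)
h = trace_model.pool(trace_model.relu(trace_model.conv1(h)))
print(h.shape)  # torch.Size([4, 16, 15])
h = trace_model.pool(trace_model.relu(trace_model.conv2(h)))
print(h.shape)  # torch.Size([4, 32, 7])
print(h.view(h.size(0), -1).shape)  # torch.Size([4, 224])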
# V5
# One LSTM layer
# One hidden fully connected layer with ReLU activation
# Sigmoid output layer
class NeuralNetworkV5(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(NeuralNetworkV5, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc1 = nn.Linear(hidden_size, hidden_size // 2)
        self.fc2 = nn.Linear(hidden_size // 2, 1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        out, _ = self.lstm(x)
        out = out[:, -1, :]  # take the output of the last time step
        out = self.fc1(out)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.sigmoid(out)
        return out
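nn.LSTM with batch_first=True expects input of shape (batch, seq_len, input_size); since each sample here is a flat feature vector, the training call below feeds X_train.unsqueeze(1), i.e. a sequence of length one. A quick shape check (sketch):
# The 30 features are treated as a single time step: (batch, seq_len=1, input_size=30)
lstm_model = NeuralNetworkV5(input_size=30, hidden_size=128)
lstm_x = torch.randn(4, 30).unsqueeze(1)
print(lstm_model(lstm_x).shape)  # torch.Size([4, 1])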
Training and evaluation
# Training function
def train(model, X_train, y_train, criterion, optimizer, epochs=100):
    """Train the neural network with full-batch gradient descent."""
    model.train()
    for epoch in range(epochs):
        optimizer.zero_grad()
        y_pred = model(X_train)
        loss = criterion(y_pred, y_train)
        loss.backward()
        optimizer.step()
        if (epoch + 1) % 100 == 0:
            print(f'Epoch {epoch + 1}/{epochs}, Loss: {loss.item()}')
# Evaluation function
def evaluate(model, X_test, y_test):
    """Evaluate the neural network on held-out data."""
    model.eval()  # disable dropout and other train-only behavior
    with torch.no_grad():
        y_pred = model(X_test)
        y_pred = (y_pred > 0.5).float()
        cm = confusion_matrix(y_test.cpu(), y_pred.cpu())
        cr = classification_report(y_test.cpu(), y_pred.cpu(), target_names=['Benign', 'Malignant'], output_dict=True)
        acc = accuracy_score(y_test.cpu(), y_pred.cpu())
    return cm, cr, acc
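Note that train() performs full-batch gradient descent, which is perfectly workable for 455 training rows; for larger datasets, a DataLoader-based mini-batch loop is the usual alternative. A hedged sketch, not used in the experiments below:
from torch.utils.data import TensorDataset, DataLoader

def train_minibatch(model, X, y, criterion, optimizer, epochs=100, batch_size=32):
    """Mini-batch variant of train(); illustrative only."""
    loader = DataLoader(TensorDataset(X, y), batch_size=batch_size, shuffle=True)
    model.train()
    for epoch in range(epochs):
        for xb, yb in loader:
            optimizer.zero_grad()
            loss = criterion(model(xb), yb)
            loss.backward()
            optimizer.step()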
# Shared hyperparameters for all models
input_size = X_train.shape[1]
hidden_size = 128
learning_rate = 0.001
weight_decay = 0.0001
epochs = 1000
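With these settings the models stay small; a quick way to compare their trainable parameter counts (sketch):
# Compare trainable parameter counts across the five architectures
for cls in [NeuralNetworkV1, NeuralNetworkV2, NeuralNetworkV3, NeuralNetworkV4, NeuralNetworkV5]:
    m = cls(input_size, hidden_size)
    n_params = sum(p.numel() for p in m.parameters() if p.requires_grad)
    print(f'{cls.__name__}: {n_params} parameters')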
Neural network V1
# Neural network V1
model_v1 = NeuralNetworkV1(input_size, hidden_size).to(device)
criterion_v1 = nn.BCELoss()
optimizer_v1 = optim.Adam(model_v1.parameters(), lr=learning_rate, weight_decay=weight_decay)
# Train the model
train(model_v1, X_train, y_train, criterion_v1, optimizer_v1, epochs)
# Evaluate the model
cm_v1, cr_v1, acc_v1 = evaluate(model_v1, X_test, y_test)
Epoch 100/1000, Loss: 0.03765585273504257
Epoch 200/1000, Loss: 0.010441687889397144
Epoch 300/1000, Loss: 0.0036243184003978968
Epoch 400/1000, Loss: 0.0018805447034537792
Epoch 500/1000, Loss: 0.0011611600639298558
Epoch 600/1000, Loss: 0.0008537117973901331
Epoch 700/1000, Loss: 0.0006993163260631263
Epoch 800/1000, Loss: 0.0006119576282799244
Epoch 900/1000, Loss: 0.0005567952175624669
Epoch 1000/1000, Loss: 0.0005156174884177744
# Plot confusion matrix
plot_confusion_matrix(cm_v1, ['Benign', 'Malignant'], title='Confusion matrix - Neural Network V1')
# Plot classification report
plot_classification_report(cr_v1)
Neural network V2
# Neural network V2
model_v2 = NeuralNetworkV2(input_size, hidden_size).to(device)
criterion_v2 = nn.BCELoss()
optimizer_v2 = optim.Adam(model_v2.parameters(), lr=learning_rate, weight_decay=weight_decay)
# Train the model
train(model_v2, X_train, y_train, criterion_v2, optimizer_v2, epochs)
# Evaluate the model
cm_v2, cr_v2, acc_v2 = evaluate(model_v2, X_test, y_test)
Epoch 100/1000, Loss: 0.04539191350340843
Epoch 200/1000, Loss: 0.03138892352581024
Epoch 300/1000, Loss: 0.006715432740747929
Epoch 400/1000, Loss: 0.0048174685798585415
Epoch 500/1000, Loss: 0.001833457383327186
Epoch 600/1000, Loss: 0.0020602247677743435
Epoch 700/1000, Loss: 0.0007627215818502009
Epoch 800/1000, Loss: 0.0006681744707748294
Epoch 900/1000, Loss: 0.000657720142044127
Epoch 1000/1000, Loss: 0.0006745096179656684
# Plot confusion matrix
plot_confusion_matrix(cm_v2, ['Benign', 'Malignant'], title='Confusion matrix - Neural Network V2')
# Plot classification report
plot_classification_report(cr_v2)
Neural network V3
# Neural network V3
model_v3 = NeuralNetworkV3(input_size, hidden_size).to(device)
criterion_v3 = nn.BCELoss()
optimizer_v3 = optim.Adam(model_v3.parameters(), lr=learning_rate, weight_decay=weight_decay)
# Train the model
train(model_v3, X_train, y_train, criterion_v3, optimizer_v3, epochs)
# Evaluate the model
cm_v3, cr_v3, acc_v3 = evaluate(model_v3, X_test, y_test)
Epoch 100/1000, Loss: 0.02865622565150261
Epoch 200/1000, Loss: 0.0030651914421468973
Epoch 300/1000, Loss: 0.0005428834119811654
Epoch 400/1000, Loss: 0.0003230379370506853
Epoch 500/1000, Loss: 0.0002632274990901351
Epoch 600/1000, Loss: 0.00023796758614480495
Epoch 700/1000, Loss: 0.0002225149655714631
Epoch 800/1000, Loss: 0.0002110681962221861
Epoch 900/1000, Loss: 0.00020311155822128057
Epoch 1000/1000, Loss: 0.00019715774396900088
# Plot confusion matrix
plot_confusion_matrix(cm_v3, ['Benign', 'Malignant'], title='Confusion matrix - Neural Network V3')
# Plot classification report
plot_classification_report(cr_v3)
Neural network V4
# Neural network V4
model_v4 = NeuralNetworkV4(input_size, hidden_size).to(device)
criterion_v4 = nn.BCELoss()
optimizer_v4 = optim.Adam(model_v4.parameters(), lr=learning_rate, weight_decay=weight_decay)
# Train the model
train(model_v4, X_train, y_train, criterion_v4, optimizer_v4, epochs)
# Evaluate the model
cm_v4, cr_v4, acc_v4 = evaluate(model_v4, X_test, y_test)
Epoch 100/1000, Loss: 0.05727291852235794
Epoch 200/1000, Loss: 0.007611896842718124
Epoch 300/1000, Loss: 0.0016026693629100919
Epoch 400/1000, Loss: 0.0008431114838458598
Epoch 500/1000, Loss: 0.0006172554567456245
Epoch 600/1000, Loss: 0.0005050148465670645
Epoch 700/1000, Loss: 0.000434816291090101
Epoch 800/1000, Loss: 0.0003858723503071815
Epoch 900/1000, Loss: 0.0003493966069072485
Epoch 1000/1000, Loss: 0.0003211983712390065
# Plot confusion matrix
plot_confusion_matrix(cm_v4, ['Benign', 'Malignant'], title='Confusion matrix - Neural Network V4')
# Plot classification report
plot_classification_report(cr_v4)
Neural network V5
# Neural network V5
model_v5 = NeuralNetworkV5(input_size, hidden_size).to(device)
criterion_v5 = nn.BCELoss()
optimizer_v5 = optim.Adam(model_v5.parameters(), lr=learning_rate, weight_decay=weight_decay)
# Train the model
train(model_v5, X_train.unsqueeze(1), y_train, criterion_v5, optimizer_v5, epochs)
# Evaluate the model
cm_v5, cr_v5, acc_v5 = evaluate(model_v5, X_test.unsqueeze(1), y_test)
Epoch 100/1000, Loss: 0.054361775517463684
Epoch 200/1000, Loss: 0.022276364266872406
Epoch 300/1000, Loss: 0.005759004037827253
Epoch 400/1000, Loss: 0.003364400239661336
Epoch 500/1000, Loss: 0.002334937220439315
Epoch 600/1000, Loss: 0.0014427873538807034
Epoch 700/1000, Loss: 0.0011395757319405675
Epoch 800/1000, Loss: 0.0009981722105294466
Epoch 900/1000, Loss: 0.0009058943251147866
Epoch 1000/1000, Loss: 0.0008350368589162827
# Plot confusion matrix
plot_confusion_matrix(cm_v5, ['Benign', 'Malignant'], title='Confusion matrix - Neural Network V5')
# Plot classification report
plot_classification_report(cr_v5)
# Accuracy comparison
accuracies = [acc_v1, acc_v2, acc_v3, acc_v4, acc_v5]
models = ['Neural Network V1', 'Neural Network V2', 'Neural Network V3', 'Neural Network V4', 'Neural Network V5']
fig, ax = plt.subplots(figsize=(10, 6))
sns.barplot(x=models, y=accuracies, hue=models, legend=False, ax=ax)
# Add value labels above the bars
for i, v in enumerate(accuracies):
    ax.text(i, v + 0.01, str(round(v, 2)), ha='center', va='bottom')
ax.set_title('Accuracy comparison')
ax.set_xlabel('Model')
ax.set_ylabel('Accuracy')
plt.show()
# Test accuracy of the convolutional network (V4)
acc_v4
0.9824561403508771
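For reference, the per-model accuracies can also be collected into a small summary table (sketch; the values come from the runs above):
# Tabulate the test accuracies computed above
summary = pd.DataFrame({'model': models, 'accuracy': accuracies})
print(summary.sort_values('accuracy', ascending=False))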