zadanie 9

Asiek 2024-01-07 22:06:32 +01:00
parent 81f7c45eaa
commit a060e9e572
2 changed files with 87 additions and 0 deletions

zad9_keras.py (new file)

import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.datasets import fashion_mnist

# Seed both NumPy and TensorFlow for reproducibility
np.random.seed(10)
tf.random.set_seed(10)

device = "gpu" if tf.config.list_physical_devices("GPU") else "cpu"
print(device)

# Load Fashion-MNIST and scale pixel values to [0, 1]
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0

# Simple fully connected classifier: Flatten takes the 28x28 images directly,
# one hidden ReLU layer, softmax over the 10 classes
neurons = 300
model = Sequential([
    Flatten(input_shape=(28, 28)),
    Dense(neurons, activation='relu'),
    Dense(10, activation='softmax')
])

# The last layer already applies softmax, so the loss is computed on
# probabilities (from_logits=False), not on raw logits
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)

test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\naccuracy:', test_acc)

zad9_torch.py (new file)

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import Compose, Lambda, ToTensor

# Seed PyTorch for reproducibility and pick the GPU if one is available
torch.manual_seed(10)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(device)

# Fashion-MNIST; each image is converted to a tensor and flattened to a 784-vector
trainset = datasets.FashionMNIST('data', train=True, download=True,
                                 transform=Compose([ToTensor(), Lambda(lambda x: x.flatten())]))
testset = datasets.FashionMNIST('data', train=False, download=True,
                                transform=Compose([ToTensor(), Lambda(lambda x: x.flatten())]))

def train(model, dataset, n_iter=10, batch_size=256):
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    # NLLLoss expects log-probabilities, which the LogSoftmax output layer provides
    criterion = nn.NLLLoss()
    dl = DataLoader(dataset, batch_size=batch_size, shuffle=True)
    model.train()
    for epoch in range(n_iter):
        for images, targets in dl:
            optimizer.zero_grad()
            out = model(images.to(device))
            loss = criterion(out, targets.to(device))
            loss.backward()
            optimizer.step()
        if epoch % 10 == 0:
            # log the last batch loss every 10th epoch
            print('epoch: %3d loss: %.4f' % (epoch, loss.item()))

def accuracy(model, dataset):
    model.eval()
    # no gradients are needed for evaluation
    with torch.no_grad():
        correct = sum((model(images.to(device)).argmax(dim=1) == targets.to(device)).sum()
                      for images, targets in DataLoader(dataset, batch_size=256))
    print(correct.float() / len(dataset))

# One-hidden-layer MLP; LogSoftmax pairs with the NLLLoss used in train()
neurons = 300
model = nn.Sequential(
    nn.Linear(28 * 28, neurons),
    nn.ReLU(),
    nn.Linear(neurons, 10),
    nn.LogSoftmax(dim=-1)
).to(device)

train(model, trainset)
accuracy(model, testset)