diff --git a/.gitignore b/.gitignore
index ac91879..3d25d2a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,6 @@
+.ipynb_checkpoints
 data/
 *.zip
-
 # https://github.com/microsoft/vscode-python/blob/main/.gitignore
 .DS_Store
 .huskyrc.json
@@ -51,4 +51,4 @@ dist/**
 # translation files
 *.xlf
 package.nls.*.json
-l10n/
\ No newline at end of file
+l10n/
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..142a05b
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,37 @@
+FROM ubuntu:22.04
+
+# Packages
+RUN apt-get update && apt-get upgrade -y && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+    curl liblzma-dev python-tk python3-tk tk-dev libssl-dev libffi-dev libncurses5-dev zlib1g zlib1g-dev \
+    libreadline-dev libbz2-dev libsqlite3-dev make gcc git-all wget python3-openssl gnupg2
+
+# Setup CUDA
+RUN apt-key del 7fa2af80 && \
+    wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin && \
+    mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600 && \
+    wget https://developer.download.nvidia.com/compute/cuda/12.2.2/local_installers/cuda-repo-wsl-ubuntu-12-2-local_12.2.2-1_amd64.deb && \
+    dpkg -i cuda-repo-wsl-ubuntu-12-2-local_12.2.2-1_amd64.deb && \
+    cp /var/cuda-repo-wsl-ubuntu-12-2-local/cuda-*-keyring.gpg /usr/share/keyrings/ && \
+    apt-get update && \
+    apt-get -y install cuda-toolkit-12-2
+
+# Pyenv
+# NOTE: HOME is not an image ENV at build time, so $HOME expands to "" and pyenv lands in /.pyenv (see CUDNN_PATH below)
+ENV PYENV_ROOT="$HOME/.pyenv"
+ENV PATH="$PYENV_ROOT/bin:$PYENV_ROOT/versions/3.10.12/bin:$PATH"
+
+RUN curl https://pyenv.run | bash
+RUN pyenv install 3.10.12 && \
+    pyenv global 3.10.12 && \
+    echo 'eval "$(pyenv init --path)"' >> ~/.bashrc && \
+    echo 'eval "$(pyenv virtualenv-init -)"' >> ~/.bashrc
+
+SHELL ["/bin/bash", "-c"]
+
+WORKDIR /app
+COPY ./requirements.txt /app/requirements.txt
+RUN pip install -r requirements.txt
+
+ENV CUDNN_PATH="/.pyenv/versions/3.10.12/lib/python3.10/site-packages/nvidia/cudnn/"
+ENV LD_LIBRARY_PATH="$CUDNN_PATH/lib":"/usr/local/cuda-12.2/lib64"
+ENV PATH="$PATH":"/usr/local/cuda-12.2/bin"
\ No newline at end of file
diff --git a/Makefile b/Makefile
index c500c69..0888760 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,6 @@
 .PHONY: download-dataset resize-dataset sobel-dataset
 
+# Use inside the Docker container
 download-dataset:
 	python3 ./file_manager/data_manager.py --download
 
@@ -7,4 +8,17 @@ resize-dataset:
 	python3 ./file_manager/data_manager.py --resize --shape 64 64 --source "original_dataset"
 
 sobel-dataset:
-	python3 ./file_manager/data_manager.py --sobel --source "resized_dataset"
\ No newline at end of file
+	python3 ./file_manager/data_manager.py --sobel --source "resized_dataset"
+
+login:
+	wandb login $$(cat "$$API_KEY_SECRET")
+
+# Use outside the container
+docker-run:
+	docker-compose run --entrypoint=/bin/bash gpu
+
+docker-build:
+	docker-compose build
+
+check-gpu:
+	python3 ./gpu_check.py
diff --git a/README.md b/README.md
index 79f7604..32b3509 100644
--- a/README.md
+++ b/README.md
@@ -12,85 +12,26 @@
 | 15.06.2024 | Demonstration of the working system | | Presentation of the results and accuracy of the selected model
 
-# Detailed schedule
+# Documentation
 
-Progress meetings: every Sunday, 18:00-20:00.
-Below, the "action" column uses the format ` ()`.
-No name next to a task means it has not been assigned yet.
+[Documentation link](https://uam-my.sharepoint.com/personal/krzboj_st_amu_edu_pl/_layouts/15/doc.aspx?sourcedoc={dc695bbe-68d1-4947-8c29-1d008f252a3b}&action=edit)
-
-| Date                        | Action
-|----------------------------:|:------------------------------------------------------------|
-| 05.05.2024                  | Sergiusz (1), Mateusz (3), Krzysztof (2)
-| 12.05.2024                  | Everyone (5), (4), (6), (7.1)
-| 19.05.2024                  | Everyone (5), (7.2)
-| 26.05.2024                  | Everyone (5), (7.3), (9)
-| 02.06.2024                  | (8)
-| 09.06.2024                  | Feedback, possible fixes
-| 15.06.2024                  | Final presentation
-
-Action details:
-
-1) Preparing the data and the data-processing module
-    - Write a script that downloads the data and unpacks it locally.
-    - Write a template script for data processing. The script should create directories (a directory structure) with the transformed data. Each transformation of the original data will be committed to the repository so that the rest of the team can run it.
-    - Write one example transformation, e.g. resize and contours, using the template.
-    - Create a README.md with instructions for creating a new processing module.
-
-2) Models to prepare:
-    - Initially prepare 3 models in the W&B format, e.g. MobileNet, ResNet, or possibly a custom CNN with multi-task classification.
-    - Run the models on W&B to see whether they start and generate correct charts.
-    - Create a README.md with instructions for creating new models.
-
-3) File-loading module
-    - Write a module that loads the transformed data. The data will be used for training and validating the model.
-    - The module should split the dataset into three parts: train, valid, test.
-    - It should be possible to define the batch size, the validation set size, and the path the data is loaded from.
-    - Add the option to set a seed so that anyone can reproduce similar results when needed. The seed should be printed to stdout when the script runs.
-    - Add the option to choose the data distribution.
-    - The output should be in a format that can be loaded directly into the model (binary, TFRecord, or other).
-    - A README.md describing how to parameterize the module.
-
-4) Module for handling and running W&B Jobs
-    - Write a script for pulling data from the queue, to work around the problem of running an agent on Colab/Kaggle.
-    - Write a script that creates a job and sends it to a queue. It should accept hyperparameters and the name of the queue the job is sent to.
-    - Write a script that starts an agent on a given machine.
-    - Write a script for creating jobs; the directory structure should be specified so that jobs can be managed and different models supported. Optionally, synchronization of jobs between W&B and the local environment.
-    - A README.md describing the above.
-
-5) Experiments: tuning hyperparameters and the data distribution, testing different strategies. If necessary, adding new models.
-
-6) Add a heatmap (CAM) to the model.
-
-7) Preparing the project front end (https://www.gradio.app/)
-    1. Run the front end locally for testing.
-    2. Support displaying heatmaps.
-    3. Deploy the front end to an environment (local or remote, to be decided).
-
-8) Selecting the best model.
-
-9) Module for handling Sweeps - automatic hyperparameter tuning (optional).
-
-# Data source
-
-https://www.kaggle.com/datasets/vipoooool/new-plant-diseases-dataset
-
-# Technologies
-
-## W&B
-
-W&B built-in features:
-
-- Experiment tracking
-- Predictions visualization
-- Scheduling runs through queues & connected agents
-- Model registry
-- Hyperparameter optimization via Sweeps
-
-## Compute power
-
-- Radeon 7800XT
-- GeForce RTX 3060TI
-- GeForce RTX 3070
-- GeForce RTX 4050M
-- [university resources](https://laboratoria.wmi.amu.edu.pl/uslugi/zasoby-dla-projektow/maszyna-gpu/)
+# Setup
+1. Install Docker on your local system.
+2. To build the Docker image and open a shell in it, use the Makefile:
+```bash
+make docker-build
+make docker-run
+```
+3. Get your API key from https://wandb.ai/settings#api and put it in the **secrets.txt** file.
+4. Inside the running container:
+```bash
+make login     # log in to W&B
+make check-gpu # verify that TensorFlow sees the GPU
+```
+5. If needed, build and run the container manually:
+```bash
+docker build -t gpu .
+docker run --rm -it --gpus all --entrypoint /bin/bash gpu
+```
diff --git a/compose.yaml b/compose.yaml
new file mode 100644
index 0000000..7c93b98
--- /dev/null
+++ b/compose.yaml
@@ -0,0 +1,23 @@
+services:
+  gpu:
+    image: gpu
+    volumes:
+      - .:/app
+    command: nvidia-smi
+    build:
+      context: .
+      dockerfile: Dockerfile
+    environment:
+      API_KEY_SECRET: /run/secrets/api_key_secret
+    secrets:
+      - api_key_secret
+    deploy:
+      resources:
+        reservations:
+          devices:
+            - driver: nvidia
+              count: 1
+              capabilities: [gpu]
+secrets:
+  api_key_secret:
+    file: ./secrets.txt
\ No newline at end of file
diff --git a/gpu_check.py b/gpu_check.py
new file mode 100644
index 0000000..282cb65
--- /dev/null
+++ b/gpu_check.py
@@ -0,0 +1,16 @@
+try:
+    import tensorflow
+except ImportError:
+    print("TensorFlow is not installed; install the required packages from requirements.txt")
+    exit(1)
+
+print("If you see a tensor result below, TensorFlow is available.")
+rs = tensorflow.reduce_sum(tensorflow.random.normal([1000, 1000]))
+print(rs)
+
+gpus = tensorflow.config.list_physical_devices('GPU')
+if len(gpus) == 0:
+    print("No GPU available.")
+else:
+    print(f"GPUs available: {len(gpus)}")
+    print(gpus)
diff --git a/launch_settings.yaml b/launch_settings.yaml
new file mode 100644
index 0000000..b752b17
--- /dev/null
+++ b/launch_settings.yaml
@@ -0,0 +1,10 @@
+max_jobs: 1
+
+entity: uczenie-maszynowe-projekt
+
+queues:
+  - GPU queue 1
+  - GPU queue 2
+
+builder:
+  type: docker
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..051a19b
--- /dev/null
+++ b/main.py
@@ -0,0 +1,15 @@
+from model.test_model import TestModel
+from pathlib import Path
+from dataset.dataset import Dataset
+
+if __name__ == "__main__":
+    # Load the dataset (smoke test only; TestModel trains on built-in MNIST)
+    train_dataset = Dataset(Path('data/resized_dataset/train'))
+    valid_dataset = Dataset(Path('data/resized_dataset/valid'))
+    for i in train_dataset.take(1):
+        print(i)
+
+    # Train the model
+    model = TestModel()
+    history = model.fit()
+    model.save("./model/test_model_final.keras")
diff --git a/model/__init__.py b/model/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/model/resnet_50_model.py b/model/resnet_50_model.py
new file mode 100644
index 0000000..584e3cb
--- /dev/null
+++ b/model/resnet_50_model.py
@@ -0,0 +1,63 @@
+import tensorflow as tf
+
+from wandb_utils.config import Config
+from wandb.keras import WandbMetricsLogger
+
+
+class Resnet50Model:
+    def __init__(self):
+        self.config = Config(epoch=8, batch_size=64).config()
+        self.config.learning_rate = 0.01
+        # Note: learning_rate is logged to W&B for reference; the "sgd" string below uses the Keras default rate
+        # Define specific configuration below; these values will be visible in the W&B interface
+        # Start of config
+        self.config.optimizer = "sgd"
+        self.config.loss = "sparse_categorical_crossentropy"
+        self.config.metrics = ["accuracy"]
+        # End
+        self.model = self.__build_model()
+        self.__compile()
+        self.__load_dataset()
+
+    def __build_model(self):
+        # ImageNet-pretrained backbone without its classification head;
+        # CIFAR-10 images are 32x32, the smallest input size ResNet50 accepts
+        base = tf.keras.applications.ResNet50(
+            input_shape=(32, 32, 3), include_top=False, weights='imagenet'
+        )
+        # Keep the pretrained layers frozen and train only the new head
+        base.trainable = False
+        return tf.keras.models.Sequential([
+            base,
+            tf.keras.layers.GlobalAveragePooling2D(),
+            tf.keras.layers.Dense(10, activation='softmax'),
+        ])
+
+    def __compile(self):
+        self.model.compile(
+            optimizer=self.config.optimizer,
+            loss=self.config.loss,
+            metrics=self.config.metrics,
+        )
+
+    def __load_dataset(self):
+        (self.x_train, self.y_train), (self.x_test, self.y_test) = tf.keras.datasets.cifar10.load_data()
+        self.x_train = self.x_train.astype('float32') / 255.0
+        self.x_test = self.x_test.astype('float32') / 255.0
+
+    def fit(self):
+        wandb_callbacks = [
+            WandbMetricsLogger(log_freq=5),
+            # Not supported with Keras >= 3.0.0
+            # WandbModelCheckpoint(filepath="models"),
+        ]
+        return self.model.fit(
+            x=self.x_train,
+            y=self.y_train,
+            epochs=self.config.epoch,
+            batch_size=self.config.batch_size,
+            callbacks=wandb_callbacks
+        )
+
+    def save(self, filepath):
+        self.model.save(filepath)
diff --git a/model/test_model.py b/model/test_model.py
new file mode 100644
index 0000000..80498a6
--- /dev/null
+++ b/model/test_model.py
@@ -0,0 +1,68 @@
+import random
+import tensorflow as tf
+
+from wandb_utils.config import Config
+from wandb.keras import WandbMetricsLogger
+
+
+class TestModel:
+    def __init__(self):
+        self.config = Config(epoch=8, batch_size=256).config()
+        self.config.learning_rate = 0.01
+        # Note: learning_rate is logged to W&B for reference; the "sgd" string below uses the Keras default rate
+        # Define specific configuration below; these values will be visible in the W&B interface
+        # Start of config
+        self.config.layer_1 = 512
+        self.config.activation_1 = "relu"
+        self.config.dropout = random.uniform(0.01, 0.80)
+        self.config.layer_2 = 10
+        self.config.activation_2 = "softmax"
+        self.config.optimizer = "sgd"
+        self.config.loss = "sparse_categorical_crossentropy"
+        self.config.metrics = ["accuracy"]
+        # End
+        self.model = self.__build_model()
+        self.__compile()
+        self.__load_dataset()
+
+    def __build_model(self):
+        return tf.keras.models.Sequential([
+            tf.keras.layers.Flatten(input_shape=(28, 28)),
+            tf.keras.layers.Dense(self.config.layer_1, activation=self.config.activation_1),
+            tf.keras.layers.Dropout(self.config.dropout),
+            tf.keras.layers.Dense(self.config.layer_2, activation=self.config.activation_2)
+        ])
+
+    def __compile(self):
+        self.model.compile(
+            optimizer=self.config.optimizer,
+            loss=self.config.loss,
+            metrics=self.config.metrics,
+        )
+
+    def __load_dataset(self):
+        # Subsample MNIST (every 5th train and every 20th test example) to keep this smoke test fast
+        mnist = tf.keras.datasets.mnist
+        (self.x_train, self.y_train), (self.x_test, self.y_test) = mnist.load_data()
+        self.x_train, self.x_test = self.x_train / 255.0, self.x_test / 255.0
+        self.x_train, self.y_train = self.x_train[::5], self.y_train[::5]
+        self.x_test, self.y_test = self.x_test[::20], self.y_test[::20]
+
+    def fit(self):
+        wandb_callbacks = [
+            WandbMetricsLogger(log_freq=5),
+            # Not supported with Keras >= 3.0.0
+            # WandbModelCheckpoint(filepath="models"),
+        ]
+        return self.model.fit(
+            x=self.x_train,
+            y=self.y_train,
+            epochs=self.config.epoch,
+            batch_size=self.config.batch_size,
+            validation_data=(self.x_test, self.y_test),
+            callbacks=wandb_callbacks
+        )
+
+    def save(self, filepath):
+        self.model.save(filepath)
+
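Both model classes above follow the same pattern: `Config` opens a W&B run, `fit()` streams metrics through `WandbMetricsLogger`, and `save()` writes a `.keras` file. Below is a minimal sketch of how such a class is driven, mirroring the flow in `main.py`; the script name and the printed metric are illustrative, not part of this PR.

```python
# run_test_model.py - illustrative sketch only; assumes `make login` has been
# run inside the container so that wandb is authenticated.
from model.test_model import TestModel

if __name__ == "__main__":
    model = TestModel()    # Config calls wandb.init() during construction
    history = model.fit()  # WandbMetricsLogger streams metrics to the W&B run
    print(f"final val_accuracy: {history.history['val_accuracy'][-1]:.3f}")
    model.save("./model/test_model_final.keras")
```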
diff --git a/model/test_model_final.keras b/model/test_model_final.keras
new file mode 100644
index 0000000..a18bd63
Binary files /dev/null and b/model/test_model_final.keras differ
diff --git a/requirements.txt b/requirements.txt
index cc6fe18..57e8f15 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,6 @@
-tensorflow==2.16.1
+tensorflow[and-cuda]==2.16.1
+tensorflow-io==0.37.0
 numpy==1.26.4
 opencv-python==4.9.0.80
 wget==3.2
+wandb==0.16.6
\ No newline at end of file
diff --git a/secrets.txt b/secrets.txt
new file mode 100644
index 0000000..55dd485
--- /dev/null
+++ b/secrets.txt
@@ -0,0 +1 @@
+FILL IN
\ No newline at end of file
diff --git a/test.py b/test.py
deleted file mode 100644
index a75f18f..0000000
--- a/test.py
+++ /dev/null
@@ -1,10 +0,0 @@
-
-from pathlib import Path
-
-from dataset.dataset import Dataset
-
-train_dataset = Dataset(Path('data/resized_dataset/train'))
-valid_dataset = Dataset(Path('data/resized_dataset/valid'))
-
-for i in train_dataset.take(1):
-    print(i)
diff --git a/testing.ipynb b/testing.ipynb
deleted file mode 100644
index 1d4ffaa..0000000
--- a/testing.ipynb
+++ /dev/null
@@ -1,363 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "code",
-   "execution_count": 4,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/html": [
-       "Tracking run with wandb version 0.16.6"
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "text/html": [
-       "Run data is saved locally in /mnt/c/Users/krzys/OneDrive/Studia/inz-uczenia-maszynowego/Detection-of-plant-diseases/wandb/run-20240416_232247-bfji8amn"
-      ],
-      "text/plain": [
-       ""
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "text/html": [
-       "Syncing run floral-energy-3 to Weights & Biases (docs)
" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View project at https://wandb.ai/uczenie-maszynowe-projekt/Detection%20of%20plant%20diseases" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View run at https://wandb.ai/uczenie-maszynowe-projekt/Detection%20of%20plant%20diseases/runs/bfji8amn" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Epoch 1/8\n", - "44/47 [===========================>..] - ETA: 0s - loss: 2.1872 - accuracy: 0.2224INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 2s 32ms/step - loss: 2.1734 - accuracy: 0.2344 - val_loss: 1.9111 - val_accuracy: 0.5380\n", - "Epoch 2/8\n", - "40/47 [========================>.....] - ETA: 0s - loss: 1.7703 - accuracy: 0.5437INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 1s 31ms/step - loss: 1.7483 - accuracy: 0.5527 - val_loss: 1.5486 - val_accuracy: 0.6880\n", - "Epoch 3/8\n", - "46/47 [============================>.] - ETA: 0s - loss: 1.4466 - accuracy: 0.6818INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 2s 33ms/step - loss: 1.4444 - accuracy: 0.6829 - val_loss: 1.2824 - val_accuracy: 0.7460\n", - "Epoch 4/8\n", - "44/47 [===========================>..] - ETA: 0s - loss: 1.2232 - accuracy: 0.7362INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 2s 32ms/step - loss: 1.2162 - accuracy: 0.7390 - val_loss: 1.0886 - val_accuracy: 0.7880\n", - "Epoch 5/8\n", - "44/47 [===========================>..] - ETA: 0s - loss: 1.0583 - accuracy: 0.7694INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 
0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 1s 28ms/step - loss: 1.0519 - accuracy: 0.7711 - val_loss: 0.9497 - val_accuracy: 0.8020\n", - "Epoch 6/8\n", - "41/47 [=========================>....] - ETA: 0s - loss: 0.9382 - accuracy: 0.7897INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 1s 28ms/step - loss: 0.9339 - accuracy: 0.7902 - val_loss: 0.8484 - val_accuracy: 0.8180\n", - "Epoch 7/8\n", - "47/47 [==============================] - ETA: 0s - loss: 0.8496 - accuracy: 0.8043INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "47/47 [==============================] - 1s 27ms/step - loss: 0.8496 - accuracy: 0.8043 - val_loss: 0.7735 - val_accuracy: 0.8220\n", - "Epoch 8/8\n", - "44/47 [===========================>..] - ETA: 0s - loss: 0.7790 - accuracy: 0.8180INFO:tensorflow:Assets written to: models/assets\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: models/assets\n", - "\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./models)... Done. 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\r", - "47/47 [==============================] - 1s 29ms/step - loss: 0.7779 - accuracy: 0.8183 - val_loss: 0.7165 - val_accuracy: 0.8260\n" - ] - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "316da49b179f47019f8cf5c9c72353fe" - } - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "\n", - "

Run history:

batch/accuracy      ▁▁▁▂▂▄▅▅▅▅▆▆▆▇▇▇▇▇▇▇▇▇▇▇████████████████
batch/batch_step    ▁▁▁▂▂▂▂▂▂▃▃▃▃▃▃▄▄▄▄▄▅▅▅▅▅▅▆▆▆▆▆▆▇▇▇▇▇███
batch/learning_rate ▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁
batch/loss          ███▇▇▆▆▆▅▅▅▄▄▄▄▄▃▃▃▃▃▂▂▂▂▂▂▂▂▂▁▁▁▁▁▁▁▁▁▁
epoch/accuracy      ▁▅▆▇▇███
epoch/epoch         ▁▂▃▄▅▆▇█
epoch/learning_rate ▁▁▁▁▁▁▁▁
epoch/loss          █▆▄▃▂▂▁▁
epoch/val_accuracy  ▁▅▆▇▇███
epoch/val_loss      █▆▄▃▂▂▁▁

Run summary:

batch/accuracy      0.81726
batch/batch_step    395
batch/learning_rate 0.01
batch/loss          0.77969
epoch/accuracy      0.81825
epoch/epoch         7
epoch/learning_rate 0.01
epoch/loss          0.77791
epoch/val_accuracy  0.826
epoch/val_loss      0.71648

" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - " View run floral-energy-3 at: https://wandb.ai/uczenie-maszynowe-projekt/Detection%20of%20plant%20diseases/runs/bfji8amn
View project at: https://wandb.ai/uczenie-maszynowe-projekt/Detection%20of%20plant%20diseases
Synced 5 W&B file(s), 0 media file(s), 42 artifact file(s) and 0 other file(s)" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "Find logs at: ./wandb/run-20240416_232247-bfji8amn/logs" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# This script needs these libraries to be installed:\n", - "# tensorflow, numpy\n", - "\n", - "import wandb\n", - "from wandb.keras import WandbMetricsLogger, WandbModelCheckpoint\n", - "\n", - "import random\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "\n", - "\n", - "# Start a run, tracking hyperparameters\n", - "wandb.init(\n", - " # set the wandb project where this run will be logged\n", - " project=\"Detection of plant diseases\",\n", - "\n", - " # track hyperparameters and run metadata with wandb.config\n", - " config={\n", - " \"layer_1\": 512,\n", - " \"activation_1\": \"relu\",\n", - " \"dropout\": random.uniform(0.01, 0.80),\n", - " \"layer_2\": 10,\n", - " \"activation_2\": \"softmax\",\n", - " \"optimizer\": \"sgd\",\n", - " \"loss\": \"sparse_categorical_crossentropy\",\n", - " \"metric\": \"accuracy\",\n", - " \"epoch\": 8,\n", - " \"batch_size\": 256\n", - " }\n", - ")\n", - "\n", - "# [optional] use wandb.config as your config\n", - "config = wandb.config\n", - "\n", - "# get the data\n", - "mnist = tf.keras.datasets.mnist\n", - "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n", - "x_train, x_test = x_train / 255.0, x_test / 255.0\n", - "x_train, y_train = x_train[::5], y_train[::5]\n", - "x_test, y_test = x_test[::20], y_test[::20]\n", - "labels = [str(digit) for digit in range(np.max(y_train) + 1)]\n", - "\n", - "# build a model\n", - "model = tf.keras.models.Sequential([\n", - " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", - " tf.keras.layers.Dense(config.layer_1, activation=config.activation_1),\n", - " tf.keras.layers.Dropout(config.dropout),\n", - " tf.keras.layers.Dense(config.layer_2, activation=config.activation_2)\n", - " ])\n", - "\n", - "# compile the model\n", - "model.compile(optimizer=config.optimizer,\n", - " loss=config.loss,\n", - " metrics=[config.metric]\n", - " )\n", - "\n", - "# WandbMetricsLogger will log train and validation metrics to wandb\n", - "# WandbModelCheckpoint will upload model checkpoints to wandb\n", - "history = model.fit(x=x_train, y=y_train,\n", - " epochs=config.epoch,\n", - " batch_size=config.batch_size,\n", - " validation_data=(x_test, y_test),\n", - " callbacks=[\n", - " WandbMetricsLogger(log_freq=5),\n", - " WandbModelCheckpoint(\"models\")\n", - " ])\n", - "\n", - "# [optional] finish the wandb run, necessary in notebooks\n", - "wandb.finish()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.12" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/wandb_utils/__init__.py b/wandb_utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/wandb_utils/config.py b/wandb_utils/config.py new file mode 100644 index 0000000..ea15e55 --- /dev/null +++ 
b/wandb_utils/config.py
@@ -0,0 +1,22 @@
+import wandb
+
+class Config:
+    def __init__(self, epoch, batch_size):
+        self.epoch = epoch
+        self.batch_size = batch_size
+
+        self.run = wandb.init(
+            project="Detection of plant diseases",
+            config={
+                "epoch": epoch,
+                "batch_size": batch_size,
+            }
+        )
+
+    def config(self):
+        return self.run.config
+
+    def finish(self):
+        self.run.finish()
+
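`wandb_utils.config.Config` wraps `wandb.init`: constructing it opens the run and logs `epoch` and `batch_size`, `config()` returns the live run config that the model classes extend with their own keys, and `finish()` closes the run (mainly relevant in notebooks, since scripts close the run on exit). A minimal usage sketch; the extra `optimizer` key is purely illustrative:

```python
from wandb_utils.config import Config

holder = Config(epoch=8, batch_size=64)  # wandb.init() runs here
config = holder.config()                 # the live wandb run config
config.optimizer = "sgd"                 # extra keys show up in the W&B UI
print(config.epoch, config.batch_size, config.optimizer)
holder.finish()                          # ends the run (wandb.Run.finish)
```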