import os
from pathlib import Path

import tensorflow as tf

from .consts import DISEASE_CLASSES, PLANT_CLASSES


class Dataset:
    ''' Class to load and preprocess the dataset.

    Loads images and labels from the given directory into a `tf.data.Dataset`.

    Args:
        `data_dir (Path)`: Path to the dataset directory.
        `seed (int)`: Seed for shuffling the dataset.
        `repeat (int)`: Number of times to repeat the dataset.
        `shuffle_buffer_size (int)`: Size of the buffer for shuffling the dataset.
        `batch_size (int)`: Batch size for the dataset.
    '''

    def __init__(self,
                 data_dir: Path,
                 seed: int = 42,
                 repeat: int = 1,
                 shuffle_buffer_size: int = 10_000,
                 batch_size: int = 64) -> None:
        self.data_dir = data_dir
        self.seed = seed
        self.repeat = repeat
        self.shuffle_buffer_size = shuffle_buffer_size
        self.batch_size = batch_size

        # Build the input pipeline: list files (with per-file preprocessing)
        # -> shuffle -> repeat -> batch -> prefetch.
        self.dataset = self.__load_dataset()\
            .shuffle(self.shuffle_buffer_size, seed=self.seed)\
            .repeat(self.repeat)\
            .batch(self.batch_size, drop_remainder=True)\
            .prefetch(tf.data.experimental.AUTOTUNE)

    def __load_dataset(self) -> tf.data.Dataset:
        dataset = tf.data.Dataset.list_files(str(self.data_dir / '*/*'))

        # Check whether the path has the word 'test' in it.
        if 'test' in str(self.data_dir).lower():
            # File-name issue: test-set labels use camel case (regex fix?) and
            # differ from the train/valid sets, so skip label mapping for now.
            pass
        else:
            dataset = dataset.map(
                self.__preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)

        return dataset

    def __get_labels(self, image_path):
        # The parent directory name encodes both labels as '<plant>___<disease>'.
        path = tf.strings.split(image_path, os.path.sep)[-2]
        plant = tf.strings.split(path, '___')[0]
        disease = tf.strings.split(path, '___')[1]

        # Compare against the class lists to build one-hot vectors.
        one_hot_plant = plant == PLANT_CLASSES
        one_hot_disease = disease == DISEASE_CLASSES

        return tf.cast(one_hot_plant, dtype=tf.uint8, name=None), tf.cast(one_hot_disease, dtype=tf.uint8, name=None)

    def __get_image(self, image_path):
        img = tf.io.read_file(image_path)
        img = tf.io.decode_jpeg(img, channels=3)
        # Scale pixel values to [0, 1].
        return tf.cast(img, dtype=tf.float32, name=None) / 255.

    def __preprocess(self, image_path):
        labels = self.__get_labels(image_path)
        image = self.__get_image(image_path)

        # returns X, Y1, Y2
        return image, labels[0], labels[1]

    def __getattr__(self, attr):
        # Delegate unknown attributes to the wrapped tf.data.Dataset.
        return getattr(self.dataset, attr)
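

# A minimal usage sketch (an assumption, not part of this module: the data
# directory follows a PlantVillage-style layout where each class folder is
# named '<Plant>___<Disease>', e.g. 'Tomato___Late_blight', and 'data/train'
# is a hypothetical path):
#
#     train = Dataset(Path('data/train'), batch_size=32)
#     for images, plant_labels, disease_labels in train.take(1):
#         # images: float32 in [0, 1]; labels: one-hot uint8 vectors
#         print(images.shape, plant_labels.shape, disease_labels.shape)
#
# tf.data.Dataset methods such as `take` are forwarded through `__getattr__`,
# so the wrapper can be used much like a plain tf.data.Dataset.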