Compare commits
14 commits: 09070879ac ... e9fffa0539
SHA1:
e9fffa0539
7849d9ad51
c70553ec7c
c6f6ae28ca
040b1d014f
1cfb74db6a
8e6318b1fe
d4b6a714bb
b7ca0fae45
de27695d53
e3002d5ef8
9ac4b3ec4e
9dafc8c895
179a5101cc
.gitignore (vendored): 55 lines changed
@@ -1,4 +1,55 @@
-data
-archive.zip
 .ipynb_checkpoints
+data/
+*.zip
+
+# https://github.com/microsoft/vscode-python/blob/main/.gitignore
+.DS_Store
+.huskyrc.json
+out
+log.log
+**/node_modules
+*.pyc
+*.vsix
+envVars.txt
+**/.vscode/.ropeproject/**
+**/testFiles/**/.cache/**
+*.noseids
+.nyc_output
+.vscode-test
 __pycache__
+npm-debug.log
+**/.mypy_cache/**
+!yarn.lock
+coverage/
+cucumber-report.json
+**/.vscode-test/**
+**/.vscode test/**
+**/.vscode-smoke/**
+**/.venv*/
+port.txt
+precommit.hook
+python_files/lib/**
+python_files/get-pip.py
+debug_coverage*/**
+languageServer/**
+languageServer.*/**
+bin/**
+obj/**
+.pytest_cache
+tmp/**
+.python-version
+.vs/
+test-results*.xml
+xunit-test-results.xml
+build/ci/performance/performance-results.json
+!build/
+debug*.log
+debugpy*.log
+pydevd*.log
+nodeLanguageServer/**
+nodeLanguageServer.*/**
+dist/**
+# translation files
+*.xlf
+package.nls.*.json
+l10n/
(deleted file): 55 lines removed
@@ -1,55 +0,0 @@
-import glob
-import shutil
-import cv2
-from zipfile import ZipFile
-import os
-import wget
-
-mainPath="data/"
-pathToTrainAndValidDate = mainPath + "%s/**/*.*"
-pathToTestDataset = mainPath + "/test"
-originalDatasetName = "original dataset"
-
-class DataManager:
-
-    def downloadData(self):
-        if not os.path.isfile("archive.zip"):
-            wget.download("https://storage.googleapis.com/kaggle-data-sets/78313/182633/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240502%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240502T181500Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=87d0661313e358206b6e10d44f135d41e23501d601e58b1e8236ca28a82ccc434534564b45baa84c4d829dd1995ff384d51fe5dba3f543d00eb0763169fd712c6c8f91bb4f298db38a19b31b2d489798a9723a271aa4108d7b93345c5a64a7ef00b9b8f27d1d5f728e373c870f0287eb89bc747941f0aeeb4703c288059e2e07b7ece3a83114a9607276874a90d4ec96dde06fddb94a0d3af72848565661b1404e3ea248eeebf46374daada7df1f37db7d62b21b4ac90706ea64cc74200a58f35bfe379703e7691aeda9e39635b02f58a9f8399fa64b031b1a9bccd7f109d256c6f4886ef94fcdc11034d6da13c0f1d4d8b97cabdd295862a5107b587824ebe8")
-
-    def unzipData(self, fileName, pathToExtract):
-        if not os.path.exists(mainPath):
-            os.makedirs("data")
-        ZipFile(fileName).extractall(mainPath + pathToExtract)
-        shutil.move("data/original dataset/test/test", "data", copy_function = shutil.copytree)
-        shutil.move("data/original dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)/train", "data/original dataset/train", copy_function = shutil.copytree)
-        shutil.move("data/original dataset/New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)/valid", "data/original dataset/valid", copy_function = shutil.copytree)
-        shutil.rmtree("data/original dataset/New Plant Diseases Dataset(Augmented)")
-        shutil.rmtree("data/Detection-of-plant-diseases/data/original dataset/test")
-
-    def writeImageToGivenPath(self, image, path):
-        os.makedirs(path.rsplit('/', 1)[0], exist_ok=True)
-        cv2.imwrite(path, image)
-
-    def resizeDataset(self, soruceDatasetName, width, height):
-        if not os.path.exists(mainPath + "resized dataset"):
-            for file in glob.glob(pathToTrainAndValidDate % soruceDatasetName, recursive=True):
-                pathToFile = file.replace("\\","/")
-                image = cv2.imread(pathToFile)
-                image = cv2.resize(image, (width, height))
-                newPath = pathToFile.replace(soruceDatasetName,"resized dataset")
-                self.writeImageToGivenPath(image,newPath)
-
-    def sobelx(self, soruceDatasetName):
-        if not os.path.exists(mainPath + "sobel dataset"):
-            for file in glob.glob(pathToTrainAndValidDate % soruceDatasetName, recursive=True):
-                pathToFile = file.replace("\\","/")
-                image = cv2.imread(pathToFile)
-                sobel = cv2.Sobel(image,cv2.CV_64F,1,0,ksize=5)
-                newPath = pathToFile.replace(soruceDatasetName,"sobel dataset")
-                self.writeImageToGivenPath(sobel,newPath)
-
-dataManager = DataManager()
-dataManager.downloadData()
-dataManager.unzipData("archive.zip","original dataset")
-dataManager.resizeDataset("original dataset", 64, 64)
-dataManager.sobelx("resized dataset")
Makefile (new file): 10 lines
@@ -0,0 +1,10 @@
+.PHONY: download-dataset resize-dataset sobel-dataset
+
+download-dataset:
+	python3 ./file_manager/data_manager.py --download
+
+resize-dataset:
+	python3 ./file_manager/data_manager.py --resize --shape 64 64 --source "original_dataset"
+
+sobel-dataset:
+	python3 ./file_manager/data_manager.py --sobel --source "resized_dataset"
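Taken together, the targets chain into a simple pipeline: `make download-dataset` fetches and unpacks the archive, `make resize-dataset` writes a 64x64 copy, and `make sobel-dataset` derives Sobel-filtered images from the resized set; each target just shells out to file_manager/data_manager.py with the flags shown.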
dataset/__init__.py (new file): empty
dataset/consts.py (new file): 40 lines
@@ -0,0 +1,40 @@
+PLANT_CLASSES = [
+    "Tomato",
+    "Potato",
+    "Corn_(maize)",
+    "Apple",
+    "Blueberry",
+    "Soybean",
+    "Cherry_(including_sour)",
+    "Squash",
+    "Strawberry",
+    "Pepper,_bell",
+    "Peach",
+    "Grape",
+    "Orange",
+    "Raspberry",
+]
+
+DISEASE_CLASSES = [
+    "healthy",
+    "Northern_Leaf_Blight",
+    "Tomato_mosaic_virus",
+    "Early_blight",
+    "Leaf_scorch",
+    "Tomato_Yellow_Leaf_Curl_Virus",
+    "Cedar_apple_rust",
+    "Late_blight",
+    "Spider_mites Two-spotted_spider_mite",
+    "Black_rot",
+    "Bacterial_spot",
+    "Apple_scab",
+    "Powdery_mildew",
+    "Esca_(Black_Measles)",
+    "Haunglongbing_(Citrus_greening)",
+    "Leaf_Mold",
+    "Common_rust_",
+    "Target_Spot",
+    "Leaf_blight_(Isariopsis_Leaf_Spot)",
+    "Septoria_leaf_spot",
+    "Cercospora_leaf_spot Gray_leaf_spot",
+]
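The two lists mirror the two halves of each class-directory name, which dataset/dataset.py splits on '___'. A minimal sketch of that mapping (the folder name below is hypothetical, but both halves do appear in the lists above):

    from dataset.consts import DISEASE_CLASSES, PLANT_CLASSES

    # Hypothetical class directory in the "Plant___disease" layout.
    folder_name = "Tomato___Late_blight"
    plant, disease = folder_name.split("___")
    assert plant in PLANT_CLASSES      # "Tomato"
    assert disease in DISEASE_CLASSES  # "Late_blight"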
dataset/dataset.py (new file): 75 lines
@@ -0,0 +1,75 @@
+import os
+from pathlib import Path
+
+import tensorflow as tf
+
+from .consts import DISEASE_CLASSES, PLANT_CLASSES
+
+
+class Dataset:
+    ''' Class to load and preprocess the dataset.
+    Loads images and labels from the given directory to tf.data.Dataset.
+
+
+    Args:
+        `data_dir (Path)`: Path to the dataset directory.
+        `seed (int)`: Seed for shuffling the dataset.
+        `repeat (int)`: Number of times to repeat the dataset.
+        `shuffle_buffer_size (int)`: Size of the buffer for shuffling the dataset.
+        `batch_size (int)`: Batch size for the dataset.
+    '''
+
+    def __init__(self,
+                 data_dir: Path,
+                 seed: int = 42,
+                 repeat: int = 1,
+                 shuffle_buffer_size: int = 10_000,
+                 batch_size: int = 64) -> None:
+        self.data_dir = data_dir
+        self.seed = seed
+        self.repeat = repeat
+        self.shuffle_buffer_size = shuffle_buffer_size
+        self.batch_size = batch_size
+
+        self.dataset = self.__load_dataset()\
+            .shuffle(self.shuffle_buffer_size, seed=self.seed)\
+            .repeat(self.repeat)\
+            .batch(self.batch_size, drop_remainder=True)\
+            .prefetch(tf.data.experimental.AUTOTUNE)
+
+    def __load_dataset(self) -> tf.data.Dataset:
+        # check if path has 'test' word in it
+        dataset = tf.data.Dataset.list_files(str(self.data_dir / '*/*'))
+        if 'test' in str(self.data_dir).lower():
+            # file names issue - labels have camel case (regex?) and differ from the train/valid sets
+            pass
+        else:
+            dataset = dataset.map(
+                self.__preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
+
+        return dataset
+
+    def __get_labels(self, image_path):
+        path = tf.strings.split(image_path, os.path.sep)[-2]
+        plant = tf.strings.split(path, '___')[0]
+        disease = tf.strings.split(path, '___')[1]
+
+        one_hot_plant = plant == PLANT_CLASSES
+        one_hot_disease = disease == DISEASE_CLASSES
+
+        return tf.cast(one_hot_plant, dtype=tf.uint8, name=None), tf.cast(one_hot_disease, dtype=tf.uint8, name=None)
+
+    def __get_image(self, image_path):
+        img = tf.io.read_file(image_path)
+        img = tf.io.decode_jpeg(img, channels=3)
+        return tf.cast(img, dtype=tf.float32, name=None) / 255.
+
+    def __preprocess(self, image_path):
+        labels = self.__get_labels(image_path)
+        image = self.__get_image(image_path)
+
+        # returns X, Y1, Y2
+        return image, labels[0], labels[1]
+
+    def __getattr__(self, attr):
+        return getattr(self.dataset, attr)
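A minimal usage sketch for this class, assuming the resized dataset produced by the Makefile targets already exists on disk (the path and batch size here are illustrative). Because `__getattr__` forwards unknown attributes to the wrapped tf.data.Dataset, pipeline methods such as `take` can be called on the object directly:

    from pathlib import Path

    from dataset.dataset import Dataset

    train_ds = Dataset(Path("data/resized_dataset/train"), batch_size=32)

    # Each batch is (images, plant_one_hot, disease_one_hot).
    for images, plants, diseases in train_ds.take(1):
        print(images.shape)    # (32, 64, 64, 3) float32 in [0, 1] for 64x64 images
        print(plants.shape)    # (32, 14): one-hot over PLANT_CLASSES
        print(diseases.shape)  # (32, 21): one-hot over DISEASE_CLASSES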
file_manager/__init__.py (new file): empty
file_manager/data_manager.py (new file): 112 lines
@@ -0,0 +1,112 @@
+import argparse
+import glob
+import os
+import shutil
+from pathlib import Path
+from zipfile import ZipFile
+
+import cv2
+import wget
+
+main_path = Path("data/")
+path_to_train_and_valid = main_path / "%s/**/*.*"
+original_dataset_name = "original_dataset"
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--download", action="store_true",
+                    help="Download the data")
+parser.add_argument("--resize", action="store_true",
+                    help="Resize the dataset")
+parser.add_argument("--shape", type=int, nargs="+", default=(64, 64),
+                    help="Shape of the resized images. Applied only for resize option. Default: (64, 64)")
+parser.add_argument("--sobel", action="store_true",
+                    help="Apply Sobel filter to the dataset")
+parser.add_argument("--source", type=str, default="original_dataset",
+                    help="Name of the source dataset. Applied for all arguments except download. Default: original_dataset")
+args = parser.parse_args()
+
+
+class DataManager:
+
+    def download_data(self):
+        if not os.path.isfile("archive.zip"):
+            wget.download("https://storage.googleapis.com/kaggle-data-sets/78313/182633/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240502%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240502T181500Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=87d0661313e358206b6e10d44f135d41e23501d601e58b1e8236ca28a82ccc434534564b45baa84c4d829dd1995ff384d51fe5dba3f543d00eb0763169fd712c6c8f91bb4f298db38a19b31b2d489798a9723a271aa4108d7b93345c5a64a7ef00b9b8f27d1d5f728e373c870f0287eb89bc747941f0aeeb4703c288059e2e07b7ece3a83114a9607276874a90d4ec96dde06fddb94a0d3af72848565661b1404e3ea248eeebf46374daada7df1f37db7d62b21b4ac90706ea64cc74200a58f35bfe379703e7691aeda9e39635b02f58a9f8399fa64b031b1a9bccd7f109d256c6f4886ef94fcdc11034d6da13c0f1d4d8b97cabdd295862a5107b587824ebe8")
+
+    def unzip_data(self, file_name, path_to_extract):
+        full_path_to_extract = main_path / path_to_extract
+        old_path = "New Plant Diseases Dataset(Augmented)/New Plant Diseases Dataset(Augmented)"
+        if not os.path.exists(main_path):
+            os.makedirs(main_path)
+        ZipFile(file_name).extractall(full_path_to_extract)
+        # shutil.move("data/test/test",
+        #             full_path_to_extract, copy_function=shutil.copytree)
+        shutil.move(full_path_to_extract / old_path / "train",
+                    full_path_to_extract / "train", copy_function=shutil.copytree)
+        shutil.move(full_path_to_extract / old_path / "valid",
+                    full_path_to_extract / "valid", copy_function=shutil.copytree)
+        shutil.rmtree(
+            full_path_to_extract / "New Plant Diseases Dataset(Augmented)"
+        )
+        shutil.rmtree(
+            full_path_to_extract / "new plant diseases dataset(augmented)"
+        )
+        shutil.rmtree(full_path_to_extract / "test")
+        self.get_test_ds_from_validation()
+
+    def write_image(self, image, path):
+        os.makedirs(path.rsplit('/', 1)[0], exist_ok=True)
+        cv2.imwrite(path, image)
+
+    def get_test_ds_from_validation(self, files_per_category: int = 2):
+        path_to_extract = main_path / original_dataset_name
+        valid_ds = glob.glob(str(path_to_extract / "valid/*/*"))
+
+        category_dirs = set([category_dir.split("/")[-2]
+                             for category_dir in valid_ds])
+        category_lists = {category: [] for category in category_dirs}
+        for file_path in valid_ds:
+            category = file_path.split("/")[-2]
+            category_lists[category].append(file_path)
+
+        test_dir = path_to_extract / "test"
+        if not os.path.exists(test_dir):
+            os.makedirs(test_dir, exist_ok=True)
+
+        for category, files in category_lists.items():
+            os.makedirs(test_dir / category, exist_ok=True)
+            files.sort()
+            for file in files[:files_per_category]:
+                shutil.move(file, test_dir / category)
+
+    def resize_dataset(self, source_dataset_name, shape):
+        dataset_name = "resized_dataset"
+        if not os.path.exists(main_path / dataset_name):
+            for file in glob.glob(str(path_to_train_and_valid) % source_dataset_name, recursive=True):
+                path_to_file = file.replace("\\", "/")
+                image = cv2.imread(path_to_file)
+                image = cv2.resize(image, shape)
+                new_path = path_to_file.replace(
+                    source_dataset_name, dataset_name)
+                self.write_image(image, new_path)
+
+    def sobelx(self, source_dataset_name):
+        dataset_name = "sobel_dataset"
+        if not os.path.exists(main_path / dataset_name):
+            for file in glob.glob(str(path_to_train_and_valid) % source_dataset_name, recursive=True):
+                path_to_file = file.replace("\\", "/")
+                image = cv2.imread(path_to_file)
+                sobel = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=5)
+                new_path = path_to_file.replace(
+                    source_dataset_name, dataset_name)
+                self.write_image(sobel, new_path)
+
+
+if __name__ == "__main__":
+    data_manager = DataManager()
+    if args.download:
+        data_manager.download_data()
+        data_manager.unzip_data("archive.zip", original_dataset_name)
+    if args.resize:
+        data_manager.resize_dataset(args.source, tuple(args.shape))
+    if args.sobel:
+        data_manager.sobelx(args.source)
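The argparse flags line up with the Makefile targets, so the script can equally be driven by hand, e.g. `python3 ./file_manager/data_manager.py --resize --shape 64 64 --source "original_dataset"`, which writes the resized copy under data/resized_dataset.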
file_manager/shard_files.py (new file): 19 lines
@@ -0,0 +1,19 @@
+from pathlib import Path
+
+# TODO: split the files into smaller dirs and make list of them
+class FileSharder:
+    def __init__(self,
+                 train_dir: Path = Path('./data/resized_dataset/train'),
+                 valid_dir: Path = Path('./data/resized_dataset/valid'),
+                 test_dir: Path = Path('./data/resized_dataset/test'),
+                 shard_size = 5_000) -> None:
+        self.shard_size = shard_size
+
+        self.train_dir = train_dir
+        self.valid_dir = valid_dir
+        self.test_dir = test_dir
+
+        self.shard()
+
+    def shard(self):
+        pass
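`FileSharder.shard()` is still a stub. One hypothetical way to fill in the TODO, sketched here purely for illustration (not the repository's implementation), is to move each category's files into numbered sub-directories of at most `shard_size` files:

    import shutil
    from pathlib import Path

    def shard_split(split_dir: Path, shard_size: int = 5_000) -> None:
        # For each category folder, bucket its files into shard_000, shard_001, ...
        for category_dir in sorted(p for p in split_dir.iterdir() if p.is_dir()):
            files = sorted(p for p in category_dir.iterdir() if p.is_file())
            for i, file_path in enumerate(files):
                shard_dir = category_dir / f"shard_{i // shard_size:03d}"
                shard_dir.mkdir(exist_ok=True)
                shutil.move(str(file_path), str(shard_dir / file_path.name))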
requirements.txt (new file): 4 lines
@@ -0,0 +1,4 @@
+tensorflow==2.16.1
+numpy==1.26.4
+opencv-python==4.9.0.80
+wget==3.2