executable script for training
This commit is contained in:
parent 59b2b9e687
commit af76c778d7

main.py (new file, 41 lines)
@@ -0,0 +1,41 @@
+
+from src.Unet import Unet
+from src.loss import jaccard_loss
+from src.metrics import IOU
+from src.consts import EPOCHS, STEPS, SEED
+from src.generators import create_generators
+from tensorflow.keras.callbacks import ModelCheckpoint
+import tensorflow as tf
+
+
+if __name__ == "__main__":
+    model = Unet(num_classes=1).build_model()
+
+    compile_params ={
+        'loss':jaccard_loss(smooth=90),
+        'optimizer':'rmsprop',
+        'metrics':[IOU]
+    }
+
+
+    model.compile(**compile_params)
+    # tf.keras.utils.plot_model(model, show_shapes=True)
+
+    model_name = "models/unet.h5"
+    modelcheckpoint = ModelCheckpoint(model_name,
+                                      monitor='val_loss',
+                                      mode='auto',
+                                      verbose=1,
+                                      save_best_only=True)
+
+
+    train_gen = create_generators('training', SEED)
+    val_gen = create_generators('validation', SEED)
+
+    history = model.fit_generator(train_gen,
+                                  validation_data=val_gen,
+                                  epochs=EPOCHS,
+                                  steps_per_epoch=STEPS,
+                                  validation_steps = STEPS,
+                                  shuffle=True,
+                                  )
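Note: main.py imports jaccard_loss from src/loss.py and IOU from src/metrics.py, neither of which appears in this diff. For reference, a minimal sketch of what a smoothed Jaccard loss and a thresholded IoU metric of this shape usually look like (the names and the smooth argument mirror the call sites above; the bodies are an assumption, not the committed code):

from tensorflow.keras import backend as K

def jaccard_loss(smooth=100):
    # 1 - soft Jaccard (intersection over union) coefficient; the smooth term
    # keeps the ratio and its gradient finite when a batch has empty masks.
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, 'float32')
        intersection = K.sum(y_true * y_pred)
        union = K.sum(y_true) + K.sum(y_pred) - intersection
        return 1.0 - (intersection + smooth) / (union + smooth)
    return loss

def IOU(y_true, y_pred):
    # Hard IoU at a 0.5 threshold, usable directly as metrics=[IOU].
    y_true = K.cast(y_true > 0.5, 'float32')
    y_pred = K.cast(y_pred > 0.5, 'float32')
    intersection = K.sum(y_true * y_pred)
    union = K.sum(y_true) + K.sum(y_pred) - intersection
    return (intersection + K.epsilon()) / (union + K.epsilon())

Two small observations on the script itself: modelcheckpoint is created but never passed to the training call (it would need callbacks=[modelcheckpoint]), and Model.fit_generator is deprecated in TensorFlow 2.x; Model.fit accepts generators directly with the same arguments, where shuffle is ignored for generator input.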

src/Unet.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+import shutil
+import tensorflow as tf
+from tensorflow.keras import backend as K
+from tensorflow.keras.layers import concatenate
+from tensorflow.keras.layers import UpSampling2D, Conv2D, Dropout, MaxPooling2D
+from tensorflow.keras.layers import Input
+from tensorflow.keras.models import Model
+from src.consts import IMG_SIZE
+
+class Unet():
+    def __init__(self, num_classes=1):
+        self.num_classes=num_classes
+
+    def build_model(self):
+        in1 = Input(shape=(IMG_SIZE[0], IMG_SIZE[1], 3 ))
+
+        conv1 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(in1)
+        conv1 = Dropout(0.2)(conv1)
+        conv1 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv1)
+        pool1 = MaxPooling2D((2, 2))(conv1)
+
+        conv2 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pool1)
+        conv2 = Dropout(0.2)(conv2)
+        conv2 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv2)
+        pool2 = MaxPooling2D((2, 2))(conv2)
+
+        conv3 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pool2)
+        conv3 = Dropout(0.2)(conv3)
+        conv3 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv3)
+        pool3 = MaxPooling2D((2, 2))(conv3)
+
+        conv4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(pool3)
+        conv4 = Dropout(0.2)(conv4)
+        conv4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv4)
+
+        up1 = concatenate([UpSampling2D((2, 2))(conv4), conv3], axis=-1)
+        conv5 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(up1)
+        conv5 = Dropout(0.2)(conv5)
+        conv5 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv5)
+
+        up2 = concatenate([UpSampling2D((2, 2))(conv5), conv2], axis=-1)
+        conv6 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(up2)
+        conv6 = Dropout(0.2)(conv6)
+        conv6 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv6)
+
+        up2 = concatenate([UpSampling2D((2, 2))(conv6), conv1], axis=-1)
+        conv7 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(up2)
+        conv7 = Dropout(0.2)(conv7)
+        conv7 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(conv7)
+        segmentation = Conv2D(self.num_classes, (1, 1), activation='sigmoid', name='seg')(conv7)
+        #segmentation = Conv2D(3, (1, 1), activation='sigmoid', name='seg')(conv7)
+        model = Model(inputs=[in1], outputs=[segmentation])
+
+        return model
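Review note: import shutil, tensorflow as tf and backend as K are unused here, the encoder and decoder repeat the same Conv2D-Dropout-Conv2D pattern at every resolution, and the two decoder concatenations both reuse the name up2 (renaming the second to up3 would read better). Not part of this commit, but a short sketch of how the repeated block could be factored out and how the class is meant to be used:

from tensorflow.keras.layers import Conv2D, Dropout
from src.Unet import Unet

def conv_block(x, filters, drop=0.2):
    # Two 3x3 same-padded ReLU convolutions with dropout in between,
    # i.e. the block repeated at every level of build_model above.
    x = Conv2D(filters, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(x)
    x = Dropout(drop)(x)
    return Conv2D(filters, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same')(x)

# Usage: with IMG_SIZE = (512, 512) the input is 512x512x3 and, after three
# poolings and three upsamplings, the output is a 512x512x1 sigmoid mask.
model = Unet(num_classes=1).build_model()
model.summary()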

src/consts.py (modified)
@@ -1,7 +1,7 @@
 import pandas as pd
 FEATURES ='../data/train_features'
 LABELS = '../data/train_labels'
-JPG_IMAGES = '../images'
+JPG_IMAGES = 'images'
 RGB_DIR = "rgb/img"
 FC_DIR = "fc/img"
 MASK_DIR = "mask/img"
@@ -9,4 +9,7 @@ MASK_DIR = "mask/img"
 
 BATCH = 8
 IMG_SIZE = (512,512)
-SEED = 7
+SEED = 7
+
+EPOCHS = 10
+STEPS = 10
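With JPG_IMAGES now pointing at images/ relative to the project root (instead of ../images), the paths the data pipeline builds resolve as below (POSIX separators assumed); the img suffix acts as the single class subdirectory that flow_from_directory expects in src/generators.py:

import os
from src.consts import JPG_IMAGES, RGB_DIR, MASK_DIR

print(os.path.join(JPG_IMAGES, RGB_DIR))                    # images/rgb/img
print(os.path.dirname(os.path.join(JPG_IMAGES, RGB_DIR)))   # images/rgb  (what the new code passes to flow_from_directory)
print(os.path.dirname(os.path.join(JPG_IMAGES, MASK_DIR)))  # images/mask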

src/generators.py (modified)
@@ -1,41 +1,36 @@
-from consts import JPG_IMAGES, RGB_DIR, MASK_DIR, FC_DIR, BATCH, IMG_SIZE
+from src.consts import JPG_IMAGES, RGB_DIR, MASK_DIR, BATCH, IMG_SIZE
 import os
 from tensorflow.keras.preprocessing.image import ImageDataGenerator
 
-def create_generators(mode='train'):
+def create_generators(mode='training', seed=1):
     '''
-    mode can be train or validation.
+    Params
+    mode: training or validation
+    seed: same value as in fit function.
     '''
-    if(mode=='train'):
-        subset = 'training'
-    else:
-        subset = 'validation'
-
-    # we create two instances with the same arguments
     train_datagen = ImageDataGenerator(rescale=1 / 255.0,
-        horizontal_flip=True,
-        vertical_flip=True,
-        validation_split=0.2)
+                                       horizontal_flip=True,
+                                       vertical_flip=True,
+                                       validation_split=0.2)
 
-    rgb_gen = train_datagen.flow_from_directory(directory=os.path.join(JPG_IMAGES, RGB_DIR),
-        target_size= IMG_SIZE,
-        batch_size=BATCH,
-        class_mode=None,
-        classes=None,
-        shuffle=False,
-        subset=subset)
-    # Provide the same seed and keyword arguments to the fit and flow methods
-
-    mask_gen = train_datagen.flow_from_directory(
-        directory=os.path.join(JPG_IMAGES, MASK_DIR ),
-        target_size= IMG_SIZE,
-        batch_size=BATCH,
-        class_mode=None,
-        classes=None,
-        shuffle=False,
-        subset=subset)
+    image_generator = train_datagen.flow_from_directory(
+        os.path.dirname(os.path.join(JPG_IMAGES, RGB_DIR)),
+        class_mode=None,
+        target_size= IMG_SIZE,
+        # class_mode='binary',
+        seed=seed,
+        subset=mode
+    )
+    mask_generator = train_datagen.flow_from_directory(
+        os.path.dirname(os.path.join(JPG_IMAGES, MASK_DIR)),
+        target_size= IMG_SIZE,
+        class_mode=None,
+        seed=seed,
+        subset=mode
+    )
+
 
-    # train_genenerator = zip(rgb_gen,mask_gen)
-    # for (imgs, mask) in train_genenerator:
-    # yield (imgs, mask)
-    return rgb_gen, mask_gen
+
+    return zip(image_generator, mask_generator)
+
+
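Because both flows come from the same ImageDataGenerator and share seed and subset, they shuffle identically, so the zipped generator yields aligned (image batch, mask batch) pairs, which is the (x, y) format the fit call in main.py expects. A minimal usage sketch (directory layout as implied by src/consts.py; note that these calls fall back to the default batch_size of 32 rather than BATCH):

from src.consts import SEED
from src.generators import create_generators

train_gen = create_generators('training', SEED)
images, masks = next(train_gen)
print(images.shape, masks.shape)  # e.g. (32, 512, 512, 3) for both, since color_mode defaults to 'rgb'

One thing to double-check: with the default color_mode the mask batches come back with three channels while the model's seg output has one; passing color_mode='grayscale' to the mask flow is the usual way to make the targets match.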