# This file was run on a remote Jupyter Notebook server, hence the odd
# comments and the whole database shipped as a zip archive.
# The network reaches roughly 80% accuracy on the validation set, but it
# is overfitted, because we did not augment the data.

import os

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force CPU-only execution

from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, Input

# Keep only the weights with the lowest validation loss seen so far.
save_best = ModelCheckpoint('./best_model_newD.h5', monitor='val_loss',
                            save_best_only=True)
# Stop training once validation loss has not improved for 5 epochs.
es = EarlyStopping(monitor="val_loss", mode="min", patience=5)

EPOCHS = 100


# In[2]:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

root_path = "./Database/"

gen = ImageDataGenerator(
    rescale=1./255,        # scale pixel values to [0, 1]
    validation_split=0.2   # hold out 20% of the images for validation
)

# Load data: one class per subdirectory of root_path.
train_dataset = gen.flow_from_directory(
    root_path,
    class_mode="categorical",
    classes=os.listdir(root_path),
    shuffle=True,
    batch_size=32,
    target_size=(128, 128),
    subset="training",
    color_mode='grayscale'
)

validation_dataset = gen.flow_from_directory(
    root_path,
    class_mode="categorical",
    classes=os.listdir(root_path),
    shuffle=True,
    batch_size=32,
    target_size=(128, 128),
    subset="validation",
    color_mode='grayscale'
)


# In[4]:

model = Sequential()
# Grayscale 128x128 input: the channel dimension must be part of the
# declared shape, i.e. (128, 128, 1), not (128, 128) — otherwise the
# first Conv2D fails on a 3D input. With an Input layer present, the
# per-layer input_shape arguments are redundant and have been dropped.
model.add(Input(shape=(128, 128, 1)))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128 * 128, activation='relu'))
model.add(Dense(100, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(25, activation='relu'))
# model.add(Dropout(0.2))
model.add(Dense(5, activation='softmax'))  # 5 output classes

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

model.summary()


# In[5]:

history = model.fit(train_dataset,
                    epochs=EPOCHS,
                    validation_data=validation_dataset,
                    callbacks=[es, save_best])


# In[ ]:
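# A short usage sketch (not part of the original run): reload the best
# checkpoint written by the ModelCheckpoint callback above and measure
# its validation performance. Assumes './best_model_newD.h5' exists,
# i.e. that training above has completed at least one epoch.
from tensorflow.keras.models import load_model

best_model = load_model('./best_model_newD.h5')
val_loss, val_acc = best_model.evaluate(validation_dataset)
print(f"validation loss: {val_loss:.4f}, validation accuracy: {val_acc:.4f}")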
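# The overfitting noted at the top of this file could be reduced with
# data augmentation. A minimal sketch, with hypothetical parameter
# values (not settings we actually tested): ImageDataGenerator applies
# random transformations on the fly, so only the generator feeding the
# training subset needs to change.
aug_gen = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.2,
    rotation_range=15,       # random rotations up to +/-15 degrees
    width_shift_range=0.1,   # random horizontal shifts up to 10% of width
    height_shift_range=0.1,  # random vertical shifts up to 10% of height
    zoom_range=0.1,          # random zoom in the range [0.9, 1.1]
    horizontal_flip=True     # drop this if the classes are orientation-sensitive
)
# Rebuilding train_dataset from aug_gen with the same flow_from_directory
# call as above would then yield freshly augmented batches every epoch,
# while the validation generator should stay unaugmented.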