diff --git a/algorytm_biedronki_wersja_z_wyuczonym_modelem.py b/algorytm_biedronki_wersja_z_wyuczonym_modelem.py
new file mode 100644
index 0000000..5bf7ce8
--- /dev/null
+++ b/algorytm_biedronki_wersja_z_wyuczonym_modelem.py
@@ -0,0 +1,125 @@
+#%%
+from tensorflow.keras.layers import Input, Lambda, Dense, Flatten, Dropout
+from tensorflow.keras.models import Model
+from tensorflow.keras.applications.vgg19 import VGG19
+from tensorflow.keras.applications.vgg19 import preprocess_input
+from tensorflow.keras.preprocessing import image
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from tensorflow.keras.models import Sequential
+import numpy as np
+import pandas as pd
+import os
+import cv2
+import matplotlib.pyplot as plt
+import keras
+
+# %%
+os.chdir(r"C:\Users\pittb\Documents\biedronkapp_2.0\BiedronkApp\biedronki_dataset")
+train_path = "train"
+test_path = "test"
+# %%
+# Load and resize the raw images with OpenCV, converting BGR -> RGB; sorted() keeps the
+# file order aligned with flow_from_directory, which lists folders and files alphabetically.
+x_train = []
+
+for folder in sorted(os.listdir(train_path)):
+
+    sub_path = train_path + "/" + folder
+
+    for img in sorted(os.listdir(sub_path)):
+        image_path = sub_path + "/" + img
+
+        img_arr = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+
+        img_arr = cv2.resize(img_arr, (224, 224))
+
+        x_train.append(img_arr)
+
+x_test = []
+
+for folder in sorted(os.listdir(test_path)):
+
+    sub_path = test_path + "/" + folder
+
+    for img in sorted(os.listdir(sub_path)):
+
+        image_path = sub_path + "/" + img
+
+        img_arr = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+
+        img_arr = cv2.resize(img_arr, (224, 224))
+
+        x_test.append(img_arr)
+# %%
+train_x = np.array(x_train)
+test_x = np.array(x_test)
+
+train_x = train_x / 255.0
+test_x = test_x / 255.0
+# %%
+train_datagen = ImageDataGenerator(rescale=1./255)
+test_datagen = ImageDataGenerator(rescale=1./255)
+# %%
+training_set = train_datagen.flow_from_directory(train_path,
+                                                 target_size=(224, 224),
+                                                 batch_size=32,
+                                                 class_mode='sparse')
+test_set = test_datagen.flow_from_directory(test_path,
+                                            target_size=(224, 224),
+                                            batch_size=32,
+                                            class_mode='sparse')
+# %%
+# Integer labels for every file, in the generators' (alphabetically sorted) order.
+train_y = training_set.classes
+test_y = test_set.classes
+
+print(training_set.class_indices)
+
+print(train_y.shape, test_y.shape)
+# %%
+IMAGE_SIZE = [224, 224]
+
+vgg = VGG19(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)
+
+for layer in vgg.layers:
+    layer.trainable = False
+
+# %%
+# Classification head on top of the frozen VGG19 features (the trained model itself is loaded from disk below).
+x = Flatten()(vgg.output)
+
+prediction = Dense(2, activation='softmax')(x)
+# %%
+# Load the already trained and saved model (TensorFlow SavedModel directory).
+model = keras.models.load_model('saved_model/my_model')
+
+# %%
+from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
+import numpy as np
+
+y_pred = model.predict(test_x)
+y_pred = np.argmax(y_pred, axis=1)
+
+# sklearn metrics expect (y_true, y_pred)
+print(accuracy_score(test_y, y_pred))
+
+print(classification_report(test_y, y_pred))
+
+
+# %%
+# Single-image sanity check on an Asian lady beetle photo from the test set.
+img_path = r"test\azjatyckie\Asian_lady_beetle-(Harmonia-axyridis).jpg"
+
+img_nump = np.array(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))
+
+plt.imshow(img_nump)
+plt.show()
+
+img_nump = cv2.resize(img_nump, (224, 224))
+img_nump = img_nump / 255.0
+img_nump = img_nump[np.newaxis, ...]
+
+pr = model.predict(img_nump)
+print('azjatycka: ' + str(pr[0][0]) + ' , siedmiokropka: ' + str(pr[0][1]))
+print('azjatycka' if np.argmax(pr, axis=1)[0] == 0 else 'siedmiokropka')
+# %%
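Note: the script above only loads 'saved_model/my_model'; it defines the Flatten + Dense(2, softmax) head on the frozen VGG19 base but never compiles, fits, or saves it. Below is a minimal sketch of how such a model could have been produced, assuming the same `vgg` base and the `training_set`/`test_set` generators created in the script; the optimizer and epochs=10 are illustrative assumptions, not values taken from the repository.

    # Sketch only: reuses `vgg`, `training_set`, `test_set` from the script above.
    # The optimizer and epoch count are assumptions, not the author's actual settings.
    from tensorflow.keras.layers import Flatten, Dense
    from tensorflow.keras.models import Model

    x = Flatten()(vgg.output)
    prediction = Dense(2, activation='softmax')(x)
    model = Model(inputs=vgg.input, outputs=prediction)

    # sparse_categorical_crossentropy matches the generators' class_mode='sparse' integer labels
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(training_set,
              validation_data=test_set,
              epochs=10)  # assumed value

    # Writes a TensorFlow SavedModel directory, which keras.models.load_model reads back above.
    model.save('saved_model/my_model')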