1.9 MiB
1.9 MiB
LICENSE PLATE DETECTION
YOLO V3
# Clone the Roboflow fork of keras-yolo3, which provides the YOLOv3 Keras
# implementation plus the Darknet-weight conversion script used below.
!git clone https://github.com/roboflow-ai/keras-yolo3
Cloning into 'keras-yolo3'... remote: Enumerating objects: 169, done.[K remote: Total 169 (delta 0), reused 0 (delta 0), pack-reused 169[K Receiving objects: 100% (169/169), 172.74 KiB | 625.00 KiB/s, done. Resolving deltas: 100% (80/80), done.
# Download the license-plate dataset export from Roboflow (-L follows redirects),
# unzip it into train/ valid/ test/ splits (each with _annotations.txt and
# _classes.txt), then delete the archive.
# NOTE(review): the export key is embedded in this URL — it is a credential;
# rotate or strip it before sharing this notebook publicly.
!curl -L "https://app.roboflow.com/ds/hTj8Pr7g7U?key=q9kdROYojM" > roboflow.zip; unzip roboflow.zip; rm roboflow.zip
% Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 897 100 897 0 0 269 0 0:00:03 0:00:03 --:--:-- 269 0 100 2120k 100 2120k 0 0 515k 0 0:00:04 0:00:04 --:--:-- 23.5M Archive: roboflow.zip extracting: README.dataset.txt extracting: README.roboflow.txt creating: test/ extracting: test/2images1_png.rf.5de47b3b58bc776388f9547915f46edf.jpg extracting: test/2images41_png.rf.2f711be90f9f9e796139a02cb45fe9ba.jpg extracting: test/2images45_png.rf.cbcc994c49d1a2ca5e7bc52cb9b2a1a3.jpg extracting: test/3images22_png.rf.b139cdb6065c658e0c7acc2124854383.jpg extracting: test/3images34_png.rf.9a25c14870c5acae15ee0f159a9707b4.jpg extracting: test/3images4_png.rf.a3d6d0b11320142ada8e8347c918dc30.jpg extracting: test/6images3_png.rf.8b1268f1823ea224077f537939c2ccba.jpg extracting: test/7images0_png.rf.fb9d6e1e739e19321bdc7050f4a95798.jpg extracting: test/_annotations.txt extracting: test/_classes.txt extracting: test/images6_png.rf.56641c848717baa02774239ac0039bd6.jpg extracting: test/img105_png.rf.d69f400c7410b1e265136d01b1a2cc5e.jpg extracting: test/img149_png.rf.c487d9bc6be853e23cc7a12359178b40.jpg extracting: test/img14_png.rf.1a47d3748ad1566280dc8199d96430de.jpg extracting: test/img35_png.rf.16e367a1ce2db4dc0b0b1491814e8c95.jpg extracting: test/img89_png.rf.f0f546c24ed5d6a16a2cbf9389065678.jpg creating: train/ extracting: train/20img2_png.rf.015a51172ce51d61531b54af5a144183.jpg extracting: train/21img3_png.rf.c1601abdfd96ebfc6f13205c638364bc.jpg extracting: train/22img34_png.rf.02ddffee2d6e8dc6ef169f89f622a933.jpg extracting: train/23img46_png.rf.fd5a109b78b90ed3582888880b743303.jpg extracting: train/24img50_png.rf.1f28fdcb1632f237fb0bf7be7d877351.jpg extracting: train/25img73_png.rf.25d9c97db5c2c466bbe2692f9f69c869.jpg extracting: train/26img74_png.rf.861f6c881709f3bb65637c7ea3871dca.jpg extracting: train/2images0_png.rf.b8c8f0d2594f6bfaf8be2dca50416bb6.jpg extracting: 
train/2images18_png.rf.951b35372d913193f0899fda6877cbee.jpg extracting: train/2images22_png.rf.bb299b6d237016c2714b68aead8266d7.jpg extracting: train/2images23_png.rf.cea092359f78eb1c22db6b50627790d6.jpg extracting: train/2images29_png.rf.91d8be50c5d0f82577d74268153ac5fc.jpg extracting: train/2images2_png.rf.62684ca2757500eaeac877d48e04c92f.jpg extracting: train/2images34_png.rf.ffff2284b01426e5cd22ca8053450348.jpg extracting: train/2images3_png.rf.c7b635e1dc54f5bb10aa338d78969c22.jpg extracting: train/2images46_png.rf.d4143a5946da0d1bc8e540c239a648b8.jpg extracting: train/2images4_png.rf.64541674b6b6df83b15534c2d8bf0030.jpg extracting: train/30img11_png.rf.1a236b6935fd926336da07248a867a36.jpg extracting: train/3images0_png.rf.ff30aaf2256dde95d2dc4893b7074098.jpg extracting: train/3images11_png.rf.8a11e1eae3b52a369681843c7d7116d1.jpg extracting: train/3images18_png.rf.0673ed9396fa1ae5a43ff44f10422ff5.jpg extracting: train/3images29_png.rf.a96af5fe85f477adc0fce370e788f76c.jpg extracting: train/3images2_png.rf.d7de4c45de845226a8391e8f332352d9.jpg extracting: train/3images30_png.rf.dd0080eda6b7d8ff2e188c7e5590e7c6.jpg extracting: train/3images33_png.rf.3abc75a93214fc0a497dd54cabd690a0.jpg extracting: train/3images3_png.rf.e9771234c266dba02be2fd6f204aa66b.jpg extracting: train/3images42_png.rf.b3b45a46d57ac11c2d546831ad52cceb.jpg extracting: train/3images43_png.rf.0603c0f1b7a15be7449b6d46c621e7af.jpg extracting: train/3images5_png.rf.6a53d28cdfade27885d25f8208f3028a.jpg extracting: train/6images0_png.rf.1e11dd3d7f4e5a79ce207c7770185b0c.jpg extracting: train/6images12_png.rf.d0d6b3319c39fdb6a9356047f5ddb8ee.jpg extracting: train/6images1_png.rf.8c65b6bfe8d5b01a2a1545337de6c390.jpg extracting: train/6images4_png.rf.2c77da3c85f4cb57ebe5d90ab8ed5e0c.jpg extracting: train/6images5_png.rf.7033ded0e4684504365b5b0345529c5c.jpg extracting: train/7images12_png.rf.c46a44810aea7edafc53b6b561c6cf6a.jpg extracting: train/7images17_png.rf.ff8fc5bb0f84483dd914f5f2de524933.jpg 
extracting: train/7images1_png.rf.cf5406f149f35ab24eda2c621f9298ed.jpg extracting: train/7images2_png.rf.f84de676f7fb3de9d7789e1dafab8fa3.jpg extracting: train/7images3_png.rf.14c5f2588d07e7234659792e20bd7fd8.jpg extracting: train/7images4_png.rf.5e455f9a5c94b0a3b56043ef05d06854.jpg extracting: train/_annotations.txt extracting: train/_classes.txt extracting: train/images0_png.rf.d1f446cd89662b7ccf994dc77f63ff56.jpg extracting: train/images10_png.rf.bc421baf20b7cbf6af4ea822f259fcab.jpg extracting: train/images13_png.rf.dff8711d203b47a3f8709c4cee5d6927.jpg extracting: train/images15_png.rf.e1b904b94d5539da79117c3613ae5765.jpg extracting: train/images1_png.rf.9c2cb373d7f4613a2735410f1fdb3043.jpg extracting: train/images3_png.rf.e7cf0078d44c2571ebc5d607ffaacbc8.jpg extracting: train/images4_png.rf.97f8f01f67adf77de50c99fd6ed7f879.jpg extracting: train/images5_png.rf.d16b8c87a8a593e5971124648ba63736.jpg extracting: train/img0_png.rf.fa065b68c3d51d65399f883f8713ccf2.jpg extracting: train/img102_png.rf.3da7ec4deedfb6f15834e9a42aee4e7c.jpg extracting: train/img103_png.rf.67216b08a719a9a9dba68f83c5460a74.jpg extracting: train/img104_png.rf.db759d639a6b1ace6dc8e7442c86ba9a.jpg extracting: train/img106_png.rf.d882268d61ac720e54c35110fb8bc4b0.jpg extracting: train/img107_png.rf.a62231fc47913091ec76468e536d6f28.jpg extracting: train/img10_png.rf.e7bba8322d47d623f71903aa50f48730.jpg extracting: train/img113_png.rf.cb3afcbea4e7177a2ed703b4b1d94887.jpg extracting: train/img116_png.rf.1f7034a069e5a888b00da9496e0df5ae.jpg extracting: train/img118_png.rf.9e21a52ffda3719b2cc6deb0309efd7d.jpg extracting: train/img11_png.rf.a9584dc2d254fd84ca6a30cc9b821bd5.jpg extracting: train/img125_png.rf.4a0a9a2f74bd5127343124c4fb4d0670.jpg extracting: train/img126_png.rf.0bad29364a3846287498838f6791cae8.jpg extracting: train/img133_png.rf.e66c88015d6fb51921b20ad8008fc981.jpg extracting: train/img146_png.rf.9811cc9a676e18c4cf2bce86398feb9d.jpg extracting: 
train/img170_png.rf.1d04d991430ba0d672fabff684817dc6.jpg extracting: train/img174_png.rf.4d01b9ebbdc8c1b434c61c945794a79e.jpg extracting: train/img178_png.rf.b0e5b6547069d86483e91fc99356e5d9.jpg extracting: train/img181_png.rf.363074a89b0325055d28f3794083e479.jpg extracting: train/img189_png.rf.07aedf508ccbfc3e0244bd54bd76cbf8.jpg extracting: train/img197_png.rf.36119ab11e392cfeded10c61aa97eac6.jpg extracting: train/img1_png.rf.bf5b1060d3cb9959dc94b75d4fc78334.jpg extracting: train/img202_png.rf.f6520c22d6c95c8e5a105b6ee48b8da1.jpg extracting: train/img205_png.rf.98d121af5e0548a1402eb3e93560465d.jpg extracting: train/img215_png.rf.6d29cfcf38f6a4b2165ba5ba110454d2.jpg extracting: train/img270_png.rf.52541958250f2b45297faa1440d55d56.jpg extracting: train/img278_png.rf.82173849dfde92f2a2ab2761e5679891.jpg extracting: train/img283_png.rf.809b4e6edbe803fbcab887a40e59f526.jpg extracting: train/img2_png.rf.23b2d7fe287627739888976776de8437.jpg extracting: train/img306_png.rf.642a9812ecebfd9784d9eb593b78dcf2.jpg extracting: train/img34_png.rf.f98b7fa7325ddb9ca373121c5c120f55.jpg extracting: train/img38_png.rf.cd97a110e34ad869a4b79d8237d92a36.jpg extracting: train/img39_png.rf.e9b1634ca400418b29839bad544e8634.jpg extracting: train/img3_png.rf.3f382680461124ba2e19c1df51d895e7.jpg extracting: train/img45_png.rf.870b550082c3da2c42e40017442c115b.jpg extracting: train/img46_png.rf.2c1d961d3e61d1389c825f2aba32ab39.jpg extracting: train/img4_png.rf.4f0ce3c02167bf3f8ae2454471c9c4fd.jpg extracting: train/img57_png.rf.dc254e143fec0667ac462e303290e301.jpg extracting: train/img58_png.rf.1a6e09bda52588bb7f3890768f0db5f2.jpg extracting: train/img5_png.rf.542fe1bdd2a910b20f27ce55cf8689ff.jpg extracting: train/img66_png.rf.534ec186146ae4409f8c875cf28dcb84.jpg extracting: train/img6_png.rf.7aceac81d4a22f02ab0460ee5bd2227f.jpg extracting: train/img77_png.rf.8f8e23567322fd7de129380c6a54bd01.jpg extracting: train/img78_png.rf.eb48e94d48c04b3077d049cb8cd920bb.jpg extracting: 
train/img7_png.rf.2dd95d826f13ab03805de7f7b842eb40.jpg extracting: train/img85_png.rf.f7a4ae3bb16a8c3fe7f164e35f11ea65.jpg extracting: train/img86_png.rf.3addc2b6c62b8d5098feba035bd6014d.jpg extracting: train/img92_png.rf.5b79211320122e08554541c15fc041dd.jpg extracting: train/img93_png.rf.7fbe9b0dcab1f063b154796d00ae669b.jpg extracting: train/img95_png.rf.c97bb901c22e4f1519bac037ffbdbbf7.jpg extracting: train/img97_png.rf.2e3f7205a9d122aa07906ebe643f1c04.jpg extracting: train/img98_png.rf.c6da81320ec0c22868d84c2291b416f5.jpg creating: valid/ extracting: valid/27img121_png.rf.6b1bbeee06ff52963c7b12c7bfb2aacc.jpg extracting: valid/2images12_png.rf.ba715b76693ae62d01e142ba9859ffc9.jpg extracting: valid/2images35_png.rf.81e0cc483a896440e148a5df5550d243.jpg extracting: valid/2images40_png.rf.45e16e4d96b21eeb7b0e06556ca12291.jpg extracting: valid/3images19_png.rf.aec1de41eff03d6e343427691b2a3029.jpg extracting: valid/3images1_png.rf.f293d93f952977825a07613f23a55f70.jpg extracting: valid/6images11_png.rf.a467d473bfa546de8e2c5ef4ef894802.jpg extracting: valid/6images2_png.rf.386c9a11cef823c522619aefd9c7ca9d.jpg extracting: valid/_annotations.txt extracting: valid/_classes.txt extracting: valid/images14_png.rf.f0a78b8df38e6394e9cc3d56d7677c87.jpg extracting: valid/images2_png.rf.1f566a50352095712ec385ffc17b14c5.jpg extracting: valid/img101_png.rf.aca3e688b7798ee456467954274733de.jpg extracting: valid/img111_png.rf.4bc2a8d175d8bbe2a289ba9e0ed4c717.jpg extracting: valid/img112_png.rf.aaadc30802c92e3c1196a96b859c8ebb.jpg extracting: valid/img117_png.rf.76d5b2f35f4974cca3750f258af86101.jpg extracting: valid/img121_png.rf.a11051677709f708036ca072d0725099.jpg extracting: valid/img122_png.rf.f6c62a3f0290eae81ffc5c457f546adf.jpg extracting: valid/img141_png.rf.9d9ff6b78c2940546bf364e662b1c813.jpg extracting: valid/img165_png.rf.6bb45f3455f0340e377ec61e662d7846.jpg extracting: valid/img177_png.rf.fd279311108df43a7d9225cc26c2542f.jpg extracting: 
valid/img262_png.rf.cd066cf49feb976bf8cd8eca32dcf729.jpg extracting: valid/img27_png.rf.09745a24cc36301e1eca5c3a9bab3853.jpg extracting: valid/img304_png.rf.f91aa4dfe963c390a521fd748f1ab9f5.jpg extracting: valid/img313_png.rf.8ea5815425e82f42c06715e0b98342f2.jpg extracting: valid/img31_png.rf.3b72bf618de466d70ab487fe5e20ff70.jpg extracting: valid/img40_png.rf.8389bb867a237cad805b4819dc788a98.jpg extracting: valid/img41_png.rf.4f6f5b9dcbe9eb80f9913e223f321f66.jpg extracting: valid/img69_png.rf.52cb5ea0d37bc73a2fcc1ee19de2b124.jpg extracting: valid/img84_png.rf.c9700ee5dee2697886b497a2e17f1573.jpg
# Fetch the pretrained YOLOv3 Darknet weights (~237 MB) from the official
# pjreddie.com source; saved as yolov3.weights in the working directory.
!wget https://pjreddie.com/media/files/yolov3.weights
--2023-01-18 12:01:19-- https://pjreddie.com/media/files/yolov3.weights Translacja pjreddie.com (pjreddie.com)... 128.208.4.108 Łączenie się z pjreddie.com (pjreddie.com)|128.208.4.108|:443... połączono. Żądanie HTTP wysłano, oczekiwanie na odpowiedź... 200 OK Długość: 248007048 (237M) [application/octet-stream] Zapis do: `yolov3.weights' yolov3.weights 100%[===================>] 236,52M 17,0MB/s w 15s 2023-01-18 12:01:35 (15,4 MB/s) - zapisano `yolov3.weights' [248007048/248007048]
from keras.layers import ELU, PReLU, LeakyReLU
# Convert the Darknet config + pretrained weights into a Keras model:
# reads keras-yolo3/yolov3.cfg and yolov3.weights, writes model_data/yolo.h5.
# NOTE(review): assumes model_data/ exists or is created by convert.py — confirm.
!python keras-yolo3/convert.py keras-yolo3/yolov3.cfg yolov3.weights model_data/yolo.h5
Loading weights. Weights Header: 0 2 0 [32013312] Parsing Darknet config. Creating Keras model. Parsing section net_0 Parsing section convolutional_0 conv2d bn leaky (3, 3, 3, 32) Metal device set to: Apple M1 systemMemory: 8.00 GB maxCacheSize: 2.67 GB 2023-01-18 12:03:25.001841: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:306] Could not identify NUMA node of platform GPU ID 0, defaulting to 0. Your kernel may not have been built with NUMA support. 2023-01-18 12:03:25.002402: I tensorflow/core/common_runtime/pluggable_device/pluggable_device_factory.cc:272] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 0 MB memory) -> physical PluggableDevice (device: 0, name: METAL, pci bus id: <undefined>) Parsing section convolutional_1 conv2d bn leaky (3, 3, 32, 64) Parsing section convolutional_2 conv2d bn leaky (1, 1, 64, 32) Parsing section convolutional_3 conv2d bn leaky (3, 3, 32, 64) Parsing section shortcut_0 Parsing section convolutional_4 conv2d bn leaky (3, 3, 64, 128) Parsing section convolutional_5 conv2d bn leaky (1, 1, 128, 64) Parsing section convolutional_6 conv2d bn leaky (3, 3, 64, 128) Parsing section shortcut_1 Parsing section convolutional_7 conv2d bn leaky (1, 1, 128, 64) Parsing section convolutional_8 conv2d bn leaky (3, 3, 64, 128) Parsing section shortcut_2 Parsing section convolutional_9 conv2d bn leaky (3, 3, 128, 256) Parsing section convolutional_10 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_11 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_3 Parsing section convolutional_12 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_13 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_4 Parsing section convolutional_14 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_15 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_5 Parsing section convolutional_16 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_17 
conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_6 Parsing section convolutional_18 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_19 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_7 Parsing section convolutional_20 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_21 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_8 Parsing section convolutional_22 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_23 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_9 Parsing section convolutional_24 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_25 conv2d bn leaky (3, 3, 128, 256) Parsing section shortcut_10 Parsing section convolutional_26 conv2d bn leaky (3, 3, 256, 512) Parsing section convolutional_27 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_28 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_11 Parsing section convolutional_29 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_30 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_12 Parsing section convolutional_31 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_32 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_13 Parsing section convolutional_33 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_34 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_14 Parsing section convolutional_35 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_36 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_15 Parsing section convolutional_37 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_38 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_16 Parsing section convolutional_39 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_40 conv2d bn leaky (3, 3, 256, 512) Parsing section shortcut_17 Parsing section convolutional_41 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_42 conv2d bn leaky 
(3, 3, 256, 512) Parsing section shortcut_18 Parsing section convolutional_43 conv2d bn leaky (3, 3, 512, 1024) Parsing section convolutional_44 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_45 conv2d bn leaky (3, 3, 512, 1024) Parsing section shortcut_19 Parsing section convolutional_46 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_47 conv2d bn leaky (3, 3, 512, 1024) Parsing section shortcut_20 Parsing section convolutional_48 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_49 conv2d bn leaky (3, 3, 512, 1024) Parsing section shortcut_21 Parsing section convolutional_50 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_51 conv2d bn leaky (3, 3, 512, 1024) Parsing section shortcut_22 Parsing section convolutional_52 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_53 conv2d bn leaky (3, 3, 512, 1024) Parsing section convolutional_54 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_55 conv2d bn leaky (3, 3, 512, 1024) Parsing section convolutional_56 conv2d bn leaky (1, 1, 1024, 512) Parsing section convolutional_57 conv2d bn leaky (3, 3, 512, 1024) Parsing section convolutional_58 conv2d linear (1, 1, 1024, 255) Parsing section yolo_0 Parsing section route_0 Parsing section convolutional_59 conv2d bn leaky (1, 1, 512, 256) Parsing section upsample_0 Parsing section route_1 Concatenating route layers: [<KerasTensor: shape=(None, None, None, 256) dtype=float32 (created by layer 'up_sampling2d')>, <KerasTensor: shape=(None, None, None, 512) dtype=float32 (created by layer 'add_18')>] Parsing section convolutional_60 conv2d bn leaky (1, 1, 768, 256) Parsing section convolutional_61 conv2d bn leaky (3, 3, 256, 512) Parsing section convolutional_62 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_63 conv2d bn leaky (3, 3, 256, 512) Parsing section convolutional_64 conv2d bn leaky (1, 1, 512, 256) Parsing section convolutional_65 conv2d bn leaky (3, 3, 256, 512) 
Parsing section convolutional_66 conv2d linear (1, 1, 512, 255) Parsing section yolo_1 Parsing section route_2 Parsing section convolutional_67 conv2d bn leaky (1, 1, 256, 128) Parsing section upsample_1 Parsing section route_3 Concatenating route layers: [<KerasTensor: shape=(None, None, None, 128) dtype=float32 (created by layer 'up_sampling2d_1')>, <KerasTensor: shape=(None, None, None, 256) dtype=float32 (created by layer 'add_10')>] Parsing section convolutional_68 conv2d bn leaky (1, 1, 384, 128) Parsing section convolutional_69 conv2d bn leaky (3, 3, 128, 256) Parsing section convolutional_70 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_71 conv2d bn leaky (3, 3, 128, 256) Parsing section convolutional_72 conv2d bn leaky (1, 1, 256, 128) Parsing section convolutional_73 conv2d bn leaky (3, 3, 128, 256) Parsing section convolutional_74 conv2d linear (1, 1, 256, 255) Parsing section yolo_2 Model: "model" __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_1 (InputLayer) [(None, None, None, 0 [] 3)] conv2d (Conv2D) (None, None, None, 864 ['input_1[0][0]'] 32) batch_normalization (BatchNorm (None, None, None, 128 ['conv2d[0][0]'] alization) 32) leaky_re_lu (LeakyReLU) (None, None, None, 0 ['batch_normalization[0][0]'] 32) zero_padding2d (ZeroPadding2D) (None, None, None, 0 ['leaky_re_lu[0][0]'] 32) conv2d_1 (Conv2D) (None, None, None, 18432 ['zero_padding2d[0][0]'] 64) batch_normalization_1 (BatchNo (None, None, None, 256 ['conv2d_1[0][0]'] rmalization) 64) leaky_re_lu_1 (LeakyReLU) (None, None, None, 0 ['batch_normalization_1[0][0]'] 64) conv2d_2 (Conv2D) (None, None, None, 2048 ['leaky_re_lu_1[0][0]'] 32) batch_normalization_2 (BatchNo (None, None, None, 128 ['conv2d_2[0][0]'] rmalization) 32) leaky_re_lu_2 (LeakyReLU) (None, None, None, 0 
['batch_normalization_2[0][0]'] 32) conv2d_3 (Conv2D) (None, None, None, 18432 ['leaky_re_lu_2[0][0]'] 64) batch_normalization_3 (BatchNo (None, None, None, 256 ['conv2d_3[0][0]'] rmalization) 64) leaky_re_lu_3 (LeakyReLU) (None, None, None, 0 ['batch_normalization_3[0][0]'] 64) add (Add) (None, None, None, 0 ['leaky_re_lu_1[0][0]', 64) 'leaky_re_lu_3[0][0]'] zero_padding2d_1 (ZeroPadding2 (None, None, None, 0 ['add[0][0]'] D) 64) conv2d_4 (Conv2D) (None, None, None, 73728 ['zero_padding2d_1[0][0]'] 128) batch_normalization_4 (BatchNo (None, None, None, 512 ['conv2d_4[0][0]'] rmalization) 128) leaky_re_lu_4 (LeakyReLU) (None, None, None, 0 ['batch_normalization_4[0][0]'] 128) conv2d_5 (Conv2D) (None, None, None, 8192 ['leaky_re_lu_4[0][0]'] 64) batch_normalization_5 (BatchNo (None, None, None, 256 ['conv2d_5[0][0]'] rmalization) 64) leaky_re_lu_5 (LeakyReLU) (None, None, None, 0 ['batch_normalization_5[0][0]'] 64) conv2d_6 (Conv2D) (None, None, None, 73728 ['leaky_re_lu_5[0][0]'] 128) batch_normalization_6 (BatchNo (None, None, None, 512 ['conv2d_6[0][0]'] rmalization) 128) leaky_re_lu_6 (LeakyReLU) (None, None, None, 0 ['batch_normalization_6[0][0]'] 128) add_1 (Add) (None, None, None, 0 ['leaky_re_lu_4[0][0]', 128) 'leaky_re_lu_6[0][0]'] conv2d_7 (Conv2D) (None, None, None, 8192 ['add_1[0][0]'] 64) batch_normalization_7 (BatchNo (None, None, None, 256 ['conv2d_7[0][0]'] rmalization) 64) leaky_re_lu_7 (LeakyReLU) (None, None, None, 0 ['batch_normalization_7[0][0]'] 64) conv2d_8 (Conv2D) (None, None, None, 73728 ['leaky_re_lu_7[0][0]'] 128) batch_normalization_8 (BatchNo (None, None, None, 512 ['conv2d_8[0][0]'] rmalization) 128) leaky_re_lu_8 (LeakyReLU) (None, None, None, 0 ['batch_normalization_8[0][0]'] 128) add_2 (Add) (None, None, None, 0 ['add_1[0][0]', 128) 'leaky_re_lu_8[0][0]'] zero_padding2d_2 (ZeroPadding2 (None, None, None, 0 ['add_2[0][0]'] D) 128) conv2d_9 (Conv2D) (None, None, None, 294912 ['zero_padding2d_2[0][0]'] 256) batch_normalization_9 
(BatchNo (None, None, None, 1024 ['conv2d_9[0][0]'] rmalization) 256) leaky_re_lu_9 (LeakyReLU) (None, None, None, 0 ['batch_normalization_9[0][0]'] 256) conv2d_10 (Conv2D) (None, None, None, 32768 ['leaky_re_lu_9[0][0]'] 128) batch_normalization_10 (BatchN (None, None, None, 512 ['conv2d_10[0][0]'] ormalization) 128) leaky_re_lu_10 (LeakyReLU) (None, None, None, 0 ['batch_normalization_10[0][0]'] 128) conv2d_11 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_10[0][0]'] 256) batch_normalization_11 (BatchN (None, None, None, 1024 ['conv2d_11[0][0]'] ormalization) 256) leaky_re_lu_11 (LeakyReLU) (None, None, None, 0 ['batch_normalization_11[0][0]'] 256) add_3 (Add) (None, None, None, 0 ['leaky_re_lu_9[0][0]', 256) 'leaky_re_lu_11[0][0]'] conv2d_12 (Conv2D) (None, None, None, 32768 ['add_3[0][0]'] 128) batch_normalization_12 (BatchN (None, None, None, 512 ['conv2d_12[0][0]'] ormalization) 128) leaky_re_lu_12 (LeakyReLU) (None, None, None, 0 ['batch_normalization_12[0][0]'] 128) conv2d_13 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_12[0][0]'] 256) batch_normalization_13 (BatchN (None, None, None, 1024 ['conv2d_13[0][0]'] ormalization) 256) leaky_re_lu_13 (LeakyReLU) (None, None, None, 0 ['batch_normalization_13[0][0]'] 256) add_4 (Add) (None, None, None, 0 ['add_3[0][0]', 256) 'leaky_re_lu_13[0][0]'] conv2d_14 (Conv2D) (None, None, None, 32768 ['add_4[0][0]'] 128) batch_normalization_14 (BatchN (None, None, None, 512 ['conv2d_14[0][0]'] ormalization) 128) leaky_re_lu_14 (LeakyReLU) (None, None, None, 0 ['batch_normalization_14[0][0]'] 128) conv2d_15 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_14[0][0]'] 256) batch_normalization_15 (BatchN (None, None, None, 1024 ['conv2d_15[0][0]'] ormalization) 256) leaky_re_lu_15 (LeakyReLU) (None, None, None, 0 ['batch_normalization_15[0][0]'] 256) add_5 (Add) (None, None, None, 0 ['add_4[0][0]', 256) 'leaky_re_lu_15[0][0]'] conv2d_16 (Conv2D) (None, None, None, 32768 ['add_5[0][0]'] 128) batch_normalization_16 (BatchN 
(None, None, None, 512 ['conv2d_16[0][0]'] ormalization) 128) leaky_re_lu_16 (LeakyReLU) (None, None, None, 0 ['batch_normalization_16[0][0]'] 128) conv2d_17 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_16[0][0]'] 256) batch_normalization_17 (BatchN (None, None, None, 1024 ['conv2d_17[0][0]'] ormalization) 256) leaky_re_lu_17 (LeakyReLU) (None, None, None, 0 ['batch_normalization_17[0][0]'] 256) add_6 (Add) (None, None, None, 0 ['add_5[0][0]', 256) 'leaky_re_lu_17[0][0]'] conv2d_18 (Conv2D) (None, None, None, 32768 ['add_6[0][0]'] 128) batch_normalization_18 (BatchN (None, None, None, 512 ['conv2d_18[0][0]'] ormalization) 128) leaky_re_lu_18 (LeakyReLU) (None, None, None, 0 ['batch_normalization_18[0][0]'] 128) conv2d_19 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_18[0][0]'] 256) batch_normalization_19 (BatchN (None, None, None, 1024 ['conv2d_19[0][0]'] ormalization) 256) leaky_re_lu_19 (LeakyReLU) (None, None, None, 0 ['batch_normalization_19[0][0]'] 256) add_7 (Add) (None, None, None, 0 ['add_6[0][0]', 256) 'leaky_re_lu_19[0][0]'] conv2d_20 (Conv2D) (None, None, None, 32768 ['add_7[0][0]'] 128) batch_normalization_20 (BatchN (None, None, None, 512 ['conv2d_20[0][0]'] ormalization) 128) leaky_re_lu_20 (LeakyReLU) (None, None, None, 0 ['batch_normalization_20[0][0]'] 128) conv2d_21 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_20[0][0]'] 256) batch_normalization_21 (BatchN (None, None, None, 1024 ['conv2d_21[0][0]'] ormalization) 256) leaky_re_lu_21 (LeakyReLU) (None, None, None, 0 ['batch_normalization_21[0][0]'] 256) add_8 (Add) (None, None, None, 0 ['add_7[0][0]', 256) 'leaky_re_lu_21[0][0]'] conv2d_22 (Conv2D) (None, None, None, 32768 ['add_8[0][0]'] 128) batch_normalization_22 (BatchN (None, None, None, 512 ['conv2d_22[0][0]'] ormalization) 128) leaky_re_lu_22 (LeakyReLU) (None, None, None, 0 ['batch_normalization_22[0][0]'] 128) conv2d_23 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_22[0][0]'] 256) batch_normalization_23 (BatchN (None, None, 
None, 1024 ['conv2d_23[0][0]'] ormalization) 256) leaky_re_lu_23 (LeakyReLU) (None, None, None, 0 ['batch_normalization_23[0][0]'] 256) add_9 (Add) (None, None, None, 0 ['add_8[0][0]', 256) 'leaky_re_lu_23[0][0]'] conv2d_24 (Conv2D) (None, None, None, 32768 ['add_9[0][0]'] 128) batch_normalization_24 (BatchN (None, None, None, 512 ['conv2d_24[0][0]'] ormalization) 128) leaky_re_lu_24 (LeakyReLU) (None, None, None, 0 ['batch_normalization_24[0][0]'] 128) conv2d_25 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_24[0][0]'] 256) batch_normalization_25 (BatchN (None, None, None, 1024 ['conv2d_25[0][0]'] ormalization) 256) leaky_re_lu_25 (LeakyReLU) (None, None, None, 0 ['batch_normalization_25[0][0]'] 256) add_10 (Add) (None, None, None, 0 ['add_9[0][0]', 256) 'leaky_re_lu_25[0][0]'] zero_padding2d_3 (ZeroPadding2 (None, None, None, 0 ['add_10[0][0]'] D) 256) conv2d_26 (Conv2D) (None, None, None, 1179648 ['zero_padding2d_3[0][0]'] 512) batch_normalization_26 (BatchN (None, None, None, 2048 ['conv2d_26[0][0]'] ormalization) 512) leaky_re_lu_26 (LeakyReLU) (None, None, None, 0 ['batch_normalization_26[0][0]'] 512) conv2d_27 (Conv2D) (None, None, None, 131072 ['leaky_re_lu_26[0][0]'] 256) batch_normalization_27 (BatchN (None, None, None, 1024 ['conv2d_27[0][0]'] ormalization) 256) leaky_re_lu_27 (LeakyReLU) (None, None, None, 0 ['batch_normalization_27[0][0]'] 256) conv2d_28 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_27[0][0]'] 512) batch_normalization_28 (BatchN (None, None, None, 2048 ['conv2d_28[0][0]'] ormalization) 512) leaky_re_lu_28 (LeakyReLU) (None, None, None, 0 ['batch_normalization_28[0][0]'] 512) add_11 (Add) (None, None, None, 0 ['leaky_re_lu_26[0][0]', 512) 'leaky_re_lu_28[0][0]'] conv2d_29 (Conv2D) (None, None, None, 131072 ['add_11[0][0]'] 256) batch_normalization_29 (BatchN (None, None, None, 1024 ['conv2d_29[0][0]'] ormalization) 256) leaky_re_lu_29 (LeakyReLU) (None, None, None, 0 ['batch_normalization_29[0][0]'] 256) conv2d_30 (Conv2D) (None, 
None, None, 1179648 ['leaky_re_lu_29[0][0]'] 512) batch_normalization_30 (BatchN (None, None, None, 2048 ['conv2d_30[0][0]'] ormalization) 512) leaky_re_lu_30 (LeakyReLU) (None, None, None, 0 ['batch_normalization_30[0][0]'] 512) add_12 (Add) (None, None, None, 0 ['add_11[0][0]', 512) 'leaky_re_lu_30[0][0]'] conv2d_31 (Conv2D) (None, None, None, 131072 ['add_12[0][0]'] 256) batch_normalization_31 (BatchN (None, None, None, 1024 ['conv2d_31[0][0]'] ormalization) 256) leaky_re_lu_31 (LeakyReLU) (None, None, None, 0 ['batch_normalization_31[0][0]'] 256) conv2d_32 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_31[0][0]'] 512) batch_normalization_32 (BatchN (None, None, None, 2048 ['conv2d_32[0][0]'] ormalization) 512) leaky_re_lu_32 (LeakyReLU) (None, None, None, 0 ['batch_normalization_32[0][0]'] 512) add_13 (Add) (None, None, None, 0 ['add_12[0][0]', 512) 'leaky_re_lu_32[0][0]'] conv2d_33 (Conv2D) (None, None, None, 131072 ['add_13[0][0]'] 256) batch_normalization_33 (BatchN (None, None, None, 1024 ['conv2d_33[0][0]'] ormalization) 256) leaky_re_lu_33 (LeakyReLU) (None, None, None, 0 ['batch_normalization_33[0][0]'] 256) conv2d_34 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_33[0][0]'] 512) batch_normalization_34 (BatchN (None, None, None, 2048 ['conv2d_34[0][0]'] ormalization) 512) leaky_re_lu_34 (LeakyReLU) (None, None, None, 0 ['batch_normalization_34[0][0]'] 512) add_14 (Add) (None, None, None, 0 ['add_13[0][0]', 512) 'leaky_re_lu_34[0][0]'] conv2d_35 (Conv2D) (None, None, None, 131072 ['add_14[0][0]'] 256) batch_normalization_35 (BatchN (None, None, None, 1024 ['conv2d_35[0][0]'] ormalization) 256) leaky_re_lu_35 (LeakyReLU) (None, None, None, 0 ['batch_normalization_35[0][0]'] 256) conv2d_36 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_35[0][0]'] 512) batch_normalization_36 (BatchN (None, None, None, 2048 ['conv2d_36[0][0]'] ormalization) 512) leaky_re_lu_36 (LeakyReLU) (None, None, None, 0 ['batch_normalization_36[0][0]'] 512) add_15 (Add) (None, 
None, None, 0 ['add_14[0][0]', 512) 'leaky_re_lu_36[0][0]'] conv2d_37 (Conv2D) (None, None, None, 131072 ['add_15[0][0]'] 256) batch_normalization_37 (BatchN (None, None, None, 1024 ['conv2d_37[0][0]'] ormalization) 256) leaky_re_lu_37 (LeakyReLU) (None, None, None, 0 ['batch_normalization_37[0][0]'] 256) conv2d_38 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_37[0][0]'] 512) batch_normalization_38 (BatchN (None, None, None, 2048 ['conv2d_38[0][0]'] ormalization) 512) leaky_re_lu_38 (LeakyReLU) (None, None, None, 0 ['batch_normalization_38[0][0]'] 512) add_16 (Add) (None, None, None, 0 ['add_15[0][0]', 512) 'leaky_re_lu_38[0][0]'] conv2d_39 (Conv2D) (None, None, None, 131072 ['add_16[0][0]'] 256) batch_normalization_39 (BatchN (None, None, None, 1024 ['conv2d_39[0][0]'] ormalization) 256) leaky_re_lu_39 (LeakyReLU) (None, None, None, 0 ['batch_normalization_39[0][0]'] 256) conv2d_40 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_39[0][0]'] 512) batch_normalization_40 (BatchN (None, None, None, 2048 ['conv2d_40[0][0]'] ormalization) 512) leaky_re_lu_40 (LeakyReLU) (None, None, None, 0 ['batch_normalization_40[0][0]'] 512) add_17 (Add) (None, None, None, 0 ['add_16[0][0]', 512) 'leaky_re_lu_40[0][0]'] conv2d_41 (Conv2D) (None, None, None, 131072 ['add_17[0][0]'] 256) batch_normalization_41 (BatchN (None, None, None, 1024 ['conv2d_41[0][0]'] ormalization) 256) leaky_re_lu_41 (LeakyReLU) (None, None, None, 0 ['batch_normalization_41[0][0]'] 256) conv2d_42 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_41[0][0]'] 512) batch_normalization_42 (BatchN (None, None, None, 2048 ['conv2d_42[0][0]'] ormalization) 512) leaky_re_lu_42 (LeakyReLU) (None, None, None, 0 ['batch_normalization_42[0][0]'] 512) add_18 (Add) (None, None, None, 0 ['add_17[0][0]', 512) 'leaky_re_lu_42[0][0]'] zero_padding2d_4 (ZeroPadding2 (None, None, None, 0 ['add_18[0][0]'] D) 512) conv2d_43 (Conv2D) (None, None, None, 4718592 ['zero_padding2d_4[0][0]'] 1024) batch_normalization_43 (BatchN 
(None, None, None, 4096 ['conv2d_43[0][0]'] ormalization) 1024) leaky_re_lu_43 (LeakyReLU) (None, None, None, 0 ['batch_normalization_43[0][0]'] 1024) conv2d_44 (Conv2D) (None, None, None, 524288 ['leaky_re_lu_43[0][0]'] 512) batch_normalization_44 (BatchN (None, None, None, 2048 ['conv2d_44[0][0]'] ormalization) 512) leaky_re_lu_44 (LeakyReLU) (None, None, None, 0 ['batch_normalization_44[0][0]'] 512) conv2d_45 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_44[0][0]'] 1024) batch_normalization_45 (BatchN (None, None, None, 4096 ['conv2d_45[0][0]'] ormalization) 1024) leaky_re_lu_45 (LeakyReLU) (None, None, None, 0 ['batch_normalization_45[0][0]'] 1024) add_19 (Add) (None, None, None, 0 ['leaky_re_lu_43[0][0]', 1024) 'leaky_re_lu_45[0][0]'] conv2d_46 (Conv2D) (None, None, None, 524288 ['add_19[0][0]'] 512) batch_normalization_46 (BatchN (None, None, None, 2048 ['conv2d_46[0][0]'] ormalization) 512) leaky_re_lu_46 (LeakyReLU) (None, None, None, 0 ['batch_normalization_46[0][0]'] 512) conv2d_47 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_46[0][0]'] 1024) batch_normalization_47 (BatchN (None, None, None, 4096 ['conv2d_47[0][0]'] ormalization) 1024) leaky_re_lu_47 (LeakyReLU) (None, None, None, 0 ['batch_normalization_47[0][0]'] 1024) add_20 (Add) (None, None, None, 0 ['add_19[0][0]', 1024) 'leaky_re_lu_47[0][0]'] conv2d_48 (Conv2D) (None, None, None, 524288 ['add_20[0][0]'] 512) batch_normalization_48 (BatchN (None, None, None, 2048 ['conv2d_48[0][0]'] ormalization) 512) leaky_re_lu_48 (LeakyReLU) (None, None, None, 0 ['batch_normalization_48[0][0]'] 512) conv2d_49 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_48[0][0]'] 1024) batch_normalization_49 (BatchN (None, None, None, 4096 ['conv2d_49[0][0]'] ormalization) 1024) leaky_re_lu_49 (LeakyReLU) (None, None, None, 0 ['batch_normalization_49[0][0]'] 1024) add_21 (Add) (None, None, None, 0 ['add_20[0][0]', 1024) 'leaky_re_lu_49[0][0]'] conv2d_50 (Conv2D) (None, None, None, 524288 ['add_21[0][0]'] 512) 
batch_normalization_50 (BatchN (None, None, None, 2048 ['conv2d_50[0][0]'] ormalization) 512) leaky_re_lu_50 (LeakyReLU) (None, None, None, 0 ['batch_normalization_50[0][0]'] 512) conv2d_51 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_50[0][0]'] 1024) batch_normalization_51 (BatchN (None, None, None, 4096 ['conv2d_51[0][0]'] ormalization) 1024) leaky_re_lu_51 (LeakyReLU) (None, None, None, 0 ['batch_normalization_51[0][0]'] 1024) add_22 (Add) (None, None, None, 0 ['add_21[0][0]', 1024) 'leaky_re_lu_51[0][0]'] conv2d_52 (Conv2D) (None, None, None, 524288 ['add_22[0][0]'] 512) batch_normalization_52 (BatchN (None, None, None, 2048 ['conv2d_52[0][0]'] ormalization) 512) leaky_re_lu_52 (LeakyReLU) (None, None, None, 0 ['batch_normalization_52[0][0]'] 512) conv2d_53 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_52[0][0]'] 1024) batch_normalization_53 (BatchN (None, None, None, 4096 ['conv2d_53[0][0]'] ormalization) 1024) leaky_re_lu_53 (LeakyReLU) (None, None, None, 0 ['batch_normalization_53[0][0]'] 1024) conv2d_54 (Conv2D) (None, None, None, 524288 ['leaky_re_lu_53[0][0]'] 512) batch_normalization_54 (BatchN (None, None, None, 2048 ['conv2d_54[0][0]'] ormalization) 512) leaky_re_lu_54 (LeakyReLU) (None, None, None, 0 ['batch_normalization_54[0][0]'] 512) conv2d_55 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_54[0][0]'] 1024) batch_normalization_55 (BatchN (None, None, None, 4096 ['conv2d_55[0][0]'] ormalization) 1024) leaky_re_lu_55 (LeakyReLU) (None, None, None, 0 ['batch_normalization_55[0][0]'] 1024) conv2d_56 (Conv2D) (None, None, None, 524288 ['leaky_re_lu_55[0][0]'] 512) batch_normalization_56 (BatchN (None, None, None, 2048 ['conv2d_56[0][0]'] ormalization) 512) leaky_re_lu_56 (LeakyReLU) (None, None, None, 0 ['batch_normalization_56[0][0]'] 512) conv2d_59 (Conv2D) (None, None, None, 131072 ['leaky_re_lu_56[0][0]'] 256) batch_normalization_58 (BatchN (None, None, None, 1024 ['conv2d_59[0][0]'] ormalization) 256) leaky_re_lu_58 (LeakyReLU) (None, 
None, None, 0 ['batch_normalization_58[0][0]'] 256) up_sampling2d (UpSampling2D) (None, None, None, 0 ['leaky_re_lu_58[0][0]'] 256) concatenate (Concatenate) (None, None, None, 0 ['up_sampling2d[0][0]', 768) 'add_18[0][0]'] conv2d_60 (Conv2D) (None, None, None, 196608 ['concatenate[0][0]'] 256) batch_normalization_59 (BatchN (None, None, None, 1024 ['conv2d_60[0][0]'] ormalization) 256) leaky_re_lu_59 (LeakyReLU) (None, None, None, 0 ['batch_normalization_59[0][0]'] 256) conv2d_61 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_59[0][0]'] 512) batch_normalization_60 (BatchN (None, None, None, 2048 ['conv2d_61[0][0]'] ormalization) 512) leaky_re_lu_60 (LeakyReLU) (None, None, None, 0 ['batch_normalization_60[0][0]'] 512) conv2d_62 (Conv2D) (None, None, None, 131072 ['leaky_re_lu_60[0][0]'] 256) batch_normalization_61 (BatchN (None, None, None, 1024 ['conv2d_62[0][0]'] ormalization) 256) leaky_re_lu_61 (LeakyReLU) (None, None, None, 0 ['batch_normalization_61[0][0]'] 256) conv2d_63 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_61[0][0]'] 512) batch_normalization_62 (BatchN (None, None, None, 2048 ['conv2d_63[0][0]'] ormalization) 512) leaky_re_lu_62 (LeakyReLU) (None, None, None, 0 ['batch_normalization_62[0][0]'] 512) conv2d_64 (Conv2D) (None, None, None, 131072 ['leaky_re_lu_62[0][0]'] 256) batch_normalization_63 (BatchN (None, None, None, 1024 ['conv2d_64[0][0]'] ormalization) 256) leaky_re_lu_63 (LeakyReLU) (None, None, None, 0 ['batch_normalization_63[0][0]'] 256) conv2d_67 (Conv2D) (None, None, None, 32768 ['leaky_re_lu_63[0][0]'] 128) batch_normalization_65 (BatchN (None, None, None, 512 ['conv2d_67[0][0]'] ormalization) 128) leaky_re_lu_65 (LeakyReLU) (None, None, None, 0 ['batch_normalization_65[0][0]'] 128) up_sampling2d_1 (UpSampling2D) (None, None, None, 0 ['leaky_re_lu_65[0][0]'] 128) concatenate_1 (Concatenate) (None, None, None, 0 ['up_sampling2d_1[0][0]', 384) 'add_10[0][0]'] conv2d_68 (Conv2D) (None, None, None, 49152 ['concatenate_1[0][0]'] 
128) batch_normalization_66 (BatchN (None, None, None, 512 ['conv2d_68[0][0]'] ormalization) 128) leaky_re_lu_66 (LeakyReLU) (None, None, None, 0 ['batch_normalization_66[0][0]'] 128) conv2d_69 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_66[0][0]'] 256) batch_normalization_67 (BatchN (None, None, None, 1024 ['conv2d_69[0][0]'] ormalization) 256) leaky_re_lu_67 (LeakyReLU) (None, None, None, 0 ['batch_normalization_67[0][0]'] 256) conv2d_70 (Conv2D) (None, None, None, 32768 ['leaky_re_lu_67[0][0]'] 128) batch_normalization_68 (BatchN (None, None, None, 512 ['conv2d_70[0][0]'] ormalization) 128) leaky_re_lu_68 (LeakyReLU) (None, None, None, 0 ['batch_normalization_68[0][0]'] 128) conv2d_71 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_68[0][0]'] 256) batch_normalization_69 (BatchN (None, None, None, 1024 ['conv2d_71[0][0]'] ormalization) 256) leaky_re_lu_69 (LeakyReLU) (None, None, None, 0 ['batch_normalization_69[0][0]'] 256) conv2d_72 (Conv2D) (None, None, None, 32768 ['leaky_re_lu_69[0][0]'] 128) batch_normalization_70 (BatchN (None, None, None, 512 ['conv2d_72[0][0]'] ormalization) 128) leaky_re_lu_70 (LeakyReLU) (None, None, None, 0 ['batch_normalization_70[0][0]'] 128) conv2d_57 (Conv2D) (None, None, None, 4718592 ['leaky_re_lu_56[0][0]'] 1024) conv2d_65 (Conv2D) (None, None, None, 1179648 ['leaky_re_lu_63[0][0]'] 512) conv2d_73 (Conv2D) (None, None, None, 294912 ['leaky_re_lu_70[0][0]'] 256) batch_normalization_57 (BatchN (None, None, None, 4096 ['conv2d_57[0][0]'] ormalization) 1024) batch_normalization_64 (BatchN (None, None, None, 2048 ['conv2d_65[0][0]'] ormalization) 512) batch_normalization_71 (BatchN (None, None, None, 1024 ['conv2d_73[0][0]'] ormalization) 256) leaky_re_lu_57 (LeakyReLU) (None, None, None, 0 ['batch_normalization_57[0][0]'] 1024) leaky_re_lu_64 (LeakyReLU) (None, None, None, 0 ['batch_normalization_64[0][0]'] 512) leaky_re_lu_71 (LeakyReLU) (None, None, None, 0 ['batch_normalization_71[0][0]'] 256) conv2d_58 (Conv2D) (None, 
None, None, 261375 ['leaky_re_lu_57[0][0]'] 255) conv2d_66 (Conv2D) (None, None, None, 130815 ['leaky_re_lu_64[0][0]'] 255) conv2d_74 (Conv2D) (None, None, None, 65535 ['leaky_re_lu_71[0][0]'] 255) ================================================================================================== Total params: 62,001,757 Trainable params: 61,949,149 Non-trainable params: 52,608 __________________________________________________________________________________________________ None WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model. Saved Keras model to model_data/yolo.h5 Read 62001757 of 62001757.0 from Darknet weights.
"""
Self-contained Python script to train YOLOv3 on your own dataset
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    """Train YOLOv3 (or Tiny YOLOv3) on a Roboflow-exported dataset.

    Two-stage schedule: first train with the backbone frozen to get a
    stable loss, then unfreeze every layer and fine-tune at a lower LR.
    Weights are checkpointed to ``log_dir`` throughout.
    """
    annotation_path = './train/_annotations.txt'  # path to Roboflow data annotations
    log_dir = './logs/000/'  # where we're storing our logs
    classes_path = './train/_classes.txt'  # path to Roboflow class names
    anchors_path = './model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    print("-------------------CLASS NAMES-------------------")
    print(class_names)
    print("-------------------CLASS NAMES-------------------")
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (256,256)  # multiple of 32, hw default = (416,416)
    # Six anchors means the tiny network; nine means full YOLOv3.
    is_tiny_version = len(anchors)==6  # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='./model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='./model_data/yolo.h5')  # make sure you know what you freeze
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    val_split = 0.2  # set the size of the validation set
    with open(annotation_path) as f:
        lines = f.readlines()
    # Fixed seed so the train/val split is reproducible across runs.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})
        batch_size = 16
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=500,
            initial_epoch=0,
            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')
    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred})  # recompile to apply the change
        print('Unfreeze all of the layers.')
        batch_size = 16  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
    # Further training if needed.
def get_classes(classes_path):
    """Read one class name per line from *classes_path* and return them as a list."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle.readlines()]
def get_anchors(anchors_path):
    """Load comma-separated anchors from the first line of *anchors_path*.

    Returns an (N, 2) numpy array of (width, height) pairs.
    """
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
        weights_path='./model_data/yolo.h5'):
    """Build the trainable YOLOv3 graph: body + y_true inputs + yolo_loss layer."""
    K.clear_session()  # start from a fresh session/graph
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # One y_true placeholder per detection scale; output strides are 32, 16, 8.
    y_true = [Input(shape=(h // stride, w // stride, num_anchors // 3, num_classes + 5))
              for stride in (32, 16, 8)]
    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # 1 -> freeze the darknet53 body; 2 -> freeze all but the 3 output layers.
            num = 185 if freeze_body == 1 else len(model_body.layers) - 3
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    # The loss is computed inside the graph so fit() can use a dummy target.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    return Model([model_body.input, *y_true], model_loss)
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
        weights_path='./model_data/tiny_yolo_weights.h5'):
    """Build the trainable Tiny YOLOv3 graph: body + y_true inputs + yolo_loss layer."""
    K.clear_session()  # start from a fresh session/graph
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # Tiny YOLOv3 predicts at two scales; output strides are 32 and 16.
    y_true = [Input(shape=(h // stride, w // stride, num_anchors // 2, num_classes + 5))
              for stride in (32, 16)]
    model_body = tiny_yolo_body(image_input, num_anchors // 2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # 1 -> freeze the darknet body; 2 -> freeze all but the 2 output layers.
            num = 20 if freeze_body == 1 else len(model_body.layers) - 2
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    # The loss is computed inside the graph so fit() can use a dummy target.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    return Model([model_body.input, *y_true], model_loss)
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Infinite batch generator for fit_generator.

    Yields ``([images, *y_true], zeros)`` — the real loss is produced by the
    in-graph yolo_loss layer, so the target array is just a dummy.
    """
    n = len(annotation_lines)
    i = 0
    while True:
        images, boxes = [], []
        for _ in range(batch_size):
            if i == 0:
                # Reshuffle at the start of every pass over the dataset.
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)
            images.append(image)
            boxes.append(box)
            i = (i + 1) % n
        image_data = np.array(images)
        box_data = np.array(boxes)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Return a data_generator, or None when there is nothing to iterate over."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
if __name__ == '__main__':
    # Script entry point: run the two-stage training schedule.
    _main()
Prepare the image for OCR
import cv2 as cv
from matplotlib import pyplot as plt
def grayscale(image):
    """Convert a BGR image to single-channel grayscale."""
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    return gray
def noise_removal(image):
    """Light denoise: 1x1 dilate/erode, a morphological close, then a median blur."""
    import numpy as np
    one = np.ones((1, 1), np.uint8)
    cleaned = cv.dilate(image, one, iterations=1)
    one = np.ones((1, 1), np.uint8)
    cleaned = cv.erode(cleaned, one, iterations=1)
    cleaned = cv.morphologyEx(cleaned, cv.MORPH_CLOSE, one)
    cleaned = cv.medianBlur(cleaned, 3)
    return cleaned
def thin_font(image):
    """Thin character strokes: invert, erode with a 2x2 kernel, invert back."""
    import numpy as np
    inverted = cv.bitwise_not(image)
    eroded = cv.erode(inverted, np.ones((2, 2), np.uint8), iterations=1)
    return cv.bitwise_not(eroded)
def thick_font(image):
    """Thicken character strokes: invert, dilate with a 2x2 kernel, invert back."""
    import numpy as np
    inverted = cv.bitwise_not(image)
    dilated = cv.dilate(inverted, np.ones((2, 2), np.uint8), iterations=1)
    return cv.bitwise_not(dilated)
def remove_borders(image):
    """Crop the image to the bounding box of its largest external contour."""
    found, _hierarchy = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    largest = sorted(found, key=cv.contourArea)[-1]
    x, y, w, h = cv.boundingRect(largest)
    return image[y:y + h, x:x + w]
# --- OCR pre-processing pipeline (flattened notebook cell) ---
image_file = './img/img00.png'
img = cv.imread(image_file)
gray_image = grayscale(img)
# Fixed-threshold binarisation: pixels above 100 are set to 150, others to 0.
thresh, im_bw = cv.threshold(gray_image, 100, 150, cv.THRESH_BINARY)
no_noise = noise_removal(im_bw)
# eroded_image = thin_font(no_noise)
# dilated_image = thick_font(eroded_image)
no_borders = remove_borders(no_noise)
cv.imwrite("temp/no_borders.jpg", no_borders)
# NOTE(review): display() is defined further down in this file; this call only
# works in the original notebook execution order, not as a flat script.
display('temp/no_borders.jpg')
def display(im_path):
    """Show the image at *im_path* at native resolution with no axes or chrome."""
    dpi = 80
    pixels = plt.imread(im_path)
    h, w = pixels.shape[:2]
    # Figure size in inches so one image pixel maps to one figure pixel at this dpi.
    fig = plt.figure(figsize=(w / float(dpi), h / float(dpi)))
    # A single axes spanning the entire figure, with every decoration hidden.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')
    ax.imshow(pixels, cmap='gray')
    plt.show()
display(image_file)
# Invert the original image (dark <-> light) and preview the result.
inverted_image = cv.bitwise_not(img)
cv.imwrite("temp/inverted.jpg", inverted_image)
display("temp/inverted.jpg")
def grayscale(image):
    """Convert a BGR image to grayscale (re-definition of the helper above)."""
    return cv.cvtColor(image, cv.COLOR_BGR2GRAY)
gray_image = grayscale(img)
cv.imwrite("temp/gray.jpg", gray_image)
display("temp/gray.jpg")
# Binarise: pixels above 170 become 210, others 0.
thresh, im_bw = cv.threshold(gray_image, 170, 210, cv.THRESH_BINARY)
cv.imwrite("temp/bw_image.jpg", im_bw)
display("temp/bw_image.jpg")
def noise_removal(image):
    """Light denoise: 1x1 dilate/erode, a morphological close, then a median blur."""
    import numpy as np
    kernel = np.ones((1, 1), np.uint8)
    image = cv.dilate(image, kernel, iterations=1)
    kernel = np.ones((1, 1), np.uint8)
    image = cv.erode(image, kernel, iterations=1)
    image = cv.morphologyEx(image, cv.MORPH_CLOSE, kernel)
    image = cv.medianBlur(image, 3)
    return image
no_noise = noise_removal(im_bw)
cv.imwrite("temp/no_noise.jpg", no_noise)
display("temp/no_noise.jpg")
def thin_font(image):
    """Thin character strokes: invert, erode with a 2x2 kernel, invert back."""
    import numpy as np
    image = cv.bitwise_not(image)
    kernel = np.ones((2, 2), np.uint8)
    image = cv.erode(image, kernel, iterations=1)
    image = cv.bitwise_not(image)
    return image
eroded_image = thin_font(no_noise)
cv.imwrite("temp/eroded_image.jpg", eroded_image)
display("temp/eroded_image.jpg")
def thick_font(image):
    """Thicken character strokes: invert, dilate with a 2x2 kernel, invert back."""
    import numpy as np
    image = cv.bitwise_not(image)
    kernel = np.ones((2, 2), np.uint8)
    image = cv.dilate(image, kernel, iterations=1)
    image = cv.bitwise_not(image)
    return image
dilated_image = thick_font(no_noise)
cv.imwrite("temp/dilated_image.jpg", dilated_image)
display("temp/dilated_image.jpg")
def remove_borders(image):
    """Crop to the bounding box of the largest external contour (drops the frame)."""
    contours, heiarchy = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cntsSorted = sorted(contours, key=lambda x:cv.contourArea(x))
    cnt = cntsSorted[-1]  # largest-area contour
    x, y, w, h = cv.boundingRect(cnt)
    crop = image[y:y+h, x:x+w]
    return (crop)
no_borders = remove_borders(no_noise)
cv.imwrite("temp/no_borders.jpg", no_borders)
display('temp/no_borders.jpg')