Add script for classifying images, update gitignore.
.gitignore
@@ -1,4 +1,8 @@
-.vscode/*
-*/__pycache__/*
+.vscode/
+*/__pycache__/
 *tex
-Images/TrainingImages/*
+Images/TrainingImages/
+Model/bottleneck/
+Model/retrain_logs/
+Model/variables/
+Model/*.pb_log/
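(The new ignore entries correspond to artifacts generated further down in this commit: `Model/bottleneck/` and `Model/retrain_logs/` match the new retrain.py defaults for `--bottleneck_dir` and `--summaries_dir`, `Model/*.pb_log/` matches the TensorBoard log directories written by `LoadGraphToTB.py`, and `Model/variables/` is where a SavedModel exported under `Model/` keeps its weights.)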
BIN  Images/TestImages/*.jpg — binary image changes; only the file list is recoverable from this view.
New test images added: glass1, glass110, glass113, glass116, glass42, glass44, glass59, metal20, metal24, metal29, metal50, metal54, metal86, paper102, paper16, paper20, paper35, paper52, paper72, paper94, plastic1, plastic32, plastic73, plastic78, plastic91 (all .jpg, roughly 8–42 KiB each).
A number of earlier test images were also removed (their names are not shown in this view).
@@ -1 +1,156 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os

import numpy as np
import tensorflow as tf


def load_graph(model_file):
  graph = tf.Graph()
  graph_def = tf.GraphDef()

  with open(model_file, "rb") as f:
    graph_def.ParseFromString(f.read())
  with graph.as_default():
    tf.import_graph_def(graph_def)

  return graph


def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
  input_name = "file_reader"
  output_name = "normalized"
  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(
        file_reader, channels=3, name="png_reader")
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(
        tf.image.decode_gif(file_reader, name="gif_reader"))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
  else:
    image_reader = tf.image.decode_jpeg(
        file_reader, channels=3, name="jpeg_reader")
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
  normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
  sess = tf.Session()
  result = sess.run(normalized)

  return result


def load_labels(label_file):
  label = []
  proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
  for l in proto_as_ascii_lines:
    label.append(l.rstrip())
  return label


if __name__ == "__main__":
  model_file = "Model/retrained_graph.pb"
  label_file = "Model/retrained_labels.txt"
  input_height = 299
  input_width = 299
  input_mean = 128
  input_std = 128
  input_layer = "input"
  output_layer = "InceptionV3/Predictions/Reshape_1"

  parser = argparse.ArgumentParser()

  parser.add_argument("--graph",
                      default="Model/retrained_graph.pb",
                      help="graph/model to be executed")

  parser.add_argument("--labels",
                      default="Model/retrained_labels.txt",
                      help="name of file containing labels")

  parser.add_argument("--input_height", type=int, help="input height")

  parser.add_argument("--input_width", type=int, help="input width")

  parser.add_argument("--input_mean", type=int, help="input mean")

  parser.add_argument("--input_std", type=int, help="input std")

  parser.add_argument("--input_layer",
                      default="Placeholder",
                      help="name of input layer")

  parser.add_argument("--output_layer",
                      default="final_result",
                      help="name of output layer")

  args = parser.parse_args()

  if args.graph:
    model_file = args.graph
  if args.labels:
    label_file = args.labels
  if args.input_height:
    input_height = args.input_height
  if args.input_width:
    input_width = args.input_width
  if args.input_mean:
    input_mean = args.input_mean
  if args.input_std:
    input_std = args.input_std
  if args.input_layer:
    input_layer = args.input_layer
  if args.output_layer:
    output_layer = args.output_layer

  graph = load_graph(model_file)

  for filename in os.listdir('Images/TestImages'):
    t = read_tensor_from_image_file(
        f'Images/TestImages/{filename}',
        input_height=input_height,
        input_width=input_width,
        input_mean=input_mean,
        input_std=input_std)
    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)

    with tf.Session(graph=graph) as sess:
      results = sess.run(output_operation.outputs[0], {
          input_operation.outputs[0]: t
      })
    results = np.squeeze(results)

    top_k = results.argsort()[-5:][::-1]
    labels = load_labels(label_file)
    print(f'{filename}: {labels[top_k[0]]} with {results[top_k[0]] * 100}% certainty')
    # for i in top_k:
    #     print(labels[i], results[i])
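The loop above rebuilds the operation lookups, opens a new `tf.Session`, and re-reads the label file for every image in `Images/TestImages`. If that becomes slow, a minimal sketch of the same loop with those pieces hoisted out (not part of this commit; uses the same flags and helper functions as above, and the per-image preprocessing inside `read_tensor_from_image_file` still builds its own small graph and session):

```python
# Sketch only: classify all test images with one classification session.
graph = load_graph(model_file)
labels = load_labels(label_file)
input_operation = graph.get_operation_by_name("import/" + input_layer)
output_operation = graph.get_operation_by_name("import/" + output_layer)

with tf.Session(graph=graph) as sess:
    for filename in os.listdir('Images/TestImages'):
        t = read_tensor_from_image_file(
            f'Images/TestImages/{filename}',
            input_height=input_height,
            input_width=input_width,
            input_mean=input_mean,
            input_std=input_std)
        results = np.squeeze(
            sess.run(output_operation.outputs[0],
                     {input_operation.outputs[0]: t}))
        top_k = results.argsort()[-5:][::-1]
        print(f'{filename}: {labels[top_k[0]]} '
              f'with {results[top_k[0]] * 100:.1f}% certainty')
```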
Logic/TrashRecognition/LoadGraphToTB.py (new file)
@@ -0,0 +1,22 @@
import tensorflow as tf
from tensorflow.python.platform import gfile
import os


def load_pb_to_log(filename):
    with tf.Session() as sess:
        with gfile.FastGFile(filename, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            g_in = tf.import_graph_def(graph_def)
        LOGDIR = f'{filename}_log'
        train_writer = tf.summary.FileWriter(LOGDIR)
        train_writer.add_graph(sess.graph)


def main():
    for filename in os.listdir('Model'):
        fname, fext = os.path.splitext(filename)
        if fext == '.pb':
            load_pb_to_log(f'Model/{filename}')


if __name__ == "__main__":
    main()
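For each `<name>.pb` found under `Model/`, this writes a TensorBoard event file containing the imported graph into `Model/<name>.pb_log/` (the directories matched by the new `Model/*.pb_log/` ignore entry); the graph can then be browsed with `tensorboard --logdir Model/<name>.pb_log`.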
@ -15,6 +15,105 @@
|
||||
# NOTICE: This work was derived from tensorflow/examples/image_retraining
|
||||
# and modified to use TensorFlow Hub modules.
|
||||
|
||||
# pylint: disable=line-too-long
|
||||
r"""Simple transfer learning with image modules from TensorFlow Hub.
|
||||
|
||||
This example shows how to train an image classifier based on any
|
||||
TensorFlow Hub module that computes image feature vectors. By default,
|
||||
it uses the feature vectors computed by Inception V3 trained on ImageNet.
|
||||
For more options, search https://tfhub.dev for image feature vector modules.
|
||||
|
||||
The top layer receives as input a 2048-dimensional vector (assuming
|
||||
Inception V3) for each image. We train a softmax layer on top of this
|
||||
representation. If the softmax layer contains N labels, this corresponds
|
||||
to learning N + 2048*N model parameters for the biases and weights.
|
||||
|
||||
Here's an example, which assumes you have a folder containing class-named
|
||||
subfolders, each full of images for each label. The example folder flower_photos
|
||||
should have a structure like this:
|
||||
|
||||
~/flower_photos/daisy/photo1.jpg
|
||||
~/flower_photos/daisy/photo2.jpg
|
||||
...
|
||||
~/flower_photos/rose/anotherphoto77.jpg
|
||||
...
|
||||
~/flower_photos/sunflower/somepicture.jpg
|
||||
|
||||
The subfolder names are important, since they define what label is applied to
|
||||
each image, but the filenames themselves don't matter. (For a working example,
|
||||
download http://download.tensorflow.org/example_images/flower_photos.tgz
|
||||
and run tar xzf flower_photos.tgz to unpack it.)
|
||||
|
||||
Once your images are prepared, and you have pip-installed tensorflow-hub and
|
||||
a sufficiently recent version of tensorflow, you can run the training with a
|
||||
command like this:
|
||||
|
||||
```bash
|
||||
python retrain.py --image_dir ~/flower_photos
|
||||
```
|
||||
|
||||
You can replace the image_dir argument with any folder containing subfolders of
|
||||
images. The label for each image is taken from the name of the subfolder it's
|
||||
in.
|
||||
|
||||
This produces a new model file that can be loaded and run by any TensorFlow
|
||||
program, for example the tensorflow/examples/label_image sample code.
|
||||
|
||||
By default this script will use the highly accurate, but comparatively large and
|
||||
slow Inception V3 model architecture. It's recommended that you start with this
|
||||
to validate that you have gathered good training data, but if you want to deploy
|
||||
on resource-limited platforms, you can try the `--tfhub_module` flag with a
|
||||
Mobilenet model. For more information on Mobilenet, see
|
||||
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
|
||||
|
||||
For example:
|
||||
|
||||
Run floating-point version of Mobilenet:
|
||||
|
||||
```bash
|
||||
python retrain.py --image_dir ~/flower_photos \
|
||||
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/feature_vector/1
|
||||
```
|
||||
|
||||
Run Mobilenet, instrumented for quantization:
|
||||
|
||||
```bash
|
||||
python retrain.py --image_dir ~/flower_photos/ \
|
||||
--tfhub_module https://tfhub.dev/google/imagenet/mobilenet_v1_100_224/quantops/feature_vector/1
|
||||
```
|
||||
|
||||
These instrumented models can be converted to fully quantized mobile models via
|
||||
TensorFlow Lite.
|
||||
|
||||
There are different Mobilenet models to choose from, with a variety of file
|
||||
size and latency options.
|
||||
- The first number can be '100', '075', '050', or '025' to control the number
|
||||
of neurons (activations of hidden layers); the number of weights (and hence
|
||||
to some extent the file size and speed) shrinks with the square of that
|
||||
fraction.
|
||||
- The second number is the input image size. You can choose '224', '192',
|
||||
'160', or '128', with smaller sizes giving faster speeds.
|
||||
|
||||
To use with TensorBoard:
|
||||
|
||||
By default, this script will log summaries to /tmp/retrain_logs directory
|
||||
|
||||
Visualize the summaries with this command:
|
||||
|
||||
tensorboard --logdir /tmp/retrain_logs
|
||||
|
||||
To use with Tensorflow Serving, run this tool with --saved_model_dir set
|
||||
to some increasingly numbered export location under the model base path, e.g.:
|
||||
|
||||
```bash
|
||||
python retrain.py (... other args as before ...) \
|
||||
--saved_model_dir=/tmp/saved_models/$(date +%s)/
|
||||
tensorflow_model_server --port=9000 --model_name=my_image_classifier \
|
||||
--model_base_path=/tmp/saved_models/
|
||||
```
|
||||
"""
|
||||
# pylint: enable=line-too-long
|
||||
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
@ -1070,13 +1169,13 @@ if __name__ == '__main__':
|
||||
parser.add_argument(
|
||||
'--image_dir',
|
||||
type=str,
|
||||
default='../../Images/TrainingImages',
|
||||
default='Images/TrainingImages',
|
||||
help='Path to folders of labeled images.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--output_graph',
|
||||
type=str,
|
||||
default='../../Model/retrained_graph.pb',
|
||||
default='Model/output_graph.pb',
|
||||
help='Where to save the trained graph.'
|
||||
)
|
||||
parser.add_argument(
|
||||
@ -1097,13 +1196,13 @@ if __name__ == '__main__':
|
||||
parser.add_argument(
|
||||
'--output_labels',
|
||||
type=str,
|
||||
default='../../Model/retrained_labels.txt',
|
||||
default='Model/output_labels.txt',
|
||||
help='Where to save the trained graph\'s labels.'
|
||||
)
|
||||
parser.add_argument(
|
||||
'--summaries_dir',
|
||||
type=str,
|
||||
default='/retrain_logs',
|
||||
default='Model/retrain_logs',
|
||||
help='Where to save summary logs for TensorBoard.'
|
||||
)
|
||||
parser.add_argument(
|
||||
@ -1177,7 +1276,7 @@ if __name__ == '__main__':
|
||||
parser.add_argument(
|
||||
'--bottleneck_dir',
|
||||
type=str,
|
||||
default='/tmp/bottleneck',
|
||||
default='Model/bottleneck',
|
||||
help='Path to cache bottleneck layer values as files.'
|
||||
)
|
||||
parser.add_argument(
|
||||
@ -1235,7 +1334,7 @@ if __name__ == '__main__':
|
||||
parser.add_argument(
|
||||
'--saved_model_dir',
|
||||
type=str,
|
||||
default='../../Model',
|
||||
default='',
|
||||
help='Where to save the exported graph.')
|
||||
parser.add_argument(
|
||||
'--logging_verbosity',
|
||||
|
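With these defaults, `python retrain.py` run with no flags from the directory that contains `Images/` and `Model/` (the same working-directory assumption the classification script above makes) should read training data from `Images/TrainingImages`, cache bottlenecks in `Model/bottleneck`, log TensorBoard summaries to `Model/retrain_logs`, and write the retrained graph and labels to `Model/output_graph.pb` and `Model/output_labels.txt`.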
@@ -1,4 +0,0 @@
-glass
-metal
-paper
-plastic
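The deleted four-line file listed the four trash categories (glass, metal, paper, plastic); with the `--output_labels` default changed above, retrain.py now writes the label list to `Model/output_labels.txt` during training. As a worked instance of the docstring's `N + 2048*N` parameter formula, assuming the Inception V3 feature vector and these four classes (illustrative only):

```python
# Parameter count of the retrained softmax layer (sketch, not part of the commit).
num_labels = 4            # glass, metal, paper, plastic
bottleneck_size = 2048    # Inception V3 image feature vector length
params = num_labels + bottleneck_size * num_labels  # biases + weights
print(params)             # 8196
```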