modify for tomatos

Author: anetla, 2023-12-28 18:40:41 +01:00
parent 27873f727a
commit 9293dc57e2
7 changed files with 240 additions and 44 deletions

File diff suppressed because one or more lines are too long

Binary image file not shown (2.6 MiB before, 2.6 MiB after).

Changed file: README

@@ -1,20 +1,17 @@
-# Traffic Lights Object Detector using YOLOv8 neural network
+# Tomato maturity detection web application
 <div align="center">
-<a href="https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c">
-<img src="https://res.cloudinary.com/practicaldev/image/fetch/s--mZ1E0vOa--/c_imagga_scale,f_auto,fl_progressive,h_420,q_auto,w_1000/https://dev-to-uploads.s3.amazonaws.com/uploads/articles/n2auv9i8405cgnxhru40.png"/>
-</a>
+<img src="example_picture.png"/>
 </div>
 
-The source code for [this](https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c) article.
+This is a modified code from [this](https://dev.to/andreygermanov/a-practical-introduction-to-object-detection-with-yolov8-neural-network-3n8c) article.
 
 This is a web interface to [YOLOv8 object detection neural network](https://ultralytics.com/yolov8)
-implemented on [Python](https://www.python.org) that uses a model to detect traffic lights and road signs on images.
+implemented on [Python](https://www.python.org) that uses a model to detect tomato maturity and road signs on images.
 
 ## Install
 
-* Clone this repository: `git clone git@github.com:AndreyGermanov/yolov8_pytorch_python.git`
+* Cloning instruction coming soon
 * Go to the root of cloned repository
 * Install dependencies by running `pip3 install -r requirements.txt`
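The backend changed later in this commit exposes a single `/detect` route that accepts an `image_file` upload and returns the detected boxes as JSON. As a usage sketch (not part of the commit), assuming the server is running locally on the port passed to `serve()` (8080) and that the `requests` package is installed:

```python
# Sketch of a client call against the /detect route added in this commit.
# Assumes the backend is running locally on port 8080 and that the
# `requests` package is available (it is not part of the diff).
import requests

with open("example_picture.png", "rb") as f:
    # the Flask route reads the upload from the "image_file" form field
    response = requests.post("http://localhost:8080/detect",
                             files={"image_file": f})

# The endpoint returns a JSON array of [x1, y1, x2, y2, label, probability]
for x1, y1, x2, y2, label, prob in response.json():
    print(f"{label}: {prob:.2f} at ({x1:.0f}, {y1:.0f})-({x2:.0f}, {y2:.0f})")
```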

Two binary files not shown. A new image file was added (1.8 MiB).

Changed file: web frontend (HTML page)

@@ -4,42 +4,44 @@
     <meta charset="UTF-8">
     <title>YOLOv8 Object Detection</title>
     <style>
         canvas {
             display:block;
             border: 1px solid black;
             margin-top:10px;
         }
     </style>
 </head>
 <body>
     <input id="uploadInput" type="file"/>
     <canvas></canvas>
     <script>
        /**
-        * "Upload" button onClick handler: uploads selected image file
-        * to backend, receives array of detected objects
-        * and draws them on top of image
+        * "Upload" button onClick handler: uploads selected
+        * image file to backend, receives an array of
+        * detected objects and draws them on top of image
         */
        const input = document.getElementById("uploadInput");
        input.addEventListener("change",async(event) => {
+           const file = event.target.files[0];
            const data = new FormData();
-           data.append("image_file",event.target.files[0],"image_file");
+           data.append("image_file",file,"image_file");
            const response = await fetch("/detect",{
                method:"post",
                body:data
            });
            const boxes = await response.json();
-           draw_image_and_boxes(event.target.files[0],boxes);
+           draw_image_and_boxes(file,boxes);
        })
        /**
         * Function draws the image from provided file
         * and bounding boxes of detected objects on
         * top of the image
         * @param file Uploaded file object
-        * @param boxes Array of bounding boxes in format [[x1,y1,x2,y2,object_type,probability],...]
+        * @param boxes Array of bounding boxes in format
+        *   [[x1,y1,x2,y2,object_type,probability],...]
         */
        function draw_image_and_boxes(file,boxes) {
            const img = new Image()
            img.src = URL.createObjectURL(file);
            img.onload = () => {
@@ -49,18 +51,20 @@
                const ctx = canvas.getContext("2d");
                ctx.drawImage(img,0,0);
                ctx.strokeStyle = "#00FF00";
-               ctx.lineWidth = 3;
-               ctx.font = "18px serif";
-               boxes.forEach(([x1,y1,x2,y2,label]) => {
+               ctx.lineWidth = 5;
+               ctx.font = "20px serif";
+               boxes.forEach(([x1,y1,x2,y2,object_type, prob]) => {
+                   const label = `${object_type} ${prob.toFixed(2)}`;
                    ctx.strokeRect(x1,y1,x2-x1,y2-y1);
                    ctx.fillStyle = "#00ff00";
                    const width = ctx.measureText(label).width;
                    ctx.fillRect(x1,y1,width+10,25);
                    ctx.fillStyle = "#000000";
-                   ctx.fillText(label, x1, y1+18);
+                   ctx.fillText(label,x1,y1+18);
                });
            }
        }
     </script>
 </body>
 </html>
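The page above consumes the `/detect` response as an array of `[x1,y1,x2,y2,object_type,probability]` entries and draws them on a canvas. For checking results outside the browser, a rough Pillow equivalent may help; this is illustrative only (Pillow is already a backend dependency, but this helper and its names are not part of the commit):

```python
# Illustrative sketch: reproduce the canvas drawing above with Pillow.
# Not part of the commit; the function name and file paths are made up.
from PIL import Image, ImageDraw

def draw_boxes(image_path, boxes, out_path="annotated.png"):
    """Draw [x1, y1, x2, y2, label, prob] boxes the way the HTML page does."""
    img = Image.open(image_path).convert("RGB")
    draw = ImageDraw.Draw(img)
    for x1, y1, x2, y2, label, prob in boxes:
        draw.rectangle([x1, y1, x2, y2], outline="#00ff00", width=5)
        # label background then black text, mirroring fillRect/fillText
        text = f"{label} {prob:.2f}"
        left, top, right, bottom = draw.textbbox((x1, y1), text)
        draw.rectangle([x1, y1, x1 + (right - left) + 10, y1 + 25], fill="#00ff00")
        draw.text((x1 + 5, y1 + 4), text, fill="#000000")
    img.save(out_path)
    return out_path
```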

Changed file: backend server (Python/Flask)

@@ -2,6 +2,8 @@ from ultralytics import YOLO
 from flask import request, Flask, jsonify
 from waitress import serve
 from PIL import Image
+import onnxruntime as ort
+import numpy as np
 
 #my changes
 import os
@@ -10,6 +12,14 @@ script_dir = os.path.dirname(os.path.abspath(__file__))
 # Change the working directory to the script's directory
 os.chdir(script_dir)
 
+yolo_classes = ["b_fully_ripened",
+                "b_half_ripened",
+                "b_green",
+                "l_fully_ripened",
+                "l_half_ripened",
+                "l_green"
+                ]
+
 #app start
 app = Flask(__name__)
@@ -35,11 +45,86 @@ def detect():
     """
     buf = request.files["image_file"]
     boxes = detect_objects_on_image(buf.stream)
+    print(boxes)
     return jsonify(boxes)
 
 
 def detect_objects_on_image(buf):
-    """
+    input, img_width, img_height = prepare_input(buf)
+    output = run_model(input)
+    return process_output(output,img_width,img_height)
+
+
+def prepare_input(buf):
+    img = Image.open(buf)
+    img_width, img_height = img.size
+    img = img.resize((640, 640))
+    img = img.convert("RGB")
+    input = np.array(img)
+    input = input.transpose(2, 0, 1)
+    input = input.reshape(1, 3, 640, 640) / 255.0
+    return input.astype(np.float32), img_width, img_height
+
+
+def run_model(input):
+    model = ort.InferenceSession("best.onnx", providers=['CPUExecutionProvider'])
+    outputs = model.run(["output0"], {"images":input})
+    return outputs[0]
+
+
+def process_output(output, img_width, img_height):
+    output = output[0].astype(float)
+    output = output.transpose()
+
+    boxes = []
+    for row in output:
+        prob = row[4:].max()
+        if prob < 0.5:
+            continue
+        class_id = row[4:].argmax()
+        label = yolo_classes[class_id]
+        xc, yc, w, h = row[:4]
+        x1 = (xc - w/2) / 640 * img_width
+        y1 = (yc - h/2) / 640 * img_height
+        x2 = (xc + w/2) / 640 * img_width
+        y2 = (yc + h/2) / 640 * img_height
+        rotated_x1 = img_height - y2
+        rotated_y1 = x1
+        rotated_x2 = img_height - y1
+        rotated_y2 = x2
+        boxes.append([rotated_x1, rotated_y1, rotated_x2, rotated_y2, label, prob])
+        #boxes.append([x1, y1, x2, y2, label, prob])
+
+    boxes.sort(key=lambda x: x[5], reverse=True)
+    result = []
+    while len(boxes) > 0:
+        result.append(boxes[0])
+        boxes = [box for box in boxes if iou(box, boxes[0]) < 0.7]
+    return result
+
+
+def iou(box1,box2):
+    return intersection(box1,box2)/union(box1,box2)
+
+
+def union(box1,box2):
+    box1_x1,box1_y1,box1_x2,box1_y2 = box1[:4]
+    box2_x1,box2_y1,box2_x2,box2_y2 = box2[:4]
+    box1_area = (box1_x2-box1_x1)*(box1_y2-box1_y1)
+    box2_area = (box2_x2-box2_x1)*(box2_y2-box2_y1)
+    return box1_area + box2_area - intersection(box1,box2)
+
+
+def intersection(box1,box2):
+    box1_x1,box1_y1,box1_x2,box1_y2 = box1[:4]
+    box2_x1,box2_y1,box2_x2,box2_y2 = box2[:4]
+    x1 = max(box1_x1,box2_x1)
+    y1 = max(box1_y1,box2_y1)
+    x2 = min(box1_x2,box2_x2)
+    y2 = min(box1_y2,box2_y2)
+    return (x2-x1)*(y2-y1)
""" def detect_objects_on_image(buf):
"""
""""
Function receives an image, Function receives an image,
passes it through YOLOv8 neural network passes it through YOLOv8 neural network
and returns an array of detected objects and returns an array of detected objects
@ -47,6 +132,7 @@ def detect_objects_on_image(buf):
:param buf: Input image file stream :param buf: Input image file stream
:return: Array of bounding boxes in format [[x1,y1,x2,y2,object_type,probability],..] :return: Array of bounding boxes in format [[x1,y1,x2,y2,object_type,probability],..]
""" """
"""""
model = YOLO("best.pt") model = YOLO("best.pt")
results = model.predict(Image.open(buf)) results = model.predict(Image.open(buf))
result = results[0] result = results[0]
@ -62,5 +148,5 @@ def detect_objects_on_image(buf):
]) ])
return output return output
"""
serve(app, host='0.0.0.0', port=8080) serve(app, host='0.0.0.0', port=8080)
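The filtering loop at the end of `process_output()` is a greedy non-maximum suppression built on the `iou()`, `union()` and `intersection()` helpers in the diff. A self-contained toy run of that logic is sketched below; the `max(0, ...)` clamp in `intersection()` is an addition of this sketch, not part of the committed code, and keeps fully disjoint boxes from scoring a spurious positive overlap:

```python
# Toy run of the greedy IoU filtering used in process_output() above.
# Boxes are [x1, y1, x2, y2, label, prob]; the clamp in intersection()
# is an addition here, not present in the committed code.
def intersection(box1, box2):
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    return max(0, x2 - x1) * max(0, y2 - y1)

def union(box1, box2):
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    return area1 + area2 - intersection(box1, box2)

def iou(box1, box2):
    return intersection(box1, box2) / union(box1, box2)

boxes = [
    [0, 0, 10, 10, "b_green", 0.9],
    [1, 1, 10, 10, "b_green", 0.8],    # IoU with the first box is 81/100 = 0.81
    [50, 50, 60, 60, "l_green", 0.7],  # no overlap with the others
]

# Same loop as in process_output(): keep the most confident box,
# drop everything that overlaps it with IoU >= 0.7, repeat.
boxes.sort(key=lambda box: box[5], reverse=True)
result = []
while len(boxes) > 0:
    result.append(boxes[0])
    boxes = [box for box in boxes if iou(box, boxes[0]) < 0.7]

print(result)  # the 0.8 box is suppressed; the 0.9 and 0.7 boxes remain
```

The `rotated_*` assignments in `process_output()` appear to rotate each box by 90 degrees (mapping x, y to img_height - y, x); the commented-out `boxes.append` line keeps the unrotated coordinates.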