change object_detector.py
commit 5397b09a13, parent 07c6da6eed
--- a/object_detector.py
+++ b/object_detector.py
@@ -1,4 +1,4 @@
-from ultralytics import YOLO
+#from ultralytics import YOLO
 from flask import request, Flask, jsonify
 from waitress import serve
 from PIL import Image
@@ -44,14 +44,17 @@ def detect():
     :return: a JSON array of objects bounding boxes in format [[x1,y1,x2,y2,object_type,probability],..]
     """
     buf = request.files["image_file"]
-    boxes = detect_objects_on_image(buf.stream)
-    print(boxes)
+    boxes, orientation = detect_objects_on_image(buf.stream)
+    #print(boxes)
+    #print(orientation)
     return jsonify(boxes)

 def detect_objects_on_image(buf):
     input, img_width, img_height = prepare_input(buf)
     output = run_model(input)
-    return process_output(output,img_width,img_height)
+    orientation = get_orientation(buf)
+    processed_output = process_output(output, img_width, img_height, orientation)
+    return processed_output, orientation

 def prepare_input(buf):
     img = Image.open(buf)
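For reference, the updated endpoint can be exercised with a small client. This is a sketch, not part of the commit: the URL and port are assumptions (the route decorator and the waitress serve() call sit outside this diff), and test.jpg is a placeholder; only the "image_file" field name and the JSON box format come from the code above.

import requests

with open("test.jpg", "rb") as f:                 # placeholder image path
    resp = requests.post(
        "http://localhost:8080/detect",           # assumed URL; route and port are not shown in this diff
        files={"image_file": f},                  # field name matches request.files["image_file"]
    )

# The handler still returns jsonify(boxes): a list of [x1, y1, x2, y2, label, prob] entries.
for x1, y1, x2, y2, label, prob in resp.json():
    print(f"{label}: {prob:.2f} at ({x1:.0f}, {y1:.0f})-({x2:.0f}, {y2:.0f})")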
@@ -68,7 +71,7 @@ def run_model(input):
     outputs = model.run(["output0"], {"images":input})
     return outputs[0]

-def process_output(output, img_width, img_height):
+def process_output(output, img_width, img_height, orientation):
     output = output[0].astype(float)
     output = output.transpose()

@@ -77,6 +80,7 @@ def process_output(output, img_width, img_height):
         prob = row[4:].max()
         if prob < 0.5:
             continue
+
         class_id = row[4:].argmax()
         label = yolo_classes[class_id]
         xc, yc, w, h = row[:4]
@@ -85,20 +89,18 @@ def process_output(output, img_width, img_height):
         x2 = (xc + w/2) / 640 * img_width
         y2 = (yc + h/2) / 640 * img_height

-        rotated_x1 = img_height - y2
-        rotated_y1 = x1
-        rotated_x2 = img_height - y1
-        rotated_y2 = x2
+        boxes.append([x1, y1, x2, y2, label, prob])

-        boxes.append([rotated_x1, rotated_y1, rotated_x2, rotated_y2, label, prob])
+    # Adjust boxes based on orientation
+    adjusted_boxes = adjust_boxes_for_orientation(boxes, orientation, img_width, img_height)

-        #boxes.append([x1, y1, x2, y2, label, prob])
-    boxes.sort(key=lambda x: x[5], reverse=True)
+    # Sort and apply non-max suppression as before
+    adjusted_boxes.sort(key=lambda x: x[5], reverse=True)
     result = []
-    while len(boxes) > 0:
-        result.append(boxes[0])
-        boxes = [box for box in boxes if iou(box, boxes[0]) < 0.7]
+    while len(adjusted_boxes) > 0:
+        result.append(adjusted_boxes[0])
+        adjusted_boxes = [box for box in adjusted_boxes if iou(box, adjusted_boxes[0]) < 0.7]

     return result

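The sort-and-filter loop above is a greedy non-max suppression: detections are ordered by confidence, the strongest box is kept, and every remaining box whose IoU with it is 0.7 or more is dropped. A minimal standalone sketch of the same idea, with an inline IoU helper and made-up boxes (the file's own iou()/union() helpers are outside this hunk):

def iou_xyxy(a, b):
    # Intersection-over-union for two [x1, y1, x2, y2, ...] boxes.
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

boxes = [
    [10, 10, 110, 110, "cat", 0.9],
    [12, 12, 108, 108, "cat", 0.8],   # heavy overlap with the first box -> suppressed
    [200, 200, 300, 300, "dog", 0.7],
]
boxes.sort(key=lambda x: x[5], reverse=True)
result = []
while len(boxes) > 0:
    result.append(boxes[0])
    boxes = [box for box in boxes if iou_xyxy(box, boxes[0]) < 0.7]

print(result)  # keeps the 0.9 cat and the 0.7 dog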
@@ -121,6 +123,31 @@ def intersection(box1,box2):
     y2 = min(box1_y2,box2_y2)
     return (x2-x1)*(y2-y1)

+def get_orientation(image_path):
+    with Image.open(image_path) as img:
+        if hasattr(img, '_getexif'):
+            exif_data = img._getexif()
+            if exif_data is not None:
+                return exif_data.get(274, 1)  # Default to normal orientation
+    return 1  # Default orientation if no EXIF data
+
+def adjust_boxes_for_orientation(boxes, orientation, img_width, img_height):
+    adjusted_boxes = []
+    for box in boxes:
+        x1, y1, x2, y2, label, prob = box
+
+        # Apply transformations based on orientation
+        if orientation == 3:  # 180 degrees
+            x1, y1, x2, y2 = img_width - x2, img_height - y2, img_width - x1, img_height - y1
+        elif orientation == 6:  # 270 degrees (or -90 degrees)
+            x1, y1, x2, y2 = img_height - y2, x1, img_height - y1, x2
+        elif orientation == 8:  # 90 degrees
+            x1, y1, x2, y2 = y1, img_width - x2, y2, img_width - x1
+
+        adjusted_boxes.append([x1, y1, x2, y2, label, prob])
+
+    return adjusted_boxes
+

 """ def detect_objects_on_image(buf):
 """
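As a quick sanity check on the new orientation handling, the snippet below inlines the orientation == 6 branch of adjust_boxes_for_orientation() from the hunk above and runs it on made-up numbers, so it executes without loading the model in object_detector.py. The image size and box values are illustrative only; 274 is the EXIF Orientation tag read by get_orientation().

img_width, img_height = 640, 480                   # made-up image size
box = [100.0, 50.0, 200.0, 150.0, "person", 0.9]   # made-up detection

orientation = 6                                    # EXIF tag 274: 1 = normal, 3 = 180 deg, 6/8 = 90 deg rotations
x1, y1, x2, y2, label, prob = box
if orientation == 3:
    x1, y1, x2, y2 = img_width - x2, img_height - y2, img_width - x1, img_height - y1
elif orientation == 6:
    x1, y1, x2, y2 = img_height - y2, x1, img_height - y1, x2
elif orientation == 8:
    x1, y1, x2, y2 = y1, img_width - x2, y2, img_width - x1

print([x1, y1, x2, y2, label, prob])               # [330.0, 100.0, 430.0, 200.0, 'person', 0.9]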