New Hope. Adding photos and search by number

This commit is contained in:
Norbert 2020-06-20 04:01:22 +02:00
parent 28de1ffbff
commit 6d4bbf4bdc
29 changed files with 209 additions and 129 deletions

View File

@ -120,3 +120,5 @@ USE_TZ = True
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "bibrecognition/images")
MEDIA_URL = '/images/'

Binary file not shown.

Binary file not shown.

After

Width:  |  Height:  |  Size: 456 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 456 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 456 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 456 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 140 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 140 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 528 KiB

View File

@ -7,3 +7,9 @@ class PhotoForm(forms.Form):
queryset=Competitions.objects.all(), to_field_name="comp_slug")
file_field = forms.FileField(
widget=forms.ClearableFileInput(attrs={'multiple': True}))
class SearchForm(forms.Form):
    """Form for looking up photos by competition and detected bib number."""
    # Competition, selected/submitted by its slug rather than its pk.
    zawody = forms.ModelChoiceField(
        queryset=Competitions.objects.all(), to_field_name="comp_slug")
    # Bib number — whole numbers only (no decimal places allowed).
    numer = forms.DecimalField(decimal_places=0)

View File

@ -37,7 +37,7 @@ def decode_predictions(scores, geometry):
for x in range(0, numCols):
# if our score does not have sufficient probability,
# ignore it
if scoresData[x] < args["min_confidence"]:
if scoresData[x] < 0.5:
continue
# compute the offset factor as our resulting feature
@ -71,123 +71,65 @@ def decode_predictions(scores, geometry):
return (rects, confidences)
def findNumber():
def findNumber(url):
    """Detect text regions in the image at *url* with the EAST detector,
    OCR each region with Tesseract, and return the purely-numeric strings
    (candidate bib numbers) sorted top-to-bottom.

    Parameters:
        url: filesystem path of the image to process.

    Returns:
        list[str] of OCR'd strings consisting only of digits (may be empty).
    """
    image = cv2.imread(url)
    orig = image.copy()
    (origH, origW) = image.shape[:2]
    # EAST requires input dimensions that are multiples of 32.
    (newW, newH) = (320, 320)
    # Ratios to map detection boxes back onto the original image.
    rW = origW / float(newW)
    rH = origH / float(newH)
    image = cv2.resize(image, (newW, newH))
    (H, W) = image.shape[:2]
    # Output layers: text probabilities and bounding-box geometry.
    layerNames = [
        "feature_fusion/Conv_7/Sigmoid",
        "feature_fusion/concat_3"]
    # NOTE(review): model path is hard-coded relative to the CWD — confirm
    # it resolves correctly when called from the Django app.
    net = cv2.dnn.readNet("../EAST/frozen_east_text_detection.pb")
    blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
        (123.68, 116.78, 103.94), swapRB=True, crop=False)
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    (rects, confidences) = decode_predictions(scores, geometry)
    boxes = non_max_suppression(np.array(rects), probs=confidences)
    results = []
    for (startX, startY, endX, endY) in boxes:
        # Scale the box back to original-image coordinates.
        startX = int(startX * rW)
        startY = int(startY * rH)
        endX = int(endX * rW)
        endY = int(endY * rH)
        # Padding factor is 0.0, so dX/dY are currently no-ops; kept so the
        # padding can be tuned in one place.
        dX = int((endX - startX) * 0.0)
        dY = int((endY - startY) * 0.0)
        startX = max(0, startX - dX)
        startY = max(0, startY - dY)
        endX = min(origW, endX + (dX * 2))
        endY = min(origH, endY + (dY * 2))
        roi = orig[startY:endY, startX:endX]
        # --oem 1: LSTM engine; --psm 7: treat ROI as a single line of text.
        config = ("-l eng --oem 1 --psm 7")
        text = pytesseract.image_to_string(roi, config=config)
        results.append(((startX, startY, endX, endY), text))
    # Sort detections top-to-bottom by the box's y coordinate.
    results = sorted(results, key=lambda r: r[0][1])
    wyniki = []
    for ((startX, startY, endX, endY), text) in results:
        if text.isdigit():
            wyniki.append(text)
    # BUG FIX: previously returned 0, silently discarding every detected
    # number; callers (uploadPhotos) iterate the result.
    return wyniki
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str,
help="path to input image")
ap.add_argument("-east", "--east", type=str, default="./EAST/frozen_east_text_detection.pb",
help="path to input EAST text detector")
ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
help="minimum probability required to inspect a region")
ap.add_argument("-w", "--width", type=int, default=320,
help="nearest multiple of 32 for resized width")
ap.add_argument("-e", "--height", type=int, default=320,
help="nearest multiple of 32 for resized height")
ap.add_argument("-p", "--padding", type=float, default=0.0,
help="amount of padding to add to each border of ROI")
args = vars(ap.parse_args())
# load the input image and grab the image dimensions
image = cv2.imread(args["image"])
orig = image.copy()
(origH, origW) = image.shape[:2]
# set the new width and height and then determine the ratio in change
# for both the width and height
(newW, newH) = (args["width"], args["height"])
rW = origW / float(newW)
rH = origH / float(newH)
# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]
# define the two output layer names for the EAST detector model that
# we are interested -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
layerNames = [
"feature_fusion/Conv_7/Sigmoid",
"feature_fusion/concat_3"]
# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])
# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
(123.68, 116.78, 103.94), swapRB=True, crop=False)
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
# decode the predictions, then apply non-maxima suppression to
# suppress weak, overlapping bounding boxes
(rects, confidences) = decode_predictions(scores, geometry)
boxes = non_max_suppression(np.array(rects), probs=confidences)
# initialize the list of results
results = []
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# scale the bounding box coordinates based on the respective
# ratios
startX = int(startX * rW)
startY = int(startY * rH)
endX = int(endX * rW)
endY = int(endY * rH)
# in order to obtain a better OCR of the text we can potentially
# apply a bit of padding surrounding the bounding box -- here we
# are computing the deltas in both the x and y directions
dX = int((endX - startX) * args["padding"])
dY = int((endY - startY) * args["padding"])
# apply padding to each side of the bounding box, respectively
startX = max(0, startX - dX)
startY = max(0, startY - dY)
endX = min(origW, endX + (dX * 2))
endY = min(origH, endY + (dY * 2))
# extract the actual padded ROI
roi = orig[startY:endY, startX:endX]
# in order to apply Tesseract v4 to OCR text we must supply
# (1) a language, (2) an OEM flag of 4, indicating that the we
# wish to use the LSTM neural net model for OCR, and finally
# (3) an OEM value, in this case, 7 which implies that we are
# treating the ROI as a single line of text
config = ("-l eng --oem 1 --psm 7")
text = pytesseract.image_to_string(roi, config=config)
# add the bounding box coordinates and OCR'd text to the list
# of results
results.append(((startX, startY, endX, endY), text))
# sort the results bounding box coordinates from top to bottom
results = sorted(results, key=lambda r: r[0][1])
# loop over the results
for ((startX, startY, endX, endY), text) in results:
# display the text OCR'd by Tesseract
print("OCR TEXT")
print("========")
print("{}\n".format(text))
# strip out non-ASCII text so we can draw the text on the image
# using OpenCV, then draw the text and a bounding box surrounding
# the text region of the input image
text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
output = orig.copy()
cv2.rectangle(output, (startX, startY), (endX, endY),
(0, 0, 255), 2)
cv2.putText(output, text, (startX, startY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3)
# show the output image
cv2.imshow("Text Detection", output)
cv2.waitKey(0)
return wyniki

View File

@ -0,0 +1,17 @@
# Generated by Django 3.0.3 on 2020-06-19 23:49

from django.db import migrations


class Migration(migrations.Migration):
    """Drop the redundant Photo.url column; the path lives on the ImageField."""

    dependencies = [
        ('imguploader', '0003_competitions_status'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='photo',
            name='url',
        ),
    ]

View File

@ -0,0 +1,19 @@
# Generated by Django 3.0.3 on 2020-06-20 00:27

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Add PhotoMeta.comp_id FK so metadata can be filtered per competition.

    Nullable because existing PhotoMeta rows have no competition assigned.
    """

    dependencies = [
        ('imguploader', '0004_remove_photo_url'),
    ]

    operations = [
        migrations.AddField(
            model_name='photometa',
            name='comp_id',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='imguploader.Competitions'),
        ),
    ]

View File

@ -1,8 +1,8 @@
from django.db import models
class PhotoManager(models.Manager):
    """Manager providing a convenience constructor for Photo rows."""

    def create_photo(self, comp_id, name, image):
        """Create, save, and return a Photo.

        The old 4-argument form taking a ``url`` was removed along with the
        Photo.url column (migration 0004); the stale duplicate definition
        that shadowed this one has been dropped.
        """
        photo = self.create(comp_id=comp_id, name=name, image=image)
        return photo
# Create your models here.
@ -18,10 +18,15 @@ class Photo(models.Model):
comp_id = models.ForeignKey(Competitions, on_delete=models.CASCADE)
name = models.CharField(max_length=100, default='Zdjecie')
image = models.ImageField(upload_to='images/', default='placeholder.jpg')
url = models.CharField(max_length=50)
# url = models.CharField(max_length=50)
objects = PhotoManager()
def __str__(self):
return self.name
class PhotoMeta(models.Model):
    """Key/value metadata attached to a Photo.

    Used to store detected bib numbers (meta_key="detect_number").
    comp_id is nullable: it was added after rows already existed.
    """
    comp_id = models.ForeignKey(Competitions, on_delete=models.CASCADE, null=True)
    photo_id = models.ForeignKey(Photo, on_delete=models.CASCADE)
    meta_key = models.CharField(max_length=50)
    meta_value = models.CharField(max_length=50)

View File

@ -7,9 +7,12 @@
</head>
<body>
{% if user.is_authenticated %}
Zalogowany 😎
Zalogowany 😎 <br />
<a href="{% url 'upload' %}">Załaduj zdjęcia</a><br />
<a href="{% url 'search' %}">Przeszukaj bazę</a>
{% else %}
Gość 🏃‍♀️
Gość 🏃‍♀️<br />
<a href="{% url 'search' %}">Przeszukaj bazę</a>
{% endif %}
</body>
</html>

View File

@ -0,0 +1,27 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Search Photos</title>
</head>
<body>
    {% if form %}
    {# Resolve the route by name instead of hard-coding the path, #}
    {# matching the {% url %} usage in index.html. #}
    <form action="{% url 'search' %}" method="post">
        {% csrf_token %}
        {{ form }}
        <input type="submit" value="Submit">
    </form>
    {% endif %}
    {% if foto %}
    <ul>
        {% for n in foto %}
        {# alt attribute added for accessibility/validity. #}
        <li><img src="{{ n }}" alt="Race photo" /></li>
        {% endfor %}
    </ul>
    {% endif %}
</body>
</html>

View File

@ -1,8 +1,16 @@
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings

from . import views

# Routes for the image-uploader app: landing page, multi-photo upload,
# and search by detected bib number.
urlpatterns = [
    path('', views.index, name="index"),
    path('upload', views.uploadPhotos, name="upload"),
    path('search', views.searchPhotos, name="search"),
]

# Serve uploaded media files via MEDIA_URL (development helper).
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

View File

@ -1,13 +1,15 @@
from django.shortcuts import render
from django.http import HttpResponse
from .forms import PhotoForm
from .forms import SearchForm
from django.http import HttpResponseRedirect
from .models import PhotoManager
from .models import Photo
from .models import Competitions
from .models import PhotoMeta
# from .functions import test
from .functions import findNumber
# Create your views here.
@ -25,15 +27,61 @@ def uploadPhotos(request):
for f in files:
zawody = Competitions.objects.get(comp_slug=comp)
photo = Photo.objects.create_photo(zawody,f,f,'test')
# photo = Photo.objects.create_photo(zawody,comp+"_"+f,f)
file_name = comp+"_"+f.name
photo = Photo(comp_id=zawody, name=file_name, image=f)
photo.save(force_insert=True)
# print("URL of photo: "+photo.image.url)
numbers = findNumber(photo.image.url)
for nr in numbers:
pm = PhotoMeta(comp_id=zawody, photo_id=photo, meta_key="detect_number", meta_value=nr)
pm.save(force_insert=True)
# return self.form_valid(form)
return HttpResponseRedirect('/success/url/')
return HttpResponseRedirect('/success/')
else:
# return self.form_invalid(form)
# form.save()
# return render(request, print(request.FILES['file_field']))
return HttpResponseRedirect('/faild/url/')
return HttpResponseRedirect('/failed/')
else:
form = PhotoForm()
return render(request, 'upload.html', {'form': form})
# return HttpResponse("Hello, world. This is imageUploader")
def searchPhotos(request):
    """Search a competition's photos by detected bib number.

    GET: render an empty SearchForm.
    POST: validate the form, find PhotoMeta rows whose detected number
    matches, and render the matching photos' URLs; redirect to /failed/
    on invalid input, or /success/ when nothing matches (preserving the
    original flow).
    """
    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            # BUG FIX: read validated values from cleaned_data instead of
            # raw request.POST (which could KeyError and skipped validation).
            # ModelChoiceField already resolves the Competitions instance.
            zawody = form.cleaned_data['zawody']
            numer = form.cleaned_data['numer']
            # filter() never raises DoesNotExist — it returns a (possibly
            # empty) queryset — so the old try/except was dead code.
            # Also constrain to the "detect_number" key that uploadPhotos
            # writes, so unrelated metadata can't match.
            zdjecia = PhotoMeta.objects.filter(
                comp_id=zawody, meta_key="detect_number",
                meta_value=str(numer))
            if zdjecia:
                imgUrls = []
                for zdjecie in zdjecia:
                    # .url (not .name) yields a browser-loadable path that
                    # goes through the MEDIA_URL static route.
                    imgUrls.append(zdjecie.photo_id.image.url)
                return render(request, 'search.html', {'foto': imgUrls})
            # No matches: keep the original redirect behavior.
            return HttpResponseRedirect('/success/')
        else:
            return HttpResponseRedirect('/failed/')
    else:
        form = SearchForm()
        return render(request, 'search.html', {'form': form})

BIN
imgs/bib_03_bw.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 261 KiB

BIN
imgs/bib_04.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 528 KiB

View File

@ -169,12 +169,14 @@ for (startX, startY, endX, endY) in boxes:
results = sorted(results, key=lambda r: r[0][1])
# loop over the results
for ((startX, startY, endX, endY), text) in results:
# display the text OCR'd by Tesseract
print("OCR TEXT")
print("========")
print("{}\n".format(text))
# strip out non-ASCII text so we can draw the text on the image
# using OpenCV, then draw the text and a bounding box surrounding
# the text region of the input image
@ -188,3 +190,4 @@ for ((startX, startY, endX, endY), text) in results:
# show the output image
cv2.imshow("Text Detection", output)
cv2.waitKey(0)