diff --git a/bibrecognition/bibrecognition/__pycache__/settings.cpython-38.pyc b/bibrecognition/bibrecognition/__pycache__/settings.cpython-38.pyc
index c71a951..ee97199 100644
Binary files a/bibrecognition/bibrecognition/__pycache__/settings.cpython-38.pyc and b/bibrecognition/bibrecognition/__pycache__/settings.cpython-38.pyc differ
diff --git a/bibrecognition/bibrecognition/settings.py b/bibrecognition/bibrecognition/settings.py
index 7e68fa1..fe55ebe 100644
--- a/bibrecognition/bibrecognition/settings.py
+++ b/bibrecognition/bibrecognition/settings.py
@@ -120,3 +120,5 @@ USE_TZ = True
 STATIC_URL = '/static/'
+MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "bibrecognition/images")
+MEDIA_URL = '/images/'
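Review note: the new MEDIA_ROOT / MEDIA_URL pair is what makes ImageField(upload_to='images/') files land under bibrecognition/images/ and become reachable at /images/<name>, together with the static() helper appended to imguploader/urls.py further down in this diff. A minimal sketch of how the path resolves, assuming the stock Django 3.0 BASE_DIR definition (not shown in this diff):

    import os

    # Stock Django 3.0 settings.py: BASE_DIR is the directory that contains manage.py
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    # One level above BASE_DIR, then into bibrecognition/images -- the directory
    # the bib_0*.jpg files in this diff were committed to
    MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "bibrecognition/images")
    MEDIA_URL = '/images/'
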
diff --git a/bibrecognition/db.sqlite3 b/bibrecognition/db.sqlite3
index 286da05..748ac26 100644
Binary files a/bibrecognition/db.sqlite3 and b/bibrecognition/db.sqlite3 differ
diff --git a/bibrecognition/images/bib_01.jpg b/bibrecognition/images/bib_01.jpg
new file mode 100644
index 0000000..4600e07
Binary files /dev/null and b/bibrecognition/images/bib_01.jpg differ
diff --git a/bibrecognition/images/bib_01_GBMDqei.jpg b/bibrecognition/images/bib_01_GBMDqei.jpg
new file mode 100644
index 0000000..4600e07
Binary files /dev/null and b/bibrecognition/images/bib_01_GBMDqei.jpg differ
diff --git a/bibrecognition/images/bib_01_L2ZLOit.jpg b/bibrecognition/images/bib_01_L2ZLOit.jpg
new file mode 100644
index 0000000..4600e07
Binary files /dev/null and b/bibrecognition/images/bib_01_L2ZLOit.jpg differ
diff --git a/bibrecognition/images/bib_01_SMItOxE.jpg b/bibrecognition/images/bib_01_SMItOxE.jpg
new file mode 100644
index 0000000..4600e07
Binary files /dev/null and b/bibrecognition/images/bib_01_SMItOxE.jpg differ
diff --git a/bibrecognition/images/bib_03.jpg b/bibrecognition/images/bib_03.jpg
new file mode 100644
index 0000000..f0ca984
Binary files /dev/null and b/bibrecognition/images/bib_03.jpg differ
diff --git a/bibrecognition/images/bib_03_nr7BMDD.jpg b/bibrecognition/images/bib_03_nr7BMDD.jpg
new file mode 100644
index 0000000..f0ca984
Binary files /dev/null and b/bibrecognition/images/bib_03_nr7BMDD.jpg differ
diff --git a/bibrecognition/images/bib_04.jpg b/bibrecognition/images/bib_04.jpg
new file mode 100644
index 0000000..c266258
Binary files /dev/null and b/bibrecognition/images/bib_04.jpg differ
diff --git a/bibrecognition/imguploader/__pycache__/forms.cpython-38.pyc b/bibrecognition/imguploader/__pycache__/forms.cpython-38.pyc
index 9587776..3137f72 100644
Binary files a/bibrecognition/imguploader/__pycache__/forms.cpython-38.pyc and b/bibrecognition/imguploader/__pycache__/forms.cpython-38.pyc differ
diff --git a/bibrecognition/imguploader/__pycache__/functions.cpython-38.pyc b/bibrecognition/imguploader/__pycache__/functions.cpython-38.pyc
index 819f739..49ddef4 100644
Binary files a/bibrecognition/imguploader/__pycache__/functions.cpython-38.pyc and b/bibrecognition/imguploader/__pycache__/functions.cpython-38.pyc differ
diff --git a/bibrecognition/imguploader/__pycache__/models.cpython-38.pyc b/bibrecognition/imguploader/__pycache__/models.cpython-38.pyc
index c6457be..2d71fee 100644
Binary files a/bibrecognition/imguploader/__pycache__/models.cpython-38.pyc and b/bibrecognition/imguploader/__pycache__/models.cpython-38.pyc differ
diff --git a/bibrecognition/imguploader/__pycache__/urls.cpython-38.pyc b/bibrecognition/imguploader/__pycache__/urls.cpython-38.pyc
index b0b8a69..db3c30d 100644
Binary files a/bibrecognition/imguploader/__pycache__/urls.cpython-38.pyc and b/bibrecognition/imguploader/__pycache__/urls.cpython-38.pyc differ
diff --git a/bibrecognition/imguploader/__pycache__/views.cpython-38.pyc b/bibrecognition/imguploader/__pycache__/views.cpython-38.pyc
index 95c64b3..cbf86ea 100644
Binary files a/bibrecognition/imguploader/__pycache__/views.cpython-38.pyc and b/bibrecognition/imguploader/__pycache__/views.cpython-38.pyc differ
diff --git a/bibrecognition/imguploader/forms.py b/bibrecognition/imguploader/forms.py
index 330a1d6..9adf0d9 100644
--- a/bibrecognition/imguploader/forms.py
+++ b/bibrecognition/imguploader/forms.py
@@ -7,3 +7,9 @@ class PhotoForm(forms.Form):
         queryset=Competitions.objects.all(), to_field_name="comp_slug")
     file_field = forms.FileField(
         widget=forms.ClearableFileInput(attrs={'multiple': True}))
+
+
+class SearchForm(forms.Form):
+    zawody = forms.ModelChoiceField(
+        queryset=Competitions.objects.all(), to_field_name="comp_slug")
+    numer = forms.DecimalField(decimal_places=0)
diff --git a/bibrecognition/imguploader/functions.py b/bibrecognition/imguploader/functions.py
index 5ccc6a1..231572b 100644
--- a/bibrecognition/imguploader/functions.py
+++ b/bibrecognition/imguploader/functions.py
@@ -37,7 +37,7 @@ def decode_predictions(scores, geometry):
     for x in range(0, numCols):
         # if our score does not have sufficient probability,
         # ignore it
-        if scoresData[x] < args["min_confidence"]:
+        if scoresData[x] < 0.5:
            continue
 
         # compute the offset factor as our resulting feature
@@ -71,123 +71,65 @@ def decode_predictions(scores, geometry):
     return (rects, confidences)
 
 
-def findNumber():
+def findNumber(url):
+    image = cv2.imread(url)
+    orig = image.copy()
+    (origH, origW) = image.shape[:2]
+    (newW, newH) = (320,320)
+    rW = origW / float(newW)
+    rH = origH / float(newH)
+    image = cv2.resize(image, (newW, newH))
+    (H, W) = image.shape[:2]
+    layerNames = [
+        "feature_fusion/Conv_7/Sigmoid",
+        "feature_fusion/concat_3"]
+    net = cv2.dnn.readNet("../EAST/frozen_east_text_detection.pb")
+    blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
+        (123.68, 116.78, 103.94), swapRB=True, crop=False)
+    net.setInput(blob)
+    (scores, geometry) = net.forward(layerNames)
+    (rects, confidences) = decode_predictions(scores, geometry)
+    boxes = non_max_suppression(np.array(rects), probs=confidences)
+    results = []
+    for (startX, startY, endX, endY) in boxes:
+
+        startX = int(startX * rW)
+        startY = int(startY * rH)
+        endX = int(endX * rW)
+        endY = int(endY * rH)
+
+        dX = int((endX - startX) * 0.0)
+        dY = int((endY - startY) * 0.0)
+
+        startX = max(0, startX - dX)
+        startY = max(0, startY - dY)
+        endX = min(origW, endX + (dX * 2))
+        endY = min(origH, endY + (dY * 2))
+
+        roi = orig[startY:endY, startX:endX]
+
+        config = ("-l eng --oem 1 --psm 7")
+        text = pytesseract.image_to_string(roi, config=config)
+
+        results.append(((startX, startY, endX, endY), text))
+
+    results = sorted(results, key=lambda r: r[0][1])
+    wyniki = []
+    for ((startX, startY, endX, endY), text) in results:
+        if( text.isdigit() ):
+            wyniki.append(text)
+        # print("OCR TEXT")
+        # print("========")
+        # print("{}\n".format(text))
+
+        # text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
+        # output = orig.copy()
+        # cv2.rectangle(output, (startX, startY), (endX, endY),
+        #     (0, 0, 255), 2)
+        # cv2.putText(output, text, (startX, startY - 20),
+        #     cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3)
+
+        # cv2.imshow("Text Detection", output)
+        # cv2.waitKey(0)
-    return 0
-
-# construct the argument parser and parse the arguments
-ap = argparse.ArgumentParser()
-ap.add_argument("-i", "--image", type=str,
-    help="path to input image")
-ap.add_argument("-east", "--east", type=str, default="./EAST/frozen_east_text_detection.pb",
-    help="path to input EAST text detector")
-ap.add_argument("-c", "--min-confidence", type=float, default=0.5,
-    help="minimum probability required to inspect a region")
-ap.add_argument("-w", "--width", type=int, default=320,
-    help="nearest multiple of 32 for resized width")
-ap.add_argument("-e", "--height", type=int, default=320,
-    help="nearest multiple of 32 for resized height")
-ap.add_argument("-p", "--padding", type=float, default=0.0,
-    help="amount of padding to add to each border of ROI")
-args = vars(ap.parse_args())
-
-# load the input image and grab the image dimensions
-image = cv2.imread(args["image"])
-orig = image.copy()
-(origH, origW) = image.shape[:2]
-
-# set the new width and height and then determine the ratio in change
-# for both the width and height
-(newW, newH) = (args["width"], args["height"])
-rW = origW / float(newW)
-rH = origH / float(newH)
-
-# resize the image and grab the new image dimensions
-image = cv2.resize(image, (newW, newH))
-(H, W) = image.shape[:2]
-
-# define the two output layer names for the EAST detector model that
-# we are interested -- the first is the output probabilities and the
-# second can be used to derive the bounding box coordinates of text
-layerNames = [
-    "feature_fusion/Conv_7/Sigmoid",
-    "feature_fusion/concat_3"]
-
-# load the pre-trained EAST text detector
-print("[INFO] loading EAST text detector...")
-net = cv2.dnn.readNet(args["east"])
-
-# construct a blob from the image and then perform a forward pass of
-# the model to obtain the two output layer sets
-blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
-    (123.68, 116.78, 103.94), swapRB=True, crop=False)
-net.setInput(blob)
-(scores, geometry) = net.forward(layerNames)
-
-# decode the predictions, then apply non-maxima suppression to
-# suppress weak, overlapping bounding boxes
-(rects, confidences) = decode_predictions(scores, geometry)
-boxes = non_max_suppression(np.array(rects), probs=confidences)
-
-# initialize the list of results
-results = []
-
-# loop over the bounding boxes
-for (startX, startY, endX, endY) in boxes:
-    # scale the bounding box coordinates based on the respective
-    # ratios
-    startX = int(startX * rW)
-    startY = int(startY * rH)
-    endX = int(endX * rW)
-    endY = int(endY * rH)
-
-    # in order to obtain a better OCR of the text we can potentially
-    # apply a bit of padding surrounding the bounding box -- here we
-    # are computing the deltas in both the x and y directions
-    dX = int((endX - startX) * args["padding"])
-    dY = int((endY - startY) * args["padding"])
-
-    # apply padding to each side of the bounding box, respectively
-    startX = max(0, startX - dX)
-    startY = max(0, startY - dY)
-    endX = min(origW, endX + (dX * 2))
-    endY = min(origH, endY + (dY * 2))
-
-    # extract the actual padded ROI
-    roi = orig[startY:endY, startX:endX]
-
-    # in order to apply Tesseract v4 to OCR text we must supply
-    # (1) a language, (2) an OEM flag of 4, indicating that the we
-    # wish to use the LSTM neural net model for OCR, and finally
-    # (3) an OEM value, in this case, 7 which implies that we are
-    # treating the ROI as a single line of text
-    config = ("-l eng --oem 1 --psm 7")
-    text = pytesseract.image_to_string(roi, config=config)
-
-    # add the bounding box coordinates and OCR'd text to the list
-    # of results
-    results.append(((startX, startY, endX, endY), text))
-
-# sort the results bounding box coordinates from top to bottom
-results = sorted(results, key=lambda r: r[0][1])
-
-# loop over the results
-for ((startX, startY, endX, endY), text) in results:
-    # display the text OCR'd by Tesseract
-    print("OCR TEXT")
-    print("========")
-    print("{}\n".format(text))
-
-    # strip out non-ASCII text so we can draw the text on the image
-    # using OpenCV, then draw the text and a bounding box surrounding
-    # the text region of the input image
-    text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
-    output = orig.copy()
-    cv2.rectangle(output, (startX, startY), (endX, endY),
-        (0, 0, 255), 2)
-    cv2.putText(output, text, (startX, startY - 20),
-        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3)
-
-    # show the output image
-    cv2.imshow("Text Detection", output)
-    cv2.waitKey(0)
+    return wyniki
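Review note: findNumber() is now an importable function rather than a module-level script. It takes a path to an image, runs the EAST detector plus Tesseract over each detected box, and returns only the strings that are purely digits (wyniki). Two things worth flagging: the EAST model is loaded from the relative path "../EAST/frozen_east_text_detection.pb", so the result depends on the working directory the Django process is started from, and cv2.imread() expects a filesystem path, while the caller in views.py passes photo.image.url (a URL such as /images/bib_01.jpg). A minimal usage sketch, with the file name and model location as assumptions (EAST/ sitting next to the bibrecognition/ project directory, as the old argparse default suggests):

    # hypothetical smoke test; start "python manage.py shell" from the bibrecognition/ directory
    from imguploader.functions import findNumber

    numbers = findNumber("images/bib_01.jpg")
    print(numbers)   # e.g. ['123']; an empty list if nothing digit-only was read

Passing photo.image.path instead of photo.image.url in the view would hand findNumber() an absolute filesystem path and avoid relying on the URL string also being a valid path on the machine running the code.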
"--image", type=str, - help="path to input image") -ap.add_argument("-east", "--east", type=str, default="./EAST/frozen_east_text_detection.pb", - help="path to input EAST text detector") -ap.add_argument("-c", "--min-confidence", type=float, default=0.5, - help="minimum probability required to inspect a region") -ap.add_argument("-w", "--width", type=int, default=320, - help="nearest multiple of 32 for resized width") -ap.add_argument("-e", "--height", type=int, default=320, - help="nearest multiple of 32 for resized height") -ap.add_argument("-p", "--padding", type=float, default=0.0, - help="amount of padding to add to each border of ROI") -args = vars(ap.parse_args()) - -# load the input image and grab the image dimensions -image = cv2.imread(args["image"]) -orig = image.copy() -(origH, origW) = image.shape[:2] - -# set the new width and height and then determine the ratio in change -# for both the width and height -(newW, newH) = (args["width"], args["height"]) -rW = origW / float(newW) -rH = origH / float(newH) - -# resize the image and grab the new image dimensions -image = cv2.resize(image, (newW, newH)) -(H, W) = image.shape[:2] - -# define the two output layer names for the EAST detector model that -# we are interested -- the first is the output probabilities and the -# second can be used to derive the bounding box coordinates of text -layerNames = [ - "feature_fusion/Conv_7/Sigmoid", - "feature_fusion/concat_3"] - -# load the pre-trained EAST text detector -print("[INFO] loading EAST text detector...") -net = cv2.dnn.readNet(args["east"]) - -# construct a blob from the image and then perform a forward pass of -# the model to obtain the two output layer sets -blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), - (123.68, 116.78, 103.94), swapRB=True, crop=False) -net.setInput(blob) -(scores, geometry) = net.forward(layerNames) - -# decode the predictions, then apply non-maxima suppression to -# suppress weak, overlapping bounding boxes -(rects, confidences) = decode_predictions(scores, geometry) -boxes = non_max_suppression(np.array(rects), probs=confidences) - -# initialize the list of results -results = [] - -# loop over the bounding boxes -for (startX, startY, endX, endY) in boxes: - # scale the bounding box coordinates based on the respective - # ratios - startX = int(startX * rW) - startY = int(startY * rH) - endX = int(endX * rW) - endY = int(endY * rH) - - # in order to obtain a better OCR of the text we can potentially - # apply a bit of padding surrounding the bounding box -- here we - # are computing the deltas in both the x and y directions - dX = int((endX - startX) * args["padding"]) - dY = int((endY - startY) * args["padding"]) - - # apply padding to each side of the bounding box, respectively - startX = max(0, startX - dX) - startY = max(0, startY - dY) - endX = min(origW, endX + (dX * 2)) - endY = min(origH, endY + (dY * 2)) - - # extract the actual padded ROI - roi = orig[startY:endY, startX:endX] - - # in order to apply Tesseract v4 to OCR text we must supply - # (1) a language, (2) an OEM flag of 4, indicating that the we - # wish to use the LSTM neural net model for OCR, and finally - # (3) an OEM value, in this case, 7 which implies that we are - # treating the ROI as a single line of text - config = ("-l eng --oem 1 --psm 7") - text = pytesseract.image_to_string(roi, config=config) - - # add the bounding box coordinates and OCR'd text to the list - # of results - results.append(((startX, startY, endX, endY), text)) - -# sort the results bounding box coordinates 
from top to bottom -results = sorted(results, key=lambda r: r[0][1]) - -# loop over the results -for ((startX, startY, endX, endY), text) in results: - # display the text OCR'd by Tesseract - print("OCR TEXT") - print("========") - print("{}\n".format(text)) - - # strip out non-ASCII text so we can draw the text on the image - # using OpenCV, then draw the text and a bounding box surrounding - # the text region of the input image - text = "".join([c if ord(c) < 128 else "" for c in text]).strip() - output = orig.copy() - cv2.rectangle(output, (startX, startY), (endX, endY), - (0, 0, 255), 2) - cv2.putText(output, text, (startX, startY - 20), - cv2.FONT_HERSHEY_SIMPLEX, 1.2, (0, 0, 255), 3) - - # show the output image - cv2.imshow("Text Detection", output) - cv2.waitKey(0) + return wyniki diff --git a/bibrecognition/imguploader/migrations/0004_remove_photo_url.py b/bibrecognition/imguploader/migrations/0004_remove_photo_url.py new file mode 100644 index 0000000..c62e876 --- /dev/null +++ b/bibrecognition/imguploader/migrations/0004_remove_photo_url.py @@ -0,0 +1,17 @@ +# Generated by Django 3.0.3 on 2020-06-19 23:49 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('imguploader', '0003_competitions_status'), + ] + + operations = [ + migrations.RemoveField( + model_name='photo', + name='url', + ), + ] diff --git a/bibrecognition/imguploader/migrations/0005_photometa_comp_id.py b/bibrecognition/imguploader/migrations/0005_photometa_comp_id.py new file mode 100644 index 0000000..9085319 --- /dev/null +++ b/bibrecognition/imguploader/migrations/0005_photometa_comp_id.py @@ -0,0 +1,19 @@ +# Generated by Django 3.0.3 on 2020-06-20 00:27 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('imguploader', '0004_remove_photo_url'), + ] + + operations = [ + migrations.AddField( + model_name='photometa', + name='comp_id', + field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='imguploader.Competitions'), + ), + ] diff --git a/bibrecognition/imguploader/migrations/__pycache__/0004_remove_photo_url.cpython-38.pyc b/bibrecognition/imguploader/migrations/__pycache__/0004_remove_photo_url.cpython-38.pyc new file mode 100644 index 0000000..1bf9620 Binary files /dev/null and b/bibrecognition/imguploader/migrations/__pycache__/0004_remove_photo_url.cpython-38.pyc differ diff --git a/bibrecognition/imguploader/migrations/__pycache__/0005_photometa_comp_id.cpython-38.pyc b/bibrecognition/imguploader/migrations/__pycache__/0005_photometa_comp_id.cpython-38.pyc new file mode 100644 index 0000000..b9794c6 Binary files /dev/null and b/bibrecognition/imguploader/migrations/__pycache__/0005_photometa_comp_id.cpython-38.pyc differ diff --git a/bibrecognition/imguploader/models.py b/bibrecognition/imguploader/models.py index d11199f..c3a7462 100644 --- a/bibrecognition/imguploader/models.py +++ b/bibrecognition/imguploader/models.py @@ -1,8 +1,8 @@ from django.db import models class PhotoManager(models.Manager): - def create_photo(self, comp_id, name, image, url): - photo = self.create(comp_id = comp_id, name = name, image = image, url = url) + def create_photo(self, comp_id, name, image): + photo = self.create(comp_id = comp_id, name = name, image = image) return photo # Create your models here. 
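Review note: PhotoMeta acts as a key/value side table, and the detected bib numbers are stored one row per hit with meta_key="detect_number". The comp_id field added here (and in migration 0005) lets searches skip the join through Photo. A small usage sketch, assuming a Competitions row (zawody) and a saved Photo (photo) already exist; the variable names are illustrative:

    # attach a detected number to a photo
    pm = PhotoMeta(comp_id=zawody, photo_id=photo,
                   meta_key="detect_number", meta_value="123")
    pm.save()

    # look it up again for a given competition
    hits = PhotoMeta.objects.filter(comp_id=zawody,
                                    meta_key="detect_number", meta_value="123")

Filtering on meta_key as well (as above) keeps the lookup unambiguous once other kinds of metadata start sharing the table; the searchPhotos view further down currently filters on meta_value only.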
diff --git a/bibrecognition/imguploader/templates/index.html b/bibrecognition/imguploader/templates/index.html
index 6849961..3a51633 100644
--- a/bibrecognition/imguploader/templates/index.html
+++ b/bibrecognition/imguploader/templates/index.html
@@ -7,9 +7,12 @@
     {% if user.is_authenticated %}
-        Zalogowany 😎
+        Zalogowany 😎
+        Załaduj zdjęcia
+        Przeszukaj bazę
     {% else %}
-        Gość 🏃‍♀️
+        Gość 🏃‍♀️
+        Przeszukaj bazę
     {% endif %}
\ No newline at end of file
diff --git a/bibrecognition/imguploader/templates/search.html b/bibrecognition/imguploader/templates/search.html
new file mode 100644
index 0000000..ed76646
--- /dev/null
+++ b/bibrecognition/imguploader/templates/search.html
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+    Search Photos
+
+
+
+    {% if form %}
+        {% csrf_token %}
+        {{ form }}
+
+
+    {% endif %}
+    {% if foto %}
+
+    {% endif %}
+
+
+
\ No newline at end of file
diff --git a/bibrecognition/imguploader/urls.py b/bibrecognition/imguploader/urls.py
index 334a710..e9c24c1 100644
--- a/bibrecognition/imguploader/urls.py
+++ b/bibrecognition/imguploader/urls.py
@@ -1,8 +1,16 @@
 from django.urls import path
+from django.conf.urls.static import static
+from django.conf import settings
 
 from . import views
 
 urlpatterns = [
     path('', views.index, name="index"),
     path('upload', views.uploadPhotos, name="upload"),
+    path('search', views.searchPhotos, name="search"),
+
+
+
 ]
+
+urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
diff --git a/bibrecognition/imguploader/views.py b/bibrecognition/imguploader/views.py
index 96e7825..dea6a57 100644
--- a/bibrecognition/imguploader/views.py
+++ b/bibrecognition/imguploader/views.py
@@ -1,13 +1,15 @@
 from django.shortcuts import render
 from django.http import HttpResponse
 from .forms import PhotoForm
+from .forms import SearchForm
 from django.http import HttpResponseRedirect
 
 from .models import PhotoManager
 from .models import Photo
 from .models import Competitions
+from .models import PhotoMeta
 
-# from .functions import test
+from .functions import findNumber
 
 # Create your views here.
@@ -25,15 +27,61 @@ def uploadPhotos(request):
             for f in files:
                 zawody = Competitions.objects.get(comp_slug=comp)
-                photo = Photo.objects.create_photo(zawody,f,f,'test')
+                # photo = Photo.objects.create_photo(zawody,comp+"_"+f,f)
+                file_name = comp+"_"+f.name
+                photo = Photo(comp_id=zawody, name=file_name, image=f)
+                photo.save(force_insert=True)
+                # print("URL of photo: "+photo.image.url)
+                numbers = findNumber(photo.image.url)
+
+                for nr in numbers:
+                    pm = PhotoMeta(comp_id=zawody, photo_id=photo, meta_key="detect_number", meta_value=nr)
+                    pm.save(force_insert=True)
             # return self.form_valid(form)
-            return HttpResponseRedirect('/success/url/')
+            return HttpResponseRedirect('/success/')
         else:
             # return self.form_invalid(form)
             # form.save()
             # return render(request, print(request.FILES['file_field']))
-            return HttpResponseRedirect('/faild/url/')
+            return HttpResponseRedirect('/failed/')
     else:
         form = PhotoForm()
     return render(request, 'upload.html', {'form': form})
     # return HttpResponse("Hello, world. This is imageUploader")
+
+
+def searchPhotos(request):
+    if request.method == 'POST':
+        form = SearchForm(request.POST)
+        comp = request.POST['zawody']
+        numer = request.POST['numer']
+        print(request)
+
+        if form.is_valid():
+            allFotos = []
+            imgUrls = []
+            zawody = Competitions.objects.get(comp_slug=comp)
+            try:
+                zdjecia = PhotoMeta.objects.filter(comp_id=zawody, meta_value=numer)
+            except PhotoMeta.DoesNotExist:
+                zdjecia = None
+            if( zdjecia ):
+                for zdjecie in zdjecia:
+                    # allFotos.append(Photo.objects.get(id=zdjecie.photo_id))
+                    imgUrls.append(zdjecie.photo_id.image.name)
+
+                # for fotos in allFotos:
+                #     imgUrls.append(fotos.image.url)
+
+                return render(request, 'search.html', {'foto': imgUrls})
+            else:
+                print('no ni ma')
+
+            return HttpResponseRedirect('/success/')
+        else:
+
+            return HttpResponseRedirect('/failed/')
+    else:
+        form = SearchForm()
+    return render(request, 'search.html', {'form': form})
+    # return HttpResponse("Hello, world. This is imageUploader")
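Review note on searchPhotos: QuerySet.filter() never raises DoesNotExist (that exception belongs to .get()); it simply returns an empty queryset, so the try/except around the PhotoMeta lookup is effectively dead code, and the truthiness check on zdjecia already covers the "no hits" case. Reading zawody through form.cleaned_data after is_valid(), instead of raw request.POST, would also let the form do the validation it was declared for. A compact variant of the lookup under those assumptions (a sketch, not a drop-in replacement):

    if form.is_valid():
        zawody = form.cleaned_data['zawody']   # already a Competitions instance
        numer = request.POST['numer']          # keep the raw string for the CharField comparison
        zdjecia = PhotoMeta.objects.filter(comp_id=zawody,
                                           meta_key="detect_number", meta_value=numer)
        imgUrls = [z.photo_id.image.url for z in zdjecia]   # .url already carries the /images/ prefix
        return render(request, 'search.html', {'foto': imgUrls})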
This is imageUploader") diff --git a/imgs/bib_03_bw.jpg b/imgs/bib_03_bw.jpg new file mode 100644 index 0000000..255bf90 Binary files /dev/null and b/imgs/bib_03_bw.jpg differ diff --git a/imgs/bib_04.jpg b/imgs/bib_04.jpg new file mode 100644 index 0000000..c266258 Binary files /dev/null and b/imgs/bib_04.jpg differ diff --git a/main.py b/main.py index a274ba3..4002f53 100644 --- a/main.py +++ b/main.py @@ -169,12 +169,14 @@ for (startX, startY, endX, endY) in boxes: results = sorted(results, key=lambda r: r[0][1]) # loop over the results + for ((startX, startY, endX, endY), text) in results: # display the text OCR'd by Tesseract print("OCR TEXT") print("========") print("{}\n".format(text)) + # strip out non-ASCII text so we can draw the text on the image # using OpenCV, then draw the text and a bounding box surrounding # the text region of the input image @@ -188,3 +190,4 @@ for ((startX, startY, endX, endY), text) in results: # show the output image cv2.imshow("Text Detection", output) cv2.waitKey(0) +