Add final version

Adrian 2023-07-05 19:06:09 +02:00
commit c2900a41ba
41 changed files with 3391 additions and 0 deletions

188
.gitignore vendored Normal file

@@ -0,0 +1,188 @@
# Created by .ignore support plugin (hsz.mobi)
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# dotenv
.env
# virtualenv
venv/
ENV/
# Spyder project settings
.spyderproject
# Rope project settings
.ropeproject
### VirtualEnv template
# Virtualenv
# http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
[Bb]in
[Ii]nclude
[Ll]ib
[Ll]ib64
[Ll]ocal
[Ss]cripts
pyvenv.cfg
.venv
pip-selfcheck.json
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
# idea folder, uncomment if you don't need it
.idea
.vscode
.vscode/
config.py


@@ -0,0 +1,36 @@
import cv2 as cv
import numpy as np
from skimage.io import imread
import os

def get_led_color(robot_image, model):
    green_color = 1
    red_color = 2
    white_color = 3
    arr_led = []
    X_led = {}
    # Resize to the classifier's 128x128 input and scale pixels to [0, 1].
    resized_image = cv.resize(robot_image, (128, 128), interpolation=cv.INTER_AREA)
    resized_image = resized_image / 255
    arr_led.append(resized_image)
    X_led["values"] = np.array(arr_led)
    # Keep only the first three channels, dropping any alpha plane.
    image_with_alpha_led = X_led["values"][:, :, :, :3]
    pred = predict(image_with_alpha_led, model)
    if pred[0] == green_color:
        return "green"
    elif pred[0] == red_color:
        return "red"
    elif pred[0] == white_color:
        return "white"
    else:
        return "other"

def predict(image, model):
    predict_x = model.predict(image)
    classes_x = np.argmax(predict_x, axis=1)
    print(f'{classes_x}\n')
    return classes_x
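For context, a usage sketch of the classifier above; the model file name, the input image and the loading code are assumptions, not part of this commit:

# Usage sketch (hypothetical names: 'led_model.h5' and 'robot_crop.png' do not
# appear in this commit; class ids 1/2/3 map to green/red/white as above).
from keras.models import load_model

led_model = load_model('led_model.h5')
crop = cv.imread('robot_crop.png')
print(get_led_color(crop, led_model))  # -> "green", "red", "white" or "other"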

48
README.md Normal file

@@ -0,0 +1,48 @@
# Flask API
To create the conda env, run:
`conda env create -f conda_env.yml`
Then select the newly created env in your IDE.
### To run the API, run this command in the project folder:
`export FLASK_APP=main.py`
### After that you are ready to deploy the API:
`flask run --without-threads`
Supported requests (a minimal client sketch follows the list):
- `API_address/detectRobot1?img=...`
- response model:
```
{
"0": ["color", int(width), int(x)]
}
```
- when no objects were detected response is:
```
{
"0": []
}
```
- `API_address/detectRat1?img=...`
- response model:
```
{
"0": [int(width), int(x)]
}
```
- when no objects were detected response is:
```
{
"0": []
}
```
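A minimal client sketch (an illustration, assuming the API is served at Flask's default `http://127.0.0.1:5000`; the `img` payload format is not specified above):
```
import requests

API_ADDRESS = "http://127.0.0.1:5000"  # assumed Flask default, adjust to your deployment
resp = requests.get(f"{API_ADDRESS}/detectRobot1", params={"img": "<image data>"})
detection = resp.json()["0"]
if detection:  # an empty list means nothing was detected
    color, width, x = detection
    print(f"robot LED: {color}, bbox width: {width}px, left edge x: {x}px")
```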
# Before running
###### Copy the files from the ``dependencies`` folder to the ``/CoppeliaRobotics/CoppeliaSimEdu/lua`` folder

BIN
VGG16_led_detector/.DS_Store vendored Normal file

Binary file not shown.

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

286
api.yml Normal file

@@ -0,0 +1,286 @@
name: tensorflow-sem
channels:
- apple
- anaconda
- conda-forge
- defaults
dependencies:
- _ipython_minor_entry_point=8.7.0=h8cf3c4a_0
- anyio=3.6.2=pyhd8ed1ab_0
- appnope=0.1.3=pyhd8ed1ab_0
- argon2-cffi=21.3.0=pyhd8ed1ab_0
- argon2-cffi-bindings=21.2.0=py39h02fc5c5_3
- asttokens=2.2.1=pyhd8ed1ab_0
- attrs=22.1.0=pyh71513ae_1
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=pyhd8ed1ab_3
- backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0
- beautifulsoup4=4.11.1=pyha770c72_0
- bleach=5.0.1=pyhd8ed1ab_0
- blosc=1.21.0=h98b2900_1
- boto3=1.26.33=pyhd8ed1ab_0
- botocore=1.29.33=pyhd8ed1ab_0
- brotli=1.0.9=h1a8c8d9_8
- brotli-bin=1.0.9=h1a8c8d9_8
- brotlipy=0.7.0=py39h02fc5c5_1005
- brunsli=0.1=hc377ac9_1
- bzip2=1.0.8=h3422bc3_4
- c-ares=1.18.1=h3422bc3_0
- ca-certificates=2022.12.7=h4653dfc_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- cairo=1.16.0=he69dfd1_1008
- certifi=2022.12.7=pyhd8ed1ab_0
- cffi=1.15.1=py39h7e6b969_3
- cfitsio=3.470=h7f6438f_7
- charls=2.2.0=hc377ac9_0
- charset-normalizer=2.1.1=pyhd8ed1ab_0
- click=8.1.3=unix_pyhd8ed1ab_2
- colorama=0.4.6=pyhd8ed1ab_0
- comm=0.1.2=pyhd8ed1ab_0
- contourpy=1.0.6=py39haaf3ac1_0
- cryptography=38.0.4=py39haa0b8cc_0
- cycler=0.11.0=pyhd8ed1ab_0
- cytoolz=0.11.0=py39h1a28f6b_0
- dask-core=2022.5.0=py39hca03da5_0
- debugpy=1.6.4=py39h23fbdae_0
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- eigen=3.4.0=hc021e02_0
- entrypoints=0.4=pyhd8ed1ab_0
- executing=1.2.0=pyhd8ed1ab_0
- flask=2.2.2=pyhd8ed1ab_0
- flit-core=3.8.0=pyhd8ed1ab_0
- fontconfig=2.13.94=heb65262_0
- fonttools=4.38.0=py39h02fc5c5_1
- freetype=2.12.1=hd633e50_1
- fsspec=2022.3.0=py39hca03da5_0
- gettext=0.21.1=h0186832_0
- giflib=5.2.1=h1a28f6b_0
- glib=2.74.1=hb5ab8b9_1
- glib-tools=2.74.1=hb5ab8b9_1
- graphite2=1.3.14=hc377ac9_1
- grpcio=1.42.0=py39h95c9599_0
- gst-plugins-base=1.14.1=hf0a386a_0
- gstreamer=1.14.1=he09cfb7_0
- h5py=3.6.0=nompi_py39hd982b79_100
- harfbuzz=4.3.0=hb1b0ec1_0
- hdf5=1.12.1=nompi_hf9525e8_104
- icu=68.2=hbdafb3b_0
- idna=3.4=pyhd8ed1ab_0
- imagecodecs=2021.8.26=py39h0dccdf0_1
- imageio=2.9.0=pyhd3eb1b0_0
- imath=3.1.6=hb5ab8b9_1
- importlib-metadata=5.2.0=pyha770c72_0
- importlib_resources=5.10.1=pyhd8ed1ab_0
- ipykernel=6.19.4=pyh736e0ef_0
- ipython=8.7.0=pyhd1c38e8_0
- ipython_genutils=0.2.0=py_1
- ipywidgets=8.0.3=pyhd8ed1ab_0
- itsdangerous=2.1.2=pyhd8ed1ab_0
- jedi=0.18.2=pyhd8ed1ab_0
- jinja2=3.1.2=pyhd8ed1ab_1
- jmespath=1.0.1=pyhd8ed1ab_0
- joblib=1.2.0=pyhd8ed1ab_0
- jpeg=9e=he4db4b2_2
- jsonschema=4.17.3=pyhd8ed1ab_0
- jupyter=1.0.0=py39h2804cbe_8
- jupyter_client=7.4.8=pyhd8ed1ab_0
- jupyter_console=6.4.4=pyhd8ed1ab_0
- jupyter_core=5.1.0=py39h2804cbe_0
- jupyter_events=0.5.0=pyhd8ed1ab_0
- jupyter_server=2.0.1=pyhd8ed1ab_0
- jupyter_server_terminals=0.4.3=pyhd8ed1ab_0
- jupyterlab_pygments=0.2.2=pyhd8ed1ab_0
- jupyterlab_widgets=3.0.4=pyhd8ed1ab_0
- jxrlib=1.1=h1a28f6b_2
- kiwisolver=1.4.4=py39haaf3ac1_1
- krb5=1.19.3=hf9b2bbe_0
- lcms2=2.12=hba8e193_0
- lerc=3.0=hc377ac9_0
- libaec=1.0.4=hc377ac9_1
- libblas=3.9.0=16_osxarm64_openblas
- libbrotlicommon=1.0.9=h1a8c8d9_8
- libbrotlidec=1.0.9=h1a8c8d9_8
- libbrotlienc=1.0.9=h1a8c8d9_8
- libcblas=3.9.0=16_osxarm64_openblas
- libcurl=7.86.0=hd538317_1
- libcxx=14.0.6=h2692d47_0
- libdeflate=1.8=h1a28f6b_5
- libedit=3.1.20191231=hc8eb9b7_2
- libev=4.33=h642e427_1
- libffi=3.4.2=h3422bc3_5
- libgfortran=5.0.0=11_3_0_hd922786_27
- libgfortran5=11.3.0=hdaf2cc0_27
- libglib=2.74.1=h4646484_1
- libiconv=1.17=he4db4b2_0
- liblapack=3.9.0=16_osxarm64_openblas
- libllvm12=12.0.1=h93073aa_2
- libnghttp2=1.47.0=h232270b_1
- libopenblas=0.3.21=openmp_hc731615_3
- libpng=1.6.39=h76d750c_0
- libpq=12.9=h65cfe13_3
- libprotobuf=3.19.4=hccf11d3_0
- libsodium=1.0.18=h27ca646_1
- libsqlite=3.40.0=h76d750c_0
- libssh2=1.10.0=hb80f160_3
- libtiff=4.2.0=h01837e1_1
- libwebp=1.2.4=h68602c7_0
- libwebp-base=1.2.4=h57fd34a_0
- libxcb=1.13=h9b22ae9_1004
- libxml2=2.9.14=h8c5e841_0
- libxslt=1.1.35=h9833966_0
- libzlib=1.2.13=h03a7124_4
- libzopfli=1.0.3=hc377ac9_0
- llvm-openmp=15.0.6=h7cfbb63_0
- locket=1.0.0=py39hca03da5_0
- lxml=4.9.1=py39h2fae87d_0
- lz4-c=1.9.3=hc377ac9_0
- markupsafe=2.1.1=py39h02fc5c5_2
- matplotlib=3.6.2=py39hdf13c20_0
- matplotlib-base=3.6.2=py39h35e9e80_0
- matplotlib-inline=0.1.6=pyhd8ed1ab_0
- mistune=2.0.4=pyhd8ed1ab_0
- munkres=1.1.4=pyh9f0ad1d_0
- nbclassic=0.4.8=pyhd8ed1ab_0
- nbclient=0.7.2=pyhd8ed1ab_0
- nbconvert=7.2.7=pyhd8ed1ab_0
- nbconvert-core=7.2.7=pyhd8ed1ab_0
- nbconvert-pandoc=7.2.7=pyhd8ed1ab_0
- nbformat=5.7.1=pyhd8ed1ab_0
- ncurses=6.3=h07bb92c_1
- nest-asyncio=1.5.6=pyhd8ed1ab_0
- networkx=2.7.1=pyhd3eb1b0_0
- notebook=6.5.2=pyha770c72_1
- notebook-shim=0.2.2=pyhd8ed1ab_0
- nspr=4.35=hb7217d7_0
- nss=3.78=h1483a63_0
- numpy=1.23.2=py39h3668e8b_0
- opencv=4.6.0=py39h8794c10_2
- openexr=3.1.5=h264c651_1
- openjpeg=2.3.0=h7a6adac_2
- openssl=1.1.1s=h03a7124_1
- packaging=22.0=pyhd8ed1ab_0
- pandas=1.5.2=py39hde7b980_0
- pandas-datareader=0.10.0=pyh6c4a22f_0
- pandoc=2.19.2=hce30654_1
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.8.3=pyhd8ed1ab_0
- partd=1.2.0=pyhd3eb1b0_1
- pcre2=10.40=hb34f9b4_0
- pexpect=4.8.0=pyh1a96a4e_2
- pickleshare=0.7.5=py_1003
- pillow=9.0.1=py39h4d1bdd5_0
- pip=22.3.1=pyhd8ed1ab_0
- pixman=0.40.0=h27ca646_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_0
- platformdirs=2.6.0=pyhd8ed1ab_0
- prometheus_client=0.15.0=pyhd8ed1ab_0
- prompt-toolkit=3.0.36=pyha770c72_0
- prompt_toolkit=3.0.36=hd8ed1ab_0
- protobuf=3.19.4=py39hfb83b0d_0
- psutil=5.9.4=py39h02fc5c5_0
- pthread-stubs=0.4=h27ca646_1001
- ptyprocess=0.7.0=pyhd3deb0d_0
- pure_eval=0.2.2=pyhd8ed1ab_0
- pycparser=2.21=pyhd8ed1ab_0
- pygments=2.13.0=pyhd8ed1ab_0
- pyopenssl=22.1.0=pyhd8ed1ab_0
- pyparsing=3.0.9=pyhd8ed1ab_0
- pyrsistent=0.19.2=py39h02fc5c5_0
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.15=h2d96c93_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python-fastjsonschema=2.16.2=pyhd8ed1ab_0
- python-json-logger=2.0.1=pyh9f0ad1d_0
- python_abi=3.9=3_cp39
- pytz=2022.7=pyhd8ed1ab_0
- pywavelets=1.3.0=py39h1a28f6b_0
- pyyaml=6.0=py39h02fc5c5_5
- pyzmq=24.0.1=py39h0553236_1
- qt-main=5.15.2=ha2d02b5_7
- qt-webengine=5.15.9=h2903aaf_4
- qtwebkit=5.212=h0f11f3c_4
- readline=8.1.2=h46ed386_0
- requests=2.28.1=pyhd8ed1ab_1
- s3transfer=0.6.0=pyhd8ed1ab_0
- scikit-image=0.19.2=py39h9197a36_0
- scikit-learn=1.2.0=py39h57c6424_0
- scipy=1.9.3=py39h18313fe_2
- seaborn=0.11.2=pyhd3eb1b0_0
- send2trash=1.8.0=pyhd8ed1ab_0
- setuptools=65.6.3=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.9=hc377ac9_0
- sniffio=1.3.0=pyhd8ed1ab_0
- soupsieve=2.3.2.post1=pyhd8ed1ab_0
- sqlite=3.40.0=h2229b38_0
- stack_data=0.6.2=pyhd8ed1ab_0
- tensorflow-deps=2.10.0=0
- terminado=0.17.1=pyhd1c38e8_0
- threadpoolctl=3.1.0=pyh8a188c0_0
- tifffile=2021.7.2=pyhd3eb1b0_2
- tinycss2=1.2.1=pyhd8ed1ab_0
- tk=8.6.12=he1e0b03_0
- toolz=0.11.2=pyhd3eb1b0_0
- tornado=6.2=py39h02fc5c5_1
- tqdm=4.64.1=pyhd8ed1ab_0
- traitlets=5.8.0=pyhd8ed1ab_0
- typing_extensions=4.4.0=pyha770c72_0
- tzdata=2022g=h191b570_0
- unicodedata2=15.0.0=py39h02fc5c5_0
- urllib3=1.26.13=pyhd8ed1ab_0
- wcwidth=0.2.5=pyh9f0ad1d_2
- webencodings=0.5.1=py_1
- websocket-client=1.4.2=pyhd8ed1ab_0
- werkzeug=2.2.2=pyhd8ed1ab_0
- wheel=0.38.4=pyhd8ed1ab_0
- widgetsnbextension=4.0.4=pyhd8ed1ab_0
- xorg-libxau=1.0.9=h27ca646_0
- xorg-libxdmcp=1.1.3=h27ca646_0
- xz=5.2.6=h57fd34a_0
- yaml=0.2.5=h3422bc3_2
- zeromq=4.3.4=hbdafb3b_1
- zfp=0.5.5=hc377ac9_6
- zipp=3.11.0=pyhd8ed1ab_0
- zlib=1.2.13=h03a7124_4
- zstd=1.5.2=h8128057_4
- pip:
  - absl-py==1.3.0
  - astunparse==1.6.3
  - bayesian-optimization==1.4.2
  - cachetools==5.2.0
  - cbor==1.0.0
  - cloudpickle==2.2.0
  - flatbuffers==22.12.6
  - gast==0.4.0
  - google-auth==2.15.0
  - google-auth-oauthlib==0.4.6
  - google-pasta==0.2.0
  - gym==0.26.2
  - gym-notices==0.0.8
  - imageai==3.0.3
  - imutils==0.5.4
  - kaggle==1.5.12
  - keras==2.11.0
  - libclang==14.0.6
  - markdown==3.4.1
  - oauthlib==3.2.2
  - opencv-python==4.7.0.68
  - opt-einsum==3.3.0
  - pyasn1==0.4.8
  - pyasn1-modules==0.2.8
  - python-slugify==7.0.0
  - requests-oauthlib==1.3.1
  - rsa==4.9
  - tensorboard==2.11.0
  - tensorboard-data-server==0.6.1
  - tensorboard-plugin-wit==1.8.1
  - tensorflow-addons==0.19.0
  - tensorflow-estimator==2.11.0
  - tensorflow-macos==2.11.0
  - tensorflow-metal==0.7.0
  - termcolor==2.1.1
  - text-unidecode==1.3
  - typeguard==2.13.3
  - wrapt==1.14.1

254
conda_env.yml Normal file

@@ -0,0 +1,254 @@
name: tensorflow
channels:
- apple
- conda-forge
- defaults
dependencies:
- anyio=3.6.2=pyhd8ed1ab_0
- appdirs=1.4.4=pyh9f0ad1d_0
- appnope=0.1.3=pyhd8ed1ab_0
- argon2-cffi=21.3.0=pyhd8ed1ab_0
- argon2-cffi-bindings=21.2.0=py39h02fc5c5_3
- asttokens=2.2.1=pyhd8ed1ab_0
- attrs=22.2.0=pyh71513ae_0
- backcall=0.2.0=pyh9f0ad1d_0
- backports=1.0=pyhd8ed1ab_3
- backports.functools_lru_cache=1.6.4=pyhd8ed1ab_0
- beautifulsoup4=4.11.1=pyha770c72_0
- black=22.6.0=py39hca03da5_0
- bleach=5.0.1=pyhd8ed1ab_0
- boto3=1.26.44=pyhd8ed1ab_0
- botocore=1.29.44=pyhd8ed1ab_0
- brotli=1.0.9=h1a8c8d9_8
- brotli-bin=1.0.9=h1a8c8d9_8
- brotlipy=0.7.0=py39h02fc5c5_1005
- bzip2=1.0.8=h3422bc3_4
- c-ares=1.18.1=h3422bc3_0
- ca-certificates=2022.10.11=hca03da5_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- certifi=2022.12.7=py39hca03da5_0
- cffi=1.15.1=py39h7e6b969_3
- charset-normalizer=2.1.1=pyhd8ed1ab_0
- click=8.1.3=unix_pyhd8ed1ab_2
- colorama=0.4.6=pyhd8ed1ab_0
- comm=0.1.2=pyhd8ed1ab_0
- contourpy=1.0.6=py39haaf3ac1_0
- cryptography=39.0.0=py39he2a39a8_0
- cycler=0.11.0=pyhd8ed1ab_0
- debugpy=1.6.5=py39h23fbdae_0
- decorator=5.1.1=pyhd8ed1ab_0
- defusedxml=0.7.1=pyhd8ed1ab_0
- entrypoints=0.4=pyhd8ed1ab_0
- executing=1.2.0=pyhd8ed1ab_0
- flask=2.2.2=pyhd8ed1ab_0
- flit-core=3.8.0=pyhd8ed1ab_0
- fonttools=4.38.0=py39h02fc5c5_1
- freetype=2.12.1=hd633e50_1
- grpcio=1.46.3=py39h365d37b_0
- h5py=3.6.0=nompi_py39hd982b79_100
- hdf5=1.12.1=nompi_hd9dbc9e_104
- icu=70.1=h6b3803e_0
- idna=3.4=pyhd8ed1ab_0
- importlib-metadata=6.0.0=pyha770c72_0
- importlib_resources=5.10.2=pyhd8ed1ab_0
- iniconfig=1.1.1=pyhd3eb1b0_0
- ipykernel=6.19.4=pyh736e0ef_0
- ipython=8.8.0=pyhd1c38e8_0
- ipython_genutils=0.2.0=py_1
- ipywidgets=8.0.4=pyhd8ed1ab_0
- itsdangerous=2.1.2=pyhd8ed1ab_0
- jedi=0.18.2=pyhd8ed1ab_0
- jinja2=3.1.2=pyhd8ed1ab_1
- jmespath=1.0.1=pyhd8ed1ab_0
- joblib=1.2.0=pyhd8ed1ab_0
- jpeg=9e=he4db4b2_2
- jsonschema=4.17.3=pyhd8ed1ab_0
- jupyter=1.0.0=py39h2804cbe_8
- jupyter_client=7.4.8=pyhd8ed1ab_0
- jupyter_console=6.4.4=pyhd8ed1ab_0
- jupyter_core=5.1.2=py39h2804cbe_0
- jupyter_events=0.5.0=pyhd8ed1ab_1
- jupyter_server=2.0.6=pyhd8ed1ab_0
- jupyter_server_terminals=0.4.3=pyhd8ed1ab_0
- jupyterlab_pygments=0.2.2=pyhd8ed1ab_0
- jupyterlab_widgets=3.0.5=pyhd8ed1ab_0
- keras-preprocessing=1.1.2=pyhd3eb1b0_0
- kiwisolver=1.4.4=py39haaf3ac1_1
- krb5=1.20.1=h69eda48_0
- lcms2=2.14=h481adae_1
- lerc=4.0.0=h9a09cb3_0
- libblas=3.9.0=16_osxarm64_openblas
- libbrotlicommon=1.0.9=h1a8c8d9_8
- libbrotlidec=1.0.9=h1a8c8d9_8
- libbrotlienc=1.0.9=h1a8c8d9_8
- libcblas=3.9.0=16_osxarm64_openblas
- libcurl=7.87.0=h9049daf_0
- libcxx=14.0.6=h2692d47_0
- libdeflate=1.14=h1a8c8d9_0
- libedit=3.1.20191231=hc8eb9b7_2
- libev=4.33=h642e427_1
- libffi=3.4.2=h3422bc3_5
- libgfortran=5.0.0=11_3_0_hd922786_27
- libgfortran5=11.3.0=hdaf2cc0_27
- libiconv=1.17=he4db4b2_0
- libjpeg-turbo=2.1.4=h1a8c8d9_0
- liblapack=3.9.0=16_osxarm64_openblas
- libnghttp2=1.51.0=hae82a92_0
- libopenblas=0.3.21=openmp_hc731615_3
- libpng=1.6.39=h76d750c_0
- libprotobuf=3.19.4=hccf11d3_0
- libsodium=1.0.18=h27ca646_1
- libsqlite=3.40.0=h76d750c_0
- libssh2=1.10.0=h7a5bd25_3
- libtiff=4.5.0=heb92581_0
- libwebp-base=1.2.4=h57fd34a_0
- libxcb=1.13=h9b22ae9_1004
- libxml2=2.10.3=h87b0503_0
- libxslt=1.1.37=h1bd8bc4_0
- libzlib=1.2.13=h03a7124_4
- llvm-openmp=15.0.6=h7cfbb63_0
- lxml=4.9.2=py39h0520ce3_0
- markupsafe=2.1.1=py39h02fc5c5_2
- matplotlib=3.6.2=py39hdf13c20_0
- matplotlib-base=3.6.2=py39h35e9e80_0
- matplotlib-inline=0.1.6=pyhd8ed1ab_0
- mistune=2.0.4=pyhd8ed1ab_0
- munkres=1.1.4=pyh9f0ad1d_0
- mypy_extensions=0.4.3=py39hca03da5_1
- nbclassic=0.4.8=pyhd8ed1ab_0
- nbclient=0.7.2=pyhd8ed1ab_0
- nbconvert=7.2.7=pyhd8ed1ab_0
- nbconvert-core=7.2.7=pyhd8ed1ab_0
- nbconvert-pandoc=7.2.7=pyhd8ed1ab_0
- nbformat=5.7.1=pyhd8ed1ab_0
- ncurses=6.3=h07bb92c_1
- nest-asyncio=1.5.6=pyhd8ed1ab_0
- notebook=6.5.2=pyha770c72_1
- notebook-shim=0.2.2=pyhd8ed1ab_0
- numpy=1.23.2=py39h3668e8b_0
- openjpeg=2.5.0=hbc2ba62_2
- openssl=3.0.7=h03a7124_1
- packaging=22.0=pyhd8ed1ab_0
- pandas=1.5.2=py39hde7b980_0
- pandas-datareader=0.10.0=pyh6c4a22f_0
- pandoc=2.19.2=hce30654_1
- pandocfilters=1.5.0=pyhd8ed1ab_0
- parso=0.8.3=pyhd8ed1ab_0
- pathspec=0.9.0=py39hca03da5_0
- pexpect=4.8.0=pyh1a96a4e_2
- pickleshare=0.7.5=py_1003
- pillow=9.4.0=py39h8bbe137_0
- pip=22.3.1=pyhd8ed1ab_0
- pkgutil-resolve-name=1.3.10=pyhd8ed1ab_0
- platformdirs=2.6.2=pyhd8ed1ab_0
- pluggy=1.0.0=py39hca03da5_1
- pooch=1.6.0=pyhd8ed1ab_0
- progressbar2=3.37.1=py39hca03da5_0
- prometheus_client=0.15.0=pyhd8ed1ab_0
- prompt-toolkit=3.0.36=pyha770c72_0
- prompt_toolkit=3.0.36=hd8ed1ab_0
- protobuf=3.19.4=py39hfb83b0d_0
- psutil=5.9.4=py39h02fc5c5_0
- pthread-stubs=0.4=h27ca646_1001
- ptyprocess=0.7.0=pyhd3deb0d_0
- pure_eval=0.2.2=pyhd8ed1ab_0
- py=1.11.0=pyhd3eb1b0_0
- pycparser=2.21=pyhd8ed1ab_0
- pygments=2.14.0=pyhd8ed1ab_0
- pyopenssl=23.0.0=pyhd8ed1ab_0
- pyparsing=3.0.9=pyhd8ed1ab_0
- pyrsistent=0.19.3=py39h02fc5c5_0
- pysocks=1.7.1=pyha2e5f31_6
- pytest=7.1.2=py39hca03da5_0
- pytest-runner=6.0.0=py39hca03da5_0
- python=3.9.15=hea58f1e_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python-fastjsonschema=2.16.2=pyhd8ed1ab_0
- python-json-logger=2.0.4=pyhd8ed1ab_0
- python-utils=3.3.3=py39hca03da5_0
- python_abi=3.9=3_cp39
- pytz=2022.7=pyhd8ed1ab_0
- pyyaml=6.0=py39h02fc5c5_5
- pyzmq=24.0.1=py39h0553236_1
- readline=8.1.2=h46ed386_0
- requests=2.28.1=pyhd8ed1ab_1
- s3transfer=0.6.0=pyhd8ed1ab_0
- scikit-learn=1.2.0=py39h57c6424_0
- scipy=1.10.0=py39h18313fe_0
- seaborn=0.12.1=py39hca03da5_0
- send2trash=1.8.0=pyhd8ed1ab_0
- setuptools=65.6.3=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- sniffio=1.3.0=pyhd8ed1ab_0
- soupsieve=2.3.2.post1=pyhd8ed1ab_0
- stack_data=0.6.2=pyhd8ed1ab_0
- tensorflow-deps=2.10.0=0
- terminado=0.17.1=pyhd1c38e8_0
- threadpoolctl=3.1.0=pyh8a188c0_0
- tinycss2=1.2.1=pyhd8ed1ab_0
- tk=8.6.12=he1e0b03_0
- tomli=2.0.1=py39hca03da5_0
- tornado=6.2=py39h02fc5c5_1
- tqdm=4.64.1=pyhd8ed1ab_0
- traitlets=5.8.0=pyhd8ed1ab_0
- typing-extensions=4.4.0=hd8ed1ab_0
- typing_extensions=4.4.0=pyha770c72_0
- tzdata=2022g=h191b570_0
- unicodedata2=15.0.0=py39h02fc5c5_0
- urllib3=1.26.13=pyhd8ed1ab_0
- wcwidth=0.2.5=pyh9f0ad1d_2
- webencodings=0.5.1=py_1
- websocket-client=1.4.2=pyhd8ed1ab_0
- werkzeug=2.2.2=pyhd8ed1ab_0
- wheel=0.38.4=pyhd8ed1ab_0
- widgetsnbextension=4.0.5=pyhd8ed1ab_0
- xorg-libxau=1.0.9=h27ca646_0
- xorg-libxdmcp=1.1.3=h27ca646_0
- xz=5.2.6=h57fd34a_0
- yaml=0.2.5=h3422bc3_2
- zeromq=4.3.4=hbdafb3b_1
- zipp=3.11.0=pyhd8ed1ab_0
- zlib=1.2.13=h03a7124_4
- zstd=1.5.2=h8128057_4
- pip:
  - absl-py==1.3.0
  - astunparse==1.6.3
  - bayesian-optimization==1.4.2
  - cachetools==5.2.0
  - cloudpickle==2.2.0
  - flatbuffers==23.1.4
  - gast==0.4.0
  - google-auth==2.15.0
  - google-auth-oauthlib==0.4.6
  - google-pasta==0.2.0
  - gym==0.26.2
  - gym-notices==0.0.8
  - imageio==2.23.0
  - kaggle==1.5.12
  - keras==2.11.0
  - keras-applications==1.0.8
  - libclang==14.0.6
  - markdown==3.4.1
  - networkx==2.8.8
  - oauthlib==3.2.2
  - opencv-python==4.7.0.68
  - opt-einsum==3.3.0
  - pyasn1==0.4.8
  - pyasn1-modules==0.2.8
  - python-slugify==7.0.0
  - pywavelets==1.4.1
  - requests-oauthlib==1.3.1
  - rsa==4.9
  - scikit-image==0.19.3
  - tensorboard==2.11.0
  - tensorboard-data-server==0.6.1
  - tensorboard-plugin-wit==1.8.1
  - tensorflow-estimator==2.11.0
  - tensorflow-macos==2.11.0
  - tensorflow-metal==0.7.0
  - termcolor==2.2.0
  - text-unidecode==1.3
  - tifffile==2022.10.10
  - wrapt==1.14.1
  - yolo3==1.0

388
depdencies/json.lua Normal file

@@ -0,0 +1,388 @@
--
-- json.lua
--
-- Copyright (c) 2020 rxi
--
-- Permission is hereby granted, free of charge, to any person obtaining a copy of
-- this software and associated documentation files (the "Software"), to deal in
-- the Software without restriction, including without limitation the rights to
-- use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-- of the Software, and to permit persons to whom the Software is furnished to do
-- so, subject to the following conditions:
--
-- The above copyright notice and this permission notice shall be included in all
-- copies or substantial portions of the Software.
--
-- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-- SOFTWARE.
--
local json = { _version = "0.1.2" }
-------------------------------------------------------------------------------
-- Encode
-------------------------------------------------------------------------------
local encode
local escape_char_map = {
  [ "\\" ] = "\\",
  [ "\"" ] = "\"",
  [ "\b" ] = "b",
  [ "\f" ] = "f",
  [ "\n" ] = "n",
  [ "\r" ] = "r",
  [ "\t" ] = "t",
}

local escape_char_map_inv = { [ "/" ] = "/" }
for k, v in pairs(escape_char_map) do
  escape_char_map_inv[v] = k
end

local function escape_char(c)
  return "\\" .. (escape_char_map[c] or string.format("u%04x", c:byte()))
end

local function encode_nil(val)
  return "null"
end

local function encode_table(val, stack)
  local res = {}
  stack = stack or {}
  -- Circular reference?
  if stack[val] then error("circular reference") end
  stack[val] = true
  if rawget(val, 1) ~= nil or next(val) == nil then
    -- Treat as array -- check keys are valid and it is not sparse
    local n = 0
    for k in pairs(val) do
      if type(k) ~= "number" then
        error("invalid table: mixed or invalid key types")
      end
      n = n + 1
    end
    if n ~= #val then
      error("invalid table: sparse array")
    end
    -- Encode
    for i, v in ipairs(val) do
      table.insert(res, encode(v, stack))
    end
    stack[val] = nil
    return "[" .. table.concat(res, ",") .. "]"
  else
    -- Treat as an object
    for k, v in pairs(val) do
      if type(k) ~= "string" then
        error("invalid table: mixed or invalid key types")
      end
      table.insert(res, encode(k, stack) .. ":" .. encode(v, stack))
    end
    stack[val] = nil
    return "{" .. table.concat(res, ",") .. "}"
  end
end

local function encode_string(val)
  return '"' .. val:gsub('[%z\1-\31\\"]', escape_char) .. '"'
end

local function encode_number(val)
  -- Check for NaN, -inf and inf
  if val ~= val or val <= -math.huge or val >= math.huge then
    error("unexpected number value '" .. tostring(val) .. "'")
  end
  return string.format("%.14g", val)
end

local type_func_map = {
  [ "nil" ] = encode_nil,
  [ "table" ] = encode_table,
  [ "string" ] = encode_string,
  [ "number" ] = encode_number,
  [ "boolean" ] = tostring,
}

encode = function(val, stack)
  local t = type(val)
  local f = type_func_map[t]
  if f then
    return f(val, stack)
  end
  error("unexpected type '" .. t .. "'")
end

function json.encode(val)
  return ( encode(val) )
end

-------------------------------------------------------------------------------
-- Decode
-------------------------------------------------------------------------------

local parse

local function create_set(...)
  local res = {}
  for i = 1, select("#", ...) do
    res[ select(i, ...) ] = true
  end
  return res
end

local space_chars  = create_set(" ", "\t", "\r", "\n")
local delim_chars  = create_set(" ", "\t", "\r", "\n", "]", "}", ",")
local escape_chars = create_set("\\", "/", '"', "b", "f", "n", "r", "t", "u")
local literals     = create_set("true", "false", "null")

local literal_map = {
  [ "true"  ] = true,
  [ "false" ] = false,
  [ "null"  ] = nil,
}

local function next_char(str, idx, set, negate)
  for i = idx, #str do
    if set[str:sub(i, i)] ~= negate then
      return i
    end
  end
  return #str + 1
end

local function decode_error(str, idx, msg)
  local line_count = 1
  local col_count = 1
  for i = 1, idx - 1 do
    col_count = col_count + 1
    if str:sub(i, i) == "\n" then
      line_count = line_count + 1
      col_count = 1
    end
  end
  error( string.format("%s at line %d col %d", msg, line_count, col_count) )
end

local function codepoint_to_utf8(n)
  -- http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=iws-appendixa
  local f = math.floor
  if n <= 0x7f then
    return string.char(n)
  elseif n <= 0x7ff then
    return string.char(f(n / 64) + 192, n % 64 + 128)
  elseif n <= 0xffff then
    return string.char(f(n / 4096) + 224, f(n % 4096 / 64) + 128, n % 64 + 128)
  elseif n <= 0x10ffff then
    return string.char(f(n / 262144) + 240, f(n % 262144 / 4096) + 128,
                       f(n % 4096 / 64) + 128, n % 64 + 128)
  end
  error( string.format("invalid unicode codepoint '%x'", n) )
end

local function parse_unicode_escape(s)
  local n1 = tonumber( s:sub(1, 4),  16 )
  local n2 = tonumber( s:sub(7, 10), 16 )
  -- Surrogate pair?
  if n2 then
    return codepoint_to_utf8((n1 - 0xd800) * 0x400 + (n2 - 0xdc00) + 0x10000)
  else
    return codepoint_to_utf8(n1)
  end
end

local function parse_string(str, i)
  local res = ""
  local j = i + 1
  local k = j
  while j <= #str do
    local x = str:byte(j)
    if x < 32 then
      decode_error(str, j, "control character in string")
    elseif x == 92 then -- `\`: Escape
      res = res .. str:sub(k, j - 1)
      j = j + 1
      local c = str:sub(j, j)
      if c == "u" then
        local hex = str:match("^[dD][89aAbB]%x%x\\u%x%x%x%x", j + 1)
                 or str:match("^%x%x%x%x", j + 1)
                 or decode_error(str, j - 1, "invalid unicode escape in string")
        res = res .. parse_unicode_escape(hex)
        j = j + #hex
      else
        if not escape_chars[c] then
          decode_error(str, j - 1, "invalid escape char '" .. c .. "' in string")
        end
        res = res .. escape_char_map_inv[c]
      end
      k = j + 1
    elseif x == 34 then -- `"`: End of string
      res = res .. str:sub(k, j - 1)
      return res, j + 1
    end
    j = j + 1
  end
  decode_error(str, i, "expected closing quote for string")
end

local function parse_number(str, i)
  local x = next_char(str, i, delim_chars)
  local s = str:sub(i, x - 1)
  local n = tonumber(s)
  if not n then
    decode_error(str, i, "invalid number '" .. s .. "'")
  end
  return n, x
end

local function parse_literal(str, i)
  local x = next_char(str, i, delim_chars)
  local word = str:sub(i, x - 1)
  if not literals[word] then
    decode_error(str, i, "invalid literal '" .. word .. "'")
  end
  return literal_map[word], x
end

local function parse_array(str, i)
  local res = {}
  local n = 1
  i = i + 1
  while 1 do
    local x
    i = next_char(str, i, space_chars, true)
    -- Empty / end of array?
    if str:sub(i, i) == "]" then
      i = i + 1
      break
    end
    -- Read token
    x, i = parse(str, i)
    res[n] = x
    n = n + 1
    -- Next token
    i = next_char(str, i, space_chars, true)
    local chr = str:sub(i, i)
    i = i + 1
    if chr == "]" then break end
    if chr ~= "," then decode_error(str, i, "expected ']' or ','") end
  end
  return res, i
end

local function parse_object(str, i)
  local res = {}
  i = i + 1
  while 1 do
    local key, val
    i = next_char(str, i, space_chars, true)
    -- Empty / end of object?
    if str:sub(i, i) == "}" then
      i = i + 1
      break
    end
    -- Read key
    if str:sub(i, i) ~= '"' then
      decode_error(str, i, "expected string for key")
    end
    key, i = parse(str, i)
    -- Read ':' delimiter
    i = next_char(str, i, space_chars, true)
    if str:sub(i, i) ~= ":" then
      decode_error(str, i, "expected ':' after key")
    end
    i = next_char(str, i + 1, space_chars, true)
    -- Read value
    val, i = parse(str, i)
    -- Set
    res[key] = val
    -- Next token
    i = next_char(str, i, space_chars, true)
    local chr = str:sub(i, i)
    i = i + 1
    if chr == "}" then break end
    if chr ~= "," then decode_error(str, i, "expected '}' or ','") end
  end
  return res, i
end

local char_func_map = {
  [ '"' ] = parse_string,
  [ "0" ] = parse_number,
  [ "1" ] = parse_number,
  [ "2" ] = parse_number,
  [ "3" ] = parse_number,
  [ "4" ] = parse_number,
  [ "5" ] = parse_number,
  [ "6" ] = parse_number,
  [ "7" ] = parse_number,
  [ "8" ] = parse_number,
  [ "9" ] = parse_number,
  [ "-" ] = parse_number,
  [ "t" ] = parse_literal,
  [ "f" ] = parse_literal,
  [ "n" ] = parse_literal,
  [ "[" ] = parse_array,
  [ "{" ] = parse_object,
}

parse = function(str, idx)
  local chr = str:sub(idx, idx)
  local f = char_func_map[chr]
  if f then
    return f(str, idx)
  end
  decode_error(str, idx, "unexpected character '" .. chr .. "'")
end

function json.decode(str)
  if type(str) ~= "string" then
    error("expected argument of type string, got " .. type(str))
  end
  local res, idx = parse(str, next_char(str, 1, space_chars, true))
  idx = next_char(str, idx, space_chars, true)
  if idx <= #str then
    decode_error(str, idx, "trailing garbage")
  end
  return res
end

return json

388
dependencies/json.lua vendored Normal file
@@ -0,0 +1,388 @@
File content identical to depdencies/json.lua above.


@@ -0,0 +1,54 @@
from PIL import Image
import cv2 as cv
import re

def detect_img(yolo, img_path):
    detected_rats = []
    try:
        image = Image.open(img_path)
    except:
        print('Image open error! Try again!')
        return None
    else:
        r_image, pred = yolo.detect_image(image)
        processed_image = cv.imread(img_path)
        # r_image.show()
        # r_image.save(img_path)
        ## FIXME : better list mapping
        for prediction in pred:
            is_rat_detected = re.search("rat", prediction[0])
            if is_rat_detected:
                x1 = prediction[1][0]
                x2 = prediction[2][0]
                y1 = prediction[1][1]
                y2 = prediction[2][1]
                w = abs(x1 - x2)
                h = abs(y1 - y2)
                # print(pred)
                # print(f'x1: {x1}, x2: {x2}, y1: {y1}, y2: {y2}, w: {w}, h: {h}')
                # Crop the detection out of the frame and normalize its size.
                rat_img = processed_image[y1:y1 + h, x1:x1 + w]
                rat_img = cv.resize(rat_img, (128, 128), interpolation=cv.INTER_AREA)
                rat_pos = w, x1
                detected_rats.append((rat_img, rat_pos))
        return detected_rats

def get_turn_value(cords):
    img_width = 1920
    w, x = cords
    # Center of the bounding box: left edge plus half the width.
    center_of_object = x + w / 2
    object_position = center_of_object / img_width
    return round(object_position * 100, 2)

def detect_rat(model, img_path):
    detected_rats = detect_img(model, img_path)
    return detected_rats
    # rat_position = detected_rats[0][1]
    # turn_val = get_turn_value(rat_position)
    # return turn_val
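A worked example of get_turn_value with made-up numbers:

# Illustration only: a 200 px wide detection whose left edge is at x = 900 px
# has its center at 900 + 200 / 2 = 1000 px on the 1920 px frame, so
# get_turn_value((200, 900)) == round(1000 / 1920 * 100, 2) == 52.08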


@@ -0,0 +1,59 @@
from PIL import Image
import cv2 as cv
import re

def detect_img(yolo, img_path):
    detected_robots = []
    try:
        image = Image.open(img_path)
    except:
        print('Image open error! Try again!')
        return None
    else:
        r_image, pred = yolo.detect_image(image)
        processed_image = cv.imread(img_path)
        # r_image.show()
        # r_image.save(img_path)
        if not pred:
            return None
        ## FIXME : better list mapping
        for prediction in pred:
            is_robot_detected = re.search("robot", prediction[0])
            if is_robot_detected:
                x1 = prediction[1][0]
                x2 = prediction[2][0]
                y1 = prediction[1][1]
                y2 = prediction[2][1]
                w = abs(x1 - x2)
                h = abs(y1 - y2)
                # print(pred)
                # print(f'x1: {x1}, x2: {x2}, y1: {y1}, y2: {y2}, w: {w}, h: {h}')
                # Crop the detection out of the frame and normalize its size.
                robot_img = processed_image[y1:y1 + h, x1:x1 + w]
                robot_img = cv.resize(robot_img, (128, 128), interpolation=cv.INTER_AREA)
                robot_pos = w, x1
                detected_robots.append((robot_img, robot_pos))
        return detected_robots

def get_turn_value(cords):
    img_width = 1920
    w, x = cords
    # Center of the bounding box: left edge plus half the width.
    center_of_object = x + w / 2
    object_position = center_of_object / img_width
    return round(object_position * 100, 2)

def detect_robot(model, img_path):
    detected_robots = detect_img(model, img_path)
    return detected_robots
    # if not detected_robots:
    #     return None
    # robot_position = detected_robots[0][1]
    # turn_val = get_turn_value(robot_position)
    # return turn_val
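And a sketch of how these helpers chain together; the detector object and the frame path are assumptions (the commit shows the model-definition code, not a ready-made wrapper class):

# Usage sketch -- 'yolo' stands for any object exposing detect_image(), which
# is what detect_img() expects; 'frame.png' is a hypothetical input frame.
robots = detect_robot(yolo, 'frame.png')  # [(crop, (w, x)), ...] or None
if robots:
    crop, position = robots[0]
    print(get_turn_value(position))  # percentage offset across the frame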

Binary file not shown.


@@ -0,0 +1,45 @@
Copyright (c) 2014, Mozilla Foundation https://mozilla.org/ with Reserved Font Name Fira Mono.
Copyright (c) 2014, Telefonica S.A.
This Font Software is licensed under the SIL Open Font License, Version 1.1.
This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL
-----------------------------------------------------------
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
-----------------------------------------------------------
PREAMBLE
The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others.
The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives.
DEFINITIONS
"Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation.
"Reserved Font Name" refers to any names specified as such after the copyright statement(s).
"Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s).
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment.
"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software.
PERMISSION & CONDITIONS
Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions:
1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself.
2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user.
3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users.
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission.
5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software.
TERMINATION
This license becomes null and void if any of the above conditions are not met.
DISCLAIMER
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.


@@ -0,0 +1,412 @@
"""YOLO_v3 Model Defined in Keras."""
from functools import wraps
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers import LeakyReLU
from keras.layers import BatchNormalization
from keras.models import Model
from keras.regularizers import l2
from image_detector.yolo3.utils import compose
@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
"""Wrapper to set Darknet parameters for Convolution2D."""
darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}
darknet_conv_kwargs['padding'] = 'valid' if kwargs.get('strides')==(2,2) else 'same'
darknet_conv_kwargs.update(kwargs)
return Conv2D(*args, **darknet_conv_kwargs)
def DarknetConv2D_BN_Leaky(*args, **kwargs):
"""Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
no_bias_kwargs = {'use_bias': False}
no_bias_kwargs.update(kwargs)
return compose(
DarknetConv2D(*args, **no_bias_kwargs),
BatchNormalization(),
LeakyReLU(alpha=0.1))
def resblock_body(x, num_filters, num_blocks):
'''A series of resblocks starting with a downsampling Convolution2D'''
# Darknet uses left and top padding instead of 'same' mode
x = ZeroPadding2D(((1,0),(1,0)))(x)
x = DarknetConv2D_BN_Leaky(num_filters, (3,3), strides=(2,2))(x)
for i in range(num_blocks):
y = compose(
DarknetConv2D_BN_Leaky(num_filters//2, (1,1)),
DarknetConv2D_BN_Leaky(num_filters, (3,3)))(x)
x = Add()([x,y])
return x
def darknet_body(x):
'''Darknent body having 52 Convolution2D layers'''
x = DarknetConv2D_BN_Leaky(32, (3,3))(x)
x = resblock_body(x, 64, 1)
x = resblock_body(x, 128, 2)
x = resblock_body(x, 256, 8)
x = resblock_body(x, 512, 8)
x = resblock_body(x, 1024, 4)
return x
def make_last_layers(x, num_filters, out_filters):
'''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
x = compose(
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)),
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D_BN_Leaky(num_filters, (1,1)))(x)
y = compose(
DarknetConv2D_BN_Leaky(num_filters*2, (3,3)),
DarknetConv2D(out_filters, (1,1)))(x)
return x, y
def yolo_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 model CNN body in Keras."""
darknet = Model(inputs, darknet_body(inputs))
x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(256, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[152].output])
x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))
x = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x)
x = Concatenate()([x,darknet.layers[92].output])
x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))
return Model(inputs, [y1,y2,y3])
def tiny_yolo_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 model CNN body in keras.'''
x1 = compose(
DarknetConv2D_BN_Leaky(16, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(32, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(64, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(128, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(256, (3,3)))(inputs)
x2 = compose(
MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='same'),
DarknetConv2D_BN_Leaky(512, (3,3)),
MaxPooling2D(pool_size=(2,2), strides=(1,1), padding='same'),
DarknetConv2D_BN_Leaky(1024, (3,3)),
DarknetConv2D_BN_Leaky(256, (1,1)))(x1)
y1 = compose(
DarknetConv2D_BN_Leaky(512, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)
x2 = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x2)
y2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(256, (3,3)),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])
return Model(inputs, [y1,y2])
def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
# Adjust preditions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
if calc_loss == True:
return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
'''Get corrected boxes'''
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = K.cast(input_shape, K.dtype(box_yx))
image_shape = K.cast(image_shape, K.dtype(box_yx))
new_shape = K.round(image_shape * K.min(input_shape/image_shape))
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
box_yx = (box_yx - offset) * scale
box_hw *= scale
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = K.concatenate([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
])
# Scale boxes back to original image shape.
boxes *= K.concatenate([image_shape, image_shape])
return boxes
def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
'''Process Conv layer output'''
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
anchors, num_classes, input_shape)
boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
boxes = K.reshape(boxes, [-1, 4])
box_scores = box_confidence * box_class_probs
box_scores = K.reshape(box_scores, [-1, num_classes])
return boxes, box_scores
def yolo_eval(yolo_outputs,
anchors,
num_classes,
image_shape,
max_boxes=20,
score_threshold=.6,
iou_threshold=.5):
"""Evaluate YOLO model on given input and return filtered boxes."""
num_layers = len(yolo_outputs)
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]] # default setting
input_shape = K.shape(yolo_outputs[0])[1:3] * 32
boxes = []
box_scores = []
for l in range(num_layers):
_boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
boxes.append(_boxes)
box_scores.append(_box_scores)
boxes = K.concatenate(boxes, axis=0)
box_scores = K.concatenate(box_scores, axis=0)
mask = box_scores >= score_threshold
max_boxes_tensor = K.constant(max_boxes, dtype='int32')
boxes_ = []
scores_ = []
classes_ = []
for c in range(num_classes):
# TODO: use keras backend instead of tf.
class_boxes = tf.boolean_mask(boxes, mask[:, c])
class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])
nms_index = tf.image.non_max_suppression(
class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
class_boxes = K.gather(class_boxes, nms_index)
class_box_scores = K.gather(class_box_scores, nms_index)
classes = K.ones_like(class_box_scores, 'int32') * c
boxes_.append(class_boxes)
scores_.append(class_box_scores)
classes_.append(classes)
boxes_ = K.concatenate(boxes_, axis=0)
scores_ = K.concatenate(scores_, axis=0)
classes_ = K.concatenate(classes_, axis=0)
return boxes_, scores_, classes_
def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
'''Preprocess true boxes to training input format
Parameters
----------
true_boxes: array, shape=(m, T, 5)
Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
input_shape: array-like, hw, multiples of 32
anchors: array, shape=(N, 2), wh
num_classes: integer
Returns
-------
y_true: list of array, shape like yolo_outputs, xywh are reletive value
'''
assert (true_boxes[..., 4]<num_classes).all(), 'class id must be less than num_classes'
num_layers = len(anchors)//3 # default setting
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [1,2,3]]
true_boxes = np.array(true_boxes, dtype='float32')
input_shape = np.array(input_shape, dtype='int32')
boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]
true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]
m = true_boxes.shape[0]
grid_shapes = [input_shape//{0:32, 1:16, 2:8}[l] for l in range(num_layers)]
y_true = [np.zeros((m,grid_shapes[l][0],grid_shapes[l][1],len(anchor_mask[l]),5+num_classes),
dtype='float32') for l in range(num_layers)]
# Expand dim to apply broadcasting.
anchors = np.expand_dims(anchors, 0)
anchor_maxes = anchors / 2.
anchor_mins = -anchor_maxes
valid_mask = boxes_wh[..., 0]>0
for b in range(m):
# Discard zero rows.
wh = boxes_wh[b, valid_mask[b]]
if len(wh)==0: continue
# Expand dim to apply broadcasting.
wh = np.expand_dims(wh, -2)
box_maxes = wh / 2.
box_mins = -box_maxes
intersect_mins = np.maximum(box_mins, anchor_mins)
intersect_maxes = np.minimum(box_maxes, anchor_maxes)
intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
box_area = wh[..., 0] * wh[..., 1]
anchor_area = anchors[..., 0] * anchors[..., 1]
iou = intersect_area / (box_area + anchor_area - intersect_area)
# Find best anchor for each true box
best_anchor = np.argmax(iou, axis=-1)
for t, n in enumerate(best_anchor):
for l in range(num_layers):
if n in anchor_mask[l]:
i = np.floor(true_boxes[b,t,0]*grid_shapes[l][1]).astype('int32')
j = np.floor(true_boxes[b,t,1]*grid_shapes[l][0]).astype('int32')
k = anchor_mask[l].index(n)
c = true_boxes[b,t, 4].astype('int32')
y_true[l][b, j, i, k, 0:4] = true_boxes[b,t, 0:4]
y_true[l][b, j, i, k, 4] = 1
y_true[l][b, j, i, k, 5+c] = 1
return y_true
def box_iou(b1, b2):
'''Return iou tensor
Parameters
----------
b1: tensor, shape=(i1,...,iN, 4), xywh
b2: tensor, shape=(j, 4), xywh
Returns
-------
iou: tensor, shape=(i1,...,iN, j)
'''
# Expand dim to apply broadcasting.
b1 = K.expand_dims(b1, -2)
b1_xy = b1[..., :2]
b1_wh = b1[..., 2:4]
b1_wh_half = b1_wh/2.
b1_mins = b1_xy - b1_wh_half
b1_maxes = b1_xy + b1_wh_half
# Expand dim to apply broadcasting.
b2 = K.expand_dims(b2, 0)
b2_xy = b2[..., :2]
b2_wh = b2[..., 2:4]
b2_wh_half = b2_wh/2.
b2_mins = b2_xy - b2_wh_half
b2_maxes = b2_xy + b2_wh_half
intersect_mins = K.maximum(b1_mins, b2_mins)
intersect_maxes = K.minimum(b1_maxes, b2_maxes)
intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
b1_area = b1_wh[..., 0] * b1_wh[..., 1]
b2_area = b2_wh[..., 0] * b2_wh[..., 1]
iou = intersect_area / (b1_area + b2_area - intersect_area)
return iou
def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
'''Return yolo_loss tensor
Parameters
----------
yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
y_true: list of array, the output of preprocess_true_boxes
anchors: array, shape=(N, 2), wh
num_classes: integer
    ignore_thresh: float, predictions whose best IoU with any true box is at least this value are left out of the no-object confidence loss
Returns
-------
loss: tensor, shape=(1,)
'''
num_layers = len(anchors)//3 # default setting
yolo_outputs = args[:num_layers]
y_true = args[num_layers:]
    anchor_mask = [[6,7,8], [3,4,5], [0,1,2]] if num_layers==3 else [[3,4,5], [0,1,2]]  # keep in sync with preprocess_true_boxes
input_shape = K.cast(K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0])) for l in range(num_layers)]
loss = 0
m = K.shape(yolo_outputs[0])[0] # batch size, tensor
mf = K.cast(m, K.dtype(yolo_outputs[0]))
for l in range(num_layers):
object_mask = y_true[l][..., 4:5]
true_class_probs = y_true[l][..., 5:]
grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
anchors[anchor_mask[l]], num_classes, input_shape, calc_loss=True)
pred_box = K.concatenate([pred_xy, pred_wh])
# Darknet raw box to calculate loss.
raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] * input_shape[::-1])
raw_true_wh = K.switch(object_mask, raw_true_wh, K.zeros_like(raw_true_wh)) # avoid log(0)=-inf
box_loss_scale = 2 - y_true[l][...,2:3]*y_true[l][...,3:4]
# Find ignore mask, iterate over each of batch.
ignore_mask = tf.TensorArray(K.dtype(y_true[0]), size=1, dynamic_size=True)
object_mask_bool = K.cast(object_mask, 'bool')
def loop_body(b, ignore_mask):
true_box = tf.boolean_mask(y_true[l][b,...,0:4], object_mask_bool[b,...,0])
iou = box_iou(pred_box[b], true_box)
best_iou = K.max(iou, axis=-1)
ignore_mask = ignore_mask.write(b, K.cast(best_iou<ignore_thresh, K.dtype(true_box)))
return b+1, ignore_mask
_, ignore_mask = tf.while_loop(lambda b,*args: b<m, loop_body, [0, ignore_mask])
ignore_mask = ignore_mask.stack()
ignore_mask = K.expand_dims(ignore_mask, -1)
# K.binary_crossentropy is helpful to avoid exp overflow.
xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(raw_true_xy, raw_pred[...,0:2], from_logits=True)
wh_loss = object_mask * box_loss_scale * 0.5 * K.square(raw_true_wh-raw_pred[...,2:4])
confidence_loss = object_mask * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True)+ \
(1-object_mask) * K.binary_crossentropy(object_mask, raw_pred[...,4:5], from_logits=True) * ignore_mask
class_loss = object_mask * K.binary_crossentropy(true_class_probs, raw_pred[...,5:], from_logits=True)
xy_loss = K.sum(xy_loss) / mf
wh_loss = K.sum(wh_loss) / mf
confidence_loss = K.sum(confidence_loss) / mf
class_loss = K.sum(class_loss) / mf
loss += xy_loss + wh_loss + confidence_loss + class_loss
if print_loss:
            loss = tf.compat.v1.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(ignore_mask)], message='loss: ')  # tf.Print lives under compat.v1 in TF2
return loss
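# Training wiring sketch (hedged; the training script itself is not part of this
# file). yolo_loss is usually attached through a Lambda layer so Keras can
# compile a model whose single output is the loss; `model_body` and the
# `y_true` input tensors below are placeholders for that setup:
#   from keras.layers import Lambda
#   from keras.models import Model
#   loss_tensor = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
#                        arguments={'anchors': anchors, 'num_classes': num_classes,
#                                   'ignore_thresh': 0.5})([*model_body.output, *y_true])
#   model = Model([model_body.input, *y_true], loss_tensor)
#   model.compile(optimizer='adam', loss={'yolo_loss': lambda yt, yp: yp})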

1
image_detector/yolo3/model_data/_anchors.txt Normal file
View File

@ -0,0 +1 @@
16,26, 23,34, 29,45, 43,62, 62,102, 102,67, 134,130, 211,218, 455,592

3
image_detector/yolo3/model_data/_classes.txt Normal file
View File

@ -0,0 +1,3 @@
grains
rat
robot

Binary file not shown.

122
image_detector/yolo3/utils.py Normal file
View File

@ -0,0 +1,122 @@
"""Miscellaneous utility functions."""
from functools import reduce
from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb
def compose(*funcs):
"""Compose arbitrarily many functions, evaluated left to right.
Reference: https://mathieularose.com/function-composition-in-python/
"""
# return lambda x: reduce(lambda v, f: f(v), funcs, x)
if funcs:
return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
else:
raise ValueError('Composition of empty sequence not supported.')
def letterbox_image(image, size):
'''resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
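# Worked example: a 1920x1080 frame letterboxed to (128, 128) gets
# scale = min(128/1920, 128/1080) = 1/15, so it is resized to 128x72 and
# pasted at ((128-128)//2, (128-72)//2) = (0, 28) on a grey 128x128 canvas.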
def rand(a=0, b=1):
return np.random.rand()*(b-a) + a
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
'''random preprocessing for real-time data augmentation'''
line = annotation_line.split()
    path = r"/keras-yolo3/train/"  # hard-coded location of the training images
image = Image.open(path + line[0])
iw, ih = image.size
h, w = input_shape
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
if not random:
# resize image
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
image_data=0
if proc_img:
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)/255.
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
if len(box)>max_boxes: box = box[:max_boxes]
box[:, [0,2]] = box[:, [0,2]]*scale + dx
box[:, [1,3]] = box[:, [1,3]]*scale + dy
box_data[:len(box)] = box
return image_data, box_data
# resize image
new_ar = w/h * rand(1-jitter,1+jitter)/rand(1-jitter,1+jitter)
scale = rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
# place image
dx = int(rand(0, w-nw))
dy = int(rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
# flip image or not
flip = rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
# distort image
hue = rand(-hue, hue)
sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
val = rand(1, val) if rand()<.5 else 1/rand(1, val)
x = rgb_to_hsv(np.array(image)/255.)
x[..., 0] += hue
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x>1] = 1
x[x<0] = 0
image_data = hsv_to_rgb(x) # numpy array, 0 to 1
# correct boxes
box_data = np.zeros((max_boxes,5))
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
if len(box)>max_boxes: box = box[:max_boxes]
box_data[:len(box)] = box
return image_data, box_data
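# Usage sketch (hedged): an annotation line is an image name followed by
# space-separated "x_min,y_min,x_max,y_max,class_id" boxes, resolved against
# the hard-coded path above:
#   line = "img0.png 50,60,150,180,1 200,40,260,90,2"
#   image_data, box_data = get_random_data(line, (128, 128))
#   # image_data: (128, 128, 3) floats in [0, 1]; box_data: (20, 5), zero-padded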

221
image_detector/yolo3/yolo.py Normal file
View File

@ -0,0 +1,221 @@
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
from timeit import default_timer as timer
import numpy as np
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
import os
import tensorflow.compat.v1.keras.backend as K  # v1 backend, so K.get_session() and K.placeholder() still work
import tensorflow as tf
from image_detector.yolo3.utils import letterbox_image
from image_detector.yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
# from utils import letterbox_image
# from model import yolo_eval, yolo_body, tiny_yolo_body
tf.compat.v1.disable_eager_execution()
class YOLO(object):
_defaults = {
"model_path": 'image_detector/yolo3/model_data/model_weights.h5',
"anchors_path": 'image_detector/yolo3/model_data/_anchors.txt',
"classes_path": 'image_detector/yolo3/model_data/_classes.txt',
"score": 0.3,
"iou": 0.45,
"model_image_size": (128, 128),
"gpu_num": 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors == 6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
        except Exception:  # fall back to building the architecture and loading weights only
self.yolo_model = tiny_yolo_body(Input(shape=(None, None, 3)), num_anchors // 2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None, None, 3)), num_anchors // 3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors / len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2,))
        if self.gpu_num >= 2:
            from keras.utils import multi_gpu_model  # Keras 2.x helper; there is no tf.multi_gpu_model
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
pred = []
if self.model_image_size != (None, None):
assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='image_detector/yolo3/font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
            # keep only confident detections
            if score > 0.7:
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) + 30
right = min(image.size[0], np.floor(right + 0.5).astype('int32')) + 30
# print('=' * 10)
# print(label, (left, top), (right, bottom))
pred.append([label, (left, top), (right, bottom)])
# print('=' * 10)
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image, pred
def close_session(self):
self.sess.close()
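# Usage sketch (hedged): one-off inference with the defaults above; the image
# name is illustrative.
#   yolo = YOLO()
#   annotated, predictions = yolo.detect_image(Image.open('robot1/img0.png'))
#   # predictions: [[label, (left, top), (right, bottom)], ...], e.g. 'robot 0.92'
#   yolo.close_session()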
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
        return_value, frame = vid.read()
        if not return_value:
            break  # end of stream
        image = Image.fromarray(frame)
        image, _ = yolo.detect_image(image)  # detect_image returns (image, predictions)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
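# Usage sketch (hedged): streaming detection on a saved clip; file names are
# illustrative. Press 'q' in the result window to stop.
#   detect_video(YOLO(), 'input.mp4', output_path='out.avi')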

146
main.py Normal file
View File

@ -0,0 +1,146 @@
# python -m pip install flask
# export FLASK_APP=main.py
# flask run --without-threads
from flask import Flask, request
from tensorflow import keras
from image_detector.yolo3.yolo import YOLO
from image_detector.robot_detector import detect_robot
from LED_color_detector.led_detector import get_led_color
from image_detector.rat_detector import detect_rat
from config import img_base_path
app = Flask(__name__)
led_model = keras.models.load_model('./model_VGG16_LED_color_bigger_scene2/')
""" Automatic call while FLASK init """
yolo_model = YOLO()
# def deinit_yolo(yolo):
# yolo.close_session()
"""API_address/detectRobot1?img="""
@app.get("/detectRobot1")
def detectRobot1():
robot_led_colors = []
response_model = {}
# build path
image_path = img_base_path + "robot1/" + request.args['img']
detected_objects = detect_robot(model=yolo_model, img_path=image_path)
if not detected_objects:
return {
0: ["None"],
}, 200
for robot in detected_objects:
color = get_led_color(robot_image=robot[0], model=led_model)
robot_led_colors.append(color)
""" for emphatic robot """
### led color and robot positions will be sended to empathy model
""" for egoistic robot """
robot_id = 0
for robot in range(len(detected_objects)):
response_model[int(robot_id)] = [
robot_led_colors[robot],
int(detected_objects[robot][1][0]),
int(detected_objects[robot][1][1])
]
robot_id += 1
return response_model, 200
"""API_address/detectRobot2?img="""
@app.get("/detectRobot2")
def detectRobot2():
robot_led_colors = []
response_model = {}
# build path
image_path = img_base_path + "robot2/" + request.args['img']
detected_objects = detect_robot(model=yolo_model, img_path=image_path)
if not detected_objects:
return {
0: ["None"],
}, 200
for robot in detected_objects:
color = get_led_color(robot_image=robot[0], model=led_model)
robot_led_colors.append(color)
""" for emphatic robot """
### led color and robot positions will be sended to empathy model
""" for egoistic robot """
robot_id = 0
for robot in range(len(detected_objects)):
response_model[int(robot_id)] = [
robot_led_colors[robot],
int(detected_objects[robot][1][0]),
int(detected_objects[robot][1][1])
]
robot_id += 1
return response_model, 200
"""API_address/detectRat1?img="""
@app.get("/detectRat1")
def detectRat1():
response_model = {}
# build path
image_path = img_base_path + "robot1/" + request.args['img']
detected_objects = detect_rat(model=yolo_model, img_path=image_path)
if not detected_objects:
return {
0: ["None"],
}, 200
""" for emphatic robot """
### led color and robot positions will be sended to empathy model
""" for egoistic robot """
rat_id = 0
for rat in range(len(detected_objects)):
response_model[int(rat_id)] = [
int(detected_objects[rat][1][0]),
int(detected_objects[rat][1][1])
]
rat_id += 1
return response_model, 200
"""API_address/detectRat2?img="""
@app.get("/detectRat2")
def detectRat2():
response_model = {}
# build path
image_path = img_base_path + "robot2/" + request.args['img']
detected_objects = detect_rat(model=yolo_model, img_path=image_path)
if not detected_objects:
return {
0: ["None"],
}, 200
""" for emphatic robot """
### led color and robot positions will be sended to empathy model
""" for egoistic robot """
rat_id = 0
for rat in range(len(detected_objects)):
response_model[int(rat_id)] = [
int(detected_objects[rat][1][0]),
int(detected_objects[rat][1][1])
]
rat_id += 1
return response_model, 200
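# Request sketch (hedged), with the app running on Flask's default port (the
# Lua clients below call the same URLs):
#   curl "http://127.0.0.1:5000/detectRobot1?img=img0.png"
#   # -> {"0": [<led_color>, <x>, <y>], ...} per robot, or {"0": ["None"]}
#   curl "http://127.0.0.1:5000/detectRat1?img=img0.png"
#   # -> {"0": [<x>, <y>], ...} per rat, or {"0": ["None"]}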

Binary file not shown.

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

460
new_egoistic.lua Normal file
View File

@ -0,0 +1,460 @@
-- Known problem: the program hangs while saving images and sending the request to the API
http = require('socket.http')
json = require('json')
COLOR_WHITE = {255, 255, 255}
COLOR_RED = {255, 0, 0}
COLOR_GREEN = {0, 255, 0}
COLOR_BLUE = {0, 0, 255}
OBSTACLE_AVOID_DISTANCE = 3
MOVING_VELOCITY = 20
TURNING_VELOCITY = 15
PROXIMITY_STOP_DISTANCE = 1
CONFIDENCE = 0
IMAGE_COUNTER = 0
CAMERA_FIELD_OF_VIEW = 60
PHOTO_WIDTH = 1920
BATERY_VALUE = 10
function sysCall_init()
os.execute("rm -r robot")
os.execute("mkdir robot")
ModelHandle = sim.getObject(".")
ModelName = sim.getObjectAlias(ModelHandle)
RatHandles = {} -- List of rats on the scene
local i = 0
while true do
local rat_handle = sim.getObject("/Rat*", {index=i, noError=true})
if rat_handle < 0 then
break
end
table.insert(RatHandles, rat_handle)
i = i + 1
end
DuckHandles = {} -- List of ducks on the scene
local i = 0
while true do
local duck_handle = sim.getObject("/Duck*", {index=i, noError=true})
if duck_handle < 0 then
break
end
table.insert(DuckHandles, duck_handle)
i = i + 1
end
RightWheel = sim.getObject("./RightWheel")
LeftWheel = sim.getObject("./LeftWheel")
Camera = sim.getObject("./Camera")
VisionSensor = sim.getObject("./Vision_sensor")
SonarRight = sim.getObject("./Proximity_sensor_right")
SonarLeft = sim.getObject("./Proximity_sensor_left")
Diodes = { -- LED_strip has to be ungrouped
D2 = sim.getObject("./diode_2"),
D3 = sim.getObject("./diode_3"),
D4 = sim.getObject("./diode_4"),
D5 = sim.getObject("./diode_5"),
D6 = sim.getObject("./diode_6"),
D7 = sim.getObject("./diode_7")
}
sim.setShapeColor(sim.getObject("./diode_1"), nil, 0, COLOR_WHITE)
sim.setShapeColor(sim.getObject("./diode_8"), nil, 0, COLOR_WHITE)
sim.setObjectInt32Param(RightWheel,2000, 1) -- Enable velocity control
sim.setObjectInt32Param(LeftWheel,2000, 1)
sim.setObjectInt32Param(Camera,2000, 1)
Set_wheel_velocity(5, 5)
sim.setJointTargetVelocity(Camera, 0)
sim.setJointTargetForce(LeftWheel, 5)
sim.setJointTargetForce(RightWheel, 5)
Last_Color_Change_Time = sim.getSimulationTime()
Last_Camera_Rotation_Time = sim.getSimulationTime()
Last_Photo_Time = sim.getSimulationTime()
Last_Turn_Time=sim.getSimulationTime()
Turning = false
Seeing_Another_Rat = 0
Close_To_Rat = false
CameraAngle = Get_angle(VisionSensor)
ModelAngle = Get_angle(ModelHandle)
Rat_Degrees_Offset_From_Photo_Center = 0
DEBUG_Sensor_Time = sim.getSimulationTime()
    --sim.addBanner("Battery = 100", 2, sim_banner_fullyfacingcamera, nil, ModelHandle, nil, nil)
Camera_Rotate_Counter = 0
--IMAGE_COUNTER = 0
end
function sysCall_actuation()
-------------------- photo test --------------------
if (sim.getSimulationTime() - Last_Photo_Time) > 0.5 then
if BATERY_VALUE > 0 then
BATERY_VALUE = BATERY_VALUE - 1
UpdateBatery(BATERY_VALUE)
end
if not Close_To_Rat then
Take_photo()
end
Last_Photo_Time = sim.getSimulationTime()
end
-------------------- camera test -------------------
if (sim.getSimulationTime() - Last_Camera_Rotation_Time) > 1.7 then
if Camera_Rotate_Counter == 0 then
sim.setJointTargetVelocity(Camera, 2)
Camera_Rotate_Counter = 1
else
sim.setJointTargetVelocity(Camera, -2)
Camera_Rotate_Counter = 0
end
Last_Camera_Rotation_Time = sim.getSimulationTime()
end
-------------------- moving test -------------------
if Turning == true and Angle_difference(Get_angle(ModelHandle), Desired_angle) < 10 then
Finish_turning()
Stop_if_close(RatHandles)
end
end
function sysCall_sensing()
local _, _, _, detected_handle_left, _ = sim.readProximitySensor(SonarLeft)
local _, _, _, detected_handle_right, _ = sim.readProximitySensor(SonarRight)
if Has_Value(RatHandles, detected_handle_left) or Has_Value(RatHandles, detected_handle_right) then
if detected_handle_left > 0 and Has_Value(RatHandles, detected_handle_left) then
Handle = detected_handle_left
else
Handle = detected_handle_right
end
Close_To_Rat = true
Change_strip_color(COLOR_RED)
SensorMovement(Handle)
elseif BATERY_VALUE > 0 then
Close_To_Rat = false
VisionMovement()
else
Set_wheel_velocity(0, 0)
end
end
function sysCall_cleanup()
-- do some clean-up here
end
------------------------------ non-sysCall functions below ------------------------------
function VisionMovement()
if Seeing_Another_Rat == 1 then
local adjusted_angle = Calculate_desired_angle(CameraAngle, Rat_Degrees_Offset_From_Photo_Center)
local angle_diff = Angle_difference(ModelAngle, adjusted_angle)
if angle_diff > 5 then
if math.abs(ModelAngle - adjusted_angle) < 180 then
if ModelAngle < adjusted_angle then
print(string.format("[%s] Rat detected on the left", ModelName))
else
print(string.format("[%s] Rat detected on the right", ModelName))
angle_diff = -angle_diff
end
else
if ModelAngle > adjusted_angle then
print(string.format("[%s] Rat detected on the left", ModelName))
else
print(string.format("[%s] Rat detected on the right", ModelName))
angle_diff = -angle_diff
end
end
DEGREES_TO_TURN = angle_diff
Start_turning(angle_diff)
ModelAngle = 0
CameraAngle = 0
Rat_Degrees_Offset_From_Photo_Center = 0
end
if not Turning then
Go_Forward()
end
elseif not Turning then
Random_Walk()
end
if not Turning then
Stop_if_close(RatHandles)
end
end
function SensorMovement(handle)
Follow(handle)
end
function RatCallPredictionAPI(filename)
local response = http.request(string.format("http://127.0.0.1:5000/detectRat1?img=%s", filename))
return json.decode(response)
end
function RobotCallPredictionAPI(filename)
local response = http.request(string.format("http://127.0.0.1:5000/detectRobot1?img=%s", filename))
return json.decode(response)
end
function UpdateBatery(batery_value)
    -- sim.addBanner expects a string label; note that each call adds a new banner on top of the previous one
    sim.addBanner(string.format("Battery = %d", batery_value), 0.4, sim_banner_fullyfacingcamera, {1.0, 0, 0, 0, 0, 0}, ModelHandle, nil, {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
end
function Has_Value (tab, val)
for index, value in ipairs(tab) do
if value == val then
return true
end
end
return false
end
function Change_strip_color(rgb_vals)
for _, v in pairs(Diodes) do
sim.setShapeColor(v, nil, 0, rgb_vals)
end
end
function Set_wheel_velocity(left_vel, right_vel)
sim.setJointTargetVelocity(LeftWheel, left_vel)
sim.setJointTargetVelocity(RightWheel, right_vel)
end
function Take_photo()
CameraAngle = Get_angle(VisionSensor)
ModelAngle = Get_angle(ModelHandle)
local img = sim.getVisionSensorImg(VisionSensor)
local filename = "img"..IMAGE_COUNTER..".png"
local path = "robot1/"..filename
sim.saveImage(img, {1920,1080}, 0, path, -1)
print(string.format("[%s] Photo taken 1", ModelName))
local res = RatCallPredictionAPI(filename)
local res_robot = RobotCallPredictionAPI(filename)
print(CONFIDENCE)
if BATERY_VALUE > 0 then
if table.concat(res['0']) == 'None' then
--print(CONFIDENCE)
if CONFIDENCE > 0 then
CONFIDENCE = CONFIDENCE - 1
end
Seeing_Another_Rat = 0
if not Close_To_Rat and CONFIDENCE == 0 then
Change_strip_color(COLOR_GREEN)
print(string.format("[%s] No rats detected!", ModelName))
else
print(string.format("[%s] Chase the rat!", ModelName))
end
else
local frame_width = res['0'][1]
local rat_pos = res['0'][2] + frame_width / 2
Rat_Degrees_Offset_From_Photo_Center = (rat_pos - (PHOTO_WIDTH / 2)) / (PHOTO_WIDTH/CAMERA_FIELD_OF_VIEW)
if table.concat(res_robot['0']) == 'None' then
Seeing_Another_Rat = 1
Change_strip_color(COLOR_RED)
CONFIDENCE = 3
print(string.format("[%s] Another rat detected on camera", ModelName))
elseif res_robot['0'][1] == 'red' then
Seeing_Another_Rat = 0
print(string.format("[%s] I'm looking for another rat. This robot doesn't need my help", ModelName))
if CONFIDENCE == 0 then
Change_strip_color(COLOR_GREEN)
end
elseif res_robot['0'][1] == 'white' then
Seeing_Another_Rat = 1
print(string.format("[%s] Robot need my help. I'm on my way!", ModelName))
Change_strip_color(COLOR_RED)
CONFIDENCE = 3
end
end
IMAGE_COUNTER = IMAGE_COUNTER + 1
else
Change_strip_color(COLOR_WHITE)
print(string.format("[%s] I have to charge my battery", ModelName))
end
end
function Orientation_to_angle(orientation)
-- returns a value between [0, 360]
local angle = math.deg(orientation[2]) + 90
if orientation[1] < 0 then
angle = 360 - angle
end
return angle
end
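-- Worked example: orientation[2] = 0 rad maps to 0 + 90 = 90 degrees; with
-- orientation[1] < 0 the result is mirrored to 360 - 90 = 270.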
function Get_angle(handle)
local orientation = sim.getObjectOrientation(handle, sim.handle_world)
return Orientation_to_angle(orientation)
end
function Calculate_desired_angle(old_angle, angle_delta)
local desired = old_angle + angle_delta
if desired > 360 then
return desired % 360
    elseif desired < 0 then
return desired + 360
else
return desired
end
end
function Angle_difference(actual_angle, desired_angle)
-- returns a value between [0, 180]
local diff = math.abs(desired_angle - actual_angle)
return diff < 180 and diff or 360 - diff
end
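-- Worked example: actual 350, desired 10 -> |10 - 350| = 340; since 340 >= 180
-- the shorter way around is reported instead: 360 - 340 = 20 degrees.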
function Turn_Left()
Set_wheel_velocity(0, TURNING_VELOCITY)
end
function Turn_Right()
Set_wheel_velocity(TURNING_VELOCITY,0)
end
function Go_Forward()
Set_wheel_velocity(MOVING_VELOCITY, MOVING_VELOCITY)
end
function Stop_Moving()
Set_wheel_velocity(0, 0)
end
function Start_turning(degrees)
FULL_CIRCLE_PERIOD = 2.8 -- at 2.5 speed
print(string.format("[%s] Starting to turn %.2f degrees", ModelName, degrees))
Turning = true
Desired_angle = Calculate_desired_angle(Get_angle(ModelHandle), degrees)
if degrees > 0 then
Turn_Left()
else
Turn_Right()
end
Last_Turn_Time = sim.getSimulationTime()
end
function Finish_turning()
Go_Forward()
Last_Turn_Time = sim.getSimulationTime()
Turning = false
After_turning = Get_angle(ModelHandle)
print(string.format("[%s] Finished turning; Angle deviation from desired: %.2f (Desired: %.2f vs Actual: %.2f)",
ModelName,
Angle_difference(After_turning, Desired_angle),
Desired_angle,
After_turning))
end
function Random_Walk()
local _, obstacle_left_distance, _, _, _ = sim.readProximitySensor(SonarLeft)
if obstacle_left_distance == 0 then
obstacle_left_distance = math.huge
end
local _, obstacle_right_distance, _, _, _ = sim.readProximitySensor(SonarRight)
if obstacle_right_distance == 0 then
obstacle_right_distance = math.huge
end
if obstacle_right_distance < obstacle_left_distance and obstacle_right_distance < OBSTACLE_AVOID_DISTANCE then
Turn_Left()
elseif obstacle_left_distance < obstacle_right_distance and obstacle_left_distance < OBSTACLE_AVOID_DISTANCE then
Turn_Right()
else
Go_Forward()
end
end
function Follow(object_handle) -- Use with flat sensors with no blind zone in the middle
local left_detected, left_distance, _, _, _ = sim.checkProximitySensor(SonarLeft, object_handle)
local right_detected, right_distance, _, _, _ = sim.checkProximitySensor(SonarRight, object_handle)
if (sim.getSimulationTime() - Last_Photo_Time) > 0.5 then
if not Close_To_Rat then
local img = sim.getVisionSensorImg(VisionSensor)
local filename = "img"..IMAGE_COUNTER..".png"
local path = "robot1/"..filename
sim.saveImage(img, {1920,1080}, 0, path, -1)
end
Last_Photo_Time = sim.getSimulationTime()
IMAGE_COUNTER = IMAGE_COUNTER + 1
end
if left_detected == 1 and right_detected == 1 then
Go_Forward()
elseif left_detected == 1 then
Turn_Left()
elseif right_detected == 1 then
Turn_Right()
else
Go_Forward()
end
if (left_distance > 0 and left_distance < PROXIMITY_STOP_DISTANCE) or (right_distance > 0 and right_distance < PROXIMITY_STOP_DISTANCE) then
Stop_Moving()
end
end
function Stop_if_close(handle_table)
    -- Works fine with rats; with robots it sometimes fails to detect when the sensor sees a wheel instead of the chassis (the wheel probably has a different object handle)
local left_detected, left_distance, _, left_detected_object_handle, _ = sim.readProximitySensor(SonarLeft)
local right_detected, right_distance, _, right_detected_object_handle, _ = sim.readProximitySensor(SonarRight)
if Has_Value(handle_table, left_detected_object_handle) or Has_Value(handle_table, right_detected_object_handle) then
if (left_distance > 0 and left_distance < PROXIMITY_STOP_DISTANCE) or (right_distance > 0 and right_distance < PROXIMITY_STOP_DISTANCE) then
Stop_Moving()
end
end
end
------------------------------ DEBUG functions below ------------------------------
function DEBUG_What_Do_I_See(sensor_handle)
local detected, distance, detected_point, detected_object_handle, detected_surface_normal_vector = sim.readProximitySensor(sensor_handle)
if detected == 1 then
print(string.format("[%s] Object detected by %s; Handle: %d, Name: %s, Distance: %.2f",
ModelName,
sim.getObjectAlias(sensor_handle),
detected_object_handle,
sim.getObjectAlias(detected_object_handle, 1),
distance))
end
end

174
rat.lua Normal file
View File

@ -0,0 +1,174 @@
WALL_AVOID_DISTANCE = 0.5
MOVING_VELOCITY = -20
TURNING_VELOCITY = 2.5
ROBOT_PROXIMITY_STOP_DISTANCE = 1.2
function sysCall_init()
grains1 = sim.getObject("../Bump[2]/shape")
WallHandle = sim.getObject("../ExternalWall/Cuboid")
rightWheel= sim.getObject("./RightWheelRat")
leftWheel= sim.getObject("./LeftWheelRat")
sim.setObjectInt32Param(rightWheel,2000,1)
sim.setObjectInt32Param(leftWheel,2000,1)
Set_wheel_velocity(-20, -20)
sim.setJointTargetForce(leftWheel, 5)
sim.setJointTargetForce(rightWheel, 5)
last_time=sim.getSimulationTime()
turn_time=sim.getSimulationTime()
rotate = false
    sonarRight = sim.getObject("./Proximity_sensor_right_rat")
    sonarLeft = sim.getObject("./Proximity_sensor_left_rat")
    sonarCenter = sim.getObject("./Proximity_sensor_center_rat")
max_dist = 3.0
speed = 10
RobotHandles = {} -- List of our robots on the scene
local i = 0
while true do
local robot_handle = sim.getObject("/Bump*/shape", {index=i, noError=true})
if robot_handle < 0 then
break
end
table.insert(RobotHandles, robot_handle)
i = i + 1
end
end
function sysCall_actuation()
end
function sysCall_sensing()
-- put your sensing code here
local _, _, _, detected_handle_left, _ = sim.readProximitySensor(sonarLeft)
local _, _, _, detected_handle_right, _ = sim.readProximitySensor(sonarRight)
if Has_Value(RobotHandles, detected_handle_left) then -- For demonstration purposes this robot will follow any robot of the same type
Follow(detected_handle_left)
elseif Has_Value(RobotHandles, detected_handle_right) then
Follow(detected_handle_right)
else
Random_Walk()
end
end
function sysCall_cleanup()
-- do some clean-up here
end
function Has_Value (tab, val)
for index, value in ipairs(tab) do
if value == val then
return true
end
end
return false
end
function Set_wheel_velocity(left_vel, right_vel)
sim.setJointTargetVelocity(leftWheel, left_vel)
sim.setJointTargetVelocity(rightWheel, right_vel)
end
function Turn_Left()
Set_wheel_velocity(TURNING_VELOCITY, -TURNING_VELOCITY)
end
function Turn_Right()
Set_wheel_velocity(-TURNING_VELOCITY, TURNING_VELOCITY)
end
function Go_Forward()
Set_wheel_velocity(MOVING_VELOCITY, MOVING_VELOCITY)
end
function Stop_Moving()
Set_wheel_velocity(0, 0)
end
function Random_Walk()
    -- steer away from the nearest detected obstacle, otherwise keep going forward
distSonarRight = getDistance(sonarRight, max_dist)
distSonarLeft = getDistance(sonarLeft, max_dist)
distSonarCenter = getDistance(sonarCenter, max_dist)
if distSonarRight < max_dist and distSonarRight < distSonarLeft and rotate == false then
Set_wheel_velocity(0, -40)
elseif distSonarRight < max_dist and distSonarLeft < max_dist and distSonarCenter < max_dist and distSonarRight < distSonarLeft and rotate == false then
Set_wheel_velocity(0, -40)
elseif distSonarRight < max_dist and distSonarCenter < max_dist and distSonarRight < distSonarLeft and rotate == false then
Set_wheel_velocity(0, -40)
elseif distSonarLeft < max_dist and distSonarCenter < max_dist and distSonarLeft < distSonarRight and rotate == false then
Set_wheel_velocity(-40, 0)
elseif distSonarLeft < max_dist and distSonarLeft < distSonarRight and rotate == false then
Set_wheel_velocity(-40, 0)
elseif distSonarCenter < max_dist and rotate == false then
Set_wheel_velocity(0, -40)
turn_time = sim.getSimulationTime()
rotate = true
elseif rotate == false then
Set_wheel_velocity(-20, -20)
end
if rotate == true and (sim.getSimulationTime() - turn_time) > 0.5 then
Set_wheel_velocity(-20, -20)
rotate = false
end
end
function Follow(object_handle) -- Use with flat sensors with no blind zone in the middle
local left_detected, left_distance, _, _, _ = sim.checkProximitySensor(sonarLeft, object_handle)
local right_detected, right_distance, _, _, _ = sim.checkProximitySensor(sonarRight, object_handle)
if left_detected == 1 and right_detected == 1 then
Go_Forward()
elseif left_detected == 1 then
Turn_Left()
elseif right_detected == 1 then
Turn_Right()
else
Go_Forward()
end
if (left_distance > 0 and left_distance < ROBOT_PROXIMITY_STOP_DISTANCE) or (right_distance > 0 and right_distance < ROBOT_PROXIMITY_STOP_DISTANCE) then
Stop_Moving()
end
end
function getDistance(sensor, max_dist)
local detected, distance
detected, distance = sim.readProximitySensor(sensor)
if (detected < 1) then
distance = max_dist
end
return distance
end
function DEBUG_What_Do_I_See(sensor_handle)
local detected, distance, detected_point, detected_object_handle, detected_surface_normal_vector = sim.readProximitySensor(sensor_handle)
if detected == 1 then
print(string.format("[%s] Object detected by %s; Handle: %d, Name: %s, Distance: %.2f",
            ModelName or "Rat",  -- ModelName is never set in this script; avoid a nil format argument
sim.getObjectAlias(sensor_handle),
detected_object_handle,
sim.getObjectAlias(detected_object_handle, 1),
distance))
end
end