finally working!!!
@@ -16,24 +16,55 @@
|
||||
<item name="GROUP_NODE:origin" type="e8cecc67:BranchNodeDescriptor" />
|
||||
</path>
|
||||
</expand>
|
||||
<select />
|
||||
<select>
|
||||
<path>
|
||||
<item name="ROOT" type="e8cecc67:BranchNodeDescriptor" />
|
||||
<item name="REMOTE_ROOT" type="e8cecc67:BranchNodeDescriptor" />
|
||||
<item name="GROUP_NODE:origin" type="e8cecc67:BranchNodeDescriptor" />
|
||||
<item name="BRANCH:origin/assigning" type="e8cecc67:BranchNodeDescriptor" />
|
||||
</path>
|
||||
</select>
|
||||
</component>
|
||||
<component name="ChangeListManager">
|
||||
<list default="true" id="828778c9-9d97-422f-a727-18ddbd059b85" name="Default Changelist" comment="detecting digits">
|
||||
<change afterPath="$PROJECT_DIR$/coder/517.png" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/test1.png" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/test2.png" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/55555.jpg" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/processed/test.pt" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/processed/training.pt" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/t10k-images-idx3-ubyte" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/t10k-images-idx3-ubyte.gz" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/t10k-labels-idx1-ubyte" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/t10k-labels-idx1-ubyte.gz" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/train-images-idx3-ubyte" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/train-images-idx3-ubyte.gz" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/train-labels-idx1-ubyte" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/train-labels-idx1-ubyte.gz" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/model.pt" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/model.py" afterDir="false" />
|
||||
<change afterPath="$PROJECT_DIR$/coder/nn_model.py" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/517.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TESTSET/MNIST/processed/test.pt" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TESTSET/MNIST/processed/test.pt" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TESTSET/MNIST/processed/training.pt" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TESTSET/MNIST/processed/training.pt" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/processed/test.pt" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/processed/test.pt" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/processed/training.pt" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/processed/training.pt" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/t10k-images-idx3-ubyte" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/t10k-images-idx3-ubyte" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/t10k-images-idx3-ubyte.gz" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/t10k-images-idx3-ubyte.gz" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/t10k-labels-idx1-ubyte" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/t10k-labels-idx1-ubyte" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/t10k-labels-idx1-ubyte.gz" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/t10k-labels-idx1-ubyte.gz" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/train-images-idx3-ubyte" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/train-images-idx3-ubyte" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/train-images-idx3-ubyte.gz" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/train-images-idx3-ubyte.gz" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/train-labels-idx1-ubyte" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/train-labels-idx1-ubyte" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/PATH_TO_STORE_TRAINSET/MNIST/raw/train-labels-idx1-ubyte.gz" beforeDir="false" afterPath="$PROJECT_DIR$/coder/PATH_TO_STORE_TEST_SET/MNIST/raw/train-labels-idx1-ubyte.gz" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/barcode.jpg" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/dataset/test.csv" beforeDir="false" afterPath="$PROJECT_DIR$/coder/dataset/test.csv" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/dataset/train.csv" beforeDir="false" afterPath="$PROJECT_DIR$/coder/dataset/train.csv" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/digit_reco_model.pt" beforeDir="false" afterPath="$PROJECT_DIR$/coder/digit_reco_model.pt" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/digits_recognizer.py" beforeDir="false" afterPath="$PROJECT_DIR$/coder/digits_recognizer.py" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/ll.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/image.py" beforeDir="false" afterPath="$PROJECT_DIR$/coder/image.py" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/rocognizer.py" beforeDir="false" afterPath="$PROJECT_DIR$/coder/rocognizer.py" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test.jpg" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test1.jpg" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test3.png" beforeDir="false" afterPath="$PROJECT_DIR$/coder/test3.png" afterDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test5.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test9.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/testno.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/train_nn.py" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test1.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test2.png" beforeDir="false" />
|
||||
<change beforePath="$PROJECT_DIR$/coder/test3.png" beforeDir="false" />
|
||||
</list>
|
||||
<option name="SHOW_DIALOG" value="false" />
|
||||
<option name="HIGHLIGHT_CONFLICTS" value="true" />
|
||||
@@ -82,7 +113,7 @@
|
||||
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" />
|
||||
<property name="SHARE_PROJECT_CONFIGURATION_FILES" value="true" />
|
||||
<property name="WebServerToolWindowFactoryState" value="false" />
|
||||
<property name="last_opened_file_path" value="$USER_HOME$/Downloads/digitRecognition" />
|
||||
<property name="last_opened_file_path" value="$PROJECT_DIR$/coder" />
|
||||
<property name="restartRequiresConfirmation" value="false" />
|
||||
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
|
||||
</component>
|
||||
@@ -115,28 +146,6 @@
|
||||
<option name="INPUT_FILE" value="" />
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<configuration name="coder" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<module name="wozek" />
|
||||
<option name="INTERPRETER_OPTIONS" value="" />
|
||||
<option name="PARENT_ENVS" value="true" />
|
||||
<envs>
|
||||
<env name="PYTHONUNBUFFERED" value="1" />
|
||||
</envs>
|
||||
<option name="SDK_HOME" value="" />
|
||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/coder" />
|
||||
<option name="IS_MODULE_SDK" value="true" />
|
||||
<option name="ADD_CONTENT_ROOTS" value="true" />
|
||||
<option name="ADD_SOURCE_ROOTS" value="true" />
|
||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/coder/coder.py" />
|
||||
<option name="PARAMETERS" value="" />
|
||||
<option name="SHOW_COMMAND_LINE" value="true" />
|
||||
<option name="EMULATE_TERMINAL" value="false" />
|
||||
<option name="MODULE_MODE" value="false" />
|
||||
<option name="REDIRECT_INPUT" value="false" />
|
||||
<option name="INPUT_FILE" value="" />
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<configuration name="digits_recognizer" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<module name="wozek" />
|
||||
<option name="INTERPRETER_OPTIONS" value="" />
|
||||
@@ -159,7 +168,7 @@
|
||||
<option name="INPUT_FILE" value="" />
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<configuration name="image" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<configuration name="image (1)" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<module name="wozek" />
|
||||
<option name="INTERPRETER_OPTIONS" value="" />
|
||||
<option name="PARENT_ENVS" value="true" />
|
||||
@@ -172,7 +181,29 @@
|
||||
<option name="ADD_CONTENT_ROOTS" value="true" />
|
||||
<option name="ADD_SOURCE_ROOTS" value="true" />
|
||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
|
||||
<option name="SCRIPT_NAME" value="C:\Users\Pawel Lukaszewicz\PycharmProjects\AL-2020\coder\image.py" />
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/coder/image.py" />
|
||||
<option name="PARAMETERS" value="" />
|
||||
<option name="SHOW_COMMAND_LINE" value="true" />
|
||||
<option name="EMULATE_TERMINAL" value="false" />
|
||||
<option name="MODULE_MODE" value="false" />
|
||||
<option name="REDIRECT_INPUT" value="false" />
|
||||
<option name="INPUT_FILE" value="" />
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<configuration name="model" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<module name="wozek" />
|
||||
<option name="INTERPRETER_OPTIONS" value="" />
|
||||
<option name="PARENT_ENVS" value="true" />
|
||||
<envs>
|
||||
<env name="PYTHONUNBUFFERED" value="1" />
|
||||
</envs>
|
||||
<option name="SDK_HOME" value="" />
|
||||
<option name="WORKING_DIRECTORY" value="$PROJECT_DIR$/coder" />
|
||||
<option name="IS_MODULE_SDK" value="true" />
|
||||
<option name="ADD_CONTENT_ROOTS" value="true" />
|
||||
<option name="ADD_SOURCE_ROOTS" value="true" />
|
||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/coder/model.py" />
|
||||
<option name="PARAMETERS" value="" />
|
||||
<option name="SHOW_COMMAND_LINE" value="true" />
|
||||
<option name="EMULATE_TERMINAL" value="false" />
|
||||
@@ -200,7 +231,7 @@
|
||||
<option name="INPUT_FILE" value="" />
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<configuration name="z8" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<configuration name="tescik" type="PythonConfigurationType" factoryName="Python" temporary="true" nameIsGenerated="true">
|
||||
<module name="wozek" />
|
||||
<option name="INTERPRETER_OPTIONS" value="" />
|
||||
<option name="PARENT_ENVS" value="true" />
|
||||
@@ -213,7 +244,7 @@
|
||||
<option name="ADD_CONTENT_ROOTS" value="true" />
|
||||
<option name="ADD_SOURCE_ROOTS" value="true" />
|
||||
<EXTENSION ID="PythonCoverageRunConfigurationExtension" runner="coverage.py" />
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/coder/z8.py" />
|
||||
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/coder/tescik.py" />
|
||||
<option name="PARAMETERS" value="" />
|
||||
<option name="SHOW_COMMAND_LINE" value="true" />
|
||||
<option name="EMULATE_TERMINAL" value="false" />
|
||||
@@ -223,19 +254,19 @@
|
||||
<method v="2" />
|
||||
</configuration>
|
||||
<list>
|
||||
<item itemvalue="Python.image" />
|
||||
<item itemvalue="Python.rocognizer" />
|
||||
<item itemvalue="Python.digits_recognizer" />
|
||||
<item itemvalue="Python.coder" />
|
||||
<item itemvalue="Python.z8" />
|
||||
<item itemvalue="Python.image (1)" />
|
||||
<item itemvalue="Python.model" />
|
||||
<item itemvalue="Python.tescik" />
|
||||
</list>
|
||||
<recent_temporary>
|
||||
<list>
|
||||
<item itemvalue="Python.rocognizer" />
|
||||
<item itemvalue="Python.tescik" />
|
||||
<item itemvalue="Python.model" />
|
||||
<item itemvalue="Python.digits_recognizer" />
|
||||
<item itemvalue="Python.coder" />
|
||||
<item itemvalue="Python.z8" />
|
||||
<item itemvalue="Python.image" />
|
||||
<item itemvalue="Python.image (1)" />
|
||||
</list>
|
||||
</recent_temporary>
|
||||
</component>
|
||||
@@ -280,7 +311,14 @@
|
||||
<workItem from="1590929874289" duration="7728000" />
|
||||
<workItem from="1590938377522" duration="176000" />
|
||||
<workItem from="1590944305590" duration="10043000" />
|
||||
<workItem from="1590961284964" duration="2200000" />
|
||||
<workItem from="1590961284964" duration="2253000" />
|
||||
<workItem from="1591008066071" duration="12088000" />
|
||||
<workItem from="1591035169869" duration="618000" />
|
||||
<workItem from="1591043444436" duration="1048000" />
|
||||
<workItem from="1591111427048" duration="1041000" />
|
||||
<workItem from="1591112695695" duration="1110000" />
|
||||
<workItem from="1591113830067" duration="8945000" />
|
||||
<workItem from="1591124808042" duration="2910000" />
|
||||
</task>
|
||||
<task id="LOCAL-00001" summary="create Shelf">
|
||||
<created>1589815443652</created>
|
||||
@@ -387,7 +425,14 @@
|
||||
<option name="project" value="LOCAL" />
|
||||
<updated>1590938465634</updated>
|
||||
</task>
|
||||
<option name="localTasksCounter" value="16" />
|
||||
<task id="LOCAL-00016" summary="new tests">
|
||||
<created>1590963692385</created>
|
||||
<option name="number" value="00016" />
|
||||
<option name="presentableId" value="LOCAL-00016" />
|
||||
<option name="project" value="LOCAL" />
|
||||
<updated>1590963692385</updated>
|
||||
</task>
|
||||
<option name="localTasksCounter" value="17" />
|
||||
<servers />
|
||||
</component>
|
||||
<component name="TypeScriptGeneratedFilesManager">
|
||||
@@ -398,7 +443,19 @@
|
||||
<map>
|
||||
<entry key="MAIN">
|
||||
<value>
|
||||
<State />
|
||||
<State>
|
||||
<option name="FILTERS">
|
||||
<map>
|
||||
<entry key="branch">
|
||||
<value>
|
||||
<list>
|
||||
<option value="origin/assigning" />
|
||||
</list>
|
||||
</value>
|
||||
</entry>
|
||||
</map>
|
||||
</option>
|
||||
</State>
|
||||
</value>
|
||||
</entry>
|
||||
</map>
|
||||
@@ -422,41 +479,62 @@
|
||||
<MESSAGE value="add coder.py" />
|
||||
<MESSAGE value="detecting digits" />
|
||||
<MESSAGE value="img is cropped and transformed to torch" />
|
||||
<option name="LAST_COMMIT_MESSAGE" value="img is cropped and transformed to torch" />
|
||||
<MESSAGE value="new tests" />
|
||||
<option name="LAST_COMMIT_MESSAGE" value="new tests" />
|
||||
</component>
|
||||
<component name="WindowStateProjectService">
|
||||
<state x="115" y="162" key="#com.intellij.refactoring.safeDelete.UnsafeUsagesDialog" timestamp="1590956217731">
|
||||
<state x="525" y="214" key="#com.intellij.fileTypes.FileTypeChooser" timestamp="1591117361907">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state x="115" y="162" key="#com.intellij.refactoring.safeDelete.UnsafeUsagesDialog/0.0.1536.824@0.0.1536.824" timestamp="1590956217731" />
|
||||
<state x="525" y="214" key="#com.intellij.fileTypes.FileTypeChooser/0.0.1536.824@0.0.1536.824" timestamp="1591117361907" />
|
||||
<state x="115" y="162" key="#com.intellij.refactoring.safeDelete.UnsafeUsagesDialog" timestamp="1591113556580">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state x="115" y="162" key="#com.intellij.refactoring.safeDelete.UnsafeUsagesDialog/0.0.1536.824@0.0.1536.824" timestamp="1591113556580" />
|
||||
<state x="549" y="167" key="FileChooserDialogImpl" timestamp="1590962272315">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state x="549" y="167" key="FileChooserDialogImpl/0.0.1536.824@0.0.1536.824" timestamp="1590962272315" />
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.bottom" timestamp="1589845242796">
|
||||
<state width="724" height="264" key="GridCell.Tab.0.bottom" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.bottom/0.0.1536.824@0.0.1536.824" timestamp="1589845242796" />
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.center" timestamp="1589845242796">
|
||||
<state width="724" height="264" key="GridCell.Tab.0.bottom/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.0.center" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.center/0.0.1536.824@0.0.1536.824" timestamp="1589845242796" />
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.left" timestamp="1589845242796">
|
||||
<state width="724" height="264" key="GridCell.Tab.0.center/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.0.left" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.left/0.0.1536.824@0.0.1536.824" timestamp="1589845242796" />
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.right" timestamp="1589845242796">
|
||||
<state width="724" height="264" key="GridCell.Tab.0.left/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.0.right" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="1493" height="208" key="GridCell.Tab.0.right/0.0.1536.824@0.0.1536.824" timestamp="1589845242796" />
|
||||
<state width="724" height="264" key="GridCell.Tab.0.right/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.1.bottom" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="724" height="264" key="GridCell.Tab.1.bottom/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.1.center" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="724" height="264" key="GridCell.Tab.1.center/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.1.left" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="724" height="264" key="GridCell.Tab.1.left/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state width="724" height="264" key="GridCell.Tab.1.right" timestamp="1591028610371">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state width="724" height="264" key="GridCell.Tab.1.right/0.0.1536.824@0.0.1536.824" timestamp="1591028610371" />
|
||||
<state x="277" y="57" key="SettingsEditor" timestamp="1590443566792">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state x="277" y="57" key="SettingsEditor/0.0.1536.824@0.0.1536.824" timestamp="1590443566792" />
|
||||
<state x="361" y="145" key="Vcs.Push.Dialog.v2" timestamp="1590938471243">
|
||||
<state x="361" y="145" key="Vcs.Push.Dialog.v2" timestamp="1590963699596">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
<state x="361" y="145" key="Vcs.Push.Dialog.v2/0.0.1536.824@0.0.1536.824" timestamp="1590938471243" />
|
||||
<state x="361" y="145" key="Vcs.Push.Dialog.v2/0.0.1536.824@0.0.1536.824" timestamp="1590963699596" />
|
||||
<state x="54" y="145" width="672" height="678" key="search.everywhere.popup" timestamp="1590930912296">
|
||||
<screen x="0" y="0" width="1536" height="824" />
|
||||
</state>
|
||||
@@ -465,6 +543,7 @@
|
||||
<component name="com.intellij.coverage.CoverageDataManagerImpl">
|
||||
<SUITE FILE_PATH="coverage/AL_2020$feature_hashing.coverage" NAME="feature_hashing Coverage Results" MODIFIED="1589823737949" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/Assiging" />
|
||||
<SUITE FILE_PATH="coverage/wozek$main.coverage" NAME="main Coverage Results" MODIFIED="1589556038208" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
|
||||
<SUITE FILE_PATH="coverage/AL_2020$digits_recognizer.coverage" NAME="digits_recognizer Coverage Results" MODIFIED="1591028603228" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$/coder" />
|
||||
<SUITE FILE_PATH="coverage/AL_2020$main.coverage" NAME="main Coverage Results" MODIFIED="1589845236495" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
|
||||
<SUITE FILE_PATH="coverage/wozek$board.coverage" NAME="board Coverage Results" MODIFIED="1589210811600" SOURCE_PROVIDER="com.intellij.coverage.DefaultCoverageFileProvider" RUNNER="coverage.py" COVERAGE_BY_TEST_ENABLED="true" COVERAGE_TRACING_ENABLED="false" WORKING_DIRECTORY="$PROJECT_DIR$" />
|
||||
</component>
|
||||
|
BIN  coder/12345.png  (Before: 10 KiB)
BIN  coder/517.png  (Before: 11 KiB)
BIN  coder/55555.jpg  (Normal file, After: 13 KiB)
BIN  coder/PATH_TO_STORE_TRAIN_SET/MNIST/processed/test.pt  (Normal file)
BIN  coder/PATH_TO_STORE_TRAIN_SET/MNIST/processed/training.pt  (Normal file)
BIN  coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/t10k-images-idx3-ubyte  (Normal file)
BIN  coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/t10k-labels-idx1-ubyte  (Normal file)
BIN  coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/train-images-idx3-ubyte  (Normal file)
BIN  coder/PATH_TO_STORE_TRAIN_SET/MNIST/raw/train-labels-idx1-ubyte  (Normal file)
BIN  (Before: 42 KiB)
@@ -1,59 +0,0 @@
|
||||
import numpy as np
|
||||
import torch
|
||||
import torchvision
|
||||
import matplotlib.pyplot as plt
|
||||
from time import time
|
||||
from torchvision import datasets, transforms
|
||||
from torch import nn, optim
|
||||
import cv2
|
||||
|
||||
|
||||
transform = transforms.Compose([transforms.ToTensor(),
|
||||
transforms.Normalize((0.5,), (0.5,)),
|
||||
])
|
||||
|
||||
|
||||
|
||||
# load nn model
|
||||
input_size = 784 # = 28*28
|
||||
hidden_sizes = [128, 128, 64]
|
||||
output_size = 10
|
||||
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[1], hidden_sizes[2]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[2], output_size),
|
||||
nn.LogSoftmax(dim=-1))
|
||||
model.load_state_dict(torch.load('digit_reco_model2.pt'))
|
||||
model.eval()
|
||||
# model = torch.load('digit_reco_model2.pt')
|
||||
|
||||
if model is None:
|
||||
print("Model is not loaded.")
|
||||
else:
|
||||
print("Model is loaded.")
|
||||
|
||||
|
||||
# img from dataset
|
||||
val_set = datasets.MNIST('PATH_TO_STORE_TESTSET', download=True, train=False, transform=transform)
|
||||
|
||||
val_loader = torch.utils.data.DataLoader(val_set, batch_size=64, shuffle=True)
|
||||
|
||||
images, labels = next(iter(val_loader))
|
||||
print(type(images))
|
||||
img = images[0].view(1, 784)
|
||||
plt.imshow(images[0].numpy().squeeze(), cmap='gray_r')
|
||||
plt.show()
|
||||
|
||||
|
||||
# recognizing
|
||||
|
||||
with torch.no_grad():
|
||||
logps = model(img)
|
||||
print(logps)
|
||||
|
||||
ps = torch.exp(logps)
|
||||
probab = list(ps.numpy()[0])
|
||||
print("Predicted Digit =", probab.index(max(probab)))
|
coder/dataset/test.csv  (28001 changed lines)
coder/dataset/train.csv  (42001 changed lines)
@@ -1,111 +0,0 @@
|
||||
import numpy as np
|
||||
import torch
|
||||
import torchvision
|
||||
import matplotlib.pyplot as plt
|
||||
from time import time
|
||||
from torchvision import datasets, transforms
|
||||
from torch import nn, optim
|
||||
import cv2
|
||||
|
||||
# IMG transform
|
||||
transform = transforms.Compose([transforms.ToTensor(),
|
||||
transforms.Normalize((0.5,), (0.5,)),
|
||||
])
|
||||
|
||||
# dataset download
|
||||
train_set = datasets.MNIST('PATH_TO_STORE_TRAINSET', download=True, train=True, transform=transform)
|
||||
val_set = datasets.MNIST('PATH_TO_STORE_TESTSET', download=True, train=False, transform=transform)
|
||||
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
|
||||
val_loader = torch.utils.data.DataLoader(val_set, batch_size=64, shuffle=True)
|
||||
print(train_set[0])
|
||||
|
||||
# building nn model
|
||||
input_size = 784 # = 28*28
|
||||
hidden_sizes = [128, 128, 64]
|
||||
output_size = 10
|
||||
|
||||
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[1], hidden_sizes[2]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[2], output_size),
|
||||
nn.LogSoftmax(dim=-1))
|
||||
# print(model)
|
||||
|
||||
criterion = nn.NLLLoss()
|
||||
images, labels = next(iter(train_loader))
|
||||
images = images.view(images.shape[0], -1)
|
||||
|
||||
logps = model(images) # log probabilities
|
||||
loss = criterion(logps, labels) # calculate the NLL loss
|
||||
|
||||
|
||||
# training
|
||||
|
||||
optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.9)
|
||||
time0 = time()
|
||||
epochs = 1
|
||||
for e in range(epochs):
|
||||
running_loss = 0
|
||||
for images, labels in train_loader:
|
||||
# Flatten MNIST images into a 784 long vector
|
||||
images = images.view(images.shape[0], -1)
|
||||
|
||||
# Training pass
|
||||
optimizer.zero_grad()
|
||||
|
||||
output = model(images)
|
||||
loss = criterion(output, labels)
|
||||
|
||||
# This is where the model learns by backpropagating
|
||||
loss.backward()
|
||||
|
||||
# And optimizes its weights here
|
||||
optimizer.step()
|
||||
|
||||
running_loss += loss.item()
|
||||
else:
|
||||
print("Epoch {} - Training loss: {}".format(e + 1, running_loss / len(train_loader)))
|
||||
|
||||
print("\nTraining Time (in minutes) =", (time() - time0) / 60)
|
||||
|
||||
# testing
|
||||
|
||||
images, labels = next(iter(val_loader))
|
||||
img = images[0].view(1, 784)
|
||||
print(type(img))
|
||||
print(img.size())
|
||||
|
||||
with torch.no_grad():
|
||||
logps = model(img)
|
||||
ps = torch.exp(logps)
|
||||
probab = list(ps.numpy()[0])
|
||||
print("Predicted Digit =", probab.index(max(probab)))
|
||||
# view_classify(img.view(1, 28, 28), ps)
|
||||
|
||||
# accuracy
|
||||
correct_count, all_count = 0, 0
|
||||
for images, labels in val_loader:
|
||||
for i in range(len(labels)):
|
||||
img = images[i].view(1, 784)
|
||||
with torch.no_grad():
|
||||
logps = model(img)
|
||||
|
||||
ps = torch.exp(logps)
|
||||
probab = list(ps.numpy()[0])
|
||||
pred_label = probab.index(max(probab))
|
||||
true_label = labels.numpy()[i]
|
||||
if true_label == pred_label:
|
||||
correct_count += 1
|
||||
all_count += 1
|
||||
|
||||
print("Number Of Images Tested =", all_count)
|
||||
print("\nModel Accuracy =", (correct_count / all_count))
|
||||
|
||||
|
||||
# saving model
|
||||
|
||||
# torch.save(model.state_dict(), './digit_reco_model.pt')
|
||||
# torch.save(model.state_dict(), './digit_reco_model2.pt')
|
@@ -1,85 +0,0 @@
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import matplotlib.pyplot as plt
|
||||
from sklearn import datasets
|
||||
from sklearn.metrics import accuracy_score
|
||||
from sklearn.neural_network import MLPClassifier
|
||||
import pandas as pd
|
||||
import cv2
|
||||
import keras
|
||||
|
||||
# 28x28
|
||||
train_data = np.genfromtxt('dataset/train.csv', delimiter=',', skip_header=1, max_rows=20000, encoding='utf-8')
|
||||
test_data = np.genfromtxt('dataset/test.csv', delimiter=',', skip_header=1, max_rows=20000, encoding='utf-8')
|
||||
|
||||
# train_data = pd.read_csv('dataset/train.csv')
|
||||
# test_data = pd.read_csv('dataset/test.csv')
|
||||
|
||||
# training
|
||||
# handwritten digits
|
||||
|
||||
digits = datasets.load_digits()
|
||||
y = digits.target
|
||||
x = digits.images.reshape((len(digits.images), -1))
|
||||
|
||||
# print(type(y[0]), type(x[0]))
|
||||
# sort out the dataset, increase the layers
|
||||
|
||||
# x_train = train_data.iloc[:, 1:].values.astype('float32')
|
||||
# y_train = train_data.iloc[:, 0].values.astype('int32')
|
||||
# x_test = test_data.values.astype('float32')
|
||||
|
||||
x_train = train_data[0:10000, 1:]
|
||||
y_train = train_data[0:10000, 0]
|
||||
x_test = train_data[10001:20000, 1:]
|
||||
y_test = train_data[10001:20000, 0].astype('int')
|
||||
|
||||
print(type(y_test[0]), type(x_test[0]))
|
||||
|
||||
# x_train = x[:900]
|
||||
# y_train = y[:900]
|
||||
# x_test = x[900:]
|
||||
# y_test = y[900:]
|
||||
|
||||
# 500, 500, 500, 500, 500
|
||||
mlp = MLPClassifier(hidden_layer_sizes=(150, 100, 100, 100), activation='logistic', alpha=1e-4,
|
||||
solver='sgd', tol=0.000000000001, random_state=1,
|
||||
learning_rate_init=.1, verbose=True, max_iter=10000)
|
||||
|
||||
mlp.fit(x_train, y_train)
|
||||
predictions = mlp.predict(x_test)
|
||||
|
||||
print("Accuracy: ", accuracy_score(y_test, predictions))
|
||||
|
||||
# image
|
||||
|
||||
img = cv2.cvtColor(cv2.imread('test5.jpg'), cv2.COLOR_BGR2GRAY)
|
||||
img = cv2.blur(img, (9, 9))  # improves quality
|
||||
img = cv2.resize(img, (28, 28), interpolation=cv2.INTER_AREA)
|
||||
img = img.reshape((len(img), -1))
|
||||
|
||||
# print(type(img))
|
||||
# print(img.shape)
|
||||
# plt.imshow(img ,cmap='binary')
|
||||
# plt.show()
|
||||
|
||||
data = []
|
||||
|
||||
rows, cols = img.shape
|
||||
for i in range(rows):
|
||||
for j in range(cols):
|
||||
k = img[i, j]
|
||||
if k > 225:
|
||||
k = 0  # no black
|
||||
else:
|
||||
k = 255
|
||||
|
||||
data.append(k)
|
||||
|
||||
data = np.asarray(data, dtype=np.float64)
|
||||
# print(data)
|
||||
print(type(data))
|
||||
|
||||
predictions = mlp.predict([data])
|
||||
|
||||
print("Liczba to:", predictions[0].astype('int'))
|
BIN  coder/model.pt  (Normal file)
coder/model.py  (Normal file, 89 lines)
@@ -0,0 +1,89 @@
import numpy as np
import torch
import torchvision
import matplotlib.pyplot as plt
from time import time
from torchvision import datasets, transforms
from torch import nn, optim
import torch.nn.functional as F

import cv2

from nn_model import Net

'''
Q:
what is batch?

'''
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
model = Net()
print("Model loaded.")
optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.5)
criterion = nn.NLLLoss()

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,)),
                                ])

train_set = datasets.MNIST('PATH_TO_STORE_TRAIN_SET', download=True, train=True, transform=transform)
test_set = datasets.MNIST('PATH_TO_STORE_TEST_SET', download=True, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size_test, shuffle=True)

print("Data sets loaded.")

train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]


def train_model(epoch):
    print("Training model.")
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append(
                (batch_idx * 64) + ((epoch - 1) * len(train_loader.dataset)))


def test_model():
    print("Testing model.")
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = model(data)
            test_loss += F.nll_loss(output, target, size_average=False).item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))


def create_model():
    test_model()
    for epoch in range(1, n_epochs + 1):
        train_model(epoch)
        test_model()

    torch.save(model.state_dict(), './model.pt')


create_model()
coder/nn_model.py  (Normal file, 22 lines)
@@ -0,0 +1,22 @@
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)
@@ -4,62 +4,60 @@ import torch
|
||||
from PIL.Image import Image
|
||||
from torch import nn
|
||||
from torchvision.transforms import transforms
|
||||
from torch.autograd import Variable
|
||||
import numpy as np
|
||||
|
||||
def white_bg_square(img):
|
||||
"return a white-background-color image having the img in exact center"
|
||||
size = (max(img.size),)*2
|
||||
layer = Image.new('RGB', size, (255, 255, 255))
|
||||
layer.paste(img, tuple(map(lambda x:(x[0]-x[1])/2, zip(size, img.size))))
|
||||
return layer
|
||||
from nn_model import Net
|
||||
|
||||
code = []
|
||||
path = "test5.jpg"
|
||||
|
||||
transform = transforms.Compose([transforms.ToTensor(),
|
||||
transforms.Normalize((0.5,), (0.5,)),
|
||||
])
|
||||
def recognizer(a_path):
|
||||
code = []
|
||||
path = a_path
|
||||
|
||||
img = cv2.imread(path)
|
||||
transform = transforms.Compose([transforms.ToTensor(),
|
||||
transforms.Normalize((0.5,), (0.5,)),
|
||||
])
|
||||
|
||||
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
img_gray = cv2.GaussianBlur(img_gray, (5, 5), 0)
|
||||
img = cv2.imread(path)
|
||||
|
||||
ret, im_th = cv2.threshold(img_gray, 90, 255, cv2.THRESH_BINARY_INV)
|
||||
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
img_gray = cv2.GaussianBlur(img_gray, (5, 5), 0)
|
||||
|
||||
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
||||
ret, im_th = cv2.threshold(img_gray, 90, 255, cv2.THRESH_BINARY_INV)
|
||||
|
||||
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
|
||||
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
|
||||
|
||||
# load nn model
|
||||
input_size = 784 # = 28*28
|
||||
hidden_sizes = [128, 128, 64]
|
||||
output_size = 10
|
||||
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[0], hidden_sizes[1]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[1], hidden_sizes[2]),
|
||||
nn.ReLU(),
|
||||
nn.Linear(hidden_sizes[2], output_size),
|
||||
nn.LogSoftmax(dim=-1))
|
||||
model.load_state_dict(torch.load('digit_reco_model2.pt'))
|
||||
model.eval()
|
||||
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
|
||||
|
||||
for rect in rects:
|
||||
# Crop image
|
||||
crop_img = img[rect[1]:rect[1] + rect[3] + 10, rect[0]:rect[0] + rect[2] + 10, 0]
|
||||
# Resize the image
|
||||
roi = cv2.resize(crop_img, (28, 28), interpolation=cv2.INTER_LINEAR)
|
||||
roi = cv2.dilate(roi, (3, 3))
|
||||
plt.imshow(roi)
|
||||
plt.show()
|
||||
im = transform(roi)
|
||||
im = im.view(1, 784)
|
||||
with torch.no_grad():
|
||||
logps = model(im.float())
|
||||
ps = torch.exp(logps)
|
||||
probab = list(ps.numpy()[0])
|
||||
print("Predicted Digit =", probab.index(max(probab)))
|
||||
# load nn model
|
||||
input_size = 784 # = 28*28
|
||||
hidden_sizes = [128, 128, 64]
|
||||
output_size = 10
|
||||
model = Net()
|
||||
model.load_state_dict(torch.load('model.pt'))
|
||||
model.eval()
|
||||
|
||||
cv2.imshow("Code", img)
|
||||
cv2.waitKey()
|
||||
for rect in rects:
|
||||
# Crop image
|
||||
crop_img = img[rect[1]:rect[1] + rect[3] + 10, rect[0]:rect[0] + rect[2] + 10, 0]
|
||||
# Resize the image
|
||||
roi = cv2.resize(crop_img, (28, 28), interpolation=cv2.INTER_CUBIC)
|
||||
# roi = cv2.dilate(roi, (3, 3))
|
||||
# plt.imshow(roi)
|
||||
# plt.show()
|
||||
im = transform(roi)
|
||||
im = im.view(1, 1, 28, 28)
|
||||
with torch.no_grad():
|
||||
logps = model(im)
|
||||
ps = torch.exp(logps)
|
||||
probab = list(ps.numpy()[0])
|
||||
code.append(probab.index(max(probab)))
|
||||
|
||||
print(code)
|
||||
# cv2.imshow("Code", img)
|
||||
# cv2.waitKey()
|
||||
return code
|
||||
|
||||
|
||||
recognizer("55555.jpg")
|
||||
# print(recognizer("55555.jpg"))
|
||||
|
BIN  coder/test1.png  (Before: 6.3 KiB)
BIN  coder/test2.png  (Before: 9.9 KiB)
BIN  coder/test3.png  (Before: 6.9 KiB)
BIN  coder/test5.jpg  (Before: 13 KiB)