├── .codebeatignore
├── MANIFEST.in
├── data
│   ├── dog.jpg
│   ├── yolov3.data
│   └── coco.names
├── requirements.txt
├── docs
│   ├── NumPyNet
│   │   ├── images
│   │   │   ├── maxpool.gif
│   │   │   ├── average_3-2.png
│   │   │   ├── maxpool_3_2.png
│   │   │   ├── activation_elu.png
│   │   │   ├── average_30-20.png
│   │   │   ├── dropout_prob10.png
│   │   │   ├── maxpool_30_20.png
│   │   │   ├── activation_relu.png
│   │   │   ├── activation_logistic.png
│   │   │   └── shortcut_connection.png
│   │   └── layers
│   │       ├── input_layer.md
│   │       ├── l1norm_layer.md
│   │       ├── l2norm_layer.md
│   │       ├── logistic_layer.md
│   │       ├── route_layer.md
│   │       ├── upsample_layer.md
│   │       ├── dropout_layer.md
│   │       ├── activation_layer.md
│   │       ├── cost_layer.md
│   │       └── shortcut_layer.md
│   ├── source
│   │   ├── API
│   │   │   ├── modules.rst
│   │   │   ├── utils
│   │   │   │   ├── box.rst
│   │   │   │   ├── image.rst
│   │   │   │   ├── utils.rst
│   │   │   │   ├── parser.rst
│   │   │   │   ├── metrics.rst
│   │   │   │   ├── network.rst
│   │   │   │   ├── data.rst
│   │   │   │   ├── detection.rst
│   │   │   │   ├── optimizer.rst
│   │   │   │   ├── video.rst
│   │   │   │   ├── fmath.rst
│   │   │   │   └── modules.rst
│   │   │   └── layers
│   │   │       ├── cost_layer.rst
│   │   │       ├── input_layer.rst
│   │   │       ├── l1norm_layer.rst
│   │   │       ├── l2norm_layer.rst
│   │   │       ├── route_layer.rst
│   │   │       ├── avgpool_layers.rst
│   │   │       ├── dropout_layer.rst
│   │   │       ├── logistic_layer.rst
│   │   │       ├── maxpool_layer.rst
│   │   │       ├── shortcut_layer.rst
│   │   │       ├── batchnorm_layer.rst
│   │   │       ├── activation_layer.rst
│   │   │       ├── connected_layer.rst
│   │   │       ├── convolutional_layer.rst
│   │   │       └── modules.rst
│   │   ├── references.rst
│   │   └── conf.py
│   ├── Makefile
│   ├── make.bat
│   ├── authors.md
│   └── _config.yml
├── testing
│   ├── __init__.py
│   ├── test_network.py
│   ├── test_utils.py
│   ├── test_gru_layer.py
│   ├── test_parser.py
│   ├── test_lstm_layer.py
│   ├── test_metrics.py
│   ├── test_l2norm_layer.py
│   ├── test_l1norm_layer.py
│   ├── test_input_layer.py
│   ├── test_shuffler_layer.py
│   ├── test_fmath.py
│   ├── test_dropout_layer.py
│   ├── test_softmax_layer.py
│   └── test_avgpool_layer.py
├── .codebeatsettings
├── .codacy.yml
├── AUTHORS.md
├── .github
│   ├── PULL_REQUEST_TEMPLATE
│   │   └── PULL_REQUEST_TEMPLATE.md
│   ├── ISSUE_TEMPLATE
│   │   └── ISSUE_TEMPLATE.md
│   ├── CONTRIBUTING.md
│   ├── workflows
│   │   └── python.yml
│   └── CODE_OF_CONDUCT.md
├── NumPyNet
│   ├── __version__.py
│   ├── __init__.py
│   ├── layers
│   │   ├── __init__.py
│   │   ├── gru_layer.py
│   │   ├── base.py
│   │   ├── route_layer.py
│   │   ├── input_layer.py
│   │   ├── l2norm_layer.py
│   │   └── l1norm_layer.py
│   ├── image_utils.py
│   ├── rnn_utils.py
│   ├── exception.py
│   ├── metrics.py
│   ├── detection.py
│   ├── video.py
│   └── fmath.py
├── .coveragerc
├── SOURCES.txt
├── LICENSE.md
├── appveyor.yml
├── .gitignore
├── .travis.yml
├── examples
│   ├── lstm_signal.py
│   ├── rnn_signal.py
│   └── MNIST.py
└── setup.py
/.codebeatignore:
--------------------------------------------------------------------------------
1 | *.md
2 | *.bib
3 | testing/*
4 | timing/*
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.md
2 | include *.txt
3 | include *.yml
4 |
--------------------------------------------------------------------------------
/data/dog.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/data/dog.jpg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | opencv-python<=4.2.0.32
2 | numpy>=1.15
3 | pillow
4 | pytest
5 | enum34
6 | configparser
7 |
--------------------------------------------------------------------------------
/docs/NumPyNet/images/maxpool.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/maxpool.gif
--------------------------------------------------------------------------------
/docs/NumPyNet/images/average_3-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/average_3-2.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/maxpool_3_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/maxpool_3_2.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/activation_elu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/activation_elu.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/average_30-20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/average_30-20.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/dropout_prob10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/dropout_prob10.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/maxpool_30_20.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/maxpool_30_20.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/activation_relu.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/activation_relu.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/activation_logistic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/activation_logistic.png
--------------------------------------------------------------------------------
/docs/NumPyNet/images/shortcut_connection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Nico-Curti/NumPyNet/HEAD/docs/NumPyNet/images/shortcut_connection.png
--------------------------------------------------------------------------------
/docs/source/API/modules.rst:
--------------------------------------------------------------------------------
1 | NumPyNet API
2 | ============
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 |    layers/modules
8 |    utils/modules
9 |
--------------------------------------------------------------------------------
/data/yolov3.data:
--------------------------------------------------------------------------------
1 | cfg = cfg/yolov3.cfg
2 | weights = data/yolov3.weights
3 | names = data/coco.names
4 | thresh = 0.5
5 | hier = 0.5
6 | classes = 80
7 |
--------------------------------------------------------------------------------
/docs/source/API/utils/box.rst:
--------------------------------------------------------------------------------
1 | Box
2 | ---
3 |
4 | .. automodule:: box
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/image.rst:
--------------------------------------------------------------------------------
1 | Image
2 | -----
3 |
4 | .. automodule:: image
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/utils.rst:
--------------------------------------------------------------------------------
1 | Utils
2 | -----
3 |
4 | .. automodule:: utils
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/parser.rst:
--------------------------------------------------------------------------------
1 | Parser
2 | ------
3 |
4 | .. automodule:: parser
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/metrics.rst:
--------------------------------------------------------------------------------
1 | Metrics
2 | --------
3 |
4 | .. automodule:: metrics
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/network.rst:
--------------------------------------------------------------------------------
1 | Network
2 | --------
3 |
4 | .. automodule:: network
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/data.rst:
--------------------------------------------------------------------------------
1 | DataGenerator
2 | --------------
3 |
4 | .. automodule:: data
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/detection.rst:
--------------------------------------------------------------------------------
1 | Detection
2 | ----------
3 |
4 | .. automodule:: detection
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/optimizer.rst:
--------------------------------------------------------------------------------
1 | Optimizer
2 | ----------
3 |
4 | .. automodule:: optimizer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/utils/video.rst:
--------------------------------------------------------------------------------
1 | VideoCapture
2 | ------------
3 |
4 | .. automodule:: video
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/layers/cost_layer.rst:
--------------------------------------------------------------------------------
1 | Cost layer
2 | ----------------
3 |
4 | .. automodule:: layers.cost_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/input_layer.rst:
--------------------------------------------------------------------------------
1 | Input layer
2 | -------------
3 |
4 | .. automodule:: layers.input_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/l1norm_layer.rst:
--------------------------------------------------------------------------------
1 | L1Norm layer
2 | -------------
3 |
4 | .. automodule:: layers.l1norm_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/l2norm_layer.rst:
--------------------------------------------------------------------------------
1 | L2Norm layer
2 | -------------
3 |
4 | .. automodule:: layers.l2norm_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/route_layer.rst:
--------------------------------------------------------------------------------
1 | Route layer
2 | -------------
3 |
4 | .. automodule:: layers.route_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/testing/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | __author__ = ['Nico Curti', 'Mattia Ceccarelli']
5 | __email__ = ['nico.curti2@unibo.it', 'mattia.ceccarelli3@studio.unibo.it']
6 |
--------------------------------------------------------------------------------
/docs/source/API/layers/avgpool_layers.rst:
--------------------------------------------------------------------------------
1 | Avgpool layer
2 | -------------
3 |
4 | .. automodule:: layers.avgpool_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/dropout_layer.rst:
--------------------------------------------------------------------------------
1 | Dropout layer
2 | -------------
3 |
4 | .. automodule:: layers.dropout_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/logistic_layer.rst:
--------------------------------------------------------------------------------
1 | Logistic layer
2 | --------------
3 |
4 | .. automodule:: layers.logistic_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/maxpool_layer.rst:
--------------------------------------------------------------------------------
1 | Maxpool layer
2 | -------------
3 |
4 | .. automodule:: layers.maxpool_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/shortcut_layer.rst:
--------------------------------------------------------------------------------
1 | Shortcut layer
2 | --------------
3 |
4 | .. automodule:: layers.shortcut_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/batchnorm_layer.rst:
--------------------------------------------------------------------------------
1 | Batchnorm layer
2 | ---------------
3 |
4 | .. automodule:: layers.batchnorm_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/utils/fmath.rst:
--------------------------------------------------------------------------------
1 | Fast math operations
2 | --------------------
3 |
4 | .. automodule:: fmath
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
10 |
--------------------------------------------------------------------------------
/docs/source/API/layers/activation_layer.rst:
--------------------------------------------------------------------------------
1 | Activation layer
2 | ----------------
3 |
4 | .. automodule:: layers.activation_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/layers/connected_layer.rst:
--------------------------------------------------------------------------------
1 | Connected layer
2 | ----------------
3 |
4 | .. automodule:: layers.connected_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/references.rst:
--------------------------------------------------------------------------------
1 | References
2 | ----------
3 |
4 | - Travis Oliphant. "NumPy: A guide to NumPy", USA: Trelgol Publishing, 2006.
5 | - Bradski, G. "The OpenCV Library", Dr. Dobb's Journal of Software Tools, 2000.
6 |
--------------------------------------------------------------------------------
/.codebeatsettings:
--------------------------------------------------------------------------------
1 | {
2 | "PYTHON": {
3 | "BLOCK_NESTING": [5, 6, 7, 8],
4 | "ABC": [30, 50, 70, 80],
5 | "LOC" : [35, 50, 70, 90],
6 | "TOTAL_COMPLEXITY" : [150, 250, 350, 500],
7 | "CYCLO" : [20, 35, 50, 75]
8 | }
9 | }
--------------------------------------------------------------------------------
/docs/source/API/layers/convolutional_layer.rst:
--------------------------------------------------------------------------------
1 | Convolutional layer
2 | -------------------
3 |
4 | .. automodule:: layers.convolutional_layer
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 | :inherited-members:
9 |
--------------------------------------------------------------------------------
/docs/source/API/utils/modules.rst:
--------------------------------------------------------------------------------
1 | NumPyNet utility
2 | ================
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | box
8 | data
9 | detection
10 | fmath
11 | image
12 | metrics
13 | network
14 | optimizer
15 | parser
16 | utils
17 | video
18 |
--------------------------------------------------------------------------------
/.codacy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | engines:
3 | rubocop:
4 | enabled: true
5 |
6 | pylint:
7 | enabled: true
8 | python_version: 3
9 |
10 | exclude_paths:
11 | - '.github/**'
12 | - '**.md'
13 | - 'docs/**'
14 | - NumPyNet/__init__.py
15 | - NumPyNet/__version__.py
16 | - NumPyNet/layers/__init__.py
17 | - testing/**
18 | - timing/**
19 |
--------------------------------------------------------------------------------
/AUTHORS.md:
--------------------------------------------------------------------------------
1 | # Authors #
2 |
3 | ----------
4 | - Mattia Ceccarelli - Department of Physics and Astronomy, University of Bologna ([mattia.ceccarelli3@studio.unibo.it](mailto:mattia.ceccarelli3@studio.unibo.it))
5 | - Nico Curti - Dept. of Experimental, Diagnostic and Specialty Medicine of Bologna University ([nico.curti2@unibo.it](mailto:nico.curti2@unibo.it))
6 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
6 |
7 | #### This PR changes :grey_question:
8 |
9 |
10 | #### Any other comments?
11 |
--------------------------------------------------------------------------------
/NumPyNet/__version__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
5 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
6 |
7 | # update these numbers after important commits/edits or updates of the modules
8 | VERSION = (1, 0, 0)
9 |
10 | __version__ = '.'.join(map(str, VERSION))
11 |
--------------------------------------------------------------------------------
/docs/source/API/layers/modules.rst:
--------------------------------------------------------------------------------
1 | NumPyNet layers
2 | ===============
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | activation_layer
8 | avgpool_layer
9 | batchnorm_layer
10 | connected_layer
11 | convolutional_layer
12 | cost_layer
13 | dropout_layer
14 | input_layer
15 | l1norm_layer
16 | l2norm_layer
17 | logistic_layer
18 | maxpool_layer
19 | route_layer
20 | shortcut_layer
21 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | branch = True
3 | omit =
4 | */NumPyNet/__init__.py
5 | */NumPyNet/__version__.py
6 | */NumPyNet/layers/__init__.py
7 | [report]
8 | # Regexes for lines to exclude from consideration
9 | exclude_lines =
10 | pragma: no cover
11 |
12 | # Don't complain if tests don't hit defensive assertion code:
13 | raise AssertionError
14 | raise NotImplementedError
15 |
16 | # Don't complain if non-runnable code isn't run:
17 | if 0:
18 | if __name__ == .__main__.:
19 |
20 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/NumPyNet/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | # Import all the objects in the package
5 |
6 | from __future__ import division
7 | from __future__ import print_function
8 |
9 | from .activations import *
10 | from .box import Box
11 | from .data import DataGenerator
12 | from .detection import Detection
13 | from .image import Image
14 | from .image_utils import normalization
15 | from .image_utils import image_utils
16 | from .network import Network
17 |
18 | from . import parser
19 | from . import rnn_utils
20 | from .utils import print_statistics
21 | from .video import VideoCapture
22 |
23 | from .__version__ import __version__
24 |
25 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
26 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
27 |
28 | # aliases
29 |
30 | Model = Network
31 |
32 | # define FMATH
33 | ENABLE_FMATH = False
34 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/data/coco.names:
--------------------------------------------------------------------------------
1 | person
2 | bicycle
3 | car
4 | motorbike
5 | aeroplane
6 | bus
7 | train
8 | truck
9 | boat
10 | traffic light
11 | fire hydrant
12 | stop sign
13 | parking meter
14 | bench
15 | bird
16 | cat
17 | dog
18 | horse
19 | sheep
20 | cow
21 | elephant
22 | bear
23 | zebra
24 | giraffe
25 | backpack
26 | umbrella
27 | handbag
28 | tie
29 | suitcase
30 | frisbee
31 | skis
32 | snowboard
33 | sports ball
34 | kite
35 | baseball bat
36 | baseball glove
37 | skateboard
38 | surfboard
39 | tennis racket
40 | bottle
41 | wine glass
42 | cup
43 | fork
44 | knife
45 | spoon
46 | bowl
47 | banana
48 | apple
49 | sandwich
50 | orange
51 | broccoli
52 | carrot
53 | hot dog
54 | pizza
55 | donut
56 | cake
57 | chair
58 | sofa
59 | pottedplant
60 | bed
61 | diningtable
62 | toilet
63 | tvmonitor
64 | laptop
65 | mouse
66 | remote
67 | keyboard
68 | cell phone
69 | microwave
70 | oven
71 | toaster
72 | sink
73 | refrigerator
74 | book
75 | clock
76 | vase
77 | scissors
78 | teddy bear
79 | hair drier
80 | toothbrush
81 |
--------------------------------------------------------------------------------
/docs/authors.md:
--------------------------------------------------------------------------------
1 | # Authors
2 |
3 | ## Nico Curti
4 |
5 | About me:
6 | - **Name**: Nico
7 | - **Surname**: Curti
8 | - **Profession**: PhD at Dept. of Experimental, Diagnostic and Specialty Medicine of Bologna University
9 | - **University**: University of Bologna
10 | - **Location**: Italy
11 | - **Web page**: [git](https://github.com/Nico-Curti), [unibo](https://www.unibo.it/sitoweb/nico.curti2)
12 | - **Contact me**: [email](mailto:nico.curti2@unibo.it)
13 |
14 | ## Mattia Ceccarelli
15 |
16 | About me:
17 | - **Name**: Mattia
18 | - **Surname**: Ceccarelli
19 | - **Profession**: Student at Physics and Astronomy Department in Bologna
20 | - **University**: University of Bologna
21 | - **Location**: Italy
22 | - **Web page**: [git](https://github.com/Mat092)
23 | - **Contact me**: [email](mailto:mattia.ceccarelli3@studio.unibo.it)
24 |
--------------------------------------------------------------------------------
/SOURCES.txt:
--------------------------------------------------------------------------------
1 | setup.py
2 | NumPyNet/__init__.py
3 | NumPyNet/__version__.py
4 | NumPyNet/activations.py
5 | NumPyNet/box.py
6 | NumPyNet/data.py
7 | NumPyNet/detection.py
8 | NumPyNet/exception.py
9 | NumPyNet/fmath.py
10 | NumPyNet/image.py
11 | NumPyNet/image_utils.py
12 | NumPyNet/metrics.py
13 | NumPyNet/network.py
14 | NumPyNet/optimizer.py
15 | NumPyNet/parser.py
16 | NumPyNet/rnn_utils.py
17 | NumPyNet/utils.py
18 | NumPyNet/video.py
19 | NumPyNet/layers/__init__.py
20 | NumPyNet/layers/activation_layer.py
21 | NumPyNet/layers/avgpool_layer.py
22 | NumPyNet/layers/batchnorm_layer.py
23 | NumPyNet/layers/connected_layer.py
24 | NumPyNet/layers/convolutional_layer.py
25 | NumPyNet/layers/cost_layer.py
26 | NumPyNet/layers/dropout_layer.py
27 | NumPyNet/layers/input_layer.py
28 | NumPyNet/layers/l1norm_layer.py
29 | NumPyNet/layers/l2norm_layer.py
30 | NumPyNet/layers/logistic_layer.py
31 | NumPyNet/layers/maxpool_layer.py
32 | NumPyNet/layers/rnn_layer.py
33 | NumPyNet/layers/route_layer.py
34 | NumPyNet/layers/shortcut_layer.py
35 | NumPyNet/layers/shuffler_layer.py
36 | NumPyNet/layers/softmax_layer.py
37 | NumPyNet/layers/upsample_layer.py
38 | NumPyNet/layers/yolo_layer.py
39 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/ISSUE_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
4 |
5 | #### Detailed Description :grey_question:
6 |
7 |
8 | #### Steps/Code to Reproduce :grey_question:
9 |
15 |
16 | #### Expected Behavior :grey_question:
17 |
18 |
19 | #### Actual Behavior :grey_question:
20 |
21 |
22 | #### Operating System / Platform :grey_question:
23 |
29 |
30 | #### Python Version :grey_question:
31 |
35 |
36 | #### NumPyNet Version (`NumPyNet.__version__`) :grey_question:
37 |
41 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | The NumPyNet package is licensed under the MIT "Expat" License:
2 |
3 | > Copyright (c) 2019: Nico Curti and Mattia Ceccarelli.
4 | >
5 | > Permission is hereby granted, free of charge, to any person obtaining a copy
6 | > of this software and associated documentation files (the "Software"), to deal
7 | > in the Software without restriction, including without limitation the rights
8 | > to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | > copies of the Software, and to permit persons to whom the Software is
10 | > furnished to do so, subject to the following conditions:
11 | >
12 | > The above copyright notice and this permission notice shall be included in all
13 | > copies or substantial portions of the Software.
14 | >
15 | > THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | > IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | > FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | > AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | > LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | > OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | > SOFTWARE.
22 | >
23 |
--------------------------------------------------------------------------------
/testing/test_network.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from NumPyNet.metrics import mean_accuracy_score
8 | from NumPyNet.optimizer import Adam
9 | from NumPyNet.network import Network
10 | from NumPyNet.exception import MetricsError
11 |
12 | import pytest
13 |
14 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
15 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
16 |
17 |
18 | class TestNetwork:
19 | '''
20 | Tests:
21 | - add default metrics
22 | '''
23 |
24 | def test_add_metrics (self):
25 |
26 | check_function_equality = lambda f1, f2 : f1.__code__.co_code == f2.__code__.co_code
27 |
28 | custom_metrics_wrong = lambda y_true, y_pred, a : None
29 | custom_metrics_default = lambda y_true, y_pred, a=3.14 : None
30 |
31 | model = Network(batch=42, input_shape=(1, 1, 1))
32 |
33 | model.compile(optimizer=Adam(), metrics=[mean_accuracy_score])
34 | assert model.metrics == [mean_accuracy_score]
35 | assert all(check_function_equality(x1, x2) for x1, x2 in zip(model.metrics, [mean_accuracy_score]))
36 |
37 | model.compile(optimizer=Adam(), metrics=[custom_metrics_default])
38 | assert model.metrics == [custom_metrics_default]
39 | assert all(check_function_equality(x1, x2) for x1, x2 in zip(model.metrics, [custom_metrics_default]))
40 |
41 | with pytest.raises(MetricsError):
42 | model.compile(optimizer=Adam(), metrics=[custom_metrics_wrong])
43 |
44 |
--------------------------------------------------------------------------------
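The test above pins down a useful property of `Network.compile`: a metric must be callable with exactly the two arguments `y_true` and `y_pred`, and any extra parameter needs a default value, otherwise a `MetricsError` is raised. A minimal sketch of a compliant custom metric (the function name and the `threshold` parameter are illustrative, not part of the repository):

```python
import numpy as np

def my_binary_accuracy(y_true, y_pred, threshold=0.5):
  # extra arguments are allowed only with defaults, as the test above verifies
  return np.mean((y_pred > threshold) == (y_true > threshold))

# model.compile(optimizer=Adam(), metrics=[my_binary_accuracy])  # would be accepted
```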
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | site_name: NumPyNet
2 | repo_url: https://github.com/Nico-Curti/NumPyNet
3 | site_description: Neural Networks in Pure NumPy
4 | site_author: https://www.unibo.it/sitoweb/nico.curti2
5 |
6 | extra_javascript:
7 | - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML
8 | - assets/mathjaxhelper.js
9 |
10 | extra:
11 | social:
12 | - type: 'github'
13 | link: 'https://github.com/Nico-Curti'
14 | - type: 'Unibo'
15 | link: 'https://www.unibo.it/sitoweb/nico.curti2'
16 |
17 | markdown_extensions:
18 | - extra
19 | - tables
20 | - fenced_code
21 | - mdx_math
22 |
23 | plugins:
24 | - jekyll-relative-links
25 | relative_links:
26 | enabled: true
27 | collections: true
28 |
29 | include:
30 | - index.md
31 | - authors.md
32 | - NumPyNet/layers/activation_layer.md
33 | - NumPyNet/layers/avgpool_layer.md
34 | - NumPyNet/layers/batchnorm_layer.md
35 | - NumPyNet/layers/connected_layer.md
36 | - NumPyNet/layers/convolutional_layer.md
37 | - NumPyNet/layers/cost_layer.md
38 | - NumPyNet/layers/dropout_layer.md
39 | - NumPyNet/layers/input_layer.md
40 | - NumPyNet/layers/l1norm_layer.md
41 | - NumPyNet/layers/l2norm_layer.md
42 | - NumPyNet/layers/logistic_layer.md
43 | - NumPyNet/layers/maxpool_layer.md
44 | - NumPyNet/layers/pixelshuffle_layer.md
45 | - NumPyNet/layers/route_layer.md
46 | - NumPyNet/layers/shortcut_layer.md
47 | - NumPyNet/layers/upsample_layer.md
48 | - NumPyNet/layers/yolo_layer.md
49 |
50 | theme: jekyll-theme-slate
51 |
--------------------------------------------------------------------------------
/NumPyNet/layers/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | # import layer objects
5 |
6 | from .activation_layer import Activation_layer
7 | from .avgpool_layer import Avgpool_layer
8 | from .batchnorm_layer import BatchNorm_layer
9 | from .connected_layer import Connected_layer
10 | from .convolutional_layer import Convolutional_layer
11 | from .cost_layer import Cost_layer
12 | from .dropout_layer import Dropout_layer
13 | from .input_layer import Input_layer
14 | from .l1norm_layer import L1Norm_layer
15 | from .l2norm_layer import L2Norm_layer
16 | from .logistic_layer import Logistic_layer
17 | from .lstm_layer import LSTM_layer
18 | from .maxpool_layer import Maxpool_layer
19 | from .rnn_layer import RNN_layer
20 | from .route_layer import Route_layer
21 | from .shortcut_layer import Shortcut_layer
22 | from .simple_rnn_layer import SimpleRNN_layer
23 | from .shuffler_layer import Shuffler_layer
24 | from .softmax_layer import Softmax_layer
25 | from .upsample_layer import Upsample_layer
26 | from .yolo_layer import Yolo_layer
27 |
28 | # Alias (keras)
29 |
30 | AvgPool2D = Avgpool_layer
31 | Batchnorm = BatchNorm_layer
32 | Dense = Connected_layer
33 | Conv2D = Convolutional_layer
34 | Dropout = Dropout_layer
35 | L1Normalization = L1Norm_layer
36 | L2Normalization = L2Norm_layer
37 | LSTM = LSTM_layer
38 | MaxPool2D = Maxpool_layer
39 | concatenate = Route_layer
40 | RNN = RNN_layer
41 | Add = Shortcut_layer
42 | SimpleRNN = SimpleRNN_layer
43 | SoftMax = Softmax_layer
44 | UpSampling2D = Upsample_layer
45 |
46 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
47 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
48 |
49 |
--------------------------------------------------------------------------------
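Because the Keras-style names at the bottom of this file are plain assignments, each alias is literally the same class object as the native layer, so the two spellings are interchangeable. A quick sketch (assuming the package is installed):

```python
from NumPyNet.layers import Dense, Connected_layer, Conv2D, Convolutional_layer

# the aliases are bindings to the same classes, not wrappers
assert Dense is Connected_layer
assert Conv2D is Convolutional_layer
```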
/testing/test_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.utils import to_categorical
10 | from NumPyNet.utils import from_categorical
11 |
12 | import numpy as np
13 | import pytest
14 | from hypothesis import strategies as st
15 | from hypothesis import given
16 | from hypothesis import settings
17 | from hypothesis import example
18 |
19 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
20 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
21 |
22 |
23 | class TestUtils:
24 | '''
25 | Test functions in the utils.py file
26 |
27 | -to_categorical
28 | -from_categorical
29 | '''
30 |
31 | @given(size = st.integers(min_value=10, max_value=100),
32 | num_labels = st.integers(min_value=1, max_value=120))
33 | @settings(max_examples=100, deadline=None)
34 | def test_to_categorical(self, size, num_labels):
35 |
36 | label = np.random.randint(low=0, high=num_labels, size=(size,))
37 |
38 | categorical_tf = tf.keras.utils.to_categorical(label, num_classes=None)
39 | categorical_np = to_categorical(label)
40 |
41 | np.testing.assert_allclose(categorical_tf, categorical_np)
42 |
43 | @given(size = st.integers(min_value=10, max_value=100),
44 | num_labels = st.integers(min_value=0, max_value=120))
45 | @settings(max_examples=100, deadline=None)
46 | def test_from_categorical(self, size, num_labels):
47 |
48 | label = np.random.uniform(low=0, high=num_labels, size=(size,))
49 |
50 | categorical_tf = tf.keras.utils.to_categorical(label, num_classes=None)
51 | categorical_np = to_categorical(label)
52 |
53 | np.testing.assert_allclose(categorical_tf, categorical_np)
54 |
55 | fromlabel_tf = tf.math.argmax(categorical_tf, axis=-1)
56 | fromlabel_np = from_categorical(categorical_np)
57 |
58 | np.testing.assert_allclose(fromlabel_tf, fromlabel_np)
59 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution
2 |
3 | Any contribution is more than welcome :heart:. Just file an [issue](https://github.com/Nico-Curti/NumPyNet/blob/master/ISSUE_TEMPLATE.md) or a [pull request](https://github.com/Nico-Curti/NumPyNet/blob/master/PULL_REQUEST_TEMPLATE.md) and we will check it ASAP!
4 |
5 | ## Before start contributing
6 |
7 | - Make sure you agree to contribute your code under `NumPyNet` [license](https://github.com/Nico-Curti/NumPyNet/blob/master/LICENSE.md)
8 |
9 | - If you are going to fix a bug, check that it still exists and that you are working with the latest version of the `NumPyNet` library.
10 |
11 | - Make sure that no one else is already working on the same issue; if someone is, you can offer support or suggestions in the issue.
12 |
13 | - If you have any questions about the library, file an issue following the same criteria described in the previous step.
14 |
15 | ## Pull Request and Contribution
16 |
17 | Please follow these steps to create a pull request that can be processed as quickly as possible:
18 |
19 | 1. Install Git.
20 |
21 | 2. Create your account on Github.
22 |
23 | 3. Fork [this](https://github.com/Nico-Curti/NumPyNet) repository.
24 |
25 | 4. Create your feature branch (under `dev` branch).
26 |
27 | 5. Choose a task for yourself. It could be a bugfix or some new code.
28 |
29 | 6. Add documentation (`docstring`) to your new functions/methods.
30 |
31 | 7. Add tests for your contributions.
32 |
33 | 8. Pass **ALL** CI tests.
34 |
35 | 9. Submit a Pull Request into `dev` (following the pull request [template](https://github.com/Nico-Curti/NumPyNet/blob/master/.github/PULL_REQUEST_TEMPLATE/PULL_REQUEST_TEMPLATE.md)).
36 |
37 | ## Merging
38 |
39 | 1. The authors will review your pull request as soon as possible and reply to your messages.
40 |
41 | 2. Make sure you get credited at the end of the merge: we have a lot of projects to follow and we may forget to add your name to `AUTHORS.md`. Please do not hesitate to remind us!
42 |
--------------------------------------------------------------------------------
/NumPyNet/image_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from enum import Enum
5 |
6 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
7 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
8 |
9 |
10 | class normalization (int, Enum):
11 |
12 | '''
13 | Utility class with enum for normalization
14 | algorithm.
15 |
16 | This class is used for a pure readability purpose.
17 | '''
18 |
19 | normalize = 0
20 | denormalize = 1
21 |
22 |
23 | class image_utils (object):
24 |
25 | '''
26 | Utility class for the Image object.
27 |
28 | The class stores pre-determined set of
29 | values related to the colors and colormaps
30 | for the detection boxes.
31 | '''
32 |
33 | num_box_colors = 6
34 | num_map_colors = 19
35 |
36 | colors = [ (0., 0., 0.),
37 | (0., 1., 0.),
38 | (0., 1., 1.),
39 | (1., 0., 0.),
40 | (1., 0., 1.),
41 | (1., 1., 0.)
42 | ]
43 |
44 | rgb_cmap = [ (0.078431, 0.078431, 0.078431),
45 | (0.266667, 0.133333, 0.600000),
46 | (0.231373, 0.047059, 0.741176),
47 | (0.200000, 0.066667, 0.733333),
48 | (0.266667, 0.266667, 0.866667),
49 | (0.066667, 0.666667, 0.733333),
50 | (0.070588, 0.741176, 0.725490),
51 | (0.133333, 0.800000, 0.666667),
52 | (0.411765, 0.815686, 0.145098),
53 | (0.666667, 0.800000, 0.133333),
54 | (0.815686, 0.764706, 0.062745),
55 | (0.800000, 0.733333, 0.200000),
56 | (0.996078, 0.682353, 0.176471),
57 | (1.000000, 0.600000, 0.200000),
58 | (1.000000, 0.400000, 0.266667),
59 | (1.000000, 0.266667, 0.133333),
60 | (1.000000, 0.200000, 0.066667),
61 | (0.933333, 0.066667, 0.000000),
62 | (0.972549, 0.047059, 0.070588)
63 | ]
64 |
65 |
66 |
--------------------------------------------------------------------------------
/NumPyNet/rnn_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import struct
8 | import itertools
9 | import numpy as np
10 |
11 | __author__ = ['Nico Curti']
12 | __email__ = ['nico.curti2@unibo.it']
13 |
14 |
15 | def read_tokenized_data (filename):
16 |
17 | with open(filename, 'rb') as fp:
18 | tokens = [struct.unpack("1022I", fp.read(4088)) for _ in range(1022)]
19 |
20 |   tokens = list(itertools.chain.from_iterable(tokens))  # flatten the tuples returned by struct.unpack; sum(..., []) would raise a TypeError on tuples
21 | return tokens
22 |
23 | def read_tokens (filename):
24 |
25 | with open(filename, 'r', encoding='utf-8') as fp:
26 | lines = fp.read()
27 |
28 |   lines = lines.replace('', '\n')  # str.replace returns a new string, so the result must be re-assigned
29 | lines = map(ord, lines)
30 | return lines
31 |
32 | def get_rnn_data (tokens, offsets, characters, lenght, batch, steps):
33 |
34 |   x = np.zeros(shape=(batch * steps * characters, ))  # zeros, not empty: only the one-hot positions are set below
35 |   y = np.zeros(shape=(batch * steps * characters, ))
36 |
37 | for i, j in itertools.product(range(batch), range(steps)):
38 | offset = offsets[i]
39 | _curr = tokens[offset % lenght]
40 | _next = tokens[(offset + 1 ) % lenght]
41 |
42 | idx = (j * batch + i) * characters
43 |
44 | x[idx + _curr] = 1
45 | y[idx + _next] = 1
46 |
47 | offsets[i] = (offset + 1) % lenght
48 |
49 | if _curr >= characters or _curr < 0 or _next >= characters or _next < 0:
50 | raise ValueError('Bad char')
51 |
52 | return (x, y)
53 |
54 |
55 | def sample_array (arr):
56 |
57 | s = 1. / np.sum(arr)
58 | arr *= s
59 |
60 | r = np.random.uniform(low=0., high=1., size=(1,))
61 |
62 | cumulative = np.cumsum(arr)
63 | r = r - cumulative
64 | pos = np.where(np.sign(r[:-1]) != np.sign(r[1:]))[0] + 1
65 |
66 |   return int(pos[0]) if pos.size > 0 else len(arr) - 1  # return a scalar index (pos is a numpy array)
67 |
68 | def print_symbol (n, tokens=None):
69 |
70 | if tokens is not None:
71 | print('{} '.format(tokens[n]), end='', flush=True)
72 | else:
73 | print('{}'.format(n))
74 |
75 |
76 | if __name__ == '__main__':
77 |
78 | print('Insert testing here')
79 |
--------------------------------------------------------------------------------
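For reference, `sample_array` above draws an index with probability proportional to its weight by inverting the cumulative distribution with a single uniform draw. A standalone sketch of the same idea (the name `sample_index` is illustrative, not part of the repository):

```python
import numpy as np

def sample_index(arr):
  # normalized cumulative distribution of the weights
  cdf = np.cumsum(np.asarray(arr, dtype=float))
  cdf /= cdf[-1]
  # first bin whose cumulative mass exceeds the uniform draw
  return int(np.searchsorted(cdf, np.random.uniform()))

draws = [sample_index([0.1, 0.2, 0.7]) for _ in range(10000)]
print(np.bincount(draws) / len(draws))  # approximately [0.1, 0.2, 0.7]
```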
/appveyor.yml:
--------------------------------------------------------------------------------
1 | # Tensorflow is supported only for windows x64
2 |
3 | clone_folder: c:\projects\NumPyNet
4 |
5 | environment:
6 |
7 | WORKSPACE: C:\projects
8 |
9 | matrix:
10 |
11 | # For Python versions available on Appveyor, see
12 | # https://www.appveyor.com/docs/windows-images-software/#python
13 | # The list here is complete (excluding Python 2.6, which
14 | # isn't covered by this document) at the time of writing.
15 |
16 | - PYTHON: "C:\\Python27"
17 | VERSION: 27
18 | ARCH: x86
19 | #- PYTHON: "C:\\Python33" # does not support opencv
20 | # VERSION: 27
21 | # ARCH: x86
22 | - PYTHON: "C:\\Python34"
23 | VERSION: 34
24 | ARCH: x86
25 | - PYTHON: "C:\\Python35"
26 | VERSION: 35
27 | ARCH: x86
28 | - PYTHON: "C:\\Python27-x64"
29 | VERSION: 27
30 | ARCH: x64
31 | - PYTHON: "C:\\Python35-x64" # pip install --ignore-installed --upgrade https://storage.googleapis.com/tensorflow/windows/cpu/tensorflow-1.1.0-cp35-cp35m-window.whl
32 | VERSION: 35
33 | ARCH: x64
34 | - PYTHON: "C:\\Python36-x64"
35 | VERSION: 36
36 | ARCH: x64
37 | - PYTHON: "C:\\Python37-x64"
38 | VERSION: 37
39 | ARCH: x64
40 | - PYTHON: "C:\\Python38-x64"
41 | VERSION: 38
42 | ARCH: x64
43 |
44 | build: off
45 |
46 | install:
47 | - "%PYTHON%\\python.exe --version"
48 | - cd %WORKSPACE%\NumPyNet
49 | - "%PYTHON%\\python.exe -m pip install -U pip"
50 | - "%PYTHON%\\python.exe -m pip --version"
51 |
52 | - "%PYTHON%\\python.exe -m pip install -r requirements.txt"
53 | - "%PYTHON%\\python.exe setup.py install"
54 | # testing libraries
55 | - IF [%VERSION%] GEQ [35] IF [%ARCH%] == [x64] %PYTHON%\\python.exe -m pip install hypothesis
56 | - IF [%VERSION%] GEQ [35] IF [%ARCH%] == [x64] IF [%VERSION%] GEQ [38] ( %PYTHON%\\python.exe -m pip install tensorflow==2.2.0rc1 ) ELSE ( %PYTHON%\\python.exe -m pip install tensorflow==2.0.0b1 )
57 |
58 | test_script:
59 | - IF [%VERSION%] GEQ [35] IF [%ARCH%] == [x64] %PYTHON%\\Scripts\\pytest.exe
60 | - exit 0
61 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # tex
7 | *.aux
8 | *.bbl
9 | *.fdb_latexmk
10 | *.blg
11 | *.log
12 | *.out
13 | *.fls
14 | *.toc
15 | *.synctex.gz
16 | *.tex.backup
17 |
18 | # markdown
19 | *.dvi
20 | *.pyg
21 |
22 | # C extensions
23 | *.so
24 |
25 | # Distribution / packaging
26 | .Python
27 | build/
28 | develop-eggs/
29 | dist/
30 | downloads/
31 | eggs/
32 | .eggs/
33 | lib/
34 | lib64/
35 | parts/
36 | sdist/
37 | var/
38 | wheels/
39 | *.egg-info/
40 | .installed.cfg
41 | *.egg
42 | MANIFEST
43 |
44 | # PyInstaller
45 | # Usually these files are written by a python script from a template
46 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
47 | *.manifest
48 | *.spec
49 |
50 | # Installer logs
51 | pip-log.txt
52 | pip-delete-this-directory.txt
53 |
54 | # Unit test / coverage reports
55 | htmlcov/
56 | .tox/
57 | .coverage
58 | .coverage.*
59 | .cache
60 | nosetests.xml
61 | coverage.xml
62 | *.cover
63 | .hypothesis/
64 | .pytest_cache/
65 |
66 | # Translations
67 | *.mo
68 | *.pot
69 |
70 | # Django stuff:
71 | *.log
72 | local_settings.py
73 | db.sqlite3
74 |
75 | # Flask stuff:
76 | instance/
77 | .webassets-cache
78 |
79 | # Scrapy stuff:
80 | .scrapy
81 |
82 | # Sphinx documentation
83 | docs/_build/
84 |
85 | # PyBuilder
86 | target/
87 |
88 | # Jupyter Notebook
89 | .ipynb_checkpoints
90 |
91 | # pyenv
92 | .python-version
93 |
94 | # celery beat schedule file
95 | celerybeat-schedule
96 |
97 | # SageMath parsed files
98 | *.sage.py
99 |
100 | # Environments
101 | .env
102 | .venv
103 | env/
104 | venv/
105 | ENV/
106 | env.bak/
107 | venv.bak/
108 |
109 | # Spyder project settings
110 | .spyderproject
111 | .spyproject
112 |
113 | # VScode project settings
114 | .vscode/
115 |
116 | # Rope project settings
117 | .ropeproject
118 |
119 | # mkdocs documentation
120 | /site
121 |
122 | # mypy
123 | .mypy_cache/
124 |
125 | # directories
126 | [Bb]in/
127 | [Bb]uild/
128 | .hypothesis/
129 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 |
16 | sys.path.insert(0, os.path.abspath('../../NumPyNet/'))
17 |
18 | from __version__ import __version__
19 |
20 | # -- Project information -----------------------------------------------------
21 |
22 | project = 'NumPyNet - Neural Networks Library in pure Numpy'
23 | copyright = '2020, Nico Curti, Mattia Ceccarelli'
24 | author = 'Nico Curti, Mattia Ceccarelli'
25 |
26 | # The full version, including alpha/beta/rc tags
27 | release = __version__
28 |
29 | master_doc = 'index'
30 |
31 | # -- General configuration ---------------------------------------------------
32 |
33 | # Add any Sphinx extension module names here, as strings. They can be
34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 | # ones.
36 | extensions = ['sphinx.ext.autodoc',
37 | 'sphinx.ext.napoleon',
38 | 'sphinx.ext.viewcode',
39 | ]
40 |
41 | # Add any paths that contain templates here, relative to this directory.
42 | templates_path = []
43 |
44 | # List of patterns, relative to source directory, that match files and
45 | # directories to ignore when looking for source files.
46 | # This pattern also affects html_static_path and html_extra_path.
47 | exclude_patterns = []
48 |
49 |
50 | # -- Options for HTML output -------------------------------------------------
51 |
52 | # The theme to use for HTML and HTML Help pages. See the documentation for
53 | # a list of builtin themes.
54 | #
55 | html_theme = 'sphinx_rtd_theme'
56 |
57 | # Add any paths that contain custom static files (such as style sheets) here,
58 | # relative to this directory. They are copied after the builtin static files,
59 | # so a file named "default.css" will overwrite the builtin "default.css".
60 | html_static_path = []
61 |
--------------------------------------------------------------------------------
/.github/workflows/python.yml:
--------------------------------------------------------------------------------
1 | name: NumPyNet CI
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 |
9 | jobs:
10 | build:
11 |
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | python-version: [3.5, 3.6, 3.7]
16 |
17 | steps:
18 | - uses: actions/checkout@v2
19 | - name: Set up Python ${{ matrix.python-version }}
20 | uses: actions/setup-python@v2
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 | - name: Install dependencies
24 | run: |
25 | python -m pip install --upgrade pip
26 | pip install flake8 pytest
27 | if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
28 | - name: Install testing dependencies
29 | run: |
30 | pip install tensorflow==2.0.0b1 hypothesis==4.13.0 codecov pytest-cov
31 | - name: Lint with flake8
32 | run: |
33 | # stop the build if there are Python syntax errors or undefined names
34 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
35 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
36 | # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
37 | - name: Test with pytest
38 | run: |
39 | python -m pytest ./testing --cov=NumPyNet --cov-config=.coveragerc
40 |
41 | # python-latest:
42 | #
43 | # runs-on: ubuntu-latest
44 | # strategy:
45 | # matrix:
46 | # python-version: [3.8]
47 | #
48 | # steps:
49 | # - uses: actions/checkout@v2
50 | # - name: Set up Python ${{ matrix.python-version }}
51 | # uses: actions/setup-python@v2
52 | # with:
53 | # python-version: ${{ matrix.python-version }}
54 | # - name: Install dependencies
55 | # run: |
56 | # python -m pip install --upgrade pip
57 | # pip install flake8 pytest
58 | # if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
59 | # - name: Install testing dependencies
60 | # run: |
61 | # pip install tensorflow==2.2.0rc1 hypothesis==4.13.0 codecov pytest-cov
62 | # - name: Lint with flake8
63 | # run: |
64 | # # stop the build if there are Python syntax errors or undefined names
65 | # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
66 | # # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
67 | # # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
68 | # - name: Test with pytest
69 | # run: |
70 | # python -m pytest ./testing --cov=NumPyNet --cov-config=.coveragerc
71 |
72 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 |
3 | dist: trusty
4 | timeout: 1000
5 |
6 | matrix:
7 | include:
8 |
9 | # linux + python
10 |
11 | - os: linux
12 | python: 2.6
13 | name: ubuntu 14.04 - python@2.6
14 | sudo: true
15 | env:
16 | - MATRIX_EVAL=""
17 |
18 | - os: linux
19 | python: 2.7
20 | name: ubuntu 14.04 - python@2.7
21 | sudo: true
22 | env:
23 | - MATRIX_EVAL=""
24 |
25 | - os: linux
26 | python: 3.3
27 | name: ubuntu 14.04 - python@3.3
28 | sudo: true
29 | env:
30 | - MATRIX_EVAL=""
31 |
32 | - os: linux
33 | python: 3.4
34 | name: ubuntu 14.04 - python@3.4
35 | sudo: true
36 | env:
37 | - MATRIX_EVAL=""
38 |
39 | - os: linux
40 | python: 3.5
41 | name: ubuntu 14.04 - python@3.5
42 | sudo: true
43 | env:
44 | - MATRIX_EVAL=""
45 |
46 | - os: linux
47 | python: 3.6
48 | name: ubuntu 14.04 - python@3.6
49 | sudo: true
50 | env:
51 | - MATRIX_EVAL=""
52 |
53 | - os: linux
54 | python: 3.7
55 | name: ubuntu 14.04 - python@3.7
56 | dist: xenial
57 | sudo: true
58 | env:
59 | - MATRIX_EVAL=""
60 |
61 | - os: linux
62 | python: 3.8
63 | name: ubuntu 14.04 - python@3.8
64 | dist: xenial
65 | sudo: true
66 | env:
67 | - MATRIX_EVAL=""
68 |
69 | allow_failures:
70 | # python2.6 and python3.3 are no longer supported by opencv
71 | # python3.8 does not yet support opencv
72 | - name: ubuntu 14.04 - python@2.6
73 | - name: ubuntu 14.04 - python@3.3
74 | - name: ubuntu 14.04 - python@3.8
75 |
76 |
77 | before_install:
78 | - travis_retry eval "${MATRIX_EVAL}"
79 | #- pip install -r requirements.txt
80 | - python -m pip install --upgrade pip
81 | - python -m pip --version
82 |
83 | - python -m pip install numpy==1.15
84 | - python -m pip install Pillow==2.2.2
85 | - python -m pip install opencv-python==4.0.0.21
86 | - python -m pip install matplotlib
87 | - python -m pip install pytest==3.0.7
88 | - python -m pip install enum34
89 | - python -m pip install configparser
90 | # testing libraries
91 | - python -m pip install tensorflow==2.0.0b1
92 | - python -m pip install hypothesis==4.13.0
93 |
94 | - python -m pip install codecov
95 | - python -m pip install pytest-cov
96 |
97 | install:
98 | - python setup.py install
99 |
100 | script:
101 | - travis_wait 45 python -m pytest --cov=NumPyNet --cov-config=.coveragerc
102 |
103 | after_success:
104 | - codecov
105 |
--------------------------------------------------------------------------------
/testing/test_gru_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from tensorflow.keras.models import Model, Sequential
8 | from tensorflow.keras.layers import GRU, Input
9 | import tensorflow.keras.backend as K
10 |
11 | from NumPyNet.exception import LayerError
12 | from NumPyNet.exception import NotFittedError
13 | from NumPyNet.layers.gru_layer import GRU_layer
14 | from NumPyNet.utils import data_to_timesteps
15 |
16 | import numpy as np
17 | import pytest
18 | from hypothesis import strategies as st
19 | from hypothesis import given
20 | from hypothesis import settings
21 |
22 | from random import choice
23 |
24 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
25 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
26 |
27 | class TestGRUlayer :
28 |
29 | def test_constructor (self):
30 | pass
31 |
32 | def test_printer (self):
33 | pass
34 |
35 | def _forward (self):
36 | outputs = 30
37 | steps = 1
38 | features = 10
39 | batch = 16
40 |
41 | data = np.random.uniform(size=(batch, features))
42 |
43 | weights = [np.random.uniform(size=(features, outputs)),
44 | np.random.uniform(size=(features, outputs)),
45 | np.random.uniform(size=(features, outputs)),
46 | np.random.uniform(size=(outputs, outputs)),
47 | np.random.uniform(size=(outputs, outputs)),
48 | np.random.uniform(size=(outputs, outputs))]
49 |
50 | bias = [np.zeros(shape=(outputs,)), np.zeros(shape=outputs)]
51 |
52 |     # assign the same weights to the keras kernels as for NumPyNet
53 |     keras_weights1 = np.concatenate([weights[i] for i in range(3)], axis=1)
54 |     keras_weights2 = np.concatenate([weights[i] for i in range(3, 6)], axis=1)
55 |     keras_bias = np.concatenate([bias[i] for i in range(3)])  # GRU has 3 gates (unused below, since use_bias=False)
56 |
57 | inpt_keras, _ = data_to_timesteps(data, steps)
58 |
59 | assert inpt_keras.shape == (batch - steps, steps, features)
60 |
61 | inp = Input(shape=(steps, features))
62 | gru = GRU(units=outputs, use_bias=False)(inp)
63 | model = Model(inputs=inp, outputs=gru)
64 |
65 | model.set_weights([keras_weights1, keras_weights2])
66 |
67 |     layer = GRU_layer(outputs=outputs, steps=steps, weights=weights, bias=bias)
68 |
69 | layer.forward(data)
70 |
71 | forward_out_keras = model.predict(inpt_keras)
72 |
73 | forward_out_numpynet = layer.output
74 |
75 |     # compare the two forward outputs
76 |     assert np.allclose(forward_out_keras, forward_out_numpynet)
79 |
80 |
81 | def test_backward (self):
82 | pass
83 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/input_layer.md:
--------------------------------------------------------------------------------
1 | # Input Layer
2 |
3 | This layer is more of a utility: it is the first layer of every `network` object, and all it does is pass on the exact same input it receives, checking that `input_shape` is consistent with the actual input shape.
4 | Even the backward simply passes back the delta it receives from the next layer.
5 |
6 | To use this layer as a single layer, this is a simple example code:
7 |
8 | ```python
9 | # first, the essential import for the library.
10 | # after the installation:
11 | from NumPyNet.layers.input_layer import Input_layer # class import
12 |
13 | import numpy as np # the library is entirely based on numpy
14 |
15 | # define a batch of images (even a single image is ok, but it is important that
16 | # it has all four dimensions) in the format (batch, width, height, channels)
17 |
18 | batch, w, h, c = (5, 100, 100, 3)
19 | input = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # you can also import an image from file
20 |
21 | # Layer initialization
22 | layer = Input_layer(input_shape=input.shape)
23 |
24 | # Forward pass
25 | layer.forward(inpt=input)
26 | out_img = layer.output # the output in this case will be of shape=(batch, w, h, c), so a batch of images (identical to the input actually)
27 |
28 | # Backward pass
29 | delta = np.random.uniform(low=0., high=1., size=input.shape) # definition of the network delta, to be backpropagated
30 | layer.delta = np.random.uniform(low=0., high=1., size=out_img.shape) # layer delta, ideally coming from the next layer
31 | layer.backward(delta)
32 |
33 | # now delta is modified and ready to be passed backward
34 | ```
35 |
36 | For a closer look at what is happening, these are the definitions of the `forward` and `backward` functions for this layer:
37 |
38 | ```python
39 | def forward(self, inpt):
40 | '''
41 | Simply store the input array
42 | Parameters:
43 | inpt: the input array
44 | '''
45 | if self.out_shape != inpt.shape:
46 | raise ValueError('Forward Input layer. Incorrect input shape. Expected {} and given {}'.format(self.out_shape, inpt.shape))
47 |
48 | self.output[:] = inpt
49 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
50 | ```
51 |
52 | As stated above, all it does is check that the input shape is consistent with `self.out_shape`, which is the same as `input_shape`.
53 | And here's the backward:
54 |
55 | ```python
56 | def backward(self, delta):
57 | '''
58 | Simply pass the gradient
59 | Parameter:
60 | delta : global error to be backpropagated
61 | '''
62 |     if self.out_shape != delta.shape:
63 |       raise ValueError('Backward Input layer. Incorrect delta shape. Expected {} and given {}'.format(self.out_shape, delta.shape))
64 |
65 | delta[:] = self.delta
66 | ```
67 |
68 | That does nothing more than updating `delta` with `layer.delta` exactly as it is.
69 |
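70 | Since the layer is a pure pass-through, its behaviour is easy to verify numerically: the snippet below (a minimal sketch, using the same API as above) checks that the forward pass returns an exact copy of the input.
71 |
72 | ```python
73 | import numpy as np
74 | from NumPyNet.layers.input_layer import Input_layer
75 |
76 | x = np.random.uniform(low=0., high=1., size=(1, 4, 4, 3))
77 |
78 | layer = Input_layer(input_shape=x.shape)
79 | layer.forward(inpt=x)
80 |
81 | # the output is an exact copy of the input
82 | assert np.array_equal(layer.output, x)
83 | ```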
--------------------------------------------------------------------------------
/NumPyNet/layers/gru_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from NumPyNet.activations import Logistic
8 | from NumPyNet.activations import Tanh
9 | from NumPyNet.utils import _check_activation
10 | from NumPyNet.utils import check_is_fitted
11 |
12 | import numpy as np
13 |
14 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
15 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
16 |
17 |
18 | class GRU_layer (object):
19 |
20 | def __init__(self, outputs, steps, input_shape=None, weights=None, bias=None):
21 |
22 | if isinstance(outputs, int) and outputs > 0:
23 | self.outputs = outputs
24 | else:
25 | raise ValueError('GRU layer : Parameter "outputs" must be an integer and > 0')
26 |
27 | if isinstance(steps, int) and steps > 0:
28 | self.steps = steps
29 | else:
30 | raise ValueError('GRU layer : Parameter "steps" must be an integer and > 0')
31 |
32 | self.input_shape = input_shape
33 |
34 | self.Wz = weights[0] # shape (inputs, outputs)
35 | self.Wr = weights[1]
36 | self.Wh = weights[2]
37 |
38 | self.Uz = weights[3] # shape (outputs, outputs)
39 | self.Ur = weights[4]
40 | self.Uh = weights[5]
41 |
42 | self.bz = bias[0] # shape (outputs, )
43 | self.br = bias[1]
44 | self.bh = bias[2]
45 |
46 | def __call__(self, prev_layer):
47 | raise NotImplementedError
48 |
49 | @property
50 | def out_shape(self):
51 | pass
52 |
53 | def _as_Strided(self, arr, shift=1):
54 | '''
55 | Stride the input array to return mini-sequences
56 | '''
57 |
58 | X = arr.reshape(arr.shape[0], -1)
59 |
60 | Npoints, features = X.shape
61 | stride0, stride1 = X.strides
62 |
63 | shape = (Npoints - self.steps * shift, self.steps, features)
64 | strides = (shift * stride0, stride0, stride1)
65 |
66 |     X = np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides)  # stride the 2-D view whose strides were measured, not the raw input
67 |
68 | return np.swapaxes(X, 0, 1)
69 |
70 | def forward(self, inpt):
71 |
72 | inpt = inpt.astype('float64')
73 | _input = self._as_Strided(inpt)
74 | state = np.zeros(shape=(_input.shape[1], self.outputs))
75 |
76 | self.output = np.zeros_like(state)
77 |
78 | for i, X in enumerate(_input):
79 |
80 | op = 'ij, jk -> ik'
81 | xz = np.einsum(op, X, self.Wz) + self.bz
82 | xr = np.einsum(op, X, self.Wr) + self.br
83 | xh = np.einsum(op, X, self.Wh) + self.bh
84 |
85 | hz = np.einsum(op, state, self.Uz)
86 | hr = np.einsum(op, state, self.Ur)
87 |
88 | zt = Logistic.activate(xz + hz)
89 | rt = Logistic.activate(xr + hr)
90 |
91 | hh = np.einsum(op, state * rt, self.Uh)
92 |
93 | state = zt * state + (1 - zt) * Tanh.activate(xh + hh)
94 |
95 |       # "no sequence" implementation: only the last state is kept as output
96 | self.output = state
97 |
98 | self.delta = np.zeros_like(self.output)
99 |
100 | def backward(self, delta):
101 | pass
102 |
103 | def update(self):
104 | pass
105 |
106 |
107 | if __name__ == '__main__':
108 | pass
109 |
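110 |   # Minimal smoke test (illustrative sketch only, still under the __main__
111 |   # guard): build a GRU layer with random parameters and check the forward
112 |   # output shape. With the stride trick above (shift=1), the forward
113 |   # consumes (batch - steps) mini-sequences, so the output has shape
114 |   # (batch - steps, outputs).
115 |   batch, features, outputs, steps = (16, 10, 30, 1)
116 |
117 |   weights = [np.random.uniform(size=(features, outputs)) for _ in range(3)] + \
118 |             [np.random.uniform(size=(outputs, outputs)) for _ in range(3)]
119 |   bias = [np.zeros(shape=(outputs,)) for _ in range(3)]
120 |
121 |   layer = GRU_layer(outputs=outputs, steps=steps, weights=weights, bias=bias)
122 |
123 |   data = np.random.uniform(size=(batch, features))
124 |   layer.forward(data)
125 |
126 |   print(layer.output.shape)  # (15, 30)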
--------------------------------------------------------------------------------
/testing/test_parser.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import os
8 | import numpy as np
9 |
10 | from NumPyNet.exception import CfgVariableError
11 | from NumPyNet.exception import DataVariableError
12 | from NumPyNet.parser import net_config
13 | from NumPyNet.parser import data_config
14 |
15 | import pytest
16 | from hypothesis import strategies as st
17 | from hypothesis import given
18 | from hypothesis import settings
19 |
20 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
21 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
22 |
23 |
24 | class TestDataConfig:
25 | '''
26 | '''
27 |
28 | def test_constructor (self):
29 |
30 | here = os.path.dirname(__file__)
31 | filename = os.path.join(here, '..', 'data', 'yolov3.data')
32 |
33 | cfg = data_config(filename)
34 | assert len(cfg) == 6
35 |
36 | with pytest.raises(IOError):
37 | filename = ''
38 | cfg = data_config(filename)
39 |
40 | def test_getter (self):
41 |
42 | here = os.path.dirname(__file__)
43 | filename = os.path.join(here, '..', 'data', 'yolov3.data')
44 |
45 | cfg = data_config(filename)
46 |
47 | assert cfg.get('cfg', '') == 'cfg/yolov3.cfg'
48 | assert cfg.get('None', 42) == 42
49 |
50 | with pytest.raises(CfgVariableError):
51 | res = cfg.get(['weights'], 32)
52 | assert res == 32
53 |
54 | assert cfg.get('weights', '') == 'data/yolov3.weights'
55 | assert cfg.get('names', '') == 'data/coco.names'
56 | assert cfg.get('thresh', 2.12) == .5
57 | assert cfg.get('hier', 3.14) == .5
58 | assert cfg.get('classes', 42) == 80
59 |
60 | def test_print (self):
61 |
62 | here = os.path.dirname(__file__)
63 | filename = os.path.join(here, '..', 'data', 'yolov3.data')
64 |
65 | cfg = data_config(filename)
66 |
67 | assert str(cfg) == str(cfg._data)
68 | evaluated = eval(str(cfg))
69 |
70 | assert isinstance(evaluated, dict)
71 |
72 | assert evaluated.get('cfg', '') == 'cfg/yolov3.cfg'
73 | assert evaluated.get('None', 42) == 42
74 | assert evaluated.get('weights', 32) != 32
75 | assert evaluated.get('weights', '') == 'data/yolov3.weights'
76 | assert evaluated.get('names', '') == 'data/coco.names'
77 | assert evaluated.get('thresh', 2.12) == .5
78 | assert evaluated.get('hier', 3.14) == .5
79 | assert evaluated.get('classes', 42) == 80
80 |
81 | class TestNetConfig:
82 | '''
83 | '''
84 |
85 | def test_constructor (self):
86 |
87 | here = os.path.dirname(__file__)
88 | filename = os.path.join(here, '..', 'cfg', 'yolov3.cfg')
89 |
90 | cfg = net_config(filename)
91 | assert len(cfg) == 108
92 |
93 | print(cfg)
94 |
95 | with pytest.raises(IOError):
96 | filename = ''
97 | cfg = net_config(filename)
98 |
99 | def test_getter (self):
100 |
101 | here = os.path.dirname(__file__)
102 | filename = os.path.join(here, '..', 'cfg', 'yolov3.cfg')
103 |
104 | cfg = net_config(filename)
105 |
106 | with pytest.raises(DataVariableError):
107 | res = cfg.get('net', 'batch', 42)
108 | assert res == 42
109 |
110 | assert cfg.get('net0', 'batch', 42) == 1
111 | assert cfg.get('convolutional1', 'stride', 3) == 1
112 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/l1norm_layer.md:
--------------------------------------------------------------------------------
1 | # L1 normalization Layer
2 |
3 | The l1 normalization layer normalizes the data along the selected axis, using the l1 norm, computed as:
4 |
5 | 
6 |
7 | Where `N` is the dimension of the selected axis. The normalization is computed as:
8 |
9 | 
10 |
11 | Where ε is a small constant (of the order of 10⁻⁸) used to avoid division by zero.
12 |
13 | The backward, in this case, is computed as:
14 |
15 | )
16 |
17 | Where δl is this layer's delta, updated with the derivative of the normalization before being added to the previous layer's delta.
18 |
19 | The code below is an example on how to use the single layer:
20 |
21 | ```python
22 | import os
23 |
24 | from PIL import Image  # needed below by Image.open
25 | from NumPyNet.layers.l1norm_layer import L1Norm_layer
25 |
26 | import numpy as np
27 |
28 | # those functions rescale the pixel values [0,255]->[0,1] and [0,1]->[0,255]
29 | img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
30 | float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
31 |
32 | filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
33 | inpt = np.asarray(Image.open(filename), dtype=float)
34 | inpt.setflags(write=1)
35 | inpt = img_2_float(inpt) # preparation of the image
36 |
37 | # add batch = 1
38 | inpt = np.expand_dims(inpt, axis=0)
39 |
40 | # instantiate the layer
41 | layer = L1Norm_layer(axis=None) # axis=None just sum all the values
42 |
43 | # FORWARD
44 |
45 | layer.forward(inpt)
46 | forward_out = layer.output # the shape of the output is the same as the one of the input
47 |
48 | # BACKWARD
49 |
50 | delta = np.zeros(shape=inpt.shape, dtype=float)
51 | layer.backward(delta, copy=False)
52 | ```
53 |
54 | For a closer look at what is happening, here are the definitions of the `forward` and `backward` functions:
55 |
56 | ```python
57 | def forward(self, inpt):
58 | '''
59 | Forward of the l1norm layer, apply the l1 normalization over
60 | the input along the given axis
61 | Parameters:
62 |       inpt: the input to be normalized
63 | '''
64 | self._out_shape = inpt.shape
65 |
66 | norm = np.abs(inpt).sum(axis=self.axis, keepdims=True)
67 | norm = 1. / (norm + 1e-8)
68 | self.output = inpt * norm
69 | self.scales = -np.sign(self.output)
70 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
71 | ```
72 |
73 | The `forward` function is an implementation of what was stated before:
74 |   * compute the inverse of the l1 norm over the axis selected during the initialization of the layer object. If `self.axis` is `None`, then the sum runs over every pixel
75 |   * compute `self.output` with the formula previously described
76 |   * instantiate `self.scales`, used in `backward`
77 |
78 | ```python
79 | def backward(self, delta, copy=False):
80 | '''
81 | Compute the backward of the l1norm layer
82 | Parameter:
83 | delta : global error to be backpropagated
84 | '''
85 |
86 | self.delta += self.scales
87 | delta[:] += self.delta
88 | ```
89 |
90 | As for `forward`, `backward` is just a simple implementation of what's described above.
91 |
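92 | A quick numerical check of the formulas (a minimal sketch, independent from the image example above): after the forward pass, the l1 norm of the output must be 1, up to the small ε correction.
93 |
94 | ```python
95 | import numpy as np
96 | from NumPyNet.layers.l1norm_layer import L1Norm_layer
97 |
98 | x = np.random.uniform(low=0., high=1., size=(1, 2, 2, 3))
99 |
100 | layer = L1Norm_layer(axis=None)
101 | layer.forward(x)
102 |
103 | # the sum of the absolute values of the output is ~1
104 | print(np.abs(layer.output).sum())  # ~1.0, slightly smaller because of ε
105 | ```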
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/l2norm_layer.md:
--------------------------------------------------------------------------------
1 | # L2 normalization layer
2 |
3 | The l2 normalization layer normalizes the data along the selected axis, using the l2 norm, computed as:
4 |
5 | 
6 |
7 | Where `N` is the dimension of the selected axis. The normalization is computed as:
8 |
9 | 
10 |
11 | Where ε is a small constant (of the order of 10⁻⁸) used to avoid division by zero.
12 |
13 | The backward, in this case, is computed as:
14 |
15 | }{\sqrt{\sum_i&space;x_i&space;+&space;\epsilon}})
16 |
17 | Where δl-1 is the delta backpropagated to the previous layer, updated with the contribution of this layer computed from the normalized output.
18 |
19 | This code is an example of how to use the single `l2norm_layer` object:
20 |
21 | ```python
22 |
23 | import os
24 |
25 | from PIL import Image  # needed below by Image.open
26 | from NumPyNet.layers.l2norm_layer import L2Norm_layer
26 |
27 | import numpy as np
28 |
29 | # those functions rescale the pixel values [0,255]->[0,1] and [0,1]->[0,255]
30 | img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
31 | float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
32 |
33 | filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
34 | inpt = np.asarray(Image.open(filename), dtype=float)
35 | inpt.setflags(write=1)
36 | inpt = img_2_float(inpt)
37 |
38 | # add batch = 1
39 | inpt = np.expand_dims(inpt, axis=0)
40 |
41 | # instantiate the layer
42 | layer = L2Norm_layer(axis=None) # axis=None just sum all the values
43 |
44 | # FORWARD
45 |
46 | layer.forward(inpt)
47 | forward_out = layer.output # the shape of the output is the same as the one of the input
48 |
49 | # BACKWARD
50 |
51 | delta = np.zeros(shape=inpt.shape, dtype=float)
52 | layer.backward(delta, copy=False)
53 | ```
54 |
55 | For a closer look at what is happening, these are the definitions of `forward` and `backward` for the `L2Norm_layer`:
56 |
57 | ```python
58 | def forward(self, inpt):
59 | '''
60 | Forward of the l2norm layer, apply the l2 normalization over
61 | the input along the given axis
62 | Parameters:
63 |       inpt: the input to be normalized
64 | '''
65 | self._out_shape = inpt.shape
66 |
67 | norm = (inpt * inpt).sum(axis=self.axis, keepdims=True)
68 | norm = 1. / np.sqrt(norm + 1e-8)
69 | self.output = inpt * norm
70 | self.scales = (1. - self.output) * norm
71 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
72 | ```
73 |
74 | This is a simple implementation of the formulas described above:
75 |   * sum the squared input over the selected axis. If `self.axis` is `None` then the sum is computed over every pixel.
76 |   * `self.output` is the normalized `inpt`
77 |   * define `self.scales` and initialize `self.delta`
78 |
79 | The `backward` is:
80 |
81 | ```python
82 | def backward(self, delta, copy=False):
83 | '''
84 | Compute the backward of the l2norm layer
85 | Parameter:
86 | delta : global error to be backpropagated
87 | '''
88 |
89 | self.delta += self.scales
90 | delta[:] += self.delta
91 | ```
92 |
93 | which updates `self.delta` with the value of `self.scales` computed in `forward`, and then updates the value of `delta`, received as an argument by the function.
94 |
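95 | As a quick sanity check of the formulas (a minimal sketch, independent from the image example above), after the forward pass the output has unit l2 norm, up to the small ε correction:
96 |
97 | ```python
98 | import numpy as np
99 | from NumPyNet.layers.l2norm_layer import L2Norm_layer
100 |
101 | x = np.random.uniform(low=0., high=1., size=(1, 2, 2, 3))
102 |
103 | layer = L2Norm_layer(axis=None)
104 | layer.forward(x)
105 |
106 | # the sum of the squared outputs is ~1, i.e. the l2 norm is ~1
107 | print((layer.output ** 2).sum())  # ~1.0, slightly smaller because of ε
108 | ```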
--------------------------------------------------------------------------------
/NumPyNet/layers/base.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from NumPyNet.exception import LayerError
8 |
9 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
10 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
11 |
12 |
13 | class BaseLayer (object):
14 |
15 | '''
16 | Abstract Base Layer object
17 |
18 | Parameters
19 | ----------
20 | input_shape : tuple (default=None)
21 | Input layer dimension
22 | '''
23 |
24 | def __init__(self, input_shape=None):
25 | self.input_shape = input_shape
26 | self.output, self.delta = (None, None)
27 |
28 | def __str__(self):
29 | '''
30 | Print the layer
31 | '''
32 | raise NotImplementedError
33 |
34 | def __repr__(self):
35 | '''
36 | Object representation
37 | '''
38 | class_name = self.__class__.__qualname__
39 | params = self.__init__.__code__.co_varnames
40 | params = set(params) - {'self'}
41 | args = ', '.join(['{0}={1}'.format(k, str(getattr(self, k)))
42 | if not isinstance(getattr(self, k), str) else '{0}="{1}"'.format(k, str(getattr(self, k)))
43 | for k in params])
44 | return '{0}({1})'.format(class_name, args)
45 |
46 | def _build(self, *args, **kwargs):
47 | '''
48 | Build layer parameters
49 | '''
50 | pass
51 |
52 | def _check_dims(self, shape, arr, func):
53 | '''
54 | Check shape array
55 | '''
56 | if shape[1:] != arr.shape[1:]:
57 | class_name = self.__class__.__name__
58 | raise ValueError('{0} {1}. Incorrect input shape. Expected {2} and given {3}'.format(func, class_name, shape[1:], arr.shape[1:]))
59 |
60 | def __call__(self, previous_layer):
61 | '''
62 | Overload operator ()
63 |
64 | Parameters
65 | ----------
66 | previous_layer : Layer obj
67 | Layer object to join
68 | '''
69 |
70 | if previous_layer.out_shape is None:
71 | class_name = self.__class__.__name__
72 | prev_name = previous_layer.__class__.__name__
73 | raise LayerError('Incorrect shapes found. Layer {0} cannot be connected to the previous {1} layer.'.format(class_name, prev_name))
74 |
75 | self.input_shape = previous_layer.out_shape
76 |
77 | self._build()
78 |
79 | return self
80 |
81 | @property
82 | def out_shape(self):
83 | '''
84 | Get the output shape
85 |
86 | Returns
87 | -------
88 | out_shape : tuple
89 | Tuple as (batch, out_w, out_h, out_c)
90 | '''
91 | return self.input_shape
92 |
93 | def forward(self, input, *args, **kwargs):
94 | '''
95 | Abstract Forward function
96 |
97 | Parameters
98 | ----------
99 | input : array-like
100 | Input array of data to process
101 |
102 | *args : list
103 | Optional arguments
104 |
105 | **kwargs : dict
106 | Optional arguments
107 | '''
108 | raise NotImplementedError
109 |
110 | def backward(self, delta, input=None, *args, **kwargs):
111 | '''
112 | Abstract Backward function
113 |
114 | Parameters
115 | ----------
116 | delta : array-like
117 | Gradient array to back-propagate
118 |
119 | input : array-like (default=None)
120 | Input array of data to process
121 |
122 | *args : list
123 | Optional arguments
124 |
125 | **kwargs : dict
126 | Optional arguments
127 | '''
128 | raise NotImplementedError
129 |
--------------------------------------------------------------------------------
/.github/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at nico.curti2@unibo.it. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/NumPyNet/exception.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
8 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
9 |
10 |
11 | class CfgfmtError (Exception):
12 |
13 | '''
14 | Config file format exception
15 |
16 | This exception is raised if something goes wrong in the
17 | format of the neural network configuration file.
18 | The error raised is set to 1.
19 | '''
20 |
21 | def __init__ (self, message, errors=1):
22 |
23 | super(CfgfmtError, self).__init__(message)
24 |
25 | self.errors = errors
26 |
27 | class CfgVariableError (Exception):
28 |
29 | '''
30 | Config file variable exception
31 |
32 | This exception is raised if something goes wrong in the
33 | variables read in the neural network configuration file.
34 | The error raised is set to 2.
35 | '''
36 |
37 | def __init__ (self, message, errors=2):
38 |
39 | super(CfgVariableError, self).__init__(message)
40 |
41 | self.errors = errors
42 |
43 |
44 | class DataVariableError (Exception):
45 |
46 | '''
47 | Data file variable exception
48 |
49 | This exception is raised if something goes wrong in the
50 | variables read in the neural network data file.
51 | The error raised is set to 3.
52 | '''
53 |
54 | def __init__ (self, message, errors=3):
55 |
56 | super(DataVariableError, self).__init__(message)
57 |
58 | self.errors = errors
59 |
60 | class LayerError (Exception):
61 |
62 | '''
63 | Layer exception
64 |
65 | This exception is raised if something goes wrong in the
66 | construction or management of the Layer objects
67 | The error raised is set to 4.
68 | '''
69 |
70 | def __init__ (self, message, errors=4):
71 |
72 | super(LayerError, self).__init__(message)
73 |
74 | self.errors = errors
75 |
76 | class MetricsError (Exception):
77 |
78 | '''
79 | Metrics exception
80 |
81 | This exception is raised if something goes wrong in the
82 | execution of the evaluation metrics for the neural network object.
83 | The error raised is set to 5.
84 | '''
85 |
86 | def __init__ (self, message, errors=5):
87 |
88 | super(MetricsError, self).__init__(message)
89 |
90 | self.errors = errors
91 |
92 | class NetworkError (Exception):
93 |
94 | '''
95 | Network exception
96 |
97 | This exception is raised if something goes wrong
98 | during the building/training of the neural network object.
99 | The error raised is set to 6.
100 | '''
101 |
102 | def __init__ (self, message, errors=6):
103 |
104 | super(NetworkError, self).__init__(message)
105 |
106 | self.errors = errors
107 |
108 | class VideoError (Exception):
109 |
110 | '''
111 | Video exception
112 |
113 | This exception is raised if something goes wrong during
114 | the video capture performed by the VideoCapture object.
115 | The error raised is set to 7.
116 | '''
117 |
118 | def __init__ (self, message, errors=7):
119 |
120 | super(VideoError, self).__init__(message)
121 |
122 | self.errors = errors
123 |
124 | class NotFittedError (Exception):
125 |
126 | '''
127 | Not fitted exception
128 |
129 |   This exception is raised if you try to perform the
130 |   model prediction before the training phase.
131 | The error raised is set to 8.
132 | '''
133 |
134 | def __init__ (self, message, errors=8):
135 |
136 | super(NotFittedError, self).__init__(message)
137 |
138 | self.errors = errors
139 |
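140 |
141 | if __name__ == '__main__':
142 |
143 |   # Minimal usage sketch: every exception carries a numeric error code in
144 |   # its "errors" attribute, useful to map failures to exit codes.
145 |   try:
146 |     raise NotFittedError('Run fit before predict')
147 |   except NotFittedError as e:
148 |     print('{0} (error code {1})'.format(e, e.errors))  # error code 8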
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/logistic_layer.md:
--------------------------------------------------------------------------------
1 | # Logistic Layer
2 |
3 | The logistic layer is a particular implementation of what has already been described for the [Cost Layer](./cost_layer.md).
4 | It performs a logistic transformation of the output as:
5 |
6 | 
7 |
8 | and then, if its `forward` function receives `truth` values, it computes the binary cross entropy loss as:
9 |
10 | &space;-&space;(1&space;-&space;t)&space;\cdot&space;log(1&space;-&space;y))
11 |
12 | and the `cost`:
13 |
14 | 
15 |
16 | where `N` is the total number of features in the input `x`.
17 |
18 | This is an example code on how to use the single `Logistic_layer` object:
19 |
20 | ```python
21 | # first, the essential import for the layer.
22 | from NumPyNet.layers.logistic_layer import Logistic_layer
23 |
24 | import numpy as np # the library is entirely based on numpy
25 |
26 | batch, w, h, c = (5, 100, 100, 3)
27 | input = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # ideally this is the output of a model
28 | truth = np.random.choice(a=[0., 1.], size=input.shape, p=None) # binary truth; p=None assumes a uniform distribution over the values in "a"
29 |
30 | layer = Logistic_layer()
31 |
32 | # forward
33 | layer.forward(inpt=input, truth=truth)
34 | output = layer.output # this is the transformed input
35 | cost = layer.cost # real number, a measure of how well the model performed
36 | loss = layer.loss # loss of the model; every element is the distance of the output from the truth.
37 |
38 | # backward
39 | delta = np.zeros(shape=input.shape)
40 | layer.backward(delta=delta) # layer.delta is already initialized in forward
41 |
42 | # now delta is updated and ready to be passed backward
43 | ```
44 |
45 | For a closer look at what is happening, here are the definitions of `forward` and `backward`:
46 |
47 | ```python
48 | def forward(self, inpt, truth=None) :
49 | '''
50 | Forward function of the logistic layer, now the output should be consistent with darknet
51 |
52 | Parameters:
53 | inpt : output of the network with shape (batch, w, h, c)
54 |       truth : array of same shape as input (without the batch dimension),
55 | if given, the function computes the binary cross entropy
56 | '''
57 |
58 | self._out_shape = inpt.shape
59 | # inpt = np.log(inpt/(1-inpt))
60 | self.output = 1. / (1. + np.exp(-inpt)) # as for darknet
61 | # self.output = inpt
62 |
63 | if truth is not None:
64 | out = np.clip(self.output, 1e-8, 1. - 1e-8)
65 | self.loss = -truth * np.log(out) - (1. - truth) * np.log(1. - out)
66 | out_upd = out * (1. - out)
67 | out_upd[out_upd <= 1e-8] = 1e-8
68 | self.delta = (truth - out) * out_upd
69 | # self.cost = np.mean(self.loss)
70 | self.cost = np.sum(self.loss) # as for darknet
71 | else :
72 | self.delta = np.zeros(shape=self._out_shape, dtype=float)
73 | ```
74 |
75 | The code proceeds as follows:
76 |
77 | * `self.output` is computed as the element-wise sigmoid transformation of the input.
78 | * If `truth` is given (same shape as `inpt`), then the function clips `self.output` in the range [ε, 1-ε]: this is due to the singularity of the logarithm.
79 | * `self.loss` is computed as described above.
80 | * `self.delta` is updated as:
81 |
82 | &space;\cdot&space;y(1&space;-&space;y))
83 |
84 | * and `self.cost` is the sum of all `self.loss` elements
85 |
86 | And this is the `backward` definition:
87 |
88 | ```python
89 | def backward(self, delta=None):
90 | '''
91 | Backward function of the Logistic Layer
92 |
93 | Parameters:
94 | delta : array same shape as the input.
95 | '''
96 | if delta is not None:
97 | delta[:] += self.delta # as for darknet, probably an approx
98 | ```
99 |
100 | That is a simple update of `delta` with values of `self.delta` computed in `forward`.
101 |
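102 | As a final cross-check of the formulas (a minimal sketch, not part of the library), the `loss` computed by the layer can be reproduced by hand from the clipped sigmoid of the input:
103 |
104 | ```python
105 | import numpy as np
106 | from NumPyNet.layers.logistic_layer import Logistic_layer
107 |
108 | x = np.random.uniform(low=-1., high=1., size=(1, 2, 2, 1))
109 | t = np.random.choice(a=[0., 1.], size=x.shape)
110 |
111 | layer = Logistic_layer()
112 | layer.forward(inpt=x, truth=t)
113 |
114 | y = np.clip(1. / (1. + np.exp(-x)), 1e-8, 1. - 1e-8)  # clipped sigmoid
115 | loss = -t * np.log(y) - (1. - t) * np.log(1. - y)     # binary cross entropy
116 |
117 | assert np.allclose(layer.loss, loss)
118 | assert np.isclose(layer.cost, loss.sum())
119 | ```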
--------------------------------------------------------------------------------
/examples/lstm_signal.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | '''
5 | Little example on how to use a recurrent neural network to predict a math function
6 |
7 | Reference: https://www.datatechnotes.com/2018/12/rnn-example-with-keras-simplernn-in.html
8 | '''
9 |
10 | # from NumPyNet.layers.input_layer import Input_layer
11 | from NumPyNet.layers.lstm_layer import LSTM_layer
12 | from NumPyNet.layers.connected_layer import Connected_layer
13 | from NumPyNet.layers.cost_layer import Cost_layer
14 | # from NumPyNet.layers.dropout_layer import Dropout_layer
15 | from NumPyNet.network import Network
16 | from NumPyNet.optimizer import RMSprop
17 | from NumPyNet.metrics import mean_absolute_error
18 | from NumPyNet.utils import data_to_timesteps
19 |
20 | import numpy as np
21 | import pylab as plt
22 |
23 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
24 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
25 |
26 | np.random.seed(42)
27 |
28 | if __name__ == '__main__':
29 |
30 | Npoints = 1000
31 | train_size = 800
32 |
33 | time = np.arange(0, Npoints)
34 | noisy_signal = np.sin(0.02 * time) + 2 * np.random.rand(Npoints)
35 |
36 | steps = 4
37 | window_size=steps
38 |
39 |   X, _ = data_to_timesteps(noisy_signal, steps=steps)
40 | y = np.concatenate([X[1:, 0, :], X[-1:, 0, :]], axis=0)
41 |
42 | # Reshape the data according to a 4D tensor
43 | num_samples, size, _ = X.shape
44 |
45 | if size != steps:
46 | raise ValueError('Something went wrong with the stride trick!')
47 |
48 | if X.max() > noisy_signal.max() or X.min() < noisy_signal.min():
49 | raise ValueError('Something went wrong with the stride trick!')
50 |
51 | X = X.reshape(num_samples, 1, 1, size)
52 |
53 | X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
54 | y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]
55 |
56 | batch = 20
57 | step = batch
58 |
59 | y_train = y_train.reshape(-1, 1, 1, 1)
60 | y_test = y_test.reshape(-1, 1, 1, 1)
61 |
62 | # Create the model and training
63 | model = Network(batch=batch, input_shape=X_train.shape[1:])
64 |
65 | model.add(LSTM_layer(outputs=32, steps=step))
66 | model.add(Connected_layer(outputs=8, activation='relu'))
67 | model.add(Connected_layer(outputs=1, activation='linear'))
68 | model.add(Cost_layer(cost_type='mse'))
69 | # keras standard arguments
70 | model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))#, metrics=[mean_absolute_error])
71 |
72 | print('*************************************')
73 | print('\n Total input dimension: {}'.format(X_train.shape), '\n')
74 | print('**************MODEL SUMMARY***********')
75 |
76 | model.summary()
77 |
78 | print('\n***********START TRAINING***********\n')
79 |
80 | # Fit the model on the training set
81 |   model.fit(X=X_train, y=y_train, max_iter=10)  # y_train was already reshaped above
82 |
83 | print('\n***********START TESTING**************\n')
84 |
85 | # Test the prediction with timing
86 | loss, out = model.evaluate(X=X_test, truth=y_test, verbose=True)
87 |
88 | mae = mean_absolute_error(y_test, out)
89 |
90 | print('\n')
91 | print('Loss Score: {:.3f}'.format(loss))
92 | print('MAE Score: {:.3f}'.format(mae))
93 |
94 | # concatenate the prediction
95 |
96 | train_predicted = model.predict(X=X_train, verbose=False)
97 | test_predicted = model.predict(X=X_test, verbose=False)
98 |
99 | predicted = np.concatenate((train_predicted, test_predicted), axis=0)
100 |
101 | fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
102 | ax.plot(time[:- window_size*2], noisy_signal[:- window_size*2], 'b-', alpha=.75, label='true noisy signal')
103 | ax.plot(time[:predicted.shape[0]], predicted[:, 0, 0, 0], '-', color='orange', alpha=1, label='predicted signal')
104 |
105 | ax.vlines(time[train_predicted.shape[0]], noisy_signal.min(), noisy_signal.max(), colors='k', linestyle='dashed')
106 |
107 | ax.set_xlabel('Time', fontsize=14)
108 | ax.set_ylabel('Signal', fontsize=14)
109 |
110 | fig.legend(loc='upper right', fontsize=14)
111 | fig.tight_layout()
112 |
113 | plt.show()
114 |
--------------------------------------------------------------------------------
/examples/rnn_signal.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | '''
5 | Little example on how to use a recurrent neural network to predict a math function
6 |
7 | Reference: https://www.datatechnotes.com/2018/12/rnn-example-with-keras-simplernn-in.html
8 | '''
9 |
10 | # from NumPyNet.layers.input_layer import Input_layer
11 | from NumPyNet.layers.rnn_layer import RNN_layer
12 | from NumPyNet.layers.connected_layer import Connected_layer
13 | from NumPyNet.layers.cost_layer import Cost_layer
14 | # from NumPyNet.layers.dropout_layer import Dropout_layer
15 | from NumPyNet.network import Network
16 | from NumPyNet.optimizer import RMSprop
17 | from NumPyNet.metrics import mean_absolute_error
18 | from NumPyNet.utils import data_to_timesteps
19 |
20 |
21 | import numpy as np
22 | import pylab as plt
23 |
24 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
25 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
26 |
27 | np.random.seed(42)
28 |
29 | if __name__ == '__main__':
30 |
31 | Npoints = 1000
32 | train_size = 800
33 |
34 | time = np.arange(0, Npoints)
35 | noisy_signal = np.sin(0.02 * time) + 2 * np.random.rand(Npoints)
36 |
37 | steps = 4
38 | window_size=steps
39 |
40 | X, _ = data_to_timesteps(noisy_signal, steps=steps)
41 | y = np.concatenate([X[1:, 0, :], X[-1:, 0, :]], axis=0)
42 |
43 | # Reshape the data according to a 4D tensor
44 | num_samples, size, _ = X.shape
45 |
46 | if size != steps:
47 | raise ValueError('Something went wrong with the stride trick!')
48 |
49 | if X.max() > noisy_signal.max() or X.min() < noisy_signal.min():
50 | raise ValueError('Something went wrong with the stride trick!')
51 |
52 | X = X.reshape(num_samples, 1, 1, size)
53 |
54 | X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
55 | y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]
56 |
57 | batch = 20
58 | step = batch
59 |
60 | y_train = y_train.reshape(-1, 1, 1, 1)
61 | y_test = y_test.reshape(-1, 1, 1, 1)
62 |
63 | # Create the model and training
64 | model = Network(batch=batch, input_shape=X_train.shape[1:])
65 |
66 | model.add(RNN_layer(outputs=32, steps=step, activation='linear'))
67 | model.add(Connected_layer(outputs=8, activation='relu'))
68 | model.add(Connected_layer(outputs=1, activation='linear'))
69 | model.add(Cost_layer(cost_type='mse'))
70 | # keras standard arguments
71 | model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))#, metrics=[mean_absolute_error])
72 |
73 | print('*************************************')
74 | print('\n Total input dimension: {}'.format(X_train.shape), '\n')
75 | print('**************MODEL SUMMARY***********')
76 |
77 | model.summary()
78 |
79 | print('\n***********START TRAINING***********\n')
80 |
81 | # Fit the model on the training set
82 |   model.fit(X=X_train, y=y_train, max_iter=10)  # y_train was already reshaped above
83 |
84 | print('\n***********START TESTING**************\n')
85 |
86 | # Test the prediction with timing
87 | loss, out = model.evaluate(X=X_test, truth=y_test, verbose=True)
88 |
89 | mae = mean_absolute_error(y_test, out)
90 |
91 | print('\n')
92 | print('Loss Score: {:.3f}'.format(loss))
93 | print('MAE Score: {:.3f}'.format(mae))
94 |
95 | # concatenate the prediction
96 |
97 | train_predicted = model.predict(X=X_train, verbose=False)
98 | test_predicted = model.predict(X=X_test, verbose=False)
99 |
100 | predicted = np.concatenate((train_predicted, test_predicted), axis=0)
101 |
102 | fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))
103 | ax.plot(time[:- window_size*2], noisy_signal[:- window_size*2], 'b-', alpha=.75, label='true noisy signal')
104 | ax.plot(time[:predicted.shape[0]], predicted[:, 0, 0, 0], '-', color='orange', alpha=1, label='predicted signal')
105 |
106 | ax.vlines(time[train_predicted.shape[0]], noisy_signal.min(), noisy_signal.max(), colors='k', linestyle='dashed')
107 |
108 | ax.set_xlabel('Time', fontsize=14)
109 | ax.set_ylabel('Signal', fontsize=14)
110 |
111 | fig.legend(loc='upper right', fontsize=14)
112 | fig.tight_layout()
113 |
114 | plt.show()
115 |
--------------------------------------------------------------------------------
/testing/test_lstm_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from tensorflow.keras.models import Model, Sequential
8 | from tensorflow.keras.layers import LSTM, Input
9 | import tensorflow.keras.backend as K
10 |
11 | from NumPyNet.exception import LayerError
12 | from NumPyNet.exception import NotFittedError
13 | from NumPyNet.layers.lstm_layer import LSTM_layer
14 | from NumPyNet.utils import data_to_timesteps
15 |
16 | import numpy as np
17 | import pytest
18 | from hypothesis import strategies as st
19 | from hypothesis import given
20 | from hypothesis import settings
21 |
22 | from random import choice
23 |
24 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
25 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
26 |
27 |
28 | class TestLSTMlayer :
29 | '''
30 | Tests:
31 |     - constructor of LSTM_layer object
32 | - print function
33 | - forward function against tf.keras
34 | - backward function against tf.keras
35 |
36 |   to be done:
37 | update function.
38 | '''
39 |
40 | def test_constructor (self):
41 | pass
42 |
43 | def test_printer (self):
44 | pass
45 |
46 | def test_forward (self):
47 |
48 | batch = 11
49 | timesteps = 5
50 | features = 3
51 | outputs = 5
52 |
53 | np.random.seed(123)
54 |
55 | data = np.random.uniform(size=(batch, features))
56 |
57 | inpt_keras, _ = data_to_timesteps(data, timesteps)
58 |
59 | assert inpt_keras.shape == (batch - timesteps, timesteps, features)
60 |
61 | weights = [np.random.uniform(size=(features, outputs)), np.random.uniform(size=(outputs,outputs))]
62 | bias = [np.zeros(shape=(outputs,), dtype=float), np.zeros(shape=outputs, dtype=float)]
63 |
64 | # assign same weights to all the kernel in keras as for NumPyNet
65 | keras_weights1 = np.concatenate([weights[0] for i in range(4)], axis=1)
66 | keras_weights2 = np.concatenate([weights[1] for i in range(4)], axis=1)
67 | keras_bias = np.concatenate([bias[0] for i in range(4)])
68 |
69 | for i in range(4):
70 | np.testing.assert_allclose(keras_weights1[:,outputs*i:outputs*(i+1)], weights[0], rtol=1e-5, atol=1e-8)
71 |
72 | for i in range(4):
73 | np.testing.assert_allclose(keras_weights2[:,outputs*i:outputs*(i+1)], weights[1], rtol=1e-5, atol=1e-8)
74 |
75 | inp = Input(shape=(inpt_keras.shape[1:]))
76 | lstm = LSTM(units=outputs, implementation=1, use_bias=False)(inp)
77 | model = Model(inputs=[inp], outputs=[lstm])
78 |
79 | model.set_weights([keras_weights1, keras_weights2])
80 |
81 | inpt_numpynet = data.reshape(batch, 1, 1, features)
82 | layer = LSTM_layer(outputs=outputs, steps=timesteps, weights=weights, bias=bias, input_shape=inpt_numpynet.shape)
83 |
84 | np.testing.assert_allclose(layer.uf.weights, model.get_weights()[0][:, :outputs], rtol=1e-5, atol=1e-8)
85 | np.testing.assert_allclose(layer.ui.weights, model.get_weights()[0][:, outputs:2*outputs], rtol=1e-5, atol=1e-8)
86 | np.testing.assert_allclose(layer.ug.weights, model.get_weights()[0][:, 2*outputs:3*outputs], rtol=1e-5, atol=1e-8)
87 | np.testing.assert_allclose(layer.uo.weights, model.get_weights()[0][:, 3*outputs:4*outputs], rtol=1e-5, atol=1e-8)
88 |
89 | np.testing.assert_allclose(layer.wf.weights, model.get_weights()[1][:, :outputs], rtol=1e-5, atol=1e-8)
90 | np.testing.assert_allclose(layer.wi.weights, model.get_weights()[1][:, outputs:2*outputs], rtol=1e-5, atol=1e-8)
91 | np.testing.assert_allclose(layer.wg.weights, model.get_weights()[1][:, 2*outputs:3*outputs], rtol=1e-5, atol=1e-8)
92 | np.testing.assert_allclose(layer.wo.weights, model.get_weights()[1][:, 3*outputs:4*outputs], rtol=1e-5, atol=1e-8)
93 |
94 | forward_out_keras = model.predict(inpt_keras)
95 |
96 | layer.forward(inpt=inpt_numpynet)
97 | forward_out_numpynet = layer.output.reshape(batch, outputs)
98 |
99 | # np.allclose(forward_out_numpynet, forward_out_keras)
100 |
101 | # np.abs(forward_out_keras - forward_out_numpynet).max()
102 |
103 |
104 | def test_backward (self):
105 | pass
106 |
--------------------------------------------------------------------------------
/NumPyNet/metrics.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 |
9 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
10 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
11 |
12 |
13 | def mean_accuracy_score (y_true, y_pred):
14 | '''
15 | Compute average accuracy score of a classification.
16 |
17 | Parameters
18 | ----------
19 | y_true : 2d array-like
20 | Ground truth (correct) labels expressed as image.
21 |
22 | y_pred : 2d array-like
23 | Predicted labels, as returned by the NN
24 |
25 | Returns
26 | -------
27 | score : float
28 | Average accuracy between the two inputs
29 | '''
30 | return np.mean(np.equal(y_true, y_pred))
31 |
32 |
33 | def mean_square_error (y_true, y_pred):
34 | '''
35 | Compute average square error score of a classification.
36 |
37 | Parameters
38 | ----------
39 | y_true : 2d array-like
40 | Ground truth (correct) labels expressed as image.
41 |
42 | y_pred : 2d array-like
43 | Predicted labels, as returned by the NN
44 |
45 | Returns
46 | -------
47 | score : float
48 | Average square error between the two inputs
49 | '''
50 | diff = y_true - y_pred
51 | diff *= diff
52 | return np.mean(diff)
53 |
54 |
55 | def mean_absolute_error (y_true, y_pred):
56 | '''
57 | Compute average absolute error score of a classification.
58 |
59 | Parameters
60 | ----------
61 | y_true : 2d array-like
62 | Ground truth (correct) labels expressed as image.
63 |
64 | y_pred : 2d array-like
65 | Predicted labels, as returned by the NN
66 |
67 | Returns
68 | -------
69 | score : float
70 | Average absolute error between the two inputs
71 | '''
72 | diff = np.abs(y_true - y_pred)
73 | return np.mean(diff)
74 |
75 |
76 | def mean_logcosh (y_true, y_pred):
77 | '''
78 | Compute average logcosh score of a classification.
79 |
80 | Parameters
81 | ----------
82 | y_true : 2d array-like
83 | Ground truth (correct) labels expressed as image.
84 |
85 | y_pred : 2d array-like
86 | Predicted labels, as returned by the NN
87 |
88 | Returns
89 | -------
90 | score : float
91 | Average logcosh error between the two inputs
92 | '''
93 | diff = np.log(np.cosh(y_true - y_pred))
94 | return np.mean(diff)
95 |
96 |
97 | def mean_hellinger (y_true, y_pred):
98 | '''
99 | Compute average hellinger score of a classification.
100 |
101 | Parameters
102 | ----------
103 | y_true : 2d array-like
104 | Ground truth (correct) labels expressed as image.
105 |
106 | y_pred : 2d array-like
107 | Predicted labels, as returned by the NN
108 |
109 | Returns
110 | -------
111 | score : float
112 | Average hellinger error between the two inputs
113 | '''
114 | diff = np.sqrt(y_true) - np.sqrt(y_pred)
115 | diff *= diff
116 | return np.mean(diff)
117 |
118 |
119 | def mean_iou_score (y_true, y_pred):
120 | '''
121 | Compute average IoU score of a classification.
122 |   IoU is computed as the Intersection Over Union between true and predicted labels.
123 |
124 |   It's a typical metric in segmentation problems, so we encourage you to use
125 |   it when dealing with image processing tasks.
126 |
127 | Parameters
128 | ----------
129 | y_true : 2d array-like
130 | Ground truth (correct) labels expressed as image.
131 |
132 | y_pred : 2d array-like
133 | Predicted labels, as returned by the NN
134 |
135 | Returns
136 | -------
137 | score : float
138 | Average IoU between the two inputs
139 | '''
140 |
141 | unique_labels = set(y_true.ravel())
142 | num_unique_labels = len(unique_labels)
143 |
144 | I = np.empty(shape=(num_unique_labels, ), dtype=float)
145 | U = np.empty(shape=(num_unique_labels, ), dtype=float)
146 |
147 | for i, val in enumerate(unique_labels):
148 |
149 | pred_i = y_pred == val
150 | lbl_i = y_true == val
151 |
152 | I[i] = np.sum(np.logical_and(lbl_i, pred_i))
153 | U[i] = np.sum(np.logical_or(lbl_i, pred_i))
154 |
155 | return np.mean(I / U)
156 |
157 |
158 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/route_layer.md:
--------------------------------------------------------------------------------
1 | # Route Layer
2 |
3 | In the YOLOv3 model, the Route layer is useful to bring finer-grained features in from earlier in the network.
4 | This means that its main function is to recover outputs from previous layers in the network and bring them forward, skipping all the in-between processing.
5 | Moreover, it is able to recall more than one layer's output by concatenating them. In this case though, all the Route layer's inputs must have the same width and height.
6 | Its role in a CNN is similar to what has already been described for the [Shortcut layer](./shortcut_layer.md).
7 |
8 | In YOLOv3 applications, it is always used to concatenate outputs along the channels: let out1 = (batch, w, h, c1) and out2 = (batch, w, h, c2) be the two inputs of the Route layer; then the final output will be a tensor of shape (batch, w, h, c1 + c2), as described [here](https://github.com/AlexeyAB/darknet/issues/487).
9 | On the other hand, the popular machine learning library Caffe lets the user choose, via the [Concat Layer](https://caffe.berkeleyvision.org/tutorial/layers/concat.html), whether the concatenation is performed along the channels or the batch dimension, in a similar way as described above.
10 |
11 | Our implementation is similar to Caffe's, even though the applications have more resemblance with the YOLO models.
12 |
13 | An example on how to instantiate a Route layer and use it is shown in the code below:
14 |
15 | ```python
16 |
17 | from NumPyNet.layers.route_layer import Route_layer
18 | import numpy as np # the library is entirely based on numpy
19 |
20 | from NumPyNet.network import Network
21 |
22 | model = Network() # imagine that model is a CNN with 10 layers
23 |
24 | # [...] instantiation of the model, via model.add or a cfg file
25 |
26 | # layer init
27 | layer = Route_layer(inpt_layers=(3,6), by_channels=True)
28 |
29 | # forward
30 | layer.forward(network=model) # assuming layers 3 and 6 have the same batch, width and height dimensions, the output will be (batch, w, h, c1 + c2)
31 | output = layer.output
32 |
33 | # backward
34 | delta = np.random.uniform(low=0., high=1., size=layer.output.shape) # delta coming from next layers, ideally
35 | layer.backward(delta=delta, network=model)
36 |
37 | # now backward has updated only the self.delta of layers 3 and 6 of model; there's no layer.delta, since it is not needed
38 | ```
39 | Of course, there are smarter ways of using this layer, as demonstrated by YOLOv3.
40 | The parameter `by_channels` determines `self.axis` (3 if True, else 0), in order to perform the correct concatenation.
41 |
42 | In particular, those are the definitions of the `forward` and `backward` functions in `NumPyNet`:
43 |
44 | ```python
45 | def forward(self, network):
46 | '''
47 | Concatenate along chosen axis the outputs of selected network layers
48 | In main CNN applications, like YOLOv3, the concatenation happens channels wise
49 |
50 | Parameters:
51 | network : Network object type.
52 | '''
53 |
54 |     self.output = np.concatenate([network[layer_idx].output for layer_idx in self.input_layers], axis=self.axis)
55 | ```
56 |
57 | Where `self.input_layers` is the list of indexes at which the chosen layers are located in the network (starting at 1, since 0 is always an [Input Layer](./input_layer.md)). As you can see, it is a simple concatenation of the selected outputs along the correct axis.
58 |
59 | And this is the definition of `backward`:
60 |
61 | ```python
62 | def backward(self, delta, network):
63 | '''
64 | Sum self.delta to the correct layer delta on the network
65 |
66 | Parameters:
67 | delta : 4-d numpy array, network delta to be backpropagated
68 | network: Network object type.
69 | '''
70 |
71 | if self.axis == 3: # this works for concatenation by channels axis
72 | channels_sum = 0
73 | for idx in self.input_layers:
74 | channels = network[idx].out_shape[3]
75 | network[idx].delta += delta[:,:,:, channels_sum : channels_sum + channels]
76 | channels_sum += channels
77 |
78 | elif self.axis == 0: # this works for concatenation by batch axis
79 | batch_sum = 0
80 |       for idx in self.input_layers:
81 | batches = network[idx].out_shape[0]
82 | network[idx].delta += delta[batch_sum : batch_sum + batches,:,:,:]
83 | batch_sum += batches
84 | ```
85 |
86 | In this case, `self.delta` of the corresponding layer is updated taking the dimensions into account: if the first layer has 3 channels, then only the first 3 channels of `delta` are passed to it.
87 |
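88 | The channel bookkeeping of the backward can be reproduced in isolation (a minimal sketch with plain numpy arrays, outside of any `Network` object):
89 |
90 | ```python
91 | import numpy as np
92 |
93 | out1 = np.zeros(shape=(2, 8, 8, 3))  # output of a layer with 3 channels
94 | out2 = np.zeros(shape=(2, 8, 8, 5))  # output of a layer with 5 channels
95 |
96 | # forward: concatenation along the channels axis (axis=3)
97 | output = np.concatenate([out1, out2], axis=3)  # shape (2, 8, 8, 8)
98 |
99 | # backward: the incoming delta is split with the same channel offsets
100 | delta = np.random.uniform(size=output.shape)
101 | delta1 = delta[..., :3]   # slice routed back to the first layer
102 | delta2 = delta[..., 3:8]  # slice routed back to the second layer
103 |
104 | assert delta1.shape == out1.shape and delta2.shape == out2.shape
105 | ```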
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import print_function
5 |
6 | import os
7 |
8 | try:
9 | from setuptools import setup
10 | from setuptools import find_packages
11 |
12 | except ImportError:
13 | from distutils.core import setup
14 |
15 |   # distutils does not provide find_packages: fall back to an explicit list
16 |   def find_packages (**kwargs):
17 |     return ['NumPyNet', 'NumPyNet.layers']
15 |
16 |
17 | def get_requires (requirements_filename):
18 | '''
19 | What packages are required for this module to be executed?
20 |
21 | Parameters
22 | ----------
23 | requirements_filename : str
24 | filename of requirements (e.g requirements.txt)
25 |
26 | Returns
27 | -------
28 | requirements : list
29 | list of required packages
30 | '''
31 | with open(requirements_filename, 'r') as fp:
32 | requirements = fp.read()
33 |
34 | return list(filter(lambda x: x != '', requirements.split()))
35 |
36 |
37 | def read_description (readme_filename):
38 | '''
39 | Description package from filename
40 |
41 | Parameters
42 | ----------
43 | readme_filename : str
44 | filename with readme information (e.g README.md)
45 |
46 | Returns
47 | -------
48 | description : str
49 | str with description
50 | '''
51 |
52 | try:
53 |
54 | with open(readme_filename, 'r') as fp:
55 | description = '\n'
56 | description += fp.read()
57 |
58 | return description
59 |
60 | except IOError:
61 | return ''
62 |
63 |
64 |
65 | here = os.path.abspath(os.path.dirname(__file__))
66 |
67 | # Package meta-data.
68 | NAME = 'NumPyNet'
69 | DESCRIPTION = 'Neural Networks Library in pure Numpy'
70 | URL = 'https://github.com/Nico-Curti/NumPyNet'
71 | EMAIL = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
72 | AUTHOR = ['Mattia Ceccarelli', 'Nico Curti']
73 | REQUIRES_PYTHON = '>=2.7'
74 | VERSION = None
75 | KEYWORDS = 'neural-networks deep-neural-networks deep-learning image-classification super-resolution'
76 |
77 | README_FILENAME = os.path.join(here, 'README.md')
78 | REQUIREMENTS_FILENAME = os.path.join(here, 'requirements.txt')
79 | VERSION_FILENAME = os.path.join(here, 'NumPyNet', '__version__.py')
80 |
81 | # Import the README and use it as the long-description.
82 | # Note: this will only work if 'README.md' is present in your MANIFEST.in file!
83 | try:
84 | LONG_DESCRIPTION = read_description(README_FILENAME)
85 |
86 | except IOError:
87 | LONG_DESCRIPTION = DESCRIPTION
88 |
89 |
90 | # Load the package's __version__.py module as a dictionary.
91 | about = {}
92 | if not VERSION:
93 | with open(VERSION_FILENAME) as fp:
94 | exec(fp.read(), about)
95 |
96 | else:
97 | about['__version__'] = VERSION
98 |
99 | # parse version variables and add them to command line as definitions
100 | Version = about['__version__'].split('.')
101 |
102 |
103 | setup(
104 | name = NAME,
105 | version = about['__version__'],
106 | description = DESCRIPTION,
107 | long_description = LONG_DESCRIPTION,
108 | long_description_content_type = 'text/markdown',
109 | author = AUTHOR,
110 | author_email = EMAIL,
111 | maintainer = AUTHOR,
112 | maintainer_email = EMAIL,
113 | python_requires = REQUIRES_PYTHON,
114 | install_requires = get_requires(REQUIREMENTS_FILENAME),
115 | url = URL,
116 | download_url = URL,
117 | keywords = KEYWORDS,
118 | packages = find_packages(include=['NumPyNet', 'NumPyNet.*'], exclude=('test', 'testing')),
119 | include_package_data = True, # no absolute paths are allowed
120 | platforms = 'any',
121 | classifiers =[
122 | #'License :: OSI Approved :: GPL License',
123 | 'Programming Language :: Python',
124 | 'Programming Language :: Python :: 3',
125 | 'Programming Language :: Python :: 3.6',
126 | 'Programming Language :: Python :: Implementation :: CPython',
127 | 'Programming Language :: Python :: Implementation :: PyPy'
128 | ],
129 | license = 'MIT'
130 | )
131 |
--------------------------------------------------------------------------------
/testing/test_metrics.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.metrics import mean_accuracy_score
10 | from NumPyNet.metrics import mean_square_error
11 | from NumPyNet.metrics import mean_absolute_error
12 | from NumPyNet.metrics import mean_logcosh
13 | from NumPyNet.metrics import mean_hellinger
14 | from NumPyNet.metrics import mean_iou_score
15 |
16 | import numpy as np
17 | import pytest
18 | from hypothesis import strategies as st
19 | from hypothesis import given
20 | from hypothesis import settings
21 | from hypothesis import example
22 |
23 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
24 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
25 |
26 |
27 | class TestMetrics:
28 |
29 | @given(size = st.integers(min_value=10, max_value=100))
30 | @settings(max_examples=10, deadline=None)
31 | def test_mean_accuracy_score (self, size):
32 | y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
33 | y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
34 |
35 | metric = tf.keras.metrics.Accuracy()
36 |
37 | res_py = mean_accuracy_score(y_true, y_pred)
38 |
39 | metric.update_state(y_true, y_pred)
40 | res_tf = metric.result().numpy()
41 |
42 | np.testing.assert_allclose(res_tf, res_py, atol=1e-8, rtol=1e-5)
43 |
44 |
45 | @given(size = st.integers(min_value=10, max_value=100))
46 | @settings(max_examples=10, deadline=None)
47 | def test_mean_absolute_error (self, size):
48 | y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
49 | y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
50 |
51 | metric = tf.keras.metrics.MeanAbsoluteError()
52 |
53 | res_py = mean_absolute_error(y_true, y_pred)
54 |
55 | metric.update_state(y_true, y_pred)
56 | res_tf = metric.result().numpy()
57 |
58 | np.testing.assert_allclose(res_tf, res_py, atol=1e-8, rtol=1e-5)
59 |
60 |
61 | @given(size = st.integers(min_value=10, max_value=100))
62 | @settings(max_examples=10, deadline=None)
63 | def test_mean_squared_error (self, size):
64 | y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
65 | y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
66 |
67 | metric = tf.keras.metrics.MeanSquaredError()
68 |
69 | res_py = mean_square_error(y_true, y_pred)
70 |
71 | metric.update_state(y_true, y_pred)
72 | res_tf = metric.result().numpy()
73 |
74 | np.testing.assert_allclose(res_tf, res_py, atol=1e-8, rtol=1e-5)
75 |
76 |
77 | @given(size = st.integers(min_value=10, max_value=100))
78 | @settings(max_examples=10, deadline=None)
79 | def test_mean_logcosh (self, size):
80 | y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
81 | y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
82 |
83 | metric = tf.keras.metrics.LogCoshError()
84 |
85 | res_py = mean_logcosh(y_true, y_pred)
86 |
87 | metric.update_state(y_true, y_pred)
88 | res_tf = metric.result().numpy()
89 |
90 | np.testing.assert_allclose(res_tf, res_py, atol=1e-8, rtol=1e-5)
91 |
92 |
93 | @given(size = st.integers(min_value=10, max_value=100))
94 | @settings(max_examples=10, deadline=None)
95 | def test_mean_hellinger (self, size):
96 | y_true = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
97 | y_pred = np.random.choice([0., 1.], p=[.5, .5], size=(size, ))
98 |
99 | res_py = mean_hellinger(y_true, y_pred)
100 |
101 | assert res_py >= 0.
102 | np.testing.assert_allclose(res_py, mean_hellinger(y_pred, y_true), rtol=1e-5, atol=1e-8)
103 |
104 |
105 | @given(size = st.integers(min_value=10, max_value=100),
106 | # num_class = st.integers(min_value=2, max_value=100)
107 | )
108 | @settings(max_examples=10, deadline=None)
109 | def test_mean_iou_score (self, size):
110 | # working only with two classes for now
111 | y_true = np.random.choice([0., 1.], size=(size, ))
112 | y_pred = np.random.choice([0., 1.], size=(size, ))
113 |
114 | metric = tf.keras.metrics.MeanIoU(num_classes=2)
115 |
116 | res_py = mean_iou_score(y_true, y_pred)
117 |
118 | metric.update_state(y_true, y_pred)
119 | res_tf = metric.result().numpy()
120 |
121 | np.testing.assert_allclose(res_tf, res_py, atol=1e-8, rtol=1e-5)
122 |
--------------------------------------------------------------------------------
/NumPyNet/detection.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 |
9 | from NumPyNet.box import Box
10 |
11 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
12 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
13 |
14 |
15 | class Detection(object):
16 |
17 | '''
18 | Detection object
19 |
20 | Parameters
21 | ----------
22 | num_classes : int (default=None)
23 | Number of classes to monitor
24 |
25 | mask_size : int (default=None)
26 | Size of the possible mask values
27 |
28 | Notes
29 | -----
30 | .. note::
31 | The detection object stores the detection probability of each class and its "objectness".
32 |     Moreover, the member "bbox" stores the detection box info as a Box object, aka (x, y, w, h)
33 | '''
34 |
35 | def __init__(self, num_classes=None, mask_size=None):
36 |
37 |     self._prob = [0.] * num_classes if num_classes is not None else None
38 |     self._mask = [0.] * mask_size if mask_size is not None else None
39 |
40 | self._objectness = None
41 | self._box = Box()
42 |
43 | @property
44 | def box(self):
45 | '''
46 | Return the box object as tuple
47 | '''
48 | return self._box.box
49 |
50 | @property
51 | def objectness(self):
52 | '''
53 | Return the objectness of the detection
54 | '''
55 | return self._objectness
56 |
57 | @property
58 | def prob(self):
59 | '''
60 | Return the probability of detection for each class
61 | '''
62 | return self._prob
63 |
64 |
65 | @staticmethod
66 | def top_k_predictions(output):
67 | '''
68 | Compute the indices of the sorted output
69 |
70 | Parameters
71 | ----------
72 | output: array_like (1D array)
73 | Array of predictions expressed as floats.
74 |       Its values will be sorted in descending order and the corresponding array of indices
75 |       is given in output.
76 |
77 | Returns
78 | -------
79 | indexes: list (int32 values)
80 |       Array of indexes which sort the output values in descending order.
81 | '''
82 | return np.argsort(output)[::-1] # it is the fastest way to obtain a descending order
83 |
84 |
85 | @staticmethod
86 | def do_nms_obj(detections, thresh):
87 | '''
88 | Sort the detection according to the probability of each class and perform the IOU as filter for the boxes
89 |
90 | Parameters
91 | ----------
92 | detections: array_like (1D array)
93 | Array of detection objects.
94 |
95 | thresh: float
96 | Threshold to apply for IoU filtering.
97 | If IoU is greater than thresh the corresponding objectness and probabilities are set to null.
98 |
99 | Returns
100 | -------
101 | dets: array_like (1D array)
102 | Array of detection objects processed.
103 | '''
104 |
105 | # filter 0 objectness
106 | detections = filter(lambda x : x.objectness != 0, detections)
107 |
108 | # sort the objectness
109 | detections = sorted(detections, key=lambda x : x.objectness, reverse=True)
110 |
111 |     # MISS : the IoU suppression step is not implemented yet (see the sketch in __main__ below)
112 |
113 |
114 | @staticmethod
115 | def do_nms_sort(detections, thresh):
116 | '''
117 | Sort the detection according to the objectness and perform the IOU as filter for the boxes.
118 |
119 | Parameters
120 | ----------
121 | detections: array_like (1D array)
122 | Array of detection objects.
123 |
124 | thresh: float
125 | Threshold to apply for IoU filtering.
126 | If IoU is greater than thresh the corresponding objectness and probabilities are set to null.
127 |
128 | Returns
129 | -------
130 | dets: array_like (1D array)
131 | Array of detection objects processed.
132 | '''
133 |
134 | # filter 0 objectness
135 | detections = filter(lambda x : x.objectness != 0, detections)
136 |
137 | # sort the objectness
138 | detections = sorted(detections, key=lambda x : x.objectness, reverse=True)
139 |
140 |     # MISS : the IoU suppression step is not implemented yet (see the sketch in __main__ below)
141 |
142 | def __str__(self):
143 | '''
144 | Printer of objectness and probability
145 | '''
146 | probs = ' '.join(['{:.3f}'.format(x) for x in self.prob])
147 | return '{0:.3f}: {1}'.format(self.objectness, probs)
148 |
149 |
150 | if __name__ == '__main__':
151 |
152 | print('insert testing here')
153 |
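154 |   # The do_nms_* methods above stop at the MISS marker: the greedy IoU
155 |   # suppression step is still missing. Below is a minimal standalone sketch of
156 |   # that step on plain (x, y, w, h) tuples; the local "iou" helper and the toy
157 |   # "boxes"/"scores" data are illustrative only and not part of the NumPyNet API.
158 | 
159 |   def iou (box1, box2):
160 |     # intersection over union of two (x, y, w, h) boxes (center + size format)
161 |     x1, y1, w1, h1 = box1
162 |     x2, y2, w2, h2 = box2
163 |     inter_w = max(0., min(x1 + w1*.5, x2 + w2*.5) - max(x1 - w1*.5, x2 - w2*.5))
164 |     inter_h = max(0., min(y1 + h1*.5, y2 + h2*.5) - max(y1 - h1*.5, y2 - h2*.5))
165 |     inter = inter_w * inter_h
166 |     union = w1*h1 + w2*h2 - inter
167 |     return inter / union if union > 0. else 0.
168 | 
169 |   boxes  = [(10., 10., 8., 8.), (11., 10., 8., 8.), (40., 40., 6., 6.)]
170 |   scores = [.9, .75, .8]
171 | 
172 |   # sort by score (descending) and greedily keep only the boxes that do not
173 |   # overlap too much (IoU > 0.5) with an already kept one
174 |   order = sorted(range(len(boxes)), key=lambda i: scores[i], reverse=True)
175 |   keep = []
176 |   for i in order:
177 |     if all(iou(boxes[i], boxes[j]) <= .5 for j in keep):
178 |       keep.append(i)
179 | 
180 |   print('kept detections: {}'.format([boxes[i] for i in keep]))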
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/upsample_layer.md:
--------------------------------------------------------------------------------
1 | # Upsample Layer
2 |
3 | To feed a super-resolution model we have to use a series of known LR-HR image pairs; starting from these considerations we can down-sample our images by a desired scale factor, typically between 2 and 8.
4 | On the other hand, though, we can also consider a set of images as the LR ones and obtain the corresponding HR versions by up-sampling.
5 | Both operations can be achieved using a Pooling algorithm (in particular an [AveragePooling](./avgpool_layer.md)) for the down-sampling, or we can use an UpSample layer.
6 | 
7 | The UpSample function is commonly related to GAN (Generative Adversarial Networks) models, in which we have to provide a series of artificial images to a given Neural Network, but it's also a function that can be introduced inside a Neural Network model to rescale the number of features.
8 | The UpSample layer inside a Neural Network model has to provide both up- and down-sampling techniques, since one is used in the `forward` function while its inverse is applied during back-propagation.
9 |
10 | This is an example code on how to use the single UpSample layer:
11 |
12 | ```python
13 | from NumPyNet.layers.upsample_layer import Upsample_layer
14 |
15 | import numpy as np # the library is entirely based on numpy
16 |
17 | batch, w, h, c = (5, 100, 100, 3)
18 | inpt = np.random.uniform(low=0., high=1., size=(batch, w, h, c))
19 |
20 | stride = -3 # in this case we will have a downsample by factor 3 x 3 = 9
21 | scale = 1.5
22 |
23 | layer = Upsample_layer(scale=scale, stride=stride)
24 |
25 | # FORWARD
26 |
27 | layer.forward(inpt)
28 | forward_out = layer.output # downscaled images
29 | print(layer)
30 |
31 | # BACKWARD
32 |
33 | layer.delta = layer.output
34 | delta = np.empty(shape=inpt.shape, dtype=float)
35 | layer.backward(delta)
36 |
37 | # now delta is updated and ready to be backpropagated.
38 | ```
39 |
40 | To have a closer look at what's happening, this is the definition of `forward`:
41 |
42 | ```python
43 | def forward(self, inpt):
44 | '''
45 |     Forward of the upsample layer: applies an upsample (pixel replication) or
46 |     a downsample (block average) to the input according to the sign of stride
47 |
48 | Parameters:
49 | inpt: the input to be up-down sampled
50 | '''
51 | self.batch, self.w, self.h, self.c = inpt.shape
52 |
53 | if self.reverse: # Downsample
54 | self.output = self._downsample(inpt) * self.scale
55 |
56 | else: # Upsample
57 | self.output = self._upsample(inpt) * self.scale
58 |
59 | self.delta = np.zeros(shape=inpt.shape, dtype=float)
60 | ```
61 | The forward calls the functions `_downsample` or `_upsample` depending on the value of `self.reverse` (`True` or `False`, respectively).
62 |
63 | And this is the definition of `backward`:
64 |
65 | ```python
66 | def backward(self, delta):
67 | '''
68 | Compute the inverse transformation of the forward function
69 | on the gradient
70 |
71 | Parameters:
72 | delta : global error to be backpropagated
73 | '''
74 |
75 | if self.reverse: # Upsample
76 | delta[:] = self._upsample(self.delta) * (1. / self.scale)
77 |
78 | else: # Downsample
79 | delta[:] = self._downsample(self.delta) * (1. / self.scale)
80 | ```
81 | That's just the inverse of the forward.
82 |
83 | The real core of the layer, though, are the two functions `_upsample` and `_downsample`, defined as:
84 |
85 | ```python
86 | def _upsample (self, inpt):
87 | batch, w, h, c = inpt.shape # number of rows/columns
88 | b, ws, hs, cs = inpt.strides # row/column strides
89 |
90 | x = as_strided(inpt, (batch, w, self.stride[0], h, self.stride[1], c), (b, ws, 0, hs, 0, cs)) # view a as larger 4D array
91 | return x.reshape(batch, w * self.stride[0], h * self.stride[1], c) # create new 2D array
92 | ```
93 |
94 | The up-sample function uses the stride machinery of the NumPy array to rearrange and replicate the value of each pixel over a mask of size `stride × stride`.
95 |
96 | And here's the `_downsample` function:
97 |
98 | ```python
99 | def _downsample (self, inpt):
100 | # This function works only if the dimensions are perfectly divisible by strides
101 | # TODO: add padding (?)
102 | batch, w, h, c = inpt.shape
103 | scale_w = w // self.stride[0]
104 | scale_h = h // self.stride[1]
105 |
106 | return inpt.reshape(batch, scale_w, self.stride[0], scale_h, self.stride[1], c).mean(axis=(2, 4))
107 | ```
108 |
109 | The down-sampling algorithm is obtained by reshaping the input array according to the two stride factors (`self.stride` in the code) along the spatial dimensions and computing the mean along the new axes.
110 | 
111 | Unfortunately, for now it works only if `h % stride` and `w % stride` are zero. A standalone sketch of both tricks is shown below.
112 |
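113 | As a minimal, self-contained sketch of both tricks (plain NumPy only, assuming a square stride of 2), consider:
114 | 
115 | ```python
116 | import numpy as np
117 | from numpy.lib.stride_tricks import as_strided
118 | 
119 | stride = 2
120 | batch, w, h, c = (1, 4, 4, 1)
121 | inpt = np.arange(batch * w * h * c, dtype=float).reshape(batch, w, h, c)
122 | 
123 | # upsample: view each pixel as a (stride x stride) block, then flatten the view
124 | b, ws, hs, cs = inpt.strides
125 | up = as_strided(inpt, (batch, w, stride, h, stride, c), (b, ws, 0, hs, 0, cs))
126 | up = up.reshape(batch, w * stride, h * stride, c)
127 | 
128 | # downsample: group pixels into (stride x stride) blocks and average them
129 | down = up.reshape(batch, w, stride, h, stride, c).mean(axis=(2, 4))
130 | 
131 | # averaging the replicated pixels gives back the original image
132 | assert np.allclose(down, inpt)
133 | ```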
--------------------------------------------------------------------------------
/NumPyNet/video.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import cv2
8 | import time
9 | from threading import Thread
10 |
11 | try:
12 |
13 | from queue import Queue
14 |
15 | except ImportError:
16 |
17 | from Queue import Queue
18 |
19 | from NumPyNet.image import Image
20 | from NumPyNet.exception import VideoError
21 |
22 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
23 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
24 |
25 |
26 | class VideoCapture (object):
27 |
28 | '''
29 | OpenCV VideoCapture wrap in detached thread.
30 |
31 | Parameters
32 | ----------
33 | cam_index : integer or str
34 | Filename or cam index
35 |
36 | queue_size : int
37 |     Maximum number of frames to store into the queue
38 |
39 | Example
40 | -------
41 | >>> cap = VideoCapture()
42 | >>> time.sleep(.1)
43 | >>>
44 | >>> cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)
45 | >>>
46 | >>> cap.start()
47 | >>>
48 | >>> while cap.running():
49 | >>>
50 | >>> frame = cap.read()
51 | >>> frame.show('Camera', ms=1)
52 | >>> print('FPS: {:.3f}'.format(cap.fps))
53 | >>>
54 | >>> cap.stop()
55 | >>>
56 | >>> cv2.destroyAllWindows()
57 |
58 |
59 | Notes
60 | -----
61 |   The object is inspired by the imutils implementation.
62 |
63 | References
64 | ----------
65 | - https://github.com/jrosebr1/imutils
66 | '''
67 |
68 | def __init__ (self, cam_index=0, queue_size=128):
69 |
70 | self._stream = cv2.VideoCapture(cam_index)
71 |
72 | if self._stream is None or not self._stream.isOpened():
73 |       raise VideoError('Cannot open or find camera. Given: {}'.format(cam_index))
74 |
75 | self._queue = Queue(maxsize=queue_size)
76 | self._thread = Thread(target=self._update, args=())
77 | self._thread.daemon = True
78 |
79 | self._num_frames = 0
80 | self._start = None
81 | self._end = None
82 |
83 | self._stopped = False
84 |
85 | def start (self):
86 | '''
87 | Start the video capture in thread
88 | '''
89 | self._thread.start()
90 | return self
91 |
92 | def _update (self):
93 | '''
94 | Infinite loop of frame reading.
95 | Each frame is inserted into the private queue.
96 | '''
97 |
98 | self._start = time.time()
99 |
100 | while not self._stopped:
101 |
102 | if not self._queue.full():
103 | (grabbed, frame) = self._stream.read()
104 |
105 | if not grabbed:
106 | self._stopped = True
107 |
108 | else:
109 | self._num_frames += 1
110 |
111 | self._queue.put(frame)
112 |
113 | else:
114 |
115 | time.sleep(.1)
116 |
117 | self._stream.release()
118 |
119 | def read (self):
120 | '''
121 | Get a frame as Image object
122 |
123 | Returns
124 | -------
125 | im : Image obj
126 | The loaded image
127 | '''
128 | im = Image()
129 | return im.from_frame(self._queue.get())
130 |
131 | def running (self):
132 | '''
133 | Check if new frames are available
134 |
135 | Returns
136 | -------
137 | running : bool
138 |       True if there is data in the queue, False otherwise
139 | '''
140 |
141 | tries = 0
142 |
143 | while self._queue.qsize() == 0 and not self._stopped and tries < 5:
144 | time.sleep(.1)
145 | tries += 1
146 |
147 | return self._queue.qsize() > 0
148 |
149 |
150 | def stop (self):
151 | '''
152 | Stop the thread
153 | '''
154 |
155 | self._stopped = True
156 | self._thread.join()
157 | self._end = time.time()
158 |
159 | @property
160 | def elapsed (self):
161 | '''
162 |     Get the elapsed time from start up to now (or up to the stop time, if stopped)
163 |
164 | Returns
165 | -------
166 | elapsed : float
167 | Elapsed time
168 | '''
169 |     return (self._end if self._end is not None else time.time()) - self._start
170 |
171 | @property
172 | def fps (self):
173 | '''
174 | Get the frame per seconds
175 |
176 | Returns
177 | -------
178 | fps : float
179 | Frame per seconds
180 | '''
181 | return self._num_frames / self.elapsed
182 |
183 |
184 |
185 | if __name__ == '__main__':
186 |
187 | cap = VideoCapture()
188 | time.sleep(.1)
189 |
190 | cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)
191 |
192 | cap.start()
193 |
194 | while cap.running():
195 |
196 | frame = cap.read()
197 | frame.show('Camera', ms=1)
198 | print('FPS: {:.3f}'.format(cap.fps))
199 |
200 | cap.stop()
201 |
202 | cv2.destroyAllWindows()
203 |
204 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/dropout_layer.md:
--------------------------------------------------------------------------------
1 | # DropOut Layer
2 |
3 | **Overfitting** is a real problem for Convolutional Neural Networks and, in general, for every Machine Learning system. It's usually easy to identify an overfitted model, since it performs very well on the training dataset while its accuracy on a completely different set of data drops considerably.
4 | Avoiding overfitting means creating a generalized model that works well on different datasets.
5 | 
6 | One of the methods to avoid overfitting is the use of a DropOut layer: all it does is set to zero a randomly chosen set of neurons from the outputs of the previous layer. By doing so, the layer temporarily removes those neurons from the network (a neuron is said to be *dropped out*).
7 |
8 | For a deeper explanation of the dropout operation you can take a look at the [original paper](http://jmlr.org/papers/volume15/srivastava14a/srivastava14a.pdf) or this [useful blog post](https://machinelearningmastery.com/dropout-for-regularizing-deep-neural-networks/) that sums it up.
9 |
10 | The image below shows an example output of a single image processed by the DropOut layer:
11 |
12 | 
13 |
14 | The black dots are the dropped pixels (or neurons); in the backward pass the delta is propagated only through the remaining pixels.
15 |
16 | Here's a simple example of how to use the layer as a single operation (also used to produce the image above):
17 |
18 | ```python
19 | import os
20 | import numpy as np
21 | from PIL import Image
22 | from NumPyNet.layers.dropout_layer import Dropout_layer
23 | 
24 | np.random.seed(123)
25 | # these functions rescale the image: [0,255]->[0,1] and [0,1]->[0,255]
26 | img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
27 | float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
28 |
29 | filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
30 | inpt = np.asarray(Image.open(filename), dtype=float) # load the test image as a float array
31 | inpt.setflags(write=1)
32 | inpt = img_2_float(inpt)
33 |
34 | # adding batch dimension
35 | inpt = np.expand_dims(inpt, axis=0)
36 |
37 | prob = 0.1 # drops probability
38 |
39 | layer = Dropout_layer(prob=prob)
40 |
41 | # FORWARD
42 |
43 | layer.forward(inpt)
44 | forward_out = layer.output # of same shape as the input
45 |
46 | # BACKWARD
47 |
48 | delta = np.ones(shape=inpt.shape, dtype=float)
49 | layer.delta = np.ones(shape=layer.out_shape, dtype=float)
50 | layer.backward(delta)
51 |
52 | # and here delta is correctly updated and ready to be passed backward
53 | ```
54 |
55 | To have a closer look at what's happening, the definitions of the `forward` and `backward` functions are:
56 |
57 | ```python
58 | def forward(self, inpt):
59 | '''
60 |     Forward function of the Dropout layer: it creates a random mask for every input
61 |     in the batch and sets the chosen values to zero. The other pixels are scaled
62 |     with the scale variable.
63 | Parameters :
64 | inpt : array of shape (batch, w, h, c), input of the layer
65 | '''
66 |
67 | self._out_shape = inpt.shape
68 |
69 | self.rnd = np.random.uniform(low=0., high=1., size=self.out_shape) > self.probability
70 | self.output = self.rnd * inpt * self.scale
71 | self.delta = np.zeros(shape=inpt.shape)
72 | ```
73 |
74 | The code proceeds as follows:
75 | * create a random boolean mask
76 | * multiply the mask element-wise with `inpt` and then by `self.scale`
77 | * initialize `self.delta`
78 |
79 | 
80 |
81 | while if `prob=1`, `scale` is set to one, but it doesn't really matter, since every pixel is zero at that point, it's just to avoid the division.
82 |
83 | The backward function is:
84 |
85 | ```python
86 | def backward(self, delta=None):
87 | '''
88 | Backward function of the Dropout layer: given the same mask as the layer
89 |     it backpropagates delta only to those pixels whose values have not been set to zero
90 | in the forward.
91 | Parameters :
92 | delta : array of shape (batch, w, h, c), default value is None.
93 | If given, is the global delta to be backpropagated
94 | '''
95 |
96 | if delta is not None:
97 | self.delta = self.rnd * self.delta * self.scale
98 | delta[:] = self.delta.copy()
99 | ```
100 |
101 | The backward multiplies `delta` by `scale` only for the pixels unaffected by the "dropout".
102 | The mask sets to zero the corresponding values of `self.delta`, and then `delta` is updated.
103 |
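104 | As a minimal sketch of the same inverted-dropout logic in plain NumPy (the names `prob` and `scale` mirror the layer attributes):
105 | 
106 | ```python
107 | import numpy as np
108 | 
109 | np.random.seed(123)
110 | 
111 | prob = .1                                 # drop probability
112 | scale = 1. / (1. - prob)                  # inverted-dropout scale factor
113 | 
114 | x = np.random.uniform(size=(2, 8, 8, 3))  # toy (batch, w, h, c) input
115 | mask = np.random.uniform(size=x.shape) > prob
116 | 
117 | out = mask * x * scale                    # forward: drop and rescale
118 | delta = mask * np.ones_like(x) * scale    # backward: gradient flows only through kept pixels
119 | 
120 | # thanks to the rescaling, the mean activation is approximately preserved
121 | print(x.mean(), out.mean())
122 | ```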
--------------------------------------------------------------------------------
/testing/test_l2norm_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.layers.l2norm_layer import L2Norm_layer
10 |
11 | import numpy as np
12 | import pytest
13 | from hypothesis import strategies as st
14 | from hypothesis import given
15 | from hypothesis import settings
16 |
17 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
18 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
19 |
20 |
21 | class TestL2normLayer :
22 | '''
23 | Tests:
24 |     - constructor of the L2norm_layer.
25 | - printer of the layer.
26 | - forward against tensorflow.
27 | - backward against tensorflow.
28 |
29 | to be:
30 | backward doesn't work
31 | '''
32 |
33 | @given(ax = st.sampled_from([None, 1, 2, 3]))
34 | @settings(max_examples=20,
35 | deadline=None)
36 |   def test_constructor (self, ax):
37 |
38 | layer = L2Norm_layer(axis=ax)
39 |
40 | assert layer.axis == ax
41 | assert layer.scales == None
42 | assert layer.output == None
43 | assert layer.delta == None
44 | assert layer.out_shape == None
45 |
46 |
47 | @given(b = st.integers(min_value=1, max_value=15),
48 | w = st.integers(min_value=1, max_value=100),
49 | h = st.integers(min_value=1, max_value=100),
50 | c = st.integers(min_value=1, max_value=10))
51 | @settings(max_examples=50,
52 | deadline=None)
53 | def test_printer (self, b, w, h, c):
54 |
55 | layer = L2Norm_layer(input_shape=(b, w, h, c))
56 |
57 | print(layer)
58 |
59 | layer.input_shape = (3.14, w, h, c)
60 |
61 | with pytest.raises(ValueError):
62 | print(layer)
63 |
64 | @given(b = st.integers(min_value=3, max_value=15), # unstable for low values!
65 | w = st.integers(min_value=10, max_value=100),
66 | h = st.integers(min_value=10, max_value=100),
67 | c = st.integers(min_value=2, max_value=10),
68 | ax = st.sampled_from([None, 1, 2, 3]))
69 | @settings(max_examples=10,
70 | deadline=None)
71 | def test_forward (self, b, w, h, c, ax):
72 |
73 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c))
74 | inpt_tf = tf.Variable(inpt)
75 |
76 | # NumPyNet model
77 | layer = L2Norm_layer(input_shape=inpt.shape, axis=ax)
78 |
79 | # Keras output
80 | forward_out_keras = tf.math.l2_normalize(inpt_tf, axis=ax).numpy()
81 |
82 | # numpynet forward and output
83 | layer.forward(inpt=inpt)
84 | forward_out_numpynet = layer.output
85 |
86 | # Test for dimension and allclose of all output
87 | assert forward_out_numpynet.shape == forward_out_keras.shape
88 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, atol=1e-3, rtol=1e-3)
89 | np.testing.assert_allclose(layer.delta, np.zeros(shape=(b, w, h, c), dtype=float), rtol=1e-5, atol=1e-8)
90 |
91 |
92 | @given(b = st.integers(min_value=3, max_value=15), # unstable for low values!
93 | w = st.integers(min_value=10, max_value=100),
94 | h = st.integers(min_value=10, max_value=100),
95 | c = st.integers(min_value=2, max_value=10),
96 | ax = st.sampled_from([None, 1, 2, 3]))
97 | @settings(max_examples=10,
98 | deadline=None)
99 | def test_backward (self, b, w, h, c, ax):
100 |
101 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c))
102 | inpt_tf = tf.Variable(inpt)
103 |
104 | # NumPyNet model
105 | layer = L2Norm_layer(input_shape=inpt.shape, axis=ax)
106 |
107 | # Keras output
108 | with tf.GradientTape() as tape:
109 | preds = tf.math.l2_normalize(inpt_tf, axis=ax)
110 | grads = tape.gradient(preds, inpt_tf)
111 |
112 | forward_out_keras = preds.numpy()
113 | delta_keras = grads.numpy()
114 |
115 | # numpynet forward and output
116 | layer.forward(inpt)
117 | forward_out_numpynet = layer.output
118 |
119 | # Test for dimension and allclose of all output
120 | assert forward_out_numpynet.shape == forward_out_keras.shape
121 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, atol=1e-3, rtol=1e-3)
122 |
123 | # BACKWARD
124 |
125 | # Definition of starting delta for numpynet
126 | layer.delta = np.zeros(shape=layer.out_shape, dtype=float)
127 | delta = np.zeros(inpt.shape, dtype=float)
128 |
129 | # numpynet Backward
130 | layer.backward(delta=delta)
131 |
132 | # Back tests
133 | assert delta.shape == delta_keras.shape
134 | assert delta.shape == inpt.shape
135 | # np.testing.assert_allclose(delta, delta_keras, rtol=1e-5, atol=1e-6) # TODO : wrong
136 |
--------------------------------------------------------------------------------
/examples/MNIST.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | '''
5 | Little example on how to use the Network class to create a model and perform
6 | a basic classification of the MNIST dataset
7 | '''
8 |
9 | # from NumPyNet.layers.input_layer import Input_layer
10 | from NumPyNet.layers.connected_layer import Connected_layer
11 | from NumPyNet.layers.convolutional_layer import Convolutional_layer
12 | from NumPyNet.layers.maxpool_layer import Maxpool_layer
13 | from NumPyNet.layers.softmax_layer import Softmax_layer
14 | # from NumPyNet.layers.dropout_layer import Dropout_layer
15 | # from NumPyNet.layers.cost_layer import Cost_layer
16 | # from NumPyNet.layers.cost_layer import cost_type
17 | from NumPyNet.layers.batchnorm_layer import BatchNorm_layer
18 | from NumPyNet.network import Network
19 | from NumPyNet.optimizer import Adam
20 | # from NumPyNet.optimizer import Adam, SGD, Momentum
21 | from NumPyNet.utils import to_categorical
22 | from NumPyNet.utils import from_categorical
23 | from NumPyNet.metrics import mean_accuracy_score
24 |
25 | import numpy as np
26 | from sklearn import datasets
27 | from sklearn.model_selection import train_test_split
28 |
29 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
30 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
31 |
32 |
33 | def accuracy (y_true, y_pred):
34 | '''
35 | Temporary metrics to overcome "from_categorical" missing in standard metrics
36 | '''
37 | truth = from_categorical(y_true)
38 | predicted = from_categorical(y_pred)
39 | return mean_accuracy_score(truth, predicted)
40 |
41 |
42 | if __name__ == '__main__':
43 |
44 | np.random.seed(123)
45 |
46 | digits = datasets.load_digits()
47 | X, y = digits.images, digits.target
48 |
49 | # del digits
50 |
51 | # add channels to images
52 | X = np.asarray([np.dstack((x, x, x)) for x in X])
53 |
54 | X_train, X_test, y_train, y_test = train_test_split(X, y,
55 | test_size=.33,
56 | random_state=42)
57 |
58 | batch = 128
59 | num_classes = len(set(y))
60 |
61 |   # rescale pixel intensities (the digits images take values in [0, 16], so this maps them into [0, ~0.06])
62 | X_train *= 1. / 255.
63 | X_test *= 1. / 255.
64 |
65 | # reduce the size of the data set for testing
66 | ############################################
67 |
68 | train_size = 1000
69 | test_size = 300
70 |
71 | X_train = X_train[:train_size, ...]
72 | y_train = y_train[:train_size]
73 | X_test = X_test[ :test_size, ...]
74 | y_test = y_test[ :test_size]
75 |
76 | ############################################
77 |
78 | n_train = X_train.shape[0]
79 | n_test = X_test.shape[0]
80 |
81 |   # transform y into one-hot vectors (length num_classes) reshaped as 4D arrays
82 | y_train = to_categorical(y_train).reshape(n_train, 1, 1, -1)
83 | y_test = to_categorical(y_test).reshape(n_test, 1, 1, -1)
84 |
85 | # Create the model and training
86 | model = Network(batch=batch, input_shape=X_train.shape[1:])
87 |
88 | model.add(Convolutional_layer(size=3, filters=32, stride=1, pad=True, activation='Relu'))
89 |
90 | model.add(BatchNorm_layer())
91 |
92 | model.add(Maxpool_layer(size=2, stride=1, padding=True))
93 |
94 | model.add(Connected_layer(outputs=100, activation='Relu'))
95 |
96 | model.add(BatchNorm_layer())
97 |
98 | model.add(Connected_layer(outputs=num_classes, activation='Linear'))
99 |
100 | model.add(Softmax_layer(spatial=True, groups=1, temperature=1.))
101 | # model.add(Cost_layer(cost_type=cost_type.mse))
102 |
103 | # model.compile(optimizer=SGD(lr=0.01, decay=0., lr_min=0., lr_max=np.inf))
104 | model.compile(optimizer=Adam(lr=1., decay=0.001), metrics=[accuracy])
105 |
106 | print('**************************************')
107 | print('\n Total input dimension: {}'.format(X_train.shape), '\n')
108 | print('**************MODEL SUMMARY***********')
109 |
110 | model.summary()
111 |
112 | print('\n***********START TRAINING***********\n')
113 |
114 | # Fit the model on the training set
115 | model.fit(X=X_train, y=y_train, max_iter=10, verbose=True)
116 |
117 | print('\n***********START TESTING**************\n')
118 |
119 | # Test the prediction with timing
120 | loss, out = model.evaluate(X=X_test, truth=y_test, verbose=True)
121 |
122 | truth = from_categorical(y_test)
123 | predicted = from_categorical(out)
124 | accuracy = mean_accuracy_score(truth, predicted)
125 |
126 | print('\nLoss Score: {:.3f}'.format(loss))
127 | print('Accuracy Score: {:.3f}'.format(accuracy))
128 |   # SGD : best score I could obtain was 94% with 10 epochs, lr = 0.01
129 | # Momentum : best score I could obtain was 93% with 10 epochs
130 | # Adam : best score I could obtain was 95% with 10 epochs
131 |
--------------------------------------------------------------------------------
/testing/test_l1norm_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.layers.l1norm_layer import L1Norm_layer
10 |
11 | import numpy as np
12 | import pytest
13 | from hypothesis import strategies as st
14 | from hypothesis import given
15 | from hypothesis import settings
16 |
17 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
18 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
19 |
20 |
21 | class TestL1normLayer :
22 | '''
23 | Tests:
24 | - constructor of the L1norm_layer.
25 | - printer of the layer.
26 | - forward against tensorflow.
27 | - backward against tensorflow.
28 |
29 | to be:
30 | '''
31 |
32 | @given(ax = st.sampled_from([None, 1, 2, 3]))
33 | @settings(max_examples=20,
34 | deadline=None)
35 |   def test_constructor (self, ax):
36 |
37 | layer = L1Norm_layer(axis=ax)
38 |
39 | assert layer.axis == ax
40 | assert layer.scales == None
41 | assert layer.output == None
42 | assert layer.delta == None
43 | assert layer.out_shape == None
44 |
45 | @given(b = st.integers(min_value=1, max_value=15),
46 | w = st.integers(min_value=1, max_value=100),
47 | h = st.integers(min_value=1, max_value=100),
48 | c = st.integers(min_value=1, max_value=10))
49 | @settings(max_examples=50,
50 | deadline=None)
51 | def test_printer (self, b, w, h, c):
52 |
53 | layer = L1Norm_layer(input_shape=(b, w, h, c))
54 |
55 | print(layer)
56 |
57 | layer.input_shape = (3.14, w, h, c)
58 |
59 | with pytest.raises(ValueError):
60 | print(layer)
61 |
62 | @given(b = st.integers(min_value=3, max_value=15), # unstable for low values!
63 | w = st.integers(min_value=10, max_value=100),
64 | h = st.integers(min_value=10, max_value=100),
65 | c = st.integers(min_value=2, max_value=10),
66 | ax = st.integers(min_value=1, max_value=3))
67 | @settings(max_examples=10,
68 | deadline=None)
69 | def test_forward (self, b, w, h, c, ax):
70 |
71 | # "None" axis supported only in NumPyNet
72 |
73 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c)).astype(float)
74 | inpt_tf = tf.Variable(inpt)
75 |
76 | # NumPyNet model
77 | layer = L1Norm_layer(input_shape=inpt.shape, axis=ax)
78 |
79 | # Keras output
80 | forward_out_keras = tf.keras.utils.normalize(inpt_tf, order=1, axis=ax).numpy()
81 |
82 | # numpynet forward and output
83 | layer.forward(inpt=inpt)
84 | forward_out_numpynet = layer.output
85 |
86 | # Test for dimension and allclose of all output
87 | assert forward_out_numpynet.shape == forward_out_keras.shape
88 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, atol=1e-7, rtol=1e-5)
89 | np.testing.assert_allclose(layer.delta, np.zeros(shape=(b, w, h, c), dtype=float), rtol=1e-5, atol=1e-8)
90 |
91 |
92 | @given(b = st.integers(min_value=3, max_value=15), # unstable for low values!
93 | w = st.integers(min_value=10, max_value=100),
94 | h = st.integers(min_value=10, max_value=100),
95 | c = st.integers(min_value=2, max_value=10),
96 | ax = st.integers(min_value=1, max_value=3))
97 | @settings(max_examples=10,
98 | deadline=None)
99 | def test_backward (self, b, w, h, c, ax):
100 |
101 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c)).astype(float)
102 | inpt_tf = tf.Variable(inpt)
103 |
104 | # NumPyNet model
105 | layer = L1Norm_layer(input_shape=inpt.shape, axis=ax)
106 |
107 | # Keras output
108 |
109 | with tf.GradientTape() as tape:
110 | preds = tf.keras.utils.normalize(inpt_tf, order=1, axis=ax)
111 | grads = tape.gradient(preds, inpt_tf)
112 |
113 | forward_out_keras = preds.numpy()
114 | delta_keras = grads.numpy()
115 |
116 | # numpynet forward and output
117 | layer.forward(inpt=inpt)
118 | forward_out_numpynet = layer.output
119 |
120 | # Test for dimension and allclose of all output
121 | assert forward_out_numpynet.shape == forward_out_keras.shape
122 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, atol=1e-7, rtol=1e-5)
123 |
124 | # BACKWARD
125 |
126 | # Definition of starting delta for numpynet
127 | layer.delta = np.zeros(shape=layer.out_shape, dtype=float)
128 | delta = np.zeros(inpt.shape, dtype=float)
129 |
130 | # numpynet Backward
131 | layer.backward(delta=delta)
132 |
133 | # Back tests
134 | assert delta.shape == delta_keras.shape
135 | assert delta.shape == inpt.shape
136 | # assert np.allclose(delta, delta_keras, atol=1e-6) # TODO wrong results?
137 |
--------------------------------------------------------------------------------
/testing/test_input_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.exception import NotFittedError
10 | from NumPyNet.layers.input_layer import Input_layer
11 |
12 | from random import choice
13 | import numpy as np
14 | import pytest
15 | from hypothesis import strategies as st
16 | from hypothesis import given
17 | from hypothesis import settings
18 |
19 |
20 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
21 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
22 |
23 |
24 | class TestInputLayer :
25 | '''
26 | Tests:
27 | - constructor of the Input layer object
28 | - __str__ function of the Input layer
29 |   - forward function against keras
30 | - backward function against keras
31 |
32 | to be:
33 | '''
34 |
35 | @given(b = st.integers(min_value=1, max_value=15 ),
36 | w = st.integers(min_value=1, max_value=100),
37 | h = st.integers(min_value=1, max_value=100),
38 | c = st.integers(min_value=1, max_value=10 ))
39 | @settings(max_examples=20,
40 | deadline=None)
41 | def test_constructor (self, b, w, h, c):
42 |
43 | input_shape = choice([(b, w, h, c), (b, w, h), b, None])
44 |
45 | if input_shape != (b, w, h, c):
46 | with pytest.raises(ValueError):
47 | layer = Input_layer(input_shape=input_shape)
48 |
49 | else:
50 | layer = Input_layer(input_shape=input_shape)
51 |
52 |       assert layer.input_shape == (b, w, h, c)
53 |
54 | assert layer.output == None
55 | assert layer.delta == None
56 | assert layer.out_shape == (b, w, h, c)
57 |
58 |
59 | @given(b = st.integers(min_value=1, max_value=15 ),
60 | w = st.integers(min_value=1, max_value=100),
61 | h = st.integers(min_value=1, max_value=100),
62 | c = st.integers(min_value=1, max_value=10 ))
63 | @settings(max_examples=50,
64 | deadline=None)
65 | def test_printer (self, b, w, h, c):
66 |
67 | layer = Input_layer(input_shape=(b, w, h, c))
68 |
69 | print(layer)
70 |
71 | layer.input_shape = (3.14, w, h, c)
72 |
73 | with pytest.raises(ValueError):
74 | print(layer)
75 |
76 | @given(b = st.integers(min_value=1, max_value=15 ),
77 | w = st.integers(min_value=1, max_value=100),
78 | h = st.integers(min_value=1, max_value=100),
79 | c = st.integers(min_value=1, max_value=10 ))
80 | @settings(max_examples=20,
81 | deadline=None)
82 | def test_forward (self, b, w, h, c):
83 |
84 | inpt = np.random.uniform(low=-1, high=1., size=(b, w, h, c)).astype(float)
85 |
86 | # numpynet model init
87 | layer = Input_layer(input_shape=inpt.shape)
88 |
89 | # Keras Model init
90 | model = tf.keras.layers.InputLayer(input_shape=(w, h, c))
91 |
92 | # FORWARD
93 |
94 | # Keras Forward
95 | forward_out_keras = model(inpt)
96 |
97 |     # numpynet forward
98 | layer.forward(inpt=inpt)
99 | forward_out_numpynet = layer.output
100 |
101 | # Forward check (Shape and Values)
102 | assert forward_out_keras.shape == forward_out_numpynet.shape
103 | np.testing.assert_allclose(forward_out_keras, forward_out_numpynet)
104 |
105 |
106 | @given(b = st.integers(min_value=1, max_value=15 ),
107 | w = st.integers(min_value=1, max_value=100),
108 | h = st.integers(min_value=1, max_value=100),
109 | c = st.integers(min_value=1, max_value=10 ))
110 | @settings(max_examples=20,
111 | deadline=None)
112 | def test_backward (self, b, w, h, c):
113 |
114 | inpt = np.random.uniform(low=-1, high=1., size=(b, w, h, c)).astype(float)
115 | tf_input = tf.Variable(inpt)
116 |
117 | # numpynet model init
118 | layer = Input_layer(input_shape=inpt.shape)
119 |
120 | # Keras Model init
121 | model = tf.keras.layers.InputLayer(input_shape=(w, h, c))
122 |
123 | # FORWARD
124 |
125 | # Tensorflow Forward and backward
126 | with tf.GradientTape() as tape :
127 | preds = model(tf_input)
128 | grads = tape.gradient(preds, tf_input)
129 |
130 | forward_out_keras = preds.numpy()
131 | delta_keras = grads.numpy()
132 |
133 | # layer forward
134 | layer.forward(inpt=inpt)
135 | forward_out_numpynet = layer.output
136 |
137 | # Forward check (Shape and Values)
138 | assert forward_out_keras.shape == forward_out_numpynet.shape
139 | np.testing.assert_allclose(forward_out_keras, forward_out_numpynet)
140 |
141 | # BACKWARD
142 |
143 | # layer delta init.
144 | layer.delta = np.ones(shape=inpt.shape, dtype=float)
145 |
146 | # Global delta init.
147 | delta = np.empty(shape=inpt.shape, dtype=float)
148 |
149 | # layer Backward
150 | layer.backward(delta=delta)
151 |
152 | # Check dimension and delta
153 | assert delta_keras.shape == delta.shape
154 | np.testing.assert_allclose(delta_keras, delta)
155 |
156 | delta = np.zeros(shape=(1, 2, 3, 4), dtype=float)
157 |
158 | with pytest.raises(ValueError):
159 | layer.backward(delta)
160 |
--------------------------------------------------------------------------------
/NumPyNet/layers/route_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | import operator as op
9 |
10 | from NumPyNet.exception import LayerError
11 | from NumPyNet.utils import check_is_fitted
12 | from NumPyNet.layers.base import BaseLayer
13 |
14 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
15 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
16 |
17 |
18 | class Route_layer(BaseLayer):
19 | '''
20 | Route layer
21 |   For now the idea is: it takes the selected layers' outputs and concatenates
22 |   them along the batch axis OR the channels axis
23 | 
24 |   The YOLOv3 implementation always concatenates by channels
25 |
26 | By definition, this layer can't be used without a Network model.
27 |
28 | Parameters
29 | ----------
30 | input_layers : int or list of int.
31 |     indexes of the layers in the network whose outputs have to be concatenated.
32 |
33 | by_channels : bool, (default = True).
34 | It determines along which dimension the concatenation is performed. For examples if two
35 | input with size (b1, w, h , c) and (b2, w, h, c) are concatenated with by_channels=False,
36 | then the final output shape will be (b1 + b2, w, h, c).
37 | Otherwise, if the shapes are (b, w, h, c1) and (b, w, h, c2) and axis=3, the final output size
38 |     will be (b, w, h, c1 + c2) (YOLOv3 model). Notice that all the other dimensions must be equal.
39 |
40 | Example
41 | -------
42 | TODO
43 |
44 | Reference
45 | ---------
46 | TODO
47 | '''
48 |
49 | def __init__(self, input_layers, by_channels=True, **kwargs):
50 |
51 | self.axis = 3 if by_channels else 0
52 |
53 | if isinstance(input_layers, int):
54 |       self.input_layers = (input_layers, )
55 |
56 | elif hasattr(input_layers, '__iter__'):
57 | self.input_layers = tuple(input_layers)
58 |
59 | else:
60 |       raise ValueError('Route Layer : parameter "input_layers" is neither an iterable nor an integer')
61 |
62 | super(Route_layer, self).__init__()
63 |
64 | def __str__(self):
65 | return 'route {}'.format(list(self.input_layers))
66 |
67 | def _build(self, previous_layer):
68 |
69 | out_shapes = [x.out_shape for x in previous_layer]
70 | self.input_shape = list(out_shapes[-1])
71 |
72 |     # sum the sizes along the concatenated axis (channels if axis=3, batch otherwise)
73 |     if self.axis:
74 |       self.input_shape[-1] = np.sum(list(map(op.itemgetter(self.axis), out_shapes)))
75 |     else:
76 |       self.input_shape[0] = np.sum(list(map(op.itemgetter(self.axis), out_shapes)))
77 |
78 | self.input_shape = tuple(self.input_shape)
79 | return self
80 |
81 | def __call__(self, previous_layer):
82 |
83 | for prev in previous_layer:
84 |
85 | if prev.out_shape is None:
86 | class_name = self.__class__.__name__
87 | prev_name = prev.__class__.__name__
88 | raise LayerError('Incorrect shapes found. Layer {0} cannot be connected to the previous {1} layer.'.format(class_name, prev_name))
89 |
90 | self._build(previous_layer)
91 | return self
92 |
93 | def forward(self, network):
94 | '''
95 | Concatenate along chosen axis the outputs of selected network layers
96 |     In main CNN applications, like YOLOv3, the concatenation happens along the channels axis
97 |
98 | Parameters
99 | ----------
100 | network : Network object type.
101 | The network model to which this layer belongs to.
102 |
103 | Returns
104 | -------
105 | self
106 | '''
107 |
108 | self.output = np.concatenate([network[layer_idx].output for layer_idx in self.input_layers], axis=self.axis)
109 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
110 |
111 | return self
112 |
113 | def backward(self, delta, network):
114 | '''
115 | Sum self.delta to the correct layer delta on the network
116 |
117 | Parameters
118 | ----------
119 | delta : array-like
120 | delta array of shape (batch, w, h, c). Global delta to be backpropagated.
121 |
122 | network: Network object type.
123 | The network model to which this layer belongs to.
124 |
125 | Returns
126 | -------
127 | self
128 | '''
129 |
130 | check_is_fitted(self, 'delta')
131 |
132 | # NumPyNet implementation
133 | if self.axis == 3: # this works for concatenation by channels axis
134 | channels_sum = 0
135 | for idx in self.input_layers:
136 | channels = network[idx].out_shape[3]
137 | network[idx].delta += self.delta[..., channels_sum: channels_sum + channels]
138 | channels_sum += channels
139 |
140 | elif self.axis == 0: # this works for concatenation by batch axis
141 | batch_sum = 0
142 |       for idx in self.input_layers:
143 | batches = network[idx].out_shape[0]
144 | network[idx].delta += self.delta[batch_sum: batch_sum + batches, ...]
145 | batch_sum += batches
146 |
147 | return self
148 |
149 |
150 | if __name__ == '__main__':
151 |
152 | layer = Route_layer((1, 2))
153 | print(layer)
154 |
155 | print(layer.out_shape)
156 | # TODO the idea is to create a toy model for numpynet and keras, and try some
157 | # concatenation (mainly by channel, since the batch implementation doesn't really
158 | # make sense to me)
159 |
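160 |   # A minimal sketch (plain NumPy, outside a Network model) of what the layer
161 |   # does: two toy layer outputs are concatenated by channels in the forward,
162 |   # and the global delta is split back with the same channel offsets in the
163 |   # backward. The arrays "out1"/"out2" stand in for the selected layers' outputs.
164 |   out1 = np.random.uniform(size=(2, 4, 4, 3))
165 |   out2 = np.random.uniform(size=(2, 4, 4, 5))
166 | 
167 |   # forward: concatenate along the channels axis (axis=3, as in YOLOv3)
168 |   output = np.concatenate([out1, out2], axis=3)
169 |   print('concatenated shape: {}'.format(output.shape))  # (2, 4, 4, 8)
170 | 
171 |   # backward: route each channel slice of the delta back to its source layer
172 |   delta = np.ones_like(output)
173 |   delta1 = delta[..., :out1.shape[3]]
174 |   delta2 = delta[..., out1.shape[3]:]
175 |   assert delta1.shape == out1.shape and delta2.shape == out2.shape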
--------------------------------------------------------------------------------
/NumPyNet/layers/input_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | from NumPyNet.utils import check_is_fitted
9 | from NumPyNet.layers.base import BaseLayer
10 |
11 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
12 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
13 |
14 |
15 | class Input_layer(BaseLayer):
16 | '''
17 | Input layer, this layer can be used at the beginning of a Network to define all
18 | the model's input-output dimensions
19 |
20 | Parameters
21 | ----------
22 | input_shape : tuple
23 | Shape of the input in the format (batch, w, h, c).
24 |
25 | Example
26 | -------
27 | >>> import os
28 | >>>
29 | >>> import pylab as plt
30 | >>> from PIL import Image
31 | >>>
32 | >>> img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
33 | >>> float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
34 | >>>
35 | >>> filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
36 | >>> inpt = np.asarray(Image.open(filename), dtype=float)
37 | >>> inpt.setflags(write=1)
38 | >>> inpt = img_2_float(inpt)
39 | >>> inpt = np.expand_dims(inpt, axis=0)
40 | >>>
41 | >>> layer = Input_layer(input_shape=inpt.shape)
42 | >>>
43 | >>> # FORWARD
44 | >>>
45 | >>> layer.forward(inpt)
46 | >>> forward_out_byron = layer.output
47 | >>>
48 | >>> # BACKWARD
49 | >>>
50 | >>> delta = np.zeros(shape=inpt.shape, dtype=float)
51 | >>> layer.backward(delta)
52 | >>>
53 | >>> # Visualizations
54 | >>>
55 | >>> fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
56 | >>> fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
57 | >>>
58 | >>> fig.suptitle('Input Layer')
59 | >>>
60 | >>> ax1.imshow(float_2_img(inpt[0]))
61 | >>> ax1.set_title('Original image')
62 | >>> ax1.axis('off')
63 | >>>
64 | >>> ax2.imshow(float_2_img(layer.output[0]))
65 | >>> ax2.set_title("Forward")
66 | >>> ax2.axis("off")
67 | >>>
68 | >>> ax3.imshow(float_2_img(delta[0]))
69 | >>> ax3.set_title('Backward')
70 | >>> ax3.axis('off')
71 | >>>
72 | >>> fig.tight_layout()
73 | >>> plt.show()
74 |
75 | References
76 | ----------
77 | TODO
78 | '''
79 |
80 | def __init__(self, input_shape, **kwargs):
81 |
82 |     if sum(np.shape(input_shape)) != 4:  # equivalent to len(input_shape) != 4, but np.shape safely handles None and scalar inputs
83 | raise ValueError('Input layer error. Incorrect input_shape. Expected a 4D array (batch, width, height, channel). Given {}'.format(input_shape))
84 |
85 | super(Input_layer, self).__init__(input_shape=input_shape)
86 |
87 | def __str__(self):
88 | batch, w, h, c = self.input_shape
89 | return 'input {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d} -> {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d}'.format(batch, w, h, c)
90 |
91 | def forward(self, inpt):
92 | '''
93 | Forward function of the Input Layer: simply store the input array.
94 |
95 | Parameters
96 | ----------
97 | inpt : array-like
98 |       Input batch of images in format (batch, in_w, in_h, in_c)
99 |
100 |     Returns
101 |     -------
102 | self
103 | '''
104 |
105 | self._check_dims(shape=self.out_shape, arr=inpt, func='Forward')
106 |
107 | self.output = inpt
108 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
109 |
110 | return self
111 |
112 | def backward(self, delta):
113 | '''
114 | Simply pass the gradient.
115 |
116 | Parameters
117 | ----------
118 | delta : array-like
119 | delta array of shape (batch, w, h, c). Global delta to be backpropagated.
120 |
121 | Returns
122 | -------
123 | self
124 | '''
125 |
126 | check_is_fitted(self, 'delta')
127 | self._check_dims(shape=self.out_shape, arr=delta, func='Backward')
128 |
129 | delta[:] = self.delta
130 |
131 | return self
132 |
133 |
134 | if __name__ == '__main__':
135 |
136 | import os
137 |
138 | import pylab as plt
139 | from PIL import Image
140 |
141 | img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
142 | float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
143 |
144 | filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
145 | inpt = np.asarray(Image.open(filename), dtype=float)
146 | inpt.setflags(write=1)
147 | inpt = img_2_float(inpt)
148 | inpt = np.expand_dims(inpt, axis=0)
149 |
150 | layer = Input_layer(input_shape=inpt.shape)
151 |
152 | # FORWARD
153 |
154 | layer.forward(inpt)
155 | forward_out_byron = layer.output
156 |
157 | # BACKWARD
158 |
159 | delta = np.zeros(shape=inpt.shape, dtype=float)
160 | layer.backward(delta)
161 |
162 | # Visualizations
163 |
164 | fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
165 | fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
166 |
167 | fig.suptitle('Input Layer')
168 |
169 | ax1.imshow(float_2_img(inpt[0]))
170 | ax1.set_title('Original image')
171 | ax1.axis('off')
172 |
173 | ax2.imshow(float_2_img(layer.output[0]))
174 | ax2.set_title("Forward")
175 | ax2.axis("off")
176 |
177 | ax3.imshow(float_2_img(delta[0]))
178 | ax3.set_title('Backward')
179 | ax3.axis('off')
180 |
181 | fig.tight_layout()
182 | plt.show()
183 |
--------------------------------------------------------------------------------
/testing/test_shuffler_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.exception import NotFittedError
10 | from NumPyNet.layers.shuffler_layer import Shuffler_layer
11 |
12 | import numpy as np
13 | import pytest
14 | from hypothesis import strategies as st
15 | from hypothesis import given
16 | from hypothesis import settings
17 | from hypothesis import example
18 | from random import choice
19 |
20 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
21 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
22 |
23 |
24 | class TestShuffleLayer :
25 | '''
26 | Tests:
27 | - constructor of the layer
28 | - printer of the layer
29 | - forward of the layer against keras
30 | - backward of the layer against keras
31 |
32 | to be:
33 | '''
34 |
35 | @given(scale = st.integers(min_value=-3, max_value=10))
36 | @settings(max_examples=5,
37 | deadline=None)
38 | def test_constructor (self, scale):
39 |
40 | if scale <= 1:
41 | with pytest.raises(ValueError):
42 | layer = Shuffler_layer(scale=scale)
43 |
44 | else:
45 | layer = Shuffler_layer(scale=scale)
46 | assert layer.scale == scale
47 | assert layer.scale_step == scale * scale
48 | assert layer.input_shape == None
49 | assert layer.output == None
50 | assert layer.delta == None
51 |
52 |
53 | def test_printer (self):
54 |
55 | scale = 2
56 |
57 | layer = Shuffler_layer(scale=scale)
58 |
59 | with pytest.raises(TypeError):
60 | print(layer)
61 |
62 | layer.input_shape = (1, 1, 1, 1)
63 | print(layer)
64 |
65 |
66 | @example(couple=(2, 12), b=5, w=100, h=300)
67 | @example(couple=(4, 32), b=5, w=100, h=300)
68 | @example(couple=(4, 48), b=5, w=100, h=300)
69 | @example(couple=(6, 108), b=5, w=100, h=300)
70 | @given(couple = st.tuples(st.integers(min_value=2, max_value=10), st.integers(min_value=1, max_value=200)),
71 | b = st.integers(min_value=1, max_value=10),
72 | w = st.integers(min_value=10, max_value=100),
73 | h = st.integers(min_value=10, max_value=100),)
74 | @settings(max_examples=10,
75 | deadline=None)
76 |   def test_forward (self, couple, b, w, h):
77 |
78 | scale, channels = couple
79 |
80 | # input initialization
81 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, channels))
82 |
83 | # numpynet model
84 | layer = Shuffler_layer(input_shape=inpt.shape, scale=scale)
85 |
86 | # FORWARD
87 |
88 | if channels % (scale*scale):
89 | with pytest.raises(ValueError):
90 | layer.forward(inpt=inpt)
91 |
92 | else:
93 | layer.forward(inpt=inpt)
94 | forward_out_numpynet = layer.output
95 |
96 | forward_out_keras = tf.nn.depth_to_space(inpt, block_size=scale, data_format='NHWC')
97 |
98 | assert forward_out_numpynet.shape == forward_out_keras.shape
99 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, rtol=1e-5, atol=1e-8)
100 |
101 |
102 | @example(couple=(2, 12), b=5, w=10, h=30)
103 | @example(couple=(4, 32), b=5, w=10, h=30)
104 | @example(couple=(4, 48), b=5, w=10, h=30)
105 | @example(couple=(6, 108), b=5, w=10, h=30)
106 | @given(couple = st.tuples(st.integers(min_value=2, max_value=10), st.integers(min_value=1, max_value=100)),
107 | b = st.integers(min_value=1, max_value=10),
108 | w = st.integers(min_value=10, max_value=100),
109 | h = st.integers(min_value=10, max_value=100),)
110 | @settings(max_examples=10,
111 | deadline=None)
112 | def test_backward (self, b, w, h, couple):
113 |
114 | scale, channels = couple
115 |
116 | # input initialization
117 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, channels))
118 |
119 | # numpynet model
120 | layer = Shuffler_layer(input_shape=inpt.shape, scale=scale)
121 |
122 | # FORWARD
123 |
124 | if channels % (scale*scale):
125 | with pytest.raises(ValueError):
126 | layer.forward(inpt)
127 |
128 | else:
129 |
130 | forward_out_keras = tf.nn.depth_to_space(inpt, block_size=scale, data_format='NHWC')
131 |
132 | # try to BACKWARD
133 | with pytest.raises(NotFittedError):
134 | delta = np.random.uniform(low=0., high=1., size=forward_out_keras.shape)
135 | delta = delta.reshape(inpt.shape)
136 | layer.backward(delta=delta)
137 |
138 |
139 | layer.forward(inpt=inpt)
140 | forward_out_numpynet = layer.output
141 |
142 | assert forward_out_numpynet.shape == forward_out_keras.shape
143 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, rtol=1e-5, atol=1e-8)
144 |
145 | # BACKWARD
146 |
147 | delta = np.random.uniform(low=0., high=1., size=forward_out_keras.shape).astype(float)
148 |
149 | delta_keras = tf.nn.space_to_depth(delta, block_size=scale, data_format='NHWC')
150 | inpt_keras = tf.nn.space_to_depth(forward_out_keras, block_size=scale, data_format='NHWC')
151 |
152 | layer.delta = delta
153 | delta = delta.reshape(inpt.shape)
154 |
155 | layer.backward(delta=delta)
156 |
157 | assert delta_keras.shape == delta.shape
158 | np.testing.assert_allclose(delta_keras, delta, rtol=1e-5, atol=1e-8)
159 | np.testing.assert_allclose(inpt_keras, inpt, rtol=1e-5, atol=1e-8)
160 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/activation_layer.md:
--------------------------------------------------------------------------------
1 | # Activation Layer
2 |
3 | Activation functions (or transfer functions) are linear or non-linear equations that process the output of a Neural Network's neuron, bounding it into a limited range of values (usually [-1,1] or [0,1]).
4 |
5 | In a fully connected neural network, the output of a simple neuron is the dot product between the weight and input vectors, ranging from -∞ to +∞; most importantly, it is the result of a linear function.
6 | Linear functions are very simple to deal with, but they are limited in their complexity and thus in their learning power.
7 | Neural Networks without activation functions are just simple linear regression models.
8 | The introduction of non-linearity allows them to model a wide range of functions and to learn more complex relations in the data.
9 | From a biological point of view, the activation function models the on/off state of a neuron in the output decision process.
10 |
11 | Many activation functions have been proposed over the years, and each one has its own characteristics, though no well-defined field of application.
12 | Which one is better to use in a particular situation is still an open question.
13 | Each one has its pros and cons, so every Neural Network library implements a wide range of activations and leaves the user free to perform their own tests.
14 |
15 | We stored the whole list of available activation functions in [activations.py](https://github.com/Nico-Curti/NumPyNet/blob/master/NumPyNet/activations.py), each with its own formulation and derivative.
16 | An important feature of any activation function, in fact, is that it should be differentiable, since the main procedure of model optimization (learning) implies the backpropagation of the error gradients.
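
As a minimal sketch of such a function/derivative pair (plain numpy, independent from the library internals; the classes in activations.py wrap exactly this kind of pair, although their actual interface may differ):

```python
import numpy as np

def relu(x):
  # forward: element-wise max(0, x)
  return np.where(x > 0., x, 0.)

def relu_gradient(y):
  # derivative expressed in terms of the activated output y = relu(x):
  # 1 where the unit was active, 0 elsewhere
  return np.where(y > 0., 1., 0.)

x = np.linspace(-2., 2., 5)  # [-2. -1.  0.  1.  2.]
y = relu(x)                  # [ 0.  0.  0.  1.  2.]
dy = relu_gradient(y)        # [ 0.  0.  0.  1.  1.]
```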
17 |
18 | The images below show some examples of the effect of the forward and backward passes of the activation layer on the same input picture:
19 |
20 | 
21 | 
22 | 
23 |
24 | The code used to generate those images can be found [in this repository](https://github.com/Nico-Curti/NumPyNet/blob/master/NumPyNet/layers/activation_layer.py), after the activation layer class definition.
25 |
26 | Below is an example of how to use the single layer and its `forward` and `backward` functions:
27 |
28 | ```python
29 | # first the essential import for the library.
30 | # after the installation:
31 | from NumPyNet.layers.activation_layer import Activation_layer # class import
32 | from NumPyNet import activations # here are contained all the activation function definitions
33 |
34 | import numpy as np # the library is entirely based on numpy
35 |
36 | # define a batch of images (even a single image is ok, but it is important that it has all
37 | # the four dimensions) in the format (batch, width, height, channels)
38 |
39 | batch, w, h, c = (5, 100, 100, 3)
40 | input = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # you can also import an image from file
41 |
42 | # Activation function definition
43 | Activ_func = activations.Relu() # it can also be:
44 | # activations.Relu (the class Relu, taken from activations.py)
45 | # 'Relu' (a string)
46 |
47 | # Layer initialization
48 | layer = Activation_layer(activation=Activ_func)
49 |
50 | # Forward pass
51 | layer.forward(inpt=input, copy=False)
52 | out_img = layer.output # the output in this case will be of shape=(batch, w, h, c), so a batch of images
53 |
54 |
55 | # Backward pass
56 | delta = np.random.uniform(low=0., high=1., size=input.shape) # definition of network delta, to be backpropagated
57 | layer.delta = np.random.uniform(low=0., high=1., size=out_img.shape) # layer delta, ideally coming from the next layer
58 | layer.backward(delta, copy=False)
59 |
60 | # now delta is modified and ready to be passed to the previous layer.delta
61 | ```
62 |
63 | To get an idea of what the forward and backward functions actually do, take a look at the code below:
64 |
65 | ## Forward function:
66 |
67 | ```python
68 | def forward(self, inpt, copy=True):
69 | '''
70 | Forward of the activation layer, apply the selected activation function to
71 | the input
72 |
73 | Parameters:
74 | inpt: the input to be activated
75 | copy: default value is True. If True make a copy of the input before
76 | applying the activation
77 | '''
78 | self._out_shape = inpt.shape
79 | self.output = self.activation(inpt, copy=copy)
80 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
81 |
82 | ```
83 | The code is very straightforward:
84 | 1. store the input shape
85 | 2. apply the selected activation function to the input
86 | 3. initialize layer.delta to all zeros.
87 |
88 | ## Backward function:
89 |
90 | ```python
91 | def backward(self, delta, copy=False):
92 | '''
93 | Compute the backward of the activation layer
94 |
95 | Parameters:
96 | delta : global error to be backpropagated
97 | copy : if True, the gradient is computed on a copy of the output
98 | '''
98 |
99 | self.delta *= self.gradient(self.output, copy=copy)
100 | delta[:] = self.delta
101 | ```
102 |
103 | Here instead:
104 | 1. multiply `layer.delta` by the derivative of the activation function (computed on the **activated** output)
105 | 2. modify `delta` with the current value of `layer.delta`.
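
A quick numerical check of this chain rule, as a sketch using the `Relu` activation and the string shortcut shown in the usage example above:

```python
import numpy as np
from NumPyNet.layers.activation_layer import Activation_layer

x = np.array([[[[-1.], [2.]]]]) # shape (1, 1, 2, 1): one inactive and one active unit

layer = Activation_layer(activation='Relu')
layer.forward(inpt=x)

layer.delta = np.ones(shape=x.shape) # error coming from the next layer
delta = np.zeros(shape=x.shape)      # delta of the previous layer, to be filled

layer.backward(delta)
print(delta.ravel()) # [0. 1.]: the gradient flows only through the active unit
```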
106 |
--------------------------------------------------------------------------------
/testing/test_fmath.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | from NumPyNet.fmath import pow2
8 | from NumPyNet.fmath import exp
9 | from NumPyNet.fmath import pow
10 | from NumPyNet.fmath import log2
11 | from NumPyNet.fmath import log10
12 | from NumPyNet.fmath import log
13 | from NumPyNet.fmath import atanh
14 | from NumPyNet.fmath import tanh
15 | from NumPyNet.fmath import sqrt
16 | from NumPyNet.fmath import rsqrt
17 |
18 | import timeit
19 | import numpy as np
20 |
21 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
22 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
23 | __package__ = 'Fast Math functions testing'
24 |
25 | def _timing_np (func, args=None):
26 | SETUP_CODE = '''
27 | import numpy as np
28 |
29 | def np_pow2 (x):
30 | return 2 ** x
31 |
32 | def np_rsqrt (x):
33 | return 1. / np.sqrt(x)
34 |
35 | func = eval('{func}')
36 | arr = range(0, 10000)
37 | '''.format(**{'func' : func})
38 |
39 | if args is not None:
40 | TEST_CODE = '''
41 | y = list(map(lambda x : func(x, {args}), arr))
42 | '''.format(**{'args' : args})
43 | else:
44 | TEST_CODE = '''
45 | y = list(map(func, arr))
46 | '''
47 |
48 | return timeit.repeat(setup=SETUP_CODE,
49 | stmt=TEST_CODE,
50 | repeat=100,
51 | number=1000)
52 |
53 | def _timing_fmath (func, args=None):
54 | SETUP_CODE = '''
55 | from __main__ import {func}
56 |
57 | arr = range(0, 10000)
58 | '''.format(**{'func' : func})
59 |
60 | if args is not None:
61 | TEST_CODE = '''
62 | y = list(map(lambda x : {func}(x, {args}), arr))
63 | '''.format(**{'func' : func, 'args' : args})
64 | else:
65 | TEST_CODE = '''
66 | y = list(map({func}, arr))
67 | '''.format(**{'func' : func})
68 |
69 | return timeit.repeat(setup=SETUP_CODE,
70 | stmt=TEST_CODE,
71 | repeat=100,
72 | number=1000)
73 |
74 |
75 | def timeit_fmath ():
76 |
77 | np_pow2 = min(_timing_np( 'np_pow2' ))
78 | fmath_pow2 = min(_timing_fmath( 'pow2' ))
79 | np_exp = min(_timing_np( 'np.exp' ))
80 | fmath_exp = min(_timing_fmath( 'exp' ))
81 | np_pow = min(_timing_np( 'np.power', .2))
82 | fmath_pow = min(_timing_fmath( 'pow' , .2))
83 | np_log2 = min(_timing_np( 'np.log2' ))
84 | fmath_log2 = min(_timing_fmath( 'log2' ))
85 | np_log10 = min(_timing_np( 'np.log10' ))
86 | fmath_log10 = min(_timing_fmath( 'log10' ))
87 | np_log = min(_timing_np( 'np.log' ))
88 | fmath_log = min(_timing_fmath( 'log' ))
89 | np_atanh = min(_timing_np( 'np.arctanh'))
90 | fmath_atanh = min(_timing_fmath( 'atanh' ))
91 | np_tanh = min(_timing_np( 'np.tanh' ))
92 | fmath_tanh = min(_timing_fmath( 'tanh' ))
93 | np_sqrt = min(_timing_np( 'np.sqrt' ))
94 | fmath_sqrt = min(_timing_fmath( 'sqrt' ))
95 | np_rsqrt = min(_timing_np( 'np_rsqrt' ))
96 | fmath_rsqrt = min(_timing_fmath( 'rsqrt' ))
97 |
98 | print(' CMath FMath')
99 | print('pow2 function : {:.9f} {:.9f}'.format(np_pow2, fmath_pow2))
100 | print('exp function : {:.9f} {:.9f}'.format(np_exp, fmath_exp))
101 | print('pow function : {:.9f} {:.9f}'.format(np_pow, fmath_pow))
102 | print('log2 function : {:.9f} {:.9f}'.format(np_log2, fmath_log2))
103 | print('log10 function : {:.9f} {:.9f}'.format(np_log10, fmath_log10))
104 | print('log function : {:.9f} {:.9f}'.format(np_log, fmath_log))
105 | print('atanh function : {:.9f} {:.9f}'.format(np_atanh, fmath_atanh))
106 | print('tanh function : {:.9f} {:.9f}'.format(np_tanh, fmath_tanh))
107 | print('sqrt function : {:.9f} {:.9f}'.format(np_sqrt, fmath_sqrt))
108 | print('rsqrt function : {:.9f} {:.9f}'.format(np_rsqrt, fmath_rsqrt))
109 |
110 | # CMath FMath
111 | # pow2 function : 0.000387600 0.000341400
112 | # exp function : 0.000342000 0.000346200
113 | # pow function : 0.000583300 0.000539600
114 | # log2 function : 0.000380200 0.000382200
115 | # log10 function : 0.000384900 0.000341400
116 | # log function : 0.000380500 0.000342200
117 | # atanh function : 0.000427400 0.000377600
118 | # tanh function : 0.000372500 0.000375100
119 | # sqrt function : 0.000372100 0.000341400
120 | # rsqrt function : 0.000376100 0.000341800
121 |
122 | def test_pow2 ():
123 |
124 | x = np.pi
125 | assert np.isclose(2**x, pow2(x), atol=1e-3)
126 |
127 | def test_exp ():
128 |
129 | x = np.pi
130 | assert np.isclose(np.exp(x), exp(x), atol=1e-5)
131 |
132 | def test_pow ():
133 |
134 | x = np.pi
135 | assert np.isclose(x**.2, pow(x, .2), atol=1e-4)
136 |
137 | def test_log2 ():
138 |
139 | x = np.pi
140 | assert np.isclose(np.log2(x), log2(x), atol=1e-4)
141 |
142 | def test_log10 ():
143 |
144 | x = np.pi
145 | assert np.isclose(np.log10(x), log10(x), atol=1e-3)
146 |
147 | def test_log ():
148 |
149 | x = np.pi
150 | assert np.isclose(np.log(x), log(x), atol=1e-4)
151 |
152 | def test_arctanh ():
153 |
154 | c = 1e-2
155 | x = np.pi
156 | assert np.isclose(np.arctanh(x*c), atanh(x*c), atol=1e-4)
157 |
158 | def test_tanh ():
159 |
160 | x = np.pi
161 | assert np.isclose(np.tanh(x), tanh(x), atol=1e-5)
162 |
163 | def test_sqrt ():
164 |
165 | x = np.pi
166 | assert np.isclose(np.sqrt(x), sqrt(x), atol=1e-5)
167 |
168 | def test_rsqrt ():
169 |
170 | x = np.pi
171 | assert np.isclose(1. / np.sqrt(x), rsqrt(x), atol=1e-5)
172 |
173 |
--------------------------------------------------------------------------------
/testing/test_dropout_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | # import tensorflow as tf
8 | # import tensorflow.keras.backend as K
9 |
10 | from NumPyNet.exception import NotFittedError
11 | from NumPyNet.layers.dropout_layer import Dropout_layer
12 |
13 | import numpy as np
14 | import pytest
15 | from hypothesis import strategies as st
16 | from hypothesis import given
17 | from hypothesis import settings
18 |
19 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
20 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
21 |
22 |
23 | class TestDropoutLayer:
24 | '''
25 | Tests:
26 | - constructor of the layer.
27 | - printer function.
28 | - Properties of the output. The tensorflow dropout layer exists, but due
29 | to the random nature of the layer, an exact comparison is impossible.
30 | - Check that both forward and backward work.
31 | '''
32 |
33 | @given(prob = st.floats(min_value=-0.5, max_value=1.5))
34 | @settings(max_examples=20,
35 | deadline=None)
36 | def test_constructor (self, prob):
37 |
38 | if prob < 0. or prob > 1:
39 |
40 | with pytest.raises(ValueError):
41 | layer = Dropout_layer(prob)
42 |
43 | else:
44 | layer = Dropout_layer(prob)
45 |
46 | assert layer.probability == prob
47 |
48 | assert layer.output == None
49 | assert layer.delta == None
50 |
51 | @given(b = st.integers(min_value=1, max_value=15 ),
52 | w = st.integers(min_value=1, max_value=100),
53 | h = st.integers(min_value=1, max_value=100),
54 | c = st.integers(min_value=1, max_value=10 ),
55 | prob = st.floats(min_value=0., max_value=1.))
56 | @settings(max_examples=50,
57 | deadline=None)
58 | def test_printer (self, b, w, h, c, prob):
59 |
60 | layer = Dropout_layer(input_shape=(b, w, h, c), prob=prob)
61 |
62 | print(layer)
63 |
64 | layer.input_shape = (3.14, w, h, c)
65 |
66 | with pytest.raises(ValueError):
67 | print(layer)
68 |
69 |
70 | @given(b = st.integers(min_value=1, max_value=15 ),
71 | w = st.integers(min_value=1, max_value=100),
72 | h = st.integers(min_value=1, max_value=100),
73 | c = st.integers(min_value=1, max_value=10 ),
74 | prob = st.floats(min_value=0., max_value=1.))
75 | @settings(max_examples=20,
76 | deadline=None)
77 | def test_forward (self, b, w, h, c, prob):
78 |
79 | # Random input
80 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c)).astype(float)
81 |
82 | # Initialize the numpy_net model
83 | layer = Dropout_layer(input_shape=inpt.shape, prob=prob)
84 |
85 | # Tensor Flow dropout, just to see if it works
86 | # forward_out_keras = K.eval(tf.nn.dropout(inpt, seed=None, keep_prob=prob))
87 |
88 | layer.forward(inpt=inpt)
89 | forward_out_numpynet = layer.output
90 |
91 | non_zeros = np.count_nonzero(forward_out_numpynet)
92 |
93 | if prob == 1.:
94 | assert non_zeros == 0
95 | assert not np.all(forward_out_numpynet)
96 |
97 | elif prob == 0.:
98 | assert non_zeros == b * w * h * c
99 | np.testing.assert_allclose(forward_out_numpynet, inpt, rtol=1e-5, atol=1e-8)
100 |
101 | else:
102 | assert forward_out_numpynet.shape == inpt.shape
103 |
104 | np.testing.assert_allclose(layer.delta, np.zeros(shape=(b, w, h, c), dtype=float), rtol=1e-5, atol=1e-8)
105 |
106 |
107 | @given(b = st.integers(min_value=1, max_value=15 ),
108 | w = st.integers(min_value=1, max_value=100),
109 | h = st.integers(min_value=1, max_value=100),
110 | c = st.integers(min_value=1, max_value=10 ),
111 | prob = st.floats(min_value=0., max_value=1.))
112 | @settings(max_examples=20,
113 | deadline=None)
114 | def test_backward (self, b, w, h, c, prob):
115 |
117 | # Random input
118 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c)).astype(float)
119 |
120 | # Initialize the numpy_net model
121 | layer = Dropout_layer(input_shape=inpt.shape, prob=prob)
122 |
123 | # Try to backward
124 | with pytest.raises(NotFittedError):
125 | delta = np.zeros(shape=inpt.shape, dtype=float)
126 | layer.backward(delta)
127 |
128 | # Tensor Flow dropout, just to see if it works
129 | # forward_out_keras = K.eval(tf.nn.dropout(inpt, seed=None, keep_prob=prob))
130 |
131 | # FORWARD
132 |
133 | layer.forward(inpt)
134 | forward_out_numpynet = layer.output
135 |
136 | non_zeros = np.count_nonzero(forward_out_numpynet)
137 |
138 | if prob == 1.:
139 | assert non_zeros == 0
140 | assert not np.all(forward_out_numpynet)
141 |
142 | elif prob == 0.:
143 | assert non_zeros == b * w * h * c
144 | np.testing.assert_allclose(forward_out_numpynet, inpt, rtol=1e-5, atol=1e-8)
145 |
146 | else:
147 | assert forward_out_numpynet.shape == inpt.shape
148 |
149 | np.testing.assert_allclose(layer.delta, np.zeros(shape=(b, w, h, c), dtype=float), rtol=1e-5, atol=1e-8)
150 |
151 | # BACKWARD
152 |
153 | delta = np.random.uniform(low=0., high=1., size=(b, w, h, c))
154 | prev_delta = delta.copy()
155 |
156 | layer.backward(delta)
157 |
158 | assert delta.shape == inpt.shape
159 |
160 | if prob == 0.:
161 | assert np.allclose(delta, prev_delta)
162 |
163 | elif prob == 1.:
164 | np.testing.assert_allclose(delta, np.zeros(shape=inpt.shape, dtype=float), rtol=1e-5, atol=1e-8)
165 |
166 | else:
167 | assert not np.allclose(delta, np.zeros(shape=inpt.shape, dtype=float), rtol=1e-5, atol=1e-8)
168 |
--------------------------------------------------------------------------------
/NumPyNet/layers/l2norm_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | from NumPyNet.utils import check_is_fitted
9 | from NumPyNet.layers.base import BaseLayer
10 |
11 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
12 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
13 |
14 |
15 | class L2Norm_layer(BaseLayer):
16 | '''
17 | L2Norm layer
18 |
19 | Parameters
20 | ----------
21 | input_shape : tuple (default=None)
22 | Shape of the input in the format (batch, w, h, c), None is used when the layer is part of a Network model.
23 |
24 | axis : integer, default None.
25 | Axis along which the L2 normalization is performed. If None, normalize the entire array.
26 |
27 | Example
28 | -------
29 | >>> import os
30 | >>>
31 | >>> import pylab as plt
32 | >>> from PIL import Image
33 | >>>
34 | >>> img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
35 | >>> float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
36 | >>>
37 | >>> filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
38 | >>> inpt = np.asarray(Image.open(filename), dtype=float)
39 | >>> inpt.setflags(write=1)
40 | >>> inpt = img_2_float(inpt)
41 | >>>
42 | >>> # add batch = 1
43 | >>> inpt = np.expand_dims(inpt, axis=0)
44 | >>>
45 | >>> layer = L2Norm_layer(input_shape=inpt.shape)
46 | >>>
47 | >>> # FORWARD
48 | >>>
49 | >>> layer.forward(inpt)
50 | >>> forward_out = layer.output
51 | >>> print(layer)
52 | >>>
53 | >>> # BACKWARD
54 | >>>
55 | >>> delta = np.zeros(shape=inpt.shape, dtype=float)
56 | >>> layer.backward(delta)
57 | >>>
58 | >>> # Visualizations
59 | >>>
60 | >>> fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
61 | >>> fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
62 | >>>
63 | >>> fig.suptitle('L2Normalization Layer')
64 | >>>
65 | >>> ax1.imshow(float_2_img(inpt[0]))
66 | >>> ax1.set_title('Original image')
67 | >>> ax1.axis('off')
68 | >>>
69 | >>> ax2.imshow(float_2_img(forward_out[0]))
70 | >>> ax2.set_title("Forward")
71 | >>> ax2.axis("off")
72 | >>>
73 | >>> ax3.imshow(float_2_img(delta[0]))
74 | >>> ax3.set_title('Backward')
75 | >>> ax3.axis('off')
76 | >>>
77 | >>> fig.tight_layout()
78 | >>> plt.show()
79 |
80 | Reference
81 | ---------
82 | TODO
83 |
84 | '''
85 |
86 | def __init__(self, input_shape=None, axis=None, **kwargs):
87 |
88 | self.axis = axis
89 | self.scales = None
90 |
91 | super(L2Norm_layer, self).__init__(input_shape=input_shape)
92 |
93 | def __str__(self):
94 | batch, w, h, c = self.out_shape
95 | return 'l2norm {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d} -> {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d}'.format(
96 | batch, w, h, c)
97 |
98 | def forward(self, inpt):
99 | '''
100 | Forward of the l2norm layer, apply the l2 normalization over
101 | the input along the given axis
102 |
103 | Parameters
104 | ----------
105 | inpt : array-like
106 | Input batch of images in format (batch, in_w, in_h, in_c)
107 |
108 | Returns
109 | -------
110 | self
111 | '''
112 |
113 | self._check_dims(shape=self.input_shape, arr=inpt, func='Forward')
114 |
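# inverse L2 norm along the chosen axis (the 1e-8 term avoids division by zero)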
115 | norm = (inpt * inpt).sum(axis=self.axis, keepdims=True)
116 | norm = 1. / np.sqrt(norm + 1e-8)
117 | self.output = inpt * norm
118 | self.scales = (1. - self.output) * norm
119 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
120 |
121 | return self
122 |
123 | def backward(self, delta):
124 | '''
125 | Backward function of the l2norm layer
126 |
127 | Parameters
128 | ----------
129 | delta : array-like
130 | delta array of shape (batch, w, h, c). Global delta to be backpropagated.
131 |
132 | Returns
133 | -------
134 | self
135 | '''
136 |
137 | check_is_fitted(self, 'delta')
138 | self._check_dims(shape=self.input_shape, arr=delta, func='Backward')
139 |
140 | self.delta += self.scales
141 | delta[:] += self.delta
142 |
143 | return self
144 |
145 |
146 | if __name__ == '__main__':
147 |
148 | import os
149 |
150 | import pylab as plt
151 | from PIL import Image
152 |
153 | img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
154 | float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
155 |
156 | filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
157 | inpt = np.asarray(Image.open(filename), dtype=float)
158 | inpt.setflags(write=1)
159 | inpt = img_2_float(inpt)
160 |
161 | # add batch = 1
162 | inpt = np.expand_dims(inpt, axis=0)
163 |
164 | layer = L2Norm_layer(input_shape=inpt.shape)
165 |
166 | # FORWARD
167 |
168 | layer.forward(inpt)
169 | forward_out = layer.output
170 | print(layer)
171 |
172 | # BACKWARD
173 |
174 | delta = np.zeros(shape=inpt.shape, dtype=float)
175 | layer.backward(delta)
176 |
177 | # Visualizations
178 |
179 | fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
180 | fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
181 |
182 | fig.suptitle('L2Normalization Layer')
183 |
184 | ax1.imshow(float_2_img(inpt[0]))
185 | ax1.set_title('Original image')
186 | ax1.axis('off')
187 |
188 | ax2.imshow(float_2_img(forward_out[0]))
189 | ax2.set_title("Forward")
190 | ax2.axis("off")
191 |
192 | ax3.imshow(float_2_img(delta[0]))
193 | ax3.set_title('Backward')
194 | ax3.axis('off')
195 |
196 | fig.tight_layout()
197 | plt.show()
198 |
--------------------------------------------------------------------------------
/NumPyNet/layers/l1norm_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import numpy as np
8 | from NumPyNet.utils import check_is_fitted
9 | from NumPyNet.layers.base import BaseLayer
10 |
11 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
12 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
13 |
14 |
15 | class L1Norm_layer(BaseLayer):
16 | '''
17 | L1Norm layer
18 |
19 | Parameters
20 | ----------
21 | input_shape : tuple (default=None)
22 | Shape of the input in the format (batch, w, h, c), None is used when the layer is part of a Network model.
23 |
24 | axis : integer, default None.
25 | Axis along which the L1Normalization is performed. If None, normalize the entire array.
26 |
27 | Example
28 | -------
29 | >>> import os
30 | >>>
31 | >>> import pylab as plt
32 | >>> from PIL import Image
33 | >>>
34 | >>> img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
35 | >>> float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
36 | >>>
37 | >>> filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
38 | >>> inpt = np.asarray(Image.open(filename), dtype=float)
39 | >>> inpt.setflags(write=1)
40 | >>> inpt = img_2_float(inpt)
41 | >>>
42 | >>> # add batch = 1
43 | >>> inpt = np.expand_dims(inpt, axis=0)
44 | >>>
45 | >>> layer = L1Norm_layer(input_shape=inpt.shape)
46 | >>>
47 | >>> # FORWARD
48 | >>>
49 | >>> layer.forward(inpt)
50 | >>> forward_out = layer.output
51 | >>> print(layer)
52 | >>>
53 | >>> # BACKWARD
54 | >>>
55 | >>> delta = np.zeros(shape=inpt.shape, dtype=float)
56 | >>> layer.backward(delta)
57 | >>>
58 | >>> # Visualizations
59 | >>>
60 | >>> fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
61 | >>> fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
62 | >>>
63 | >>> fig.suptitle('L1Normalization Layer')
64 | >>>
65 | >>> ax1.imshow(float_2_img(inpt[0]))
66 | >>> ax1.set_title('Original image')
67 | >>> ax1.axis('off')
68 | >>>
69 | >>> ax2.imshow(float_2_img(forward_out[0]))
70 | >>> ax2.set_title("Forward")
71 | >>> ax2.axis("off")
72 | >>>
73 | >>> ax3.imshow(float_2_img(delta[0]))
74 | >>> ax3.set_title('Backward')
75 | >>> ax3.axis('off')
76 | >>>
77 | >>> fig.tight_layout()
78 | >>> plt.show()
79 |
80 | Reference
81 | ---------
82 | TODO
83 | '''
84 |
85 | def __init__(self, input_shape=None, axis=None, **kwargs):
86 |
87 | self.axis = axis
88 | self.scales = None
89 |
90 | super(L1Norm_layer, self).__init__(input_shape=input_shape)
91 |
92 | def __str__(self):
93 | batch, w, h, c = self.input_shape
94 | return 'l1norm {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d} -> {0:>4d} x{1:>4d} x{2:>4d} x{3:>4d}'.format(
95 | batch, w, h, c)
96 |
97 | def forward(self, inpt):
98 | '''
99 | Forward of the l1norm layer, apply the l1 normalization over
100 | the input along the given axis
101 |
102 | Parameters
103 | ----------
104 | inpt : array-like
105 | Input batch of images in format (batch, in_w, in_h, in_c)
106 |
107 | Returns
108 | -------
109 | self
110 | '''
111 |
112 | self._check_dims(shape=self.input_shape, arr=inpt, func='Forward')
113 |
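# inverse L1 norm along the chosen axis (the 1e-8 term avoids division by zero)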
114 | norm = np.abs(inpt).sum(axis=self.axis, keepdims=True)
115 | norm = 1. / (norm + 1e-8)
116 | self.output = inpt * norm
117 | self.scales = -np.sign(self.output)
118 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
119 |
120 | return self
121 |
122 | def backward(self, delta):
123 | '''
124 | Backward function of the l1norm_layer
125 |
126 | Parameters
127 | ----------
128 | delta : array-like
129 | delta array of shape (batch, w, h, c). Global delta to be backpropagated.
130 |
131 | Returns
132 | -------
133 | self
134 | '''
135 |
136 | check_is_fitted(self, 'delta')
137 | self._check_dims(shape=self.input_shape, arr=delta, func='Backward')
138 |
139 | self.delta += self.scales
140 | delta[:] += self.delta
141 |
142 | return self
143 |
144 |
145 | if __name__ == '__main__':
146 |
147 | import os
148 |
149 | import pylab as plt
150 | from PIL import Image
151 |
152 | img_2_float = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 1.)).astype(float)
153 | float_2_img = lambda im : ((im - im.min()) * (1./(im.max() - im.min()) * 255.)).astype(np.uint8)
154 |
155 | filename = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'dog.jpg')
156 | inpt = np.asarray(Image.open(filename), dtype=float)
157 | inpt.setflags(write=1)
158 | inpt = img_2_float(inpt)
159 |
160 | # add batch = 1
161 | inpt = np.expand_dims(inpt, axis=0)
162 |
163 | layer = L1Norm_layer(input_shape=inpt.shape)
164 |
165 | # FORWARD
166 |
167 | layer.forward(inpt)
168 | forward_out = layer.output
169 | print(layer)
170 |
171 | # BACKWARD
172 |
173 | delta = np.zeros(shape=inpt.shape, dtype=float)
174 | layer.backward(delta)
175 |
176 | # Visualizations
177 |
178 | fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(10, 5))
179 | fig.subplots_adjust(left=0.1, right=0.95, top=0.95, bottom=0.15)
180 |
181 | fig.suptitle('L1Normalization Layer')
182 |
183 | ax1.imshow(float_2_img(inpt[0]))
184 | ax1.set_title('Original image')
185 | ax1.axis('off')
186 |
187 | ax2.imshow(float_2_img(forward_out[0]))
188 | ax2.set_title("Forward")
189 | ax2.axis("off")
190 |
191 | ax3.imshow(float_2_img(delta[0]))
192 | ax3.set_title('Backward')
193 | ax3.axis('off')
194 |
195 | fig.tight_layout()
196 | plt.show()
197 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/cost_layer.md:
--------------------------------------------------------------------------------
1 | # Cost layer
2 |
3 | In order to understand and quantify how well our model makes predictions on data, we define a *cost function*, which is a measure of the error committed by the network during training; for this purpose, the *cost layer* is often the last one in a CNN.
4 | An easy example of a cost function is the *mean squared error* (or L2 norm):
5 |
6 | C(y, t) = 1/N · ∑ᵢ (yᵢ - tᵢ)²
7 |
8 | where `y` is the predicted vector, while `t` is the array of true labels.
9 | It's clear that a higher difference between prediction and truth produces a higher cost.
10 | The minimization of this function is the objective of the backpropagation algorithm.
11 | Of course, a labeled dataset is needed to compute the cost (and, as a consequence, to train the Network).
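
As a tiny worked example of this formula (plain numpy, independent from the library):

```python
import numpy as np

y = np.array([0.8, 0.1, 0.6]) # network predictions
t = np.array([1.0, 0.0, 0.5]) # true labels

cost = np.mean((y - t)**2) # (0.04 + 0.01 + 0.01) / 3
print(cost) # ~0.02
```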
12 |
13 | **TODO**: explain which cost functions we implemented
14 | The definition of the different cost functions is inside the `Cost_layer` class in [this repository](https://github.com/Nico-Curti/NumPyNet/blob/master/NumPyNet/layers/cost_layer.py).
15 |
16 | Here is an example on how to use the cost layer as a single layer:
17 |
18 | ```python
19 | # first the essential import for the library.
20 | # after the installation:
21 | from NumPyNet.layers.cost_layer import Cost_layer # class import
22 | from NumPyNet.layers.cost_layer import cost_type as cs # cost_type enum class import
23 |
24 | import numpy as np # the library is entirely based on numpy
25 |
26 | batch, w, h, c = (5, 100, 100, 3)
27 | input = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # usually a vector or an image
28 | truth = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # true values, same shape as the input
29 |
30 | cost = cs.mse # Mean squared error function
31 |
32 | # Layer initialization
33 | layer = Cost_layer(input_shape=input.shape,
34 | cost_type=cost,
35 | scale=1.,
36 | ratio=0.,
37 | noobject_scale=1.,
38 | threshold=0.,
39 | smoothing=0.)
40 |
41 | # Forward pass: computes layer.output, layer.delta and layer.cost
42 | layer.forward(inpt=input, truth=truth)
43 | out_img = layer.output # the output in this case has the same shape as the input
44 |
45 | # Backward pass
46 | delta = np.zeros(shape=input.shape, dtype=float) # network delta, to be backpropagated
47 | layer.backward(delta)
48 |
49 | # now delta is updated with the scaled layer.delta and ready to be passed to the previous layer
50 | ```
51 |
52 | For a more detailed look at what's happening, here are the definitions of the `forward` and `backward` functions:
53 |
54 | ```python
55 | def forward(self, inpt, truth=None):
56 | '''
57 | Forward function for the cost layer. Using the chosen
58 | cost function, computes output, delta and cost.
59 | Parameters:
60 | inpt: the output of the previous layer.
61 | truth: truth values, it should have the same
62 | dimension as inpt.
63 | '''
64 | self._out_shape = inpt.shape
65 |
66 | if truth is not None:
67 |
68 | if self.smoothing: self._smoothing(truth) # smooth is applied on truth
69 |
70 | if self.cost_type == cost_type.smooth: self._smooth_l1(inpt, truth) # smooth_l1 if smooth not zero
71 | elif self.cost_type == cost_type.mae: self._l1(inpt, truth) # call for l1 if mae is cost
72 | elif self.cost_type == cost_type.wgan: self._wgan(inpt, truth) # call for wgan
73 | elif self.cost_type == cost_type.hellinger: self._hellinger(inpt, truth) # call for hellinger distance
74 | elif self.cost_type == cost_type.hinge: self._hinge(inpt, truth) # call for hinge loss
75 | elif self.cost_type == cost_type.logcosh: self._logcosh(inpt, truth) # call for log-cosh loss
76 | else: self._l2(inpt, truth) # call for l2 if mse or nothing
77 |
78 | if self.cost_type == cost_type.seg and self.noobject_scale != 1.: # seg if noobject_scale is not 1.
79 | self._seg(truth)
80 |
81 | if self.cost_type == cost_type.masked: # l2 Masked truth values if selected
82 | self._masked(inpt, truth)
83 |
84 | if self.ratio: # apply the ratio scaling, if set
85 | self._ratio(truth)
86 |
87 | if self.threshold: # apply the threshold cut, if set
88 | self._threshold()
89 |
90 |
91 | norm = 1. / self.delta.size # normalization of delta!
92 | self.delta *= norm
93 |
94 | self.cost = np.mean(self.output) # compute the cost
95 | ```
96 |
97 | The code proceeds as follows, if the truth array is given:
98 |
99 | * The first part is a "switch" that applies the selected cost function. Every cost function updates both output and delta (note that `layer.output` and `layer.delta` always have the same dimensions as the input). In the case of a *mean squared error* function, output_i will be (x_i - t_i)^2.
100 | * In the second part a series of methods is applied to the input, the output or the truth array, whenever the respective variable has the corresponding value.
101 | * In the last part, `layer.delta` is normalized and `layer.cost` is computed as the mean of the output; a quick sanity check of this behaviour is sketched below.
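
Under these definitions, a minimal sanity check could look like the following sketch (it assumes the `mse` cost and that the remaining constructor arguments default to the values shown in the usage example above):

```python
import numpy as np
from NumPyNet.layers.cost_layer import Cost_layer
from NumPyNet.layers.cost_layer import cost_type

x = np.random.uniform(low=0., high=1., size=(2, 4, 4, 3)) # predictions
t = np.random.uniform(low=0., high=1., size=(2, 4, 4, 3)) # truth

layer = Cost_layer(input_shape=x.shape, cost_type=cost_type.mse)
layer.forward(inpt=x, truth=t)

# with mse the output is the element-wise squared error, so the cost
# must reduce to the usual mean squared error
assert np.isclose(layer.cost, np.mean((x - t)**2))
```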
102 |
103 | And here's the backward function:
104 |
105 | ```python
106 | def backward(self, delta):
107 | '''
108 | Backward function of the cost_layer, it updates the delta
109 | variable to be backpropagated. self.delta is updated inside the cost function.
110 | Parameters:
111 | delta: array, error of the network, to be backpropagated
112 | '''
113 | delta[:] += self.scale * self.delta
114 | ```
115 |
116 | That's just an update of `delta` with a scaled `layer.delta`.
117 |
--------------------------------------------------------------------------------
/testing/test_softmax_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.layers.softmax_layer import Softmax_layer
10 |
11 | import numpy as np
12 | import pytest
13 | from hypothesis import strategies as st
14 | from hypothesis import given
15 | from hypothesis import settings
16 | from hypothesis import example
17 |
18 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
19 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
20 |
21 |
22 | class TestSoftmaxLayer :
23 | '''
24 | Tests:
25 | - constructor of Softmax_layer object
26 | - print function
27 | - forward function against tf.keras
28 | - backward function against tf.keras
29 |
31 | '''
32 |
33 | @given(s = st.floats(-2, 10),
34 | t = st.floats(0, 10))
35 | @settings(max_examples=10,
36 | deadline=None)
37 | def test_constructor (self, s, t):
38 |
39 | if t > 0:
40 | layer = Softmax_layer(spatial=s, temperature=t)
41 |
42 | assert layer.output == None
43 | assert layer.delta == None
44 |
45 | assert layer.spatial == s
46 |
47 | assert layer.temperature == 1. / t
48 |
49 | else :
50 | with pytest.raises(ValueError):
51 | layer = Softmax_layer(spatial=s, temperature=t)
52 |
53 | @given(b = st.integers(min_value=1, max_value=15 ),
54 | w = st.integers(min_value=1, max_value=100),
55 | h = st.integers(min_value=1, max_value=100),
56 | c = st.integers(min_value=1, max_value=10 ))
57 | @settings(max_examples=50,
58 | deadline=None)
59 | def test_printer (self, b, w, h, c):
60 |
61 | layer = Softmax_layer(input_shape=(b, w, h, c))
62 |
63 | print(layer)
64 |
65 | layer.input_shape = (3.14, w, h, c)
66 |
67 | with pytest.raises(ValueError):
68 | print(layer)
69 |
70 | @example(b=5, w=1, h=1, c=100, spatial=True) # typical case
71 | @given(b = st.integers(min_value=1, max_value=10),
72 | w = st.integers(min_value=1, max_value=100),
73 | h = st.integers(min_value=1, max_value=100),
74 | c = st.integers(min_value=10, max_value=100),
75 | spatial = st.booleans())
76 | @settings(max_examples=10,
77 | deadline=None)
78 | def test_forward (self, b, w, h, c, spatial):
79 |
80 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c)).astype(float)
81 | truth = np.random.choice([0., 1.], p=[.5, .5], size=(b, w, h, c)).astype(float)
82 |
83 | if spatial :
84 | inpt_tf = tf.Variable(inpt.copy())
85 | truth_tf = tf.Variable(truth.copy())
86 |
87 | else :
88 | inpt_tf = tf.Variable(inpt.copy().reshape(b, -1))
89 | truth_tf = tf.Variable(truth.copy().reshape(b, -1))
90 |
91 | # NumPyNet layer
92 | layer = Softmax_layer(input_shape=inpt.shape, temperature=1., spatial=spatial)
93 |
94 | # Tensorflow layer
95 | model = tf.keras.layers.Softmax(axis=-1)
96 | loss = tf.keras.losses.CategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM)
97 |
98 | # Tensorflow softmax
99 | preds = model(inpt_tf)
100 | # Computing loss for tensorflow
101 | keras_loss = loss(truth_tf, preds).numpy()
102 |
103 | forward_out_keras = preds.numpy().reshape(b, w, h, c)
104 |
105 | # Softmax + crossentropy NumPyNet
106 | layer.forward(inpt=inpt, truth=truth)
107 | forward_out_numpynet = layer.output
108 | numpynet_loss = layer.cost
109 |
110 | # testing softmax
111 | np.testing.assert_allclose(forward_out_keras, forward_out_numpynet, rtol=1e-5, atol=1e-8)
112 |
113 | # testing crossentropy
114 | np.testing.assert_allclose(keras_loss, numpynet_loss, rtol=1e-5, atol=1e-6)
115 |
116 |
117 | @example(b=5, w=1, h=1, c=100, spatial=True) # typical case
118 | @given(b = st.integers(min_value=1, max_value=10),
119 | w = st.integers(min_value=1, max_value=100),
120 | h = st.integers(min_value=1, max_value=100),
121 | c = st.integers(min_value=10, max_value=100),
122 | spatial = st.booleans())
123 | @settings(max_examples=10,
124 | deadline=None)
125 | def test_backward (self, b, w, h, c, spatial):
126 |
127 | w, h = (1, 1) # the backward is tested only with w = h = 1, since it works only in this case when spatial=False
128 |
129 | inpt = np.random.uniform(low=0., high=1., size=(b, w, h, c)).astype(float)
130 | truth = np.random.choice([0., 1.], p=[.5, .5], size=(b, w, h, c)).astype(float)
131 |
132 | if spatial :
133 | inpt_tf = tf.Variable(inpt)
134 | truth_tf = tf.Variable(truth)
135 |
136 | else :
137 | inpt_tf = tf.Variable(inpt.copy().reshape(b,-1))
138 | truth_tf = tf.Variable(truth.copy().reshape(b, -1))
139 |
140 | # NumPyNet layer
141 | layer = Softmax_layer(input_shape=inpt.shape, temperature=1., spatial=spatial)
142 |
143 | # Tensorflow layer
144 | model = tf.keras.layers.Softmax(axis=-1)
145 | loss = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.SUM)
146 |
147 | with tf.GradientTape() as tape :
148 | preds = model(inpt_tf)
149 | cost = loss(truth_tf, preds)
150 | grads = tape.gradient(cost, inpt_tf)
151 |
152 | forward_out_keras = preds.numpy().reshape(b, w, h, c)
153 | keras_loss = cost.numpy()
154 | delta_keras = grads.numpy().reshape(b, w, h, c)
155 |
156 | layer.forward(inpt=inpt, truth=truth)
157 | forward_out_numpynet = layer.output
158 | numpynet_loss = layer.cost
159 |
160 | delta = np.zeros(shape=inpt.shape, dtype=float)
161 | layer.backward(delta=delta)
162 |
163 | np.testing.assert_allclose(forward_out_keras, forward_out_numpynet, rtol=1e-5, atol=1e-8)
164 | np.testing.assert_allclose(keras_loss, numpynet_loss, rtol=1e-5, atol=1e-6)
165 | np.testing.assert_allclose(delta, delta_keras, rtol=1e-5, atol=1e-8)
166 |
--------------------------------------------------------------------------------
/docs/NumPyNet/layers/shortcut_layer.md:
--------------------------------------------------------------------------------
1 | # Shortcut Layer
2 |
3 | Deep Neural Network models suffer from the degradation problem, i.e. a reduction in accuracy as the depth of the network increases; moreover, this accuracy loss shows up even on the training dataset.
4 |
5 | In [the original paper](https://arxiv.org/pdf/1512.03385.pdf), the authors address the degradation problem by introducing a *deep residual learning* framework, or *shortcut connections*, that skip one or more layers (as shown in the figure below), without introducing extra parameters or computational cost.
6 |
7 | In the original paper, the shortcut connections perform an operation like:
8 |
9 | H(x) = F(x) + x
10 |
11 | where `F(x)` is the output of layer `l-1`, `x` is the output of layer `l-2`, and `H(x)` is the output of layer `l`, i.e. of the `Shortcut_layer` (see the image below for reference).
12 |
13 | 
14 |
15 | The object `Shortcut_layer` is an implementation of this concept, where it is possible to weight the linear combination through two coefficients, α and β, as:
16 |
17 | H(x) = α·F(x) + β·x
18 |
21 | And it can actually deal with inputs of different sizes.
22 | The code below shows an example of how to use the single shortcut layer's `forward` and `backward`:
23 |
24 | ```python
25 | # first the essential import for the library.
26 | # after the installation:
27 | from NumPyNet.layers.shortcut_layer import Shortcut_layer # class import
28 | from NumPyNet import activations
29 |
30 | import numpy as np # the library is entirely based on numpy
31 |
32 | # define a batch of images (even a single image is ok, but it is important that it has all the four dimensions) in the format (batch, width, height, channels)
33 |
34 | batch, w, h, c = (5, 100, 100, 3) # batch != 1 in this case
35 | input1 = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # you can also import some images from file
36 | input2 = np.random.uniform(low=0., high=1., size=(batch, w, h, c)) # second input
37 |
38 | alpha = 0.75
39 | beta = 0.5
40 |
41 | activ_func = activations.Relu() # it can also be:
42 | # activations.Relu (class Relu)
43 | # "Relu" (a string, case insensitive)
44 |
45 | # Layer initialization, with the activation function and the two combination coefficients
46 | layer = Shortcut_layer(activation=activ_func, alpha=alpha, beta=beta)
47 |
48 |
49 | # Forward pass
50 | layer.forward(inpt=input1, prev_output=input2)
51 | out_img = layer.output # the output in this case will be a batch of images of shape = (batch, out_width, out_heigth , out_channels)
52 |
53 | # Backward pass
54 | delta = np.random.uniform(low=0., high=1., size=input1.shape) # delta of the first input, to be backpropagated
55 | prev_delta = np.random.uniform(low=0., high=1., size=input2.shape) # delta of the second input, to be backpropagated
56 | layer.delta = np.random.uniform(low=0., high=1., size=out_img.shape) # layer delta, ideally coming from the next layer
57 | layer.backward(delta, prev_delta)
58 |
59 | # now both delta and prev_delta are modified and ready to be passed to the previous layers' delta
60 | # (the shortcut connection introduces no extra parameters, so there are no weight or bias updates)
60 | ```
61 |
62 | For a more detailed look at what's happening, here are the definitions of `forward` and `backward`:
63 |
64 | ```python
65 | def forward(self, inpt, prev_output):
66 | '''
67 | Forward function of the Shortcut layer: activation of the linear combination of the two inputs
68 |
69 | Parameters:
70 | inpt : array of shape (batch, w, h, c), first input of the layer
71 | prev_output : array of shape (batch, w, h, c), second input of the layer
72 | '''
73 | # assert inpt.shape == prev_output.shape
74 |
75 | self._out_shape = [inpt.shape, prev_output.shape]
76 |
77 | if inpt.shape == prev_output.shape:
78 | self.output = self.alpha * inpt[:] + self.beta * prev_output[:]
79 |
80 | else:
81 |
82 | # If the layer are combined the smaller one is distributed according to the
83 | # sample stride
84 | # Example:
85 | #
86 | # inpt = [[1, 1, 1, 1], prev_output = [[1, 1],
87 | # [1, 1, 1, 1], [1, 1]]
88 | # [1, 1, 1, 1],
89 | # [1, 1, 1, 1]]
90 | #
91 | # output = [[2, 1, 2, 1],
92 | # [1, 1, 1, 1],
93 | # [2, 1, 2, 1],
94 | # [1, 1, 1, 1]]
95 |
96 | if (self.ix, self.iy, self.kx) == (None, None, None):
97 | self._stride_index(inpt.shape, prev_output.shape)
98 |
99 | self.output = inpt.copy()
100 | self.output[:, self.ix, self.jx, self.kx] = self.alpha * self.output[:, self.ix, self.jx, self.kx] + self.beta * prev_output[:, self.iy, self.jy, self.ky]
101 |
102 |
103 | self.output = self.activation(self.output)
104 | self.delta = np.zeros(shape=self.out_shape, dtype=float)
105 | ```
106 |
107 | If the shapes of the two inputs are identical, then the output is the simple linear combination of the two.
108 | If that's not the case, the function `_stride_index` computes the indices (for both inputs) whose corresponding positions will be combined together, as sketched right below.
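
For instance, here is a 2-D toy version of the striding shown in the comment above (plain numpy with α = β = 1; the real layer works on 4-D tensors and obtains the indices from `_stride_index`):

```python
import numpy as np

big = np.ones(shape=(4, 4))   # output of the bigger layer
small = np.ones(shape=(2, 2)) # output of the smaller layer

stride = big.shape[0] // small.shape[0] # sampling stride: 2

ix = np.arange(0, big.shape[0], stride) # indexes on the bigger input: [0 2]
iy = np.arange(0, small.shape[0])       # indexes on the smaller input: [0 1]

out = big.copy()
out[np.ix_(ix, ix)] = big[np.ix_(ix, ix)] + small[np.ix_(iy, iy)]

print(out)
# [[2. 1. 2. 1.]
#  [1. 1. 1. 1.]
#  [2. 1. 2. 1.]
#  [1. 1. 1. 1.]]
```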
109 |
110 | ```python
111 | def backward(self, delta, prev_delta):
112 | '''
113 | Backward function of the Shortcut layer
114 |
115 | Parameters:
116 | delta : array of shape (batch, w, h, c), first delta to be backpropagated
117 | prev_delta : array of shape (batch, w, h, c), second delta to be backpropagated
118 |
119 | '''
120 |
121 | # derivative of the activation function w.r.t. the input
122 | self.delta *= self.gradient(self.output)
123 |
124 | delta[:] += self.delta * self.alpha
125 |
126 | if (self.ix, self.iy, self.kx) == (None, None, None): # same shapes
127 | prev_delta[:] += self.delta[:] * self.beta
128 |
129 | else: # different shapes
130 | prev_delta[:, self.ix, self.jx, self.kx] += self.beta * self.delta[:, self.iy, self.jy, self.ky]
131 | ```
132 |
133 | `backward` makes use of the same indices to backpropagate the deltas of both input layers.
134 |
--------------------------------------------------------------------------------
/NumPyNet/fmath.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import struct
8 | from NumPyNet import ENABLE_FMATH
9 |
10 | __author__ = ['Nico Curti']
11 | __email__ = ['nico.curti2@unibo.it']
12 |
13 |
14 | def pow2 (x):
15 | '''
16 | Fast math version of 'pow2' function
17 |
18 | Parameters
19 | ----------
20 | x : float
21 | Value to evaluate
22 |
23 | Returns
24 | -------
25 | res : float
26 | Result of the function
27 | '''
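# bit-trick approximation: build an integer whose IEEE-754 float interpretation
# approximates 2**x; the (1 << 23) factor writes into the float exponent field,
# while the rational term in z corrects the mantissa for the fractional part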
28 | offset = 1 if x < 0. else 0
29 | clipp = -126. if x < -126. else x
30 | z = clipp - int(clipp) + offset
31 |
32 | packed_x = struct.pack('i', int((1 << 23) * (clipp + 121.2740575 + 27.7280233 / (4.84252568 - z) - 1.49012907 * z)))
33 | return struct.unpack('f', packed_x)[0]
34 |
35 | def exp (x):
36 | '''
37 | Fast math version of 'exp' function
38 |
39 | Parameters
40 | ----------
41 | x : float
42 | Value to evaluate
43 |
44 | Returns
45 | -------
46 | res : float
47 | Result of the function
48 | '''
49 | return pow2(1.442695040 * x)
50 |
51 | def log2 (x):
52 | '''
53 | Fast math version of 'log2' function
54 |
55 | Parameters
56 | ----------
57 | x : float
58 | Value to evaluate
59 |
60 | Returns
61 | -------
62 | res : float
63 | Result of the function
64 | '''
65 | packed_x = struct.pack('f', x)
66 | i = struct.unpack('i', packed_x)[0]
67 | mx = (i & 0x007FFFFF) | 0x3f000000
68 | packed_x = struct.pack('i', mx)
69 |
70 | f = struct.unpack('f', packed_x)[0]
71 | i *= 1.1920928955078125e-7
72 | return i - 124.22551499 - 1.498030302 * f - 1.72587999 / (0.3520887068 + f)
73 |
74 | def log (x):
75 | '''
76 | Fast math version of 'log' function
77 |
78 | Parameters
79 | ----------
80 | x : float
81 | Value to evaluate
82 |
83 | Returns
84 | -------
85 | res : float
86 | Result of the function
87 | '''
88 | packed_x = struct.pack('f', x)
89 | i = struct.unpack('i', packed_x)[0]
90 | y = (i - 1064992212.25472) / (1092616192. - 1064992212.25472)
91 | ey = exp(y)
92 | y -= (ey - x) / ey
93 | ey = exp(y)
94 | y -= (ey - x) / ey
95 | ey = exp(y)
96 | y -= (ey - x) / ey
97 | ey = exp(y)
98 | y -= (ey - x) / ey
99 | return y
100 |
101 | def pow (a, b):
102 | '''
103 | Fast math version of 'pow' function
104 |
105 | Parameters
106 | ----------
107 | a : float
108 | Base
109 |
110 | b : float
111 | Exponent
112 |
113 | Returns
114 | -------
115 | res : float
116 | Result of the function
117 | '''
118 | return pow2(b * log2(a))
119 |
120 | def log10 (x):
121 | '''
122 | Fast math version of 'log10' function
123 |
124 | Parameters
125 | ----------
126 | x : float
127 | Value to evaluate
128 |
129 | Returns
130 | -------
131 | res : float
132 | Result of the function
133 | '''
134 | packed_x = struct.pack('f', x)
135 | i = struct.unpack('i', packed_x)[0]
136 | y = (i - 1064992212.25472) / (1092616192. - 1064992212.25472)
137 | y10 = pow(10, y)
138 | y -= (y10 - x) / (2.302585092994046 * y10)
139 | y10 = pow(10, y)
140 | y -= (y10 - x) / (2.302585092994046 * y10)
141 | return y
142 |
143 | def atanh (x):
144 | '''
145 | Fast math version of 'atanh' function
146 |
147 | Parameters
148 | ----------
149 | x : float
150 | Value to evaluate
151 |
152 | Returns
153 | -------
154 | res : float
155 | Result of the function
156 | '''
157 | return .5 * log((1. + x) / (1. - x))
158 |
159 | def tanh (x):
160 | '''
161 | Fast math version of 'tanh' function
162 |
163 | Parameters
164 | ----------
165 | x : float
166 | Value to evaluate
167 |
168 | Returns
169 | -------
170 | res : float
171 | Result of the function
172 | '''
173 | e = exp(-2 * x)
174 | return (1. - e) / (1. + e)
175 |
176 | def hardtanh (x):
177 | '''
178 | Fast math version of 'hardtanh' function
179 |
180 | Parameters
181 | ----------
182 | x : float
183 | Value to evaluate
184 |
185 | Returns
186 | -------
187 | res : float
188 | Result of the function
189 | '''
190 |
191 | if x >= -1 and x <= 1.: return x
192 | elif x < -1 : return -1.
193 | else : return 1.
194 |
195 | def sqrt (x):
196 | '''
197 | Fast math version of 'sqrt' function
198 |
199 | Parameters
200 | ----------
201 | x : float
202 | Value to evaluate
203 |
204 | Returns
205 | -------
206 | res : float
207 | Result of the function
208 | '''
209 |
210 | xhalf = .5 * x
211 |
212 | packed_x = struct.pack('f', x)
213 | i = struct.unpack('i', packed_x)[0] # treat float's bytes as int
214 | i = 0x5f3759df - (i >> 1) # arithmetic with magic number
215 | packed_i = struct.pack('i', i)
216 | y = struct.unpack('f', packed_i)[0] # treat int's bytes as float
217 |
218 | y = y * (1.5 - (xhalf * y * y)) # Newton's method
219 | y = y * (1.5 - (xhalf * y * y)) # Newton's method
220 | return x * y
221 |
222 | def rsqrt (x):
223 | '''
224 | Fast math version of 'rsqrt' function
225 |
226 | Parameters
227 | ----------
228 | x : float
229 | Value to evaluate
230 |
231 | Returns
232 | -------
233 | res : float
234 | Result of the function
235 | '''
236 |
237 | xhalf = .5 * x
238 |
239 | packed_x = struct.pack('f', x)
240 | i = struct.unpack('i', packed_x)[0] # treat float's bytes as int
241 | i = 0x5f3759df - (i >> 1) # arithmetic with magic number
242 | packed_i = struct.pack('i', i)
243 | y = struct.unpack('f', packed_i)[0] # treat int's bytes as float
244 |
245 | y = y * (1.5 - (xhalf * y * y)) # Newton's method
246 | y = y * (1.5 - (xhalf * y * y)) # Newton's method
247 | return y
248 |
249 |
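# when fast-math is enabled the corresponding numpy functions are monkey-patched
# at import time: any NumPyNet code calling np.exp, np.tanh, etc. transparently
# uses the fast (lower precision) approximations defined above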
250 | if ENABLE_FMATH:
251 |
252 | import numpy as np
253 |
254 | np.pow2 = pow2
255 | np.exp = exp
256 | np.log2 = log2
257 | np.log = log
258 | np.pow = pow
259 | np.log10 = log10
260 | np.atanh = atanh
261 | np.tanh = tanh
262 | np.hardtanh = hardtanh
263 | np.sqrt = sqrt
264 | np.rsqrt = rsqrt
265 |
--------------------------------------------------------------------------------
/testing/test_avgpool_layer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 |
4 | from __future__ import division
5 | from __future__ import print_function
6 |
7 | import tensorflow as tf
8 |
9 | from NumPyNet.exception import LayerError
10 | from NumPyNet.exception import NotFittedError
11 | from NumPyNet.layers.avgpool_layer import Avgpool_layer
12 |
13 | import numpy as np
14 | import pytest
15 | from hypothesis import strategies as st
16 | from hypothesis import given
17 | from hypothesis import settings
18 |
19 | __author__ = ['Mattia Ceccarelli', 'Nico Curti']
20 | __email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
21 |
22 |
23 | class TestAvgpoolLayer:
24 | '''
25 | Tests:
26 | - constructor of Avgpool_layer object
27 | - print function
28 | - forward function against tf.keras
29 | - backward function against tf.keras
30 | '''
31 |
32 | @given(size = st.integers(min_value=-10, max_value=10),
33 | stride = st.integers(min_value=0, max_value=10),
34 | pad = st.booleans())
35 | @settings(max_examples=20,
36 | deadline=None)
37 | def test_constructor (self, size, stride, pad):
38 |
39 | if size <= 0 :
40 |
41 | with pytest.raises(LayerError):
42 | layer = Avgpool_layer(size=size, stride=stride, pad=pad)
43 |
44 | else:
45 | layer = Avgpool_layer(size=size, stride=stride, pad=pad)
46 |
47 | assert layer.size == (size, size)
48 | assert len(layer.size) == 2
49 |
50 | if stride:
51 | assert layer.stride == (stride, stride)
52 | else:
53 | assert layer.size == layer.stride
54 |
55 | assert len(layer.stride) == 2
56 |
57 | assert layer.delta == None
58 | assert layer.output == None
59 |
60 | assert layer.pad == pad
61 | assert layer.pad_left == 0
62 | assert layer.pad_right == 0
63 | assert layer.pad_top == 0
64 | assert layer.pad_bottom == 0
65 |
66 |
67 | @given(size = st.integers(min_value=1, max_value=10),
68 | stride = st.integers(min_value=0, max_value=10),
69 | pad = st.booleans())
70 | @settings(max_examples=10,
71 | deadline=None)
72 | def test_printer (self, size, stride, pad):
73 |
74 | layer = Avgpool_layer(size=size, stride=stride, pad=pad)
75 |
76 | with pytest.raises(TypeError):
77 | print(layer)
78 |
79 | layer.input_shape = (1, 2, 3, 4)
80 |
81 | print(layer)
82 |
83 | @given(batch = st.integers(min_value=1, max_value=15),
84 | w = st.integers(min_value=15, max_value=100),
85 | h = st.integers(min_value=15, max_value=100),
86 | c = st.integers(min_value=1, max_value=10),
87 | size = st.integers(min_value=1, max_value=10),
88 | stride = st.integers(min_value=1, max_value=10),
89 | pad = st.booleans())
90 | @settings(max_examples=10,
91 | deadline=None)
92 | def test_forward (self, batch, w, h, c, size, stride, pad):
93 |
94 | inpt = np.random.uniform(low=0., high=1., size=(batch, w, h, c)).astype(float)
95 |
96 | # Numpy_net model
97 | numpynet = Avgpool_layer(input_shape=inpt.shape, size=size, stride=stride, pad=pad)
98 |
99 | keras_pad = 'same' if pad else 'valid'
100 |
101 | # Keras model initialization.
102 | model = tf.keras.layers.AveragePooling2D(pool_size=(size, size), strides=stride, padding=keras_pad, data_format='channels_last')
103 |
104 | # Keras Output
105 | forward_out_keras = model(inpt).numpy()
106 |
107 | # numpynet forward and output
108 | numpynet.forward(inpt=inpt)
109 | forward_out_numpynet = numpynet.output
110 |
111 | # Test for dimension and allclose of all output
112 | assert forward_out_numpynet.shape == forward_out_keras.shape
113 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, rtol=1e-5, atol=1e-8)
114 |
115 |
116 | @given(batch = st.integers(min_value=1, max_value=15),
117 | w = st.integers(min_value=15, max_value=100),
118 | h = st.integers(min_value=15, max_value=100),
119 | c = st.integers(min_value=1, max_value=10),
120 | size = st.integers(min_value=1, max_value=10),
121 | stride = st.integers(min_value=1, max_value=10),
122 | pad = st.booleans())
123 | @settings(max_examples=10,
124 | deadline=None)
125 | def test_backward (self, batch, w, h, c, size, stride, pad):
126 |
127 | inpt = np.random.uniform(low=0., high=1., size=(batch, w, h, c)).astype(float)
128 | tf_input = tf.Variable(inpt)
129 |
130 | # Numpy_net model
131 | numpynet = Avgpool_layer(input_shape=inpt.shape, size=size, stride=stride, pad=pad)
132 |
133 | keras_pad = 'same' if pad else 'valid'
134 |
135 | # Keras model initialization.
136 | model = tf.keras.layers.AveragePooling2D(pool_size=(size, size), strides=stride, padding=keras_pad, data_format='channels_last')
137 |
138 | # Keras Output
139 | with tf.GradientTape() as tape :
140 | preds = model(tf_input)
141 | grads = tape.gradient(preds, tf_input)
142 |
143 | forward_out_keras = preds.numpy()
144 | delta_keras = grads.numpy()
145 |
146 |
147 | # try to backward
148 | with pytest.raises(NotFittedError):
149 | # Global delta init.
150 | delta = np.empty(shape=inpt.shape, dtype=float)
151 |
152 | # numpynet Backward
153 | numpynet.backward(delta=delta)
154 |
155 | # numpynet forward and output
156 | numpynet.forward(inpt=inpt)
157 | forward_out_numpynet = numpynet.output
158 |
159 | # Test for dimension and allclose of all output
160 | assert forward_out_numpynet.shape == forward_out_keras.shape
161 | np.testing.assert_allclose(forward_out_numpynet, forward_out_keras, rtol=1e-5, atol=1e-8)
162 |
163 | # BACKWARD
164 |
165 | # Definition of starting delta for numpynet
166 | numpynet.delta = np.ones(shape=numpynet.out_shape, dtype=float)
167 | delta = np.zeros(shape=inpt.shape, dtype=float)
168 |
169 | # numpynet Backward
170 | numpynet.backward(delta=delta)
171 |
172 | # Back tests
173 | assert delta.shape == delta_keras.shape
174 | assert delta.shape == inpt.shape
175 | np.testing.assert_allclose(delta, delta_keras, rtol=1e-5, atol=1e-8)
176 |
--------------------------------------------------------------------------------