├── .gitattributes ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── example.py ├── example_lenna.py ├── images ├── blockdiagram.png ├── input1.bmp ├── input2.bmp ├── input3.bmp ├── input4.BMP ├── input5.bmp ├── input6.bmp ├── input7.bmp ├── input8.bmp ├── lenna.gif ├── lenna_diagonal.png ├── lenna_edge.png ├── output1.png ├── output2.png ├── output3.png ├── output4.png ├── output5.png └── output6.png ├── pycnn.py ├── requirements.txt └── test ├── test_misc.py └── test_processing.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | *.sln merge=union 7 | *.csproj merge=union 8 | *.vbproj merge=union 9 | *.fsproj merge=union 10 | *.dbproj merge=union 11 | 12 | # Standard to msysgit 13 | *.doc diff=astextplain 14 | *.DOC diff=astextplain 15 | *.docx diff=astextplain 16 | *.DOCX diff=astextplain 17 | *.dot diff=astextplain 18 | *.DOT diff=astextplain 19 | *.pdf diff=astextplain 20 | *.PDF diff=astextplain 21 | *.rtf diff=astextplain 22 | *.RTF diff=astextplain 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ################# 2 | ## Eclipse 3 | ################# 4 | 5 | *.pydevproject 6 | .project 7 | .metadata 8 | bin/ 9 | tmp/ 10 | *.tmp 11 | *.bak 12 | *.swp 13 | *~.nib 14 | local.properties 15 | .classpath 16 | .settings/ 17 | .loadpath 18 | 19 | # External tool builders 20 | .externalToolBuilders/ 21 | 22 | # Locally stored "Eclipse launch configurations" 23 | *.launch 24 | 25 | # CDT-specific 26 | .cproject 27 | 28 | # PDT-specific 29 | .buildpath 30 | 31 | 32 | ################# 33 | ## Visual Studio 34 | ################# 35 | 36 | ## Ignore Visual Studio temporary files, build results, and 37 | ## files generated by popular Visual Studio add-ons. 
38 | 39 | # User-specific files 40 | *.suo 41 | *.user 42 | *.sln.docstates 43 | 44 | # Build results 45 | 46 | [Dd]ebug/ 47 | [Rr]elease/ 48 | x64/ 49 | build/ 50 | [Bb]in/ 51 | [Oo]bj/ 52 | 53 | # MSTest test Results 54 | [Tt]est[Rr]esult*/ 55 | [Bb]uild[Ll]og.* 56 | 57 | *_i.c 58 | *_p.c 59 | *.ilk 60 | *.meta 61 | *.obj 62 | *.pch 63 | *.pdb 64 | *.pgc 65 | *.pgd 66 | *.rsp 67 | *.sbr 68 | *.tlb 69 | *.tli 70 | *.tlh 71 | *.tmp 72 | *.tmp_proj 73 | *.log 74 | *.vspscc 75 | *.vssscc 76 | .builds 77 | *.pidb 78 | *.log 79 | *.scc 80 | 81 | # Visual C++ cache files 82 | ipch/ 83 | *.aps 84 | *.ncb 85 | *.opensdf 86 | *.sdf 87 | *.cachefile 88 | 89 | # Visual Studio profiler 90 | *.psess 91 | *.vsp 92 | *.vspx 93 | 94 | # Guidance Automation Toolkit 95 | *.gpState 96 | 97 | # ReSharper is a .NET coding add-in 98 | _ReSharper*/ 99 | *.[Rr]e[Ss]harper 100 | 101 | # TeamCity is a build add-in 102 | _TeamCity* 103 | 104 | # DotCover is a Code Coverage Tool 105 | *.dotCover 106 | 107 | # NCrunch 108 | *.ncrunch* 109 | .*crunch*.local.xml 110 | 111 | # Installshield output folder 112 | [Ee]xpress/ 113 | 114 | # DocProject is a documentation generator add-in 115 | DocProject/buildhelp/ 116 | DocProject/Help/*.HxT 117 | DocProject/Help/*.HxC 118 | DocProject/Help/*.hhc 119 | DocProject/Help/*.hhk 120 | DocProject/Help/*.hhp 121 | DocProject/Help/Html2 122 | DocProject/Help/html 123 | 124 | # Click-Once directory 125 | publish/ 126 | 127 | # Publish Web Output 128 | *.Publish.xml 129 | *.pubxml 130 | 131 | # NuGet Packages Directory 132 | ## TODO: If you have NuGet Package Restore enabled, uncomment the next line 133 | #packages/ 134 | 135 | # Windows Azure Build Output 136 | csx 137 | *.build.csdef 138 | 139 | # Windows Store app package directory 140 | AppPackages/ 141 | 142 | # Others 143 | sql/ 144 | *.Cache 145 | ClientBin/ 146 | [Ss]tyle[Cc]op.* 147 | ~$* 148 | *~ 149 | *.dbmdl 150 | *.[Pp]ublish.xml 151 | *.pfx 152 | *.publishsettings 153 | 154 | # RIA/Silverlight projects 155 | Generated_Code/ 156 | 157 | # Backup & report files from converting an old project file to a newer 158 | # Visual Studio version. 
Backup files are not needed, because we have git ;-) 159 | _UpgradeReport_Files/ 160 | Backup*/ 161 | UpgradeLog*.XML 162 | UpgradeLog*.htm 163 | 164 | # SQL Server files 165 | App_Data/*.mdf 166 | App_Data/*.ldf 167 | 168 | ############# 169 | ## Windows detritus 170 | ############# 171 | 172 | # Windows image file caches 173 | Thumbs.db 174 | ehthumbs.db 175 | 176 | # Folder config file 177 | Desktop.ini 178 | 179 | # Recycle Bin used on file shares 180 | $RECYCLE.BIN/ 181 | 182 | # Mac crap 183 | .DS_Store 184 | 185 | 186 | ############# 187 | ## Python 188 | ############# 189 | 190 | *.py[co] 191 | 192 | # Packages 193 | *.egg 194 | *.egg-info 195 | dist/ 196 | build/ 197 | eggs/ 198 | parts/ 199 | var/ 200 | sdist/ 201 | develop-eggs/ 202 | .installed.cfg 203 | 204 | # Installer logs 205 | pip-log.txt 206 | 207 | # Unit test / coverage reports 208 | .coverage 209 | .tox 210 | 211 | #Translations 212 | *.mo 213 | 214 | #Mr Developer 215 | .mr.developer.cfg 216 | 217 | ############# 218 | ## Jetbrains PyCharm 219 | ############# 220 | .idea/ 221 | *.iws 222 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | - "3.5" 5 | sudo: required 6 | before_install: 7 | - sudo apt-get install libblas-dev liblapack-dev gfortran -y 8 | - pip install codecov 9 | - pip install flake8 10 | # command to install dependencies 11 | install: "pip install -r requirements.txt" 12 | # command to run tests 13 | before_script: 14 | - flake8 . 15 | script: 16 | - nosetests --with-coverage --cover-inclusive 17 | after_success: 18 | - codecov 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Ankit Aggarwal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PyCNN: Image Processing with Cellular Neural Networks in Python 2 | 3 | [![Build Status](https://travis-ci.org/ankitaggarwal011/PyCNN.svg?branch=master)](https://travis-ci.org/ankitaggarwal011/PyCNN) 4 | [![Coverage Status](https://codecov.io/gh/ankitaggarwal011/PyCNN/coverage.svg?branch=master)](https://codecov.io/gh/ankitaggarwal011/PyCNN) 5 | 6 | **Cellular Neural Networks (CNN)** [[wikipedia]](https://en.wikipedia.org/wiki/Cellular_neural_network) [[paper]](http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7600) are a parallel computing paradigm first proposed in 1988. Cellular neural networks are similar to neural networks, with the difference that communication is allowed only between neighboring units. Image Processing is one of its [applications](https://en.wikipedia.org/wiki/Cellular_neural_network#Applications). CNN processors were designed to perform image processing; specifically, the original application of CNN processors was to perform real-time ultra-high frame-rate (>10,000 frame/s) processing unachievable by digital processors. 7 | 8 | This Python library is an implementation of CNN for the application of **Image Processing**. 9 | 10 | **Note**: The library has been **cited** in the research published on [Using Python and Julia for Efficient Implementation of Natural Computing and Complexity Related Algorithms](http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=7168488); see reference #19 in the references section. I'm glad that this library could be of help to the community. 11 | 12 | **Note**: A cellular neural network (CNN) must not be confused with the completely different convolutional neural network (ConvNet). 13 | 14 | ![alt text](images/blockdiagram.png "CNN Architecture") 15 | 16 | As shown in the above diagram, imagine a control system with a feedback loop. f(x) is the piece-wise linear sigmoid function. The control (template B) and the feedback (template A) templates (coefficients) are configurable and control the output of the system. Significant research has been done on determining the templates for common image processing techniques; these templates are published in this [Template Library](http://cnn-technology.itk.ppke.hu/Template_library_v4.0alpha1.pdf). 17 | 18 | ### Further reading: 19 | - [Methods for image processing and pattern formation in Cellular Neural Networks: A Tutorial](http://ai.pku.edu.cn/aiwebsite/research.files/collected%20papers%20-%20others/Methods%20for%20image%20processing%20and%20pattern%20formation%20in%20Cellular%20Neural%20Networks%20-%20a%20tutorial.pdf) 20 | 21 | ## Motivation 22 | 23 | This is an extension of a demo at the 14th Cellular Nanoscale Networks and Applications (CNNA) Conference 2014. I have written a blog post, available at [Image Processing in CNN with Python on Raspberry Pi](http://blog.ankitaggarwal.me/technology/image-processing-with-cellular-neural-networks-in-python-on-raspberry-pi). 24 | The library was used in my paper [B3: A plug-n-play internet enabled platform for real time image processing](http://ieeexplore.ieee.org/document/6888614/) published in IEEE Xplore. 25 | 26 | ## Dependencies 27 | 28 | The library supports Python >= 2.7 and Python >= 3.3. 29 | 30 | The following Python modules are needed in order to use this library:
31 | ``` 32 | Pillow: 3.3.1 33 | Scipy: 0.18.0 34 | Numpy: 1.11.1 + mkl 35 | ``` 36 | Note: Scipy and Numpy can be installed on a Windows machines using binaries provided over [here](http://www.lfd.uci.edu/%7Egohlke/pythonlibs). 37 | 38 | ## Example 1 39 | 40 | ```sh 41 | $ python example.py 42 | ``` 43 | 44 | #### OR 45 | 46 | ```python 47 | from pycnn import PyCNN 48 | 49 | cnn = PyCNN() 50 | ``` 51 | 52 | **Input:** 53 | 54 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/input1.bmp) 55 | 56 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/input3.bmp) 57 | 58 | **Edge Detection:** 59 | 60 | ```python 61 | cnn.edgeDetection('images/input1.bmp', 'images/output1.png') 62 | ``` 63 | 64 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/output1.png) 65 | 66 | **Grayscale Edge Detection** 67 | 68 | ```python 69 | cnn.grayScaleEdgeDetection('images/input1.bmp', 'images/output2.png') 70 | ``` 71 | 72 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/output2.png) 73 | 74 | **Corner Detection:** 75 | 76 | ```python 77 | cnn.cornerDetection('images/input1.bmp', 'images/output3.png') 78 | ``` 79 | 80 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/output3.png) 81 | 82 | **Diagonal line Detection:** 83 | 84 | ```python 85 | cnn.diagonalLineDetection('images/input1.bmp', 'images/output4.png') 86 | ``` 87 | 88 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/output4.png) 89 | 90 | **Inversion (Logic NOT):** 91 | 92 | ```python 93 | cnn.inversion('images/input1.bmp', 'images/output5.png') 94 | ``` 95 | 96 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/output5.png) 97 | 98 | **Optimal Edge Detection:** 99 | 100 | ```python 101 | cnn.optimalEdgeDetection('images/input3.bmp', 'images/output6.png') 102 | ``` 103 | 104 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/output6.png) 105 | 106 | ## Example 2 107 | 108 | ```sh 109 | $ python example_lenna.py 110 | ``` 111 | 112 | #### OR 113 | 114 | ```python 115 | from pycnn import PyCNN 116 | 117 | cnn = PyCNN() 118 | ``` 119 | 120 | **Input:** 121 | 122 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/lenna.gif) 123 | 124 | **Edge Detection:** 125 | 126 | ```python 127 | cnn.edgeDetection('images/lenna.gif', 'images/lenna_edge.png') 128 | ``` 129 | 130 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/lenna_edge.png) 131 | 132 | **Diagonal line Detection:** 133 | 134 | ```python 135 | cnn.diagonalLineDetection('images/lenna.gif', 'images/lenna_diagonal.png') 136 | ``` 137 | 138 | ![](https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/master/images/lenna_diagonal.png) 139 | 140 | ## Usage 141 | 142 | Import module 143 | 144 | ```python 145 | from pycnn import PyCNN 146 | ``` 147 | 148 | Initialize object 149 | 150 | ```python 151 | cnn = PyCNN() 152 | 153 | # object variables: 154 | # m: width of the image (number of columns) 155 | # n: height of image (number of rows) 156 | ``` 157 | 158 | ```python 159 | # name: name of image processing method (say, Edge detection); type: string 160 | # inputImageLocation: location of the input image; type: string. 161 | # outputImageLocation: location of the output image; type: string. 162 | # tempA_A: feedback template; type: n x n list, e.g. 3 x 3, 5 x 5. 163 | # tempB_B: control template; type: n x n list, e.g. 3 x 3, 5 x 5. 
164 | # initialCondition: initial condition, type: float. 165 | # Ib_b: bias, type: float. 166 | # t: time points for integration, type: ndarray. 167 | # Note: Some image processing methods might need more time point samples than the default. 168 | # Displaying the output at each time point shows how the state evolves until it finally 169 | # converges to the output, which looks pretty cool. 170 | ``` 171 | 172 | General image processing (a complete worked example appears at the end of this README) 173 | 174 | ```python 175 | cnn.generalTemplates(name, inputImageLocation, outputImageLocation, tempA_A, tempB_B, 176 | initialCondition, Ib_b, t) 177 | ``` 178 | 179 | Edge detection 180 | 181 | ```python 182 | cnn.edgeDetection(inputImageLocation, outputImageLocation) 183 | ``` 184 | 185 | Grayscale edge detection 186 | 187 | ```python 188 | cnn.grayScaleEdgeDetection(inputImageLocation, outputImageLocation) 189 | ``` 190 | 191 | Corner detection 192 | 193 | ```python 194 | cnn.cornerDetection(inputImageLocation, outputImageLocation) 195 | ``` 196 | 197 | Diagonal line detection 198 | 199 | ```python 200 | cnn.diagonalLineDetection(inputImageLocation, outputImageLocation) 201 | ``` 202 | 203 | Inversion (Logic NOT) 204 | 205 | ```python 206 | cnn.inversion(inputImageLocation, outputImageLocation) 207 | ``` 208 | 209 | Optimal Edge Detection 210 | 211 | ```python 212 | cnn.optimalEdgeDetection(inputImageLocation, outputImageLocation) 213 | ``` 214 | 215 | ## License 216 | 217 | [MIT License](https://github.com/ankitaggarwal011/PyCNN/blob/master/LICENSE) 218 | 219 | ## Contribute 220 | 221 | Want to work on the project? Any kind of contribution is welcome! 222 | 223 | Follow these steps: 224 | - Fork the project. 225 | - Create a new branch. 226 | - Make your changes and write tests when practical. 227 | - Commit your changes to the new branch. 228 | - Send a pull request.
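
## Appendix: calling generalTemplates directly

The snippet below is a minimal sketch of driving `generalTemplates` with explicit templates instead of the built-in wrappers. The template values, bias, and time points are the ones `pycnn.py` uses for grayscale edge detection, so this call should behave like `cnn.grayScaleEdgeDetection('images/input1.bmp', ...)`; the output path is only a placeholder.

```python
from pycnn import PyCNN
import numpy as np

cnn = PyCNN()

# Feedback (A) and control (B) templates; these particular values are the
# ones the library uses internally for grayscale edge detection.
tempA = [[0.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]]
tempB = [[-1.0, -1.0, -1.0], [-1.0, 8.0, -1.0], [-1.0, -1.0, -1.0]]

# 101 evenly spaced time points between 0 and 1.0 for the ODE integration.
t = np.linspace(0, 1.0, num=101)

cnn.generalTemplates('Grayscale edge detection (manual)', 'images/input1.bmp',
                     'images/output_custom.png', tempA, tempB,
                     initialCondition=0.0, Ib_b=-0.5, t=t)
```

Swapping in other coefficients from the linked Template Library (and adjusting the bias `Ib_b` and the time points `t` accordingly) is all it takes to run a different operation.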
229 | -------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | from pycnn import PyCNN 4 | 5 | # Initialize object 6 | cnn = PyCNN() 7 | 8 | # Perform respective image processing techniques on the given image 9 | 10 | cnn.edgeDetection('images/input1.bmp', 'images/output1.png') 11 | cnn.grayScaleEdgeDetection('images/input1.bmp', 'images/output2.png') 12 | cnn.cornerDetection('images/input1.bmp', 'images/output3.png') 13 | cnn.diagonalLineDetection('images/input1.bmp', 'images/output4.png') 14 | cnn.inversion('images/input1.bmp', 'images/output5.png') 15 | cnn.optimalEdgeDetection('images/input3.bmp', 'images/output6.png') 16 | -------------------------------------------------------------------------------- /example_lenna.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | from pycnn import PyCNN 4 | 5 | # Initialize object 6 | cnn = PyCNN() 7 | 8 | # Perform respective image processing techniques on the given image 9 | 10 | cnn.edgeDetection('images/lenna.gif', 'images/lenna_edge.png') 11 | cnn.diagonalLineDetection('images/lenna.gif', 'images/lenna_diagonal.png') 12 | -------------------------------------------------------------------------------- /images/blockdiagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/blockdiagram.png -------------------------------------------------------------------------------- /images/input1.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input1.bmp -------------------------------------------------------------------------------- /images/input2.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input2.bmp -------------------------------------------------------------------------------- /images/input3.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input3.bmp -------------------------------------------------------------------------------- /images/input4.BMP: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input4.BMP -------------------------------------------------------------------------------- /images/input5.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input5.bmp -------------------------------------------------------------------------------- /images/input6.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input6.bmp -------------------------------------------------------------------------------- /images/input7.bmp: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input7.bmp -------------------------------------------------------------------------------- /images/input8.bmp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/input8.bmp -------------------------------------------------------------------------------- /images/lenna.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/lenna.gif -------------------------------------------------------------------------------- /images/lenna_diagonal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/lenna_diagonal.png -------------------------------------------------------------------------------- /images/lenna_edge.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/lenna_edge.png -------------------------------------------------------------------------------- /images/output1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/output1.png -------------------------------------------------------------------------------- /images/output2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/output2.png -------------------------------------------------------------------------------- /images/output3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/output3.png -------------------------------------------------------------------------------- /images/output4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/output4.png -------------------------------------------------------------------------------- /images/output5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/output5.png -------------------------------------------------------------------------------- /images/output6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ankitaggarwal011/PyCNN/44b07510df689b0fe0241906f79781f777cb2d04/images/output6.png -------------------------------------------------------------------------------- /pycnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | The MIT License (MIT) 6 | 7 | Copyright (c) 2014 Ankit Aggarwal 8 | 9 | Permission is hereby granted, 
free of charge, to any person obtaining a copy 10 | of this software and associated documentation files (the "Software"), to deal 11 | in the Software without restriction, including without limitation the rights 12 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 13 | copies of the Software, and to permit persons to whom the Software is 14 | furnished to do so, subject to the following conditions: 15 | 16 | The above copyright notice and this permission notice shall be included in all 17 | copies or substantial portions of the Software. 18 | 19 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 22 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 25 | SOFTWARE. 26 | """ 27 | 28 | from __future__ import print_function 29 | import scipy.signal as sig 30 | import scipy.integrate as sint 31 | from PIL import Image as img 32 | import numpy as np 33 | import os.path 34 | import warnings 35 | 36 | SUPPORTED_FILETYPES = ( 37 | 'jpeg', 'jpg', 'png', 'tiff', 'gif', 'bmp', 38 | ) 39 | 40 | warnings.filterwarnings('ignore') # Ignore trivial warnings 41 | 42 | 43 | class PyCNN(object): 44 | """Image Processing with Cellular Neural Networks (CNN). 45 | 46 | Cellular Neural Networks (CNN) are a parallel computing paradigm that was 47 | first proposed in 1988. Cellular neural networks are similar to neural 48 | networks, with the difference that communication is allowed only between 49 | neighboring units. Image Processing is one of its applications. CNN 50 | processors were designed to perform image processing; specifically, the 51 | original application of CNN processors was to perform real-time ultra-high 52 | frame-rate (>10,000 frame/s) processing unachievable by digital processors. 53 | 54 | This python library is the implementation of CNN for the application of 55 | Image Processing. 56 | 57 | 58 | Attributes: 59 | n (int): Height of the image. 60 | m (int): Width of the image. 61 | """ 62 | 63 | def __init__(self): 64 | """Sets the initial class attributes m (width) and n (height).""" 65 | self.m = 0 # width (number of columns) 66 | self.n = 0 # height (number of rows) 67 | 68 | def f(self, t, x, Ib, Bu, tempA): 69 | """Computes the derivative of x at t. 70 | 71 | Args: 72 | x: The input. 73 | Ib (float): System bias. 74 | Bu: Convolution of control template with input. 75 | tempA (:obj:`list` of :obj:`list`of :obj:`float`): Feedback 76 | template. 77 | """ 78 | x = x.reshape((self.n, self.m)) 79 | dx = -x + Ib + Bu + sig.convolve2d(self.cnn(x), tempA, 'same') 80 | return dx.reshape(self.m * self.n) 81 | 82 | def cnn(self, x): 83 | """Piece-wise linear sigmoid function. 84 | 85 | Args: 86 | x : Input to the piece-wise linear sigmoid function. 87 | """ 88 | return 0.5 * (abs(x + 1) - abs(x - 1)) 89 | 90 | def validate(self, inputLocation): 91 | """Checks if a string path exists or is from a supported file type. 92 | 93 | Args: 94 | inputLocation (str): A string with the path to the image. 95 | 96 | Raises: 97 | IOError: If `inputLocation` does not exist or is not a file. 98 | Exception: If file type is not supported. 
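Example (a short illustration of the intended use; the paths below are
            placeholders, and the failing path mirrors the one used in
            test/test_misc.py):

            >>> cnn = PyCNN()
            >>> cnn.validate('images/input1.bmp')  # supported file: returns None
            >>> cnn.validate('/not/exist/filename')  # raises IOError('... does not exist.')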
99 | """ 100 | _, ext = os.path.splitext(inputLocation) 101 | ext = ext.lstrip('.').lower() 102 | if not os.path.exists(inputLocation): 103 | raise IOError('File {} does not exist.'.format(inputLocation)) 104 | elif not os.path.isfile(inputLocation): 105 | raise IOError('Path {} is not a file.'.format(inputLocation)) 106 | elif ext not in SUPPORTED_FILETYPES: 107 | raise Exception( 108 | '{} file type is not supported. Supported: {}'.format( 109 | ext, ', '.join(SUPPORTED_FILETYPES) 110 | ) 111 | ) 112 | 113 | # tempA: feedback template, tempB: control template 114 | def imageProcessing(self, inputLocation, outputLocation, 115 | tempA, tempB, initialCondition, Ib, t): 116 | """Process the image with the input arguments. 117 | 118 | Args: 119 | inputLocation (str): The string path for the input image. 120 | outputLocation (str): The string path for the output processed 121 | image. 122 | tempA (:obj:`list` of :obj:`list`of :obj:`float`): Feedback 123 | template. 124 | tempB (:obj:`list` of :obj:`list`of :obj:`float`): Control 125 | template. 126 | initialCondition (float): The initial state. 127 | Ib (float): System bias. 128 | t (numpy.ndarray): A numpy array with evenly spaced numbers 129 | representing time points. 130 | """ 131 | gray = img.open(inputLocation).convert('RGB') 132 | self.m, self.n = gray.size 133 | u = np.array(gray) 134 | u = u[:, :, 0] 135 | z0 = u * initialCondition 136 | Bu = sig.convolve2d(u, tempB, 'same') 137 | z0 = z0.flatten() 138 | tFinal = t.max() 139 | tInitial = t.min() 140 | if t.size > 1: 141 | dt = t[1] - t[0] 142 | else: 143 | dt = t[0] 144 | ode = sint.ode(self.f) \ 145 | .set_integrator('vode') \ 146 | .set_initial_value(z0, tInitial) \ 147 | .set_f_params(Ib, Bu, tempA) 148 | while ode.successful() and ode.t < tFinal + 0.1: 149 | ode_result = ode.integrate(ode.t + dt) 150 | z = self.cnn(ode_result) 151 | out_l = z[:].reshape((self.n, self.m)) 152 | out_l = out_l / (255.0) 153 | out_l = np.uint8(np.round(out_l * 255)) 154 | # The direct vectorization was causing problems on Raspberry Pi. 155 | # In case anyone face a similar issue, use the below 156 | # loops rather than the above direct vectorization. 157 | # for i in range(out_l.shape[0]): 158 | # for j in range(out_l.shape[1]): 159 | # out_l[i][j] = np.uint8(round(out_l[i][j] * 255)) 160 | out_l = img.fromarray(out_l).convert('RGB') 161 | out_l.save(outputLocation) 162 | 163 | # general image processing for given templates 164 | def generalTemplates(self, 165 | name='Image processing', 166 | inputLocation='', 167 | outputLocation='output.png', 168 | tempA_A=[[0.0, 0.0, 0.0], 169 | [0.0, 0.0, 0.0], 170 | [0.0, 0.0, 0.0]], 171 | tempB_B=[[0.0, 0.0, 0.0], 172 | [0.0, 0.0, 0.0], 173 | [0.0, 0.0, 0.0]], 174 | initialCondition=0.0, 175 | Ib_b=0.0, 176 | t=np.linspace(0, 10.0, num=2)): 177 | """Validate and process the image with the input arguments. 178 | 179 | Args: 180 | name (str): The name of the template. 181 | inputLocation (str): The string path for the input image. 182 | outputLocation (str): The string path for the output processed 183 | image. 184 | tempA_A (:obj:`list` of :obj:`list`of :obj:`float`): Feedback 185 | template. 186 | tempB_B (:obj:`list` of :obj:`list`of :obj:`float`): Control 187 | template. 188 | initialCondition (float): The initial state. 189 | Ib_b (float): System bias. 190 | t (numpy.ndarray): A numpy array with evenly spaced numbers 191 | representing time points. 
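Example:
            A minimal sketch; the template, bias, and time-point values below
            are the ones edgeDetection() uses, and the output path is a
            placeholder:

            >>> cnn = PyCNN()
            >>> cnn.generalTemplates(
            ...     'Edge detection', 'images/input1.bmp', 'edges.png',
            ...     tempA_A=[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]],
            ...     tempB_B=[[-1.0, -1.0, -1.0], [-1.0, 8.0, -1.0], [-1.0, -1.0, -1.0]],
            ...     initialCondition=0.0, Ib_b=-1.0, t=np.linspace(0, 10.0, num=2))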
192 | """ 193 | self.validate(inputLocation) 194 | print(name, 'initialized.') 195 | self.imageProcessing(inputLocation, 196 | outputLocation, 197 | np.array(tempA_A), 198 | np.array(tempB_B), 199 | initialCondition, 200 | Ib_b, 201 | t) 202 | print('Processing on image %s is complete' % (inputLocation)) 203 | print('Result is saved at %s.\n' % (outputLocation)) 204 | 205 | def edgeDetection(self, inputLocation='', outputLocation='output.png'): 206 | """Performs Edge Detection on the input image. 207 | 208 | The output is a binary image showing all edges of the input image in 209 | black. 210 | 211 | A = [[0.0 0.0 0.0], 212 | [0.0 1.0 0.0], 213 | [0.0 0.0 0.0]] 214 | 215 | B = [[−1.0 −1.0 −1.0], 216 | [−1.0 8.0 −1.0], 217 | [−1.0 −1.0 −1.0]] 218 | 219 | z = −1.0 220 | 221 | Initial state = 0.0 222 | 223 | Args: 224 | inputLocation (str): The string path for the input image. 225 | outputLocation (str): The string path for the output processed 226 | image. 227 | """ 228 | name = 'Edge detection' 229 | tempA = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]] 230 | tempB = [[-1.0, -1.0, -1.0], [-1.0, 8.0, -1.0], [-1.0, -1.0, -1.0]] 231 | Ib = -1.0 232 | # num refers to the number of samples of time points from start = 0 to 233 | # end = 10.0 234 | t = np.linspace(0, 10.0, num=2) 235 | # some image processing methods might require more time point samples. 236 | initialCondition = 0.0 237 | self.generalTemplates( 238 | name, 239 | inputLocation, 240 | outputLocation, 241 | tempA, 242 | tempB, 243 | initialCondition, 244 | Ib, 245 | t) 246 | 247 | def grayScaleEdgeDetection(self, inputLocation='', 248 | outputLocation='output.png'): 249 | """Performs Gray-scale Edge Detection on the input image. 250 | 251 | The output is a Gray-scale image showing an edge map of the input 252 | image in black. 253 | 254 | A = [[0.0 0.0 0.0], 255 | [0.0 2.0 0.0], 256 | [0.0 0.0 0.0]] 257 | 258 | B = [[−1.0 −1.0 −1.0], 259 | [−1.0 8.0 −1.0], 260 | [−1.0 −1.0 −1.0]] 261 | 262 | z = −0.5 263 | 264 | Initial state = 0.0 265 | 266 | Args: 267 | inputLocation (str): The string path for the input image. 268 | outputLocation (str): The string path for the output processed 269 | image. 270 | """ 271 | name = 'Grayscale edge detection' 272 | tempA = [[0.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 0.0]] 273 | tempB = [[-1.0, -1.0, -1.0], [-1.0, 8.0, -1.0], [-1.0, -1.0, -1.0]] 274 | Ib = -0.5 275 | t = np.linspace(0, 1.0, num=101) 276 | initialCondition = 0.0 277 | self.generalTemplates( 278 | name, 279 | inputLocation, 280 | outputLocation, 281 | tempA, 282 | tempB, 283 | initialCondition, 284 | Ib, 285 | t) 286 | 287 | def cornerDetection(self, inputLocation='', outputLocation='output.png'): 288 | """Performs Corner Detection on the input image. 289 | 290 | The output is a binary image where black pixels represent the convex 291 | corners of objects in the input image. 292 | 293 | A = [[0.0 0.0 0.0], 294 | [0.0 1.0 0.0], 295 | [0.0 0.0 0.0]] 296 | 297 | B = [[−1.0 −1.0 −1.0], 298 | [−1.0 4.0 −1.0], 299 | [−1.0 −1.0 −1.0]] 300 | 301 | z = −5.0 302 | 303 | Initial state = 0.0 304 | 305 | Args: 306 | inputLocation (str): The string path for the input image. 307 | outputLocation (str): The string path for the output processed 308 | image. 
309 | """ 310 | name = 'Corner detection' 311 | tempA = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]] 312 | tempB = [[-1.0, -1.0, -1.0], [-1.0, 4.0, -1.0], [-1.0, -1.0, -1.0]] 313 | Ib = -5.0 314 | t = np.linspace(0, 10.0, num=11) 315 | initialCondition = 0.0 316 | self.generalTemplates( 317 | name, 318 | inputLocation, 319 | outputLocation, 320 | tempA, 321 | tempB, 322 | initialCondition, 323 | Ib, 324 | t) 325 | 326 | def diagonalLineDetection(self, inputLocation='', 327 | outputLocation='output.png'): 328 | """Performs Diagonal Line Detection on the input image. 329 | 330 | The output is a binary image representing the locations of diagonal 331 | lines in the input image. 332 | 333 | A = [[0.0 0.0 0.0], 334 | [0.0 1.0 0.0], 335 | [0.0 0.0 0.0]] 336 | 337 | B = [[−1.0 0.0 1.0], 338 | [0.0 1.0 0.0], 339 | [1.0 0.0 −1.0]] 340 | 341 | z = −4.0 342 | 343 | Initial state = 0.0 344 | 345 | Args: 346 | inputLocation (str): The string path for the input image. 347 | outputLocation (str): The string path for the output processed 348 | image. 349 | """ 350 | name = 'Diagonal line detection' 351 | tempA = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]] 352 | tempB = [[-1.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, -1.0]] 353 | Ib = -4.0 354 | t = np.linspace(0, 0.2, num=101) 355 | initialCondition = 0.0 356 | self.generalTemplates( 357 | name, 358 | inputLocation, 359 | outputLocation, 360 | tempA, 361 | tempB, 362 | initialCondition, 363 | Ib, 364 | t) 365 | 366 | def inversion(self, inputLocation='', outputLocation='output.png'): 367 | """Performs Inversion (Logic NOT) on the input image. 368 | 369 | A = [[0.0 0.0 0.0], 370 | [0.0 1.0 0.0], 371 | [0.0 0.0 0.0]] 372 | 373 | B = [[0.0 0.0 0.0], 374 | [1.0 1.0 1.0], 375 | [0.0 0.0 0.0]] 376 | 377 | z = −2.0 378 | 379 | Initial state = 0.0 380 | 381 | Args: 382 | inputLocation (str): The string path for the input image. 383 | outputLocation (str): The string path for the output processed 384 | image. 385 | """ 386 | name = 'Inversion' 387 | tempA = [[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]] 388 | tempB = [[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [0.0, 0.0, 0.0]] 389 | Ib = -2.0 390 | t = np.linspace(0, 10.0, num=101) 391 | initialCondition = 0.0 392 | self.generalTemplates( 393 | name, 394 | inputLocation, 395 | outputLocation, 396 | tempA, 397 | tempB, 398 | initialCondition, 399 | Ib, 400 | t) 401 | 402 | def optimalEdgeDetection(self, inputLocation='', 403 | outputLocation='output.png'): 404 | """Performs Optimal Edge Detection on the input image. 405 | 406 | A = [[0.0 0.0 0.0], 407 | [0.0 0.0 0.0], 408 | [0.0 0.0 0.0]] 409 | 410 | B = [[-0.11 0.0 0.11], 411 | [-0.28 0.0 0.28], 412 | [-0.11 0.0 0.11]] 413 | 414 | z = 0.0 415 | 416 | Initial state = 0.0 417 | 418 | Args: 419 | inputLocation (str): The string path for the input image. 420 | outputLocation (str): The string path for the output processed 421 | image.
422 | """ 423 | name = 'Optimal Edge Detection' 424 | tempA = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]] 425 | tempB = [[-0.11, 0.0, 0.11], [-0.28, 0.0, 0.28], [-0.11, 0.0, 0.11]] 426 | Ib = 0.0 427 | t = np.linspace(0, 10.0, num=101) 428 | initialCondition = 0.0 429 | self.generalTemplates( 430 | name, 431 | inputLocation, 432 | outputLocation, 433 | tempA, 434 | tempB, 435 | initialCondition, 436 | Ib, 437 | t) 438 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pillow==3.3.2 2 | numpy==1.11.1 3 | scipy==0.18.0 4 | -------------------------------------------------------------------------------- /test/test_misc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | import tempfile 4 | import shutil 5 | 6 | from pycnn import PyCNN 7 | 8 | BASE_DIR = os.path.abspath(os.path.join( 9 | os.path.join(os.path.dirname(__file__)), 10 | '..', 11 | )) 12 | 13 | 14 | class TestMiscFunctions(unittest.TestCase): 15 | 16 | def setUp(self): 17 | self.tempdir = tempfile.mkdtemp() 18 | self.cnn = PyCNN() 19 | self.exits_file_name = os.path.join(self.tempdir, 'exist.bmp') 20 | open(self.exits_file_name, 'w').close() 21 | self.not_supported_file = os.path.join(self.tempdir, 'exist.jp2') 22 | open(self.not_supported_file, 'w').close() 23 | 24 | def tearDown(self): 25 | if os.path.exists(self.tempdir): 26 | shutil.rmtree(self.tempdir) 27 | 28 | def test_is_valid(self): 29 | self.cnn.validate(self.exits_file_name) 30 | with self.assertRaisesRegexp(IOError, 'does not exist'): 31 | self.cnn.validate('/not/exist/filename') 32 | with self.assertRaisesRegexp(IOError, 'is not a file'): 33 | self.cnn.validate(self.tempdir) 34 | with self.assertRaisesRegexp(Exception, 'is not supported'): 35 | self.cnn.validate(self.not_supported_file) 36 | -------------------------------------------------------------------------------- /test/test_processing.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | import unittest 4 | import filecmp 5 | 6 | from pycnn import PyCNN 7 | 8 | IMAGE_DIR = os.path.abspath(os.path.join( 9 | os.path.join(os.path.dirname(__file__)), 10 | '..', 11 | 'images', 12 | )) 13 | 14 | 15 | class TestProcessing(unittest.TestCase): 16 | 17 | def setUp(self): 18 | self.cnn = PyCNN() 19 | self.input1 = os.path.join(IMAGE_DIR, 'input1.bmp') 20 | self.input3 = os.path.join(IMAGE_DIR, 'input3.bmp') 21 | self.output = os.path.join( 22 | tempfile.gettempdir(), 23 | 'cnn_output.png', 24 | ) 25 | 26 | def tearDown(self): 27 | if os.path.exists(self.output): 28 | os.remove(self.output) 29 | 30 | def test_edgeDetection(self): 31 | self.cnn.edgeDetection(self.input1, self.output) 32 | self.assertTrue(filecmp.cmp( 33 | self.output, os.path.join(IMAGE_DIR, 'output1.png'), 34 | )) 35 | 36 | def test_grayScaleEdgeDetection(self): 37 | self.cnn.grayScaleEdgeDetection(self.input1, self.output) 38 | self.assertTrue(filecmp.cmp( 39 | self.output, os.path.join(IMAGE_DIR, 'output2.png'), 40 | )) 41 | 42 | def test_cornerDetection(self): 43 | self.cnn.cornerDetection(self.input1, self.output) 44 | self.assertTrue(filecmp.cmp( 45 | self.output, os.path.join(IMAGE_DIR, 'output3.png'), 46 | )) 47 | 48 | def test_diagonalLineDetection(self): 49 | self.cnn.diagonalLineDetection(self.input1, self.output) 50 | self.assertTrue(filecmp.cmp( 51 | self.output, 
os.path.join(IMAGE_DIR, 'output4.png'), 52 | )) 53 | 54 | def test_inversion(self): 55 | self.cnn.inversion(self.input1, self.output) 56 | self.assertTrue(filecmp.cmp( 57 | self.output, os.path.join(IMAGE_DIR, 'output5.png'), 58 | )) 59 | 60 | def test_optimalEdgeDetection(self): 61 | self.cnn.optimalEdgeDetection(self.input3, self.output) 62 | self.assertTrue(filecmp.cmp( 63 | self.output, os.path.join(IMAGE_DIR, 'output6.png'), 64 | )) 65 | 66 | 67 | class TestProcessingLena(unittest.TestCase): 68 | 69 | def setUp(self): 70 | self.cnn = PyCNN() 71 | self.input = os.path.join(IMAGE_DIR, 'lenna.gif') 72 | self.output = os.path.join( 73 | tempfile.gettempdir(), 74 | 'cnn_output.png', 75 | ) 76 | 77 | def test_edgeDetection(self): 78 | self.cnn.edgeDetection(self.input, self.output) 79 | self.assertTrue(filecmp.cmp( 80 | self.output, os.path.join(IMAGE_DIR, 'lenna_edge.png'), 81 | )) 82 | 83 | def test_diagonalLineDetection(self): 84 | self.cnn.diagonalLineDetection(self.input, self.output) 85 | self.assertTrue(filecmp.cmp( 86 | self.output, os.path.join(IMAGE_DIR, 'lenna_diagonal.png'), 87 | )) 88 | --------------------------------------------------------------------------------