├── images
├── gitignore
├── 1.jpg
├── 1.png
├── 2.png
├── 3.png
├── rf.png
├── 100.png
├── 101.png
├── test.png
├── train.png
├── center.png
├── graph1.png
├── graph2.png
├── graph3.png
├── graph4.png
├── neuron1.png
├── neuron2.png
├── neuron3.png
├── neuron4.png
├── spikes.jpg
├── figure_1.png
├── figure_11.png
├── figure_12.png
├── figure_13.png
├── figure_14.png
├── figure_2.png
├── figure_3.png
├── figure_4.png
├── imp_train.png
├── stdp_curve.jpg
└── classify_neurons
│ ├── 1.JPG
│ ├── 2.JPG
│ ├── 3.JPG
│ ├── 4.JPG
│ ├── 5.JPG
│ ├── 6.JPG
│ ├── 7.JPG
│ └── 8.JPG
├── training
├── gitignore
├── parameters.py
├── rl.py
├── neuron.py
├── var_th.py
├── reconstruct.py
├── recep_field.py
├── spike_train.py
└── learning.py
├── classification
├── gitignore
├── neuron.pyc
├── recep_field.pyc
├── spike_train.pyc
├── training_images
│ ├── 1.png
│ ├── 2.png
│ ├── 3.png
│ ├── 4.png
│ ├── 5.png
│ └── 6.png
├── weight_initialization.pyc
├── weight_initialization.py
├── neuron.py
├── recep_field.py
├── spike_train.py
└── classify.py
├── temp-snn
├── training
│ ├── gitignore
│ └── learning.py
├── classification
│ ├── gitignore
│ ├── weight_initialization.py
│ └── classify.py
├── docs
│ ├── images
│ │ ├── gitignore
│ │ ├── 1.jpg
│ │ ├── 1.png
│ │ ├── 2.png
│ │ ├── 3.png
│ │ ├── 100.png
│ │ ├── 101.png
│ │ ├── rf.png
│ │ ├── center.png
│ │ ├── graph1.png
│ │ ├── graph2.png
│ │ ├── graph3.png
│ │ ├── graph4.png
│ │ ├── spikes.jpg
│ │ ├── test.png
│ │ ├── train.png
│ │ ├── figure_1.png
│ │ ├── figure_2.png
│ │ ├── figure_3.png
│ │ ├── figure_4.png
│ │ ├── neuron1.png
│ │ ├── neuron2.png
│ │ ├── neuron3.png
│ │ ├── neuron4.png
│ │ ├── figure_11.png
│ │ ├── figure_12.png
│ │ ├── figure_13.png
│ │ ├── figure_14.png
│ │ ├── imp_train.png
│ │ └── stdp_curve.jpg
│ └── README.md
├── snn
│ ├── __init__.py
│ ├── test
│ │ ├── 0.png
│ │ ├── 1.png
│ │ └── 2.png
│ ├── training
│ │ ├── 0.png
│ │ ├── 1.png
│ │ └── 2.png
│ ├── rl.py
│ ├── parameters.py
│ ├── var_th.py
│ ├── neuron.py
│ ├── weight_initialization.py
│ ├── reconstruct.py
│ ├── recep_field.py
│ ├── classify.py
│ ├── spike_train.py
│ ├── learning.py
│ ├── weights.txt
│ └── weights_training.txt
├── .gitignore
├── data
│ ├── test
│ │ ├── 0.png
│ │ ├── 1.png
│ │ └── 2.png
│ └── training
│ │ ├── 0.png
│ │ ├── 1.png
│ │ └── 2.png
├── README.md
├── recreate.py
└── weights.txt
├── neuron
├── neuron.png
├── spikes.png
├── neuron.py
└── README.md
├── multi_layer
├── weights
│ └── .gitignore
├── training_images
│ ├── 1.png
│ ├── 2.png
│ ├── 3.png
│ ├── 4.png
│ ├── 5.png
│ └── 6.png
├── rl.py
├── neuron.py
├── var_th.py
├── parameters.py
├── recep_field.py
├── reconstruct.py
├── spike_train.py
└── learning.py
├── receptive_field
├── README.md
└── receptive_field.py
├── synapse
├── README.md
└── synapse.py
├── encoding
├── README.md
└── spike_train.py
├── README.md
└── LICENSE
/images/gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/training/gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/classification/gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/temp-snn/training/gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/temp-snn/classification/gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/temp-snn/docs/images/gitignore:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/temp-snn/snn/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 |
--------------------------------------------------------------------------------
/images/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/1.jpg
--------------------------------------------------------------------------------
/images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/1.png
--------------------------------------------------------------------------------
/images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/2.png
--------------------------------------------------------------------------------
/images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/3.png
--------------------------------------------------------------------------------
/images/rf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/rf.png
--------------------------------------------------------------------------------
/images/100.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/100.png
--------------------------------------------------------------------------------
/images/101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/101.png
--------------------------------------------------------------------------------
/images/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/test.png
--------------------------------------------------------------------------------
/images/train.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/train.png
--------------------------------------------------------------------------------
/images/center.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/center.png
--------------------------------------------------------------------------------
/images/graph1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/graph1.png
--------------------------------------------------------------------------------
/images/graph2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/graph2.png
--------------------------------------------------------------------------------
/images/graph3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/graph3.png
--------------------------------------------------------------------------------
/images/graph4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/graph4.png
--------------------------------------------------------------------------------
/images/neuron1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/neuron1.png
--------------------------------------------------------------------------------
/images/neuron2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/neuron2.png
--------------------------------------------------------------------------------
/images/neuron3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/neuron3.png
--------------------------------------------------------------------------------
/images/neuron4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/neuron4.png
--------------------------------------------------------------------------------
/images/spikes.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/spikes.jpg
--------------------------------------------------------------------------------
/neuron/neuron.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/neuron/neuron.png
--------------------------------------------------------------------------------
/neuron/spikes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/neuron/spikes.png
--------------------------------------------------------------------------------
/images/figure_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_1.png
--------------------------------------------------------------------------------
/images/figure_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_11.png
--------------------------------------------------------------------------------
/images/figure_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_12.png
--------------------------------------------------------------------------------
/images/figure_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_13.png
--------------------------------------------------------------------------------
/images/figure_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_14.png
--------------------------------------------------------------------------------
/images/figure_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_2.png
--------------------------------------------------------------------------------
/images/figure_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_3.png
--------------------------------------------------------------------------------
/images/figure_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/figure_4.png
--------------------------------------------------------------------------------
/images/imp_train.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/imp_train.png
--------------------------------------------------------------------------------
/images/stdp_curve.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/stdp_curve.jpg
--------------------------------------------------------------------------------
/multi_layer/weights/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore everything in this directory
2 | *
3 | # Except this file
4 | !.gitignore
5 |
--------------------------------------------------------------------------------
/temp-snn/snn/test/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/snn/test/0.png
--------------------------------------------------------------------------------
/temp-snn/snn/test/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/snn/test/1.png
--------------------------------------------------------------------------------
/temp-snn/snn/test/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/snn/test/2.png
--------------------------------------------------------------------------------
/classification/neuron.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/neuron.pyc
--------------------------------------------------------------------------------
/temp-snn/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | *$py.class
4 |
5 | .spyderproject
6 | .spyproject
7 | .DS_Store
8 |
--------------------------------------------------------------------------------
/temp-snn/data/test/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/data/test/0.png
--------------------------------------------------------------------------------
/temp-snn/data/test/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/data/test/1.png
--------------------------------------------------------------------------------
/temp-snn/data/test/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/data/test/2.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/1.jpg
--------------------------------------------------------------------------------
/temp-snn/docs/images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/1.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/2.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/3.png
--------------------------------------------------------------------------------
/temp-snn/data/training/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/data/training/0.png
--------------------------------------------------------------------------------
/temp-snn/data/training/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/data/training/1.png
--------------------------------------------------------------------------------
/temp-snn/data/training/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/data/training/2.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/100.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/100.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/101.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/101.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/rf.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/rf.png
--------------------------------------------------------------------------------
/temp-snn/snn/training/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/snn/training/0.png
--------------------------------------------------------------------------------
/temp-snn/snn/training/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/snn/training/1.png
--------------------------------------------------------------------------------
/temp-snn/snn/training/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/snn/training/2.png
--------------------------------------------------------------------------------
/classification/recep_field.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/recep_field.pyc
--------------------------------------------------------------------------------
/classification/spike_train.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/spike_train.pyc
--------------------------------------------------------------------------------
/images/classify_neurons/1.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/1.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/2.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/2.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/3.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/3.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/4.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/4.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/5.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/5.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/6.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/6.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/7.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/7.JPG
--------------------------------------------------------------------------------
/images/classify_neurons/8.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/images/classify_neurons/8.JPG
--------------------------------------------------------------------------------
/temp-snn/docs/images/center.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/center.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/graph1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/graph1.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/graph2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/graph2.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/graph3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/graph3.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/graph4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/graph4.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/spikes.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/spikes.jpg
--------------------------------------------------------------------------------
/temp-snn/docs/images/test.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/test.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/train.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/train.png
--------------------------------------------------------------------------------
/multi_layer/training_images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/multi_layer/training_images/1.png
--------------------------------------------------------------------------------
/multi_layer/training_images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/multi_layer/training_images/2.png
--------------------------------------------------------------------------------
/multi_layer/training_images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/multi_layer/training_images/3.png
--------------------------------------------------------------------------------
/multi_layer/training_images/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/multi_layer/training_images/4.png
--------------------------------------------------------------------------------
/multi_layer/training_images/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/multi_layer/training_images/5.png
--------------------------------------------------------------------------------
/multi_layer/training_images/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/multi_layer/training_images/6.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_1.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_2.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_3.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_4.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/neuron1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/neuron1.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/neuron2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/neuron2.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/neuron3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/neuron3.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/neuron4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/neuron4.png
--------------------------------------------------------------------------------
/classification/training_images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/training_images/1.png
--------------------------------------------------------------------------------
/classification/training_images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/training_images/2.png
--------------------------------------------------------------------------------
/classification/training_images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/training_images/3.png
--------------------------------------------------------------------------------
/classification/training_images/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/training_images/4.png
--------------------------------------------------------------------------------
/classification/training_images/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/training_images/5.png
--------------------------------------------------------------------------------
/classification/training_images/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/training_images/6.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_11.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_11.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_12.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_12.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_13.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_13.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/figure_14.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/figure_14.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/imp_train.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/imp_train.png
--------------------------------------------------------------------------------
/temp-snn/docs/images/stdp_curve.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/temp-snn/docs/images/stdp_curve.jpg
--------------------------------------------------------------------------------
/classification/weight_initialization.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Shikhargupta/Spiking-Neural-Network/HEAD/classification/weight_initialization.pyc
--------------------------------------------------------------------------------
/receptive_field/README.md:
--------------------------------------------------------------------------------
1 | # Receptive Field
2 |
Receptive field is an area in which stimulation leads to the response of a particular sensory neuron. In the case of an SNN, where the input is an image, the receptive field of a sensory neuron is the part of the image which increases its membrane potential. Here an on-centered receptive field is used.
4 |
5 |
6 |
To realise an on-centered receptive field, a sliding window is used whose cells are weighted according to the [Manhattan Distance](https://xlinux.nist.gov/dads/HTML/manhattanDistance.html) from the centre of the window. The fields for different neurons are overlapping.
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/classification/weight_initialization.py:
--------------------------------------------------------------------------------
1 | ################################ README ########################################
2 | # This file is used to initialize the network with trained weights.'image_names'
3 | # consists of names of the images that are needed to be read.
4 | ################################################################################
5 |
6 | import numpy as np
7 | import imageio
8 |
def learned_weights():
    """Build initial synapse weights from the six trained weight images.

    Black pixels (value 0) map to an inhibitory weight of -0.7; every
    other pixel maps to an excitatory weight of 1.  Returns one flat
    weight list per image, in the order 1.png .. 6.png.
    """
    weights = []
    for name in ["1", "2", "3", "4", "5", "6"]:
        img = imageio.imread("training_images/" + name + ".png")
        weights.append([-0.7 if pixel == 0 else 1 for row in img for pixel in row])
    return weights
23 |
if __name__ == '__main__':
    # Smoke test: dump the parsed weight lists.
    # Fix: the Python-2-only ``print a`` statement is a SyntaxError under
    # Python 3; the print() form works under both (matches the temp-snn copy).
    a = learned_weights()
    print(a)
27 |
--------------------------------------------------------------------------------
/synapse/README.md:
--------------------------------------------------------------------------------
1 | ## Synapse
2 |
3 | In neurobiology synapse is a junction between two nerve cells, consisting of a minute gap across which impulses pass by diffusion of a neurotransmitter.
4 | In an SNN, synapse is the weighted path for generated spikes from one neuron to the other connected neurons.
5 |
6 |
7 |
8 |
9 | This is the implementation of a simple network of 2 layers with 5 neurons in the first layer and 3 in the second as shown in the figure. Each neuron in the first layer is connected to all the neurons in the second layer via synapse. Synapses are realised by a 2D matrix of size (5x3) initialised with random weights.
10 |
11 | This provides a framework for the SNN with learned weights so that it can be used for classification (or prediction). It can be expanded to any number of layers with any number of neurons in it.
12 |
--------------------------------------------------------------------------------
/training/parameters.py:
--------------------------------------------------------------------------------
1 | ################################################ README #########################################################
2 |
3 | # This file contains all the parameters of the network.
4 |
5 | #################################################################################################################
6 |
class param:
    """Hyper-parameters for training the spiking network."""
    scale = 1          # global scaling factor applied to potentials/weights
    T = 200            # simulation time units per image
    t_back = -20       # STDP window: how far back in time to look
    t_fore = 20        # STDP window: how far forward in time to look

    pixel_x = 28       # input image side length (28x28)
    Prest = 0          # resting membrane potential
    m = pixel_x*pixel_x #Number of neurons in first layer
    n = 3 #Number of neurons in second layer
    Pmin = -500*scale  # floor on membrane potential
    # Pth = 5
    # D = 0.7
    w_max = 1.5*scale  # upper synaptic weight bound
    w_min = -1.2*scale # lower synaptic weight bound
    sigma = 0.1 #0.02  # learning-rate factor for STDP updates
    A_plus = 0.8 # time difference is positive i.e negative reinforcement
    A_minus = 0.3 # 0.01 # time difference is negative i.e positive reinforcement
    tau_plus = 8       # decay constant of the depression branch
    tau_minus = 5      # decay constant of the potentiation branch

    epoch = 12         # number of training epochs


    fr_bits = 12       # fractional bits (fixed-point export)
    int_bits = 12      # integer bits (fixed-point export)
--------------------------------------------------------------------------------
/multi_layer/rl.py:
--------------------------------------------------------------------------------
1 | ########################################################## README ###########################################################
2 |
3 | # This file implements STDP curve and weight update rule
4 |
5 | ##############################################################################################################################
6 |
7 |
8 |
9 | import numpy as np
10 | from matplotlib import pyplot as plt
11 | from parameters import param as par
12 |
13 | #STDP reinforcement learning curve
def rl(t):
    """STDP curve: weight-change magnitude for spike-time difference ``t``.

    Positive ``t`` yields depression (negative return), non-positive ``t``
    yields potentiation (positive return).
    """
    if t > 0:
        return -par.A_plus * np.exp(-float(t) / par.tau_plus)
    return par.A_minus * np.exp(float(t) / par.tau_minus)
20 |
21 |
22 | #STDP weight update rule
def update(w, del_w):
    """STDP weight update: soft-bounded move of weight ``w`` by ``del_w``.

    Negative ``del_w`` pushes ``w`` toward ``w_min``; positive pushes it
    toward ``w_max``.  Fix: the original returned ``None`` when
    ``del_w == 0``; the weight is now returned unchanged in that case.
    """
    if del_w < 0:
        return w + par.sigma * del_w * (w - abs(par.w_min)) * par.scale
    if del_w > 0:
        return w + par.sigma * del_w * (par.w_max - w) * par.scale
    return w
28 |
if __name__ == '__main__':
    # Sanity check: scaled potentiation for a -20 time difference.
    # Fix: py2-only ``print x`` statement replaced by print() (py2+py3 safe).
    print(rl(-20) * par.sigma)
32 |
33 |
--------------------------------------------------------------------------------
/temp-snn/snn/rl.py:
--------------------------------------------------------------------------------
1 | ########################################################## README ###########################################################
2 |
3 | # This file implements STDP curve and weight update rule
4 |
5 | ##############################################################################################################################
6 |
7 |
8 |
9 | import numpy as np
10 | from matplotlib import pyplot as plt
11 | from parameters import param as par
12 |
13 | #STDP reinforcement learning curve
def rl(t):
    """Return the STDP weight change for a pre/post spike-time gap ``t``.

    A positive gap depresses the synapse (negative value); a zero or
    negative gap potentiates it (positive value).
    """
    if t > 0:
        return -par.A_plus * np.exp(-float(t) / par.tau_plus)
    return par.A_minus * np.exp(float(t) / par.tau_minus)
20 |
21 |
22 | #STDP weight update rule
def update(w, del_w):
    """Apply a soft-bounded STDP update of ``del_w`` to weight ``w``.

    Negative ``del_w`` moves ``w`` toward ``w_min``, positive toward
    ``w_max``.  Fix: previously a ``del_w`` of exactly 0 fell through and
    returned ``None``; ``w`` is now returned unchanged.
    """
    if del_w < 0:
        return w + par.sigma * del_w * (w - abs(par.w_min)) * par.scale
    if del_w > 0:
        return w + par.sigma * del_w * (par.w_max - w) * par.scale
    return w
28 |
if __name__ == '__main__':
    # Quick sanity check of the potentiation branch of the STDP curve.
    value = rl(-20) * par.sigma
    print(value)
32 |
33 |
--------------------------------------------------------------------------------
/training/rl.py:
--------------------------------------------------------------------------------
1 | ########################################################## README ###########################################################
2 |
3 | # This file implements STDP curve and weight update rule
4 |
5 | ##############################################################################################################################
6 |
7 |
8 |
9 | import numpy as np
10 | from matplotlib import pyplot as plt
11 | from parameters import param as par
12 |
13 | #STDP reinforcement learning curve
def rl(t):
    """STDP learning curve.

    Returns the (signed) weight-change magnitude for a spike-time
    difference ``t``: depression for ``t > 0``, potentiation otherwise.
    """
    if t > 0:
        return -par.A_plus * np.exp(-float(t) / par.tau_plus)
    return par.A_minus * np.exp(float(t) / par.tau_minus)
20 |
21 |
22 | #STDP weight update rule
def update(w, del_w):
    """Soft-bounded STDP weight update.

    Negative ``del_w`` pulls ``w`` toward ``w_min``; positive pulls it
    toward ``w_max``.  Fix: ``del_w == 0`` previously returned ``None``;
    now the weight is returned unchanged.
    """
    if del_w < 0:
        return w + par.sigma * del_w * (w - abs(par.w_min)) * par.scale
    if del_w > 0:
        return w + par.sigma * del_w * (par.w_max - w) * par.scale
    return w
28 |
if __name__ == '__main__':
    # Sanity check: scaled potentiation for a -20 time difference.
    # Fix: py2-only ``print x`` statement replaced by print() (py2+py3 safe).
    print(rl(-20) * par.sigma)
32 |
33 |
--------------------------------------------------------------------------------
/training/neuron.py:
--------------------------------------------------------------------------------
1 | ############################################################ README ##############################################################
2 |
3 | # This is neuron class which defines the dynamics of a neuron. All the parameters are initialised and methods are included to check
4 | # for spikes and apply lateral inhibition.
5 |
6 | ###################################################################################################################################
7 |
8 | import numpy as np
9 | import random
10 | from matplotlib import pyplot as plt
11 | from parameters import param as par
12 |
class neuron:
    """Leaky integrate-and-fire neuron for the training network.

    NOTE: the firing threshold ``Pth`` is NOT set in ``__init__``; callers
    must invoke ``initial(th)`` before the first ``check()``, otherwise
    ``check()`` raises AttributeError.
    """
    def __init__(self):
        self.t_ref = 30     # refractory period (time units)
        self.t_rest = -1    # time until which the neuron rests (-1 = none)
        self.P = par.Prest  # membrane potential starts at rest
    def check(self):
        """Return 1 and reset the potential if threshold was crossed, else 0."""
        if self.P>= self.Pth:
            self.P = par.Prest
            return 1
        elif self.P < par.Pmin:
            # Clamp run-away negative potential back to rest.
            self.P = par.Prest
            return 0
        else:
            return 0
    def inhibit(self):
        """Lateral inhibition: drive the potential down to the minimum."""
        self.P = par.Pmin
    def initial(self, th):
        """(Re)initialise the neuron with firing threshold ``th``."""
        self.Pth = th
        self.t_rest = -1
        self.P = par.Prest
--------------------------------------------------------------------------------
/multi_layer/neuron.py:
--------------------------------------------------------------------------------
1 | ############################################################ README ##############################################################
2 |
3 | # This is neuron class which defines the dynamics of a neuron. All the parameters are initialised and methods are included to check
4 | # for spikes and apply lateral inhibition.
5 |
6 | ###################################################################################################################################
7 |
8 | import numpy as np
9 | import random
10 | from matplotlib import pyplot as plt
11 | from parameters import param as par
12 |
class neuron:
    """Leaky integrate-and-fire neuron (multi-layer network variant).

    The threshold ``Pth`` is assigned only by ``initial()``; call it before
    ``check()``.  Spiking here requires ``P`` to strictly exceed ``Pth``.
    """

    def __init__(self):
        self.t_ref = 30     # refractory period
        self.t_rest = -1    # resting-until timestamp (-1 means not resting)
        self.P = par.Prest  # membrane potential

    def check(self):
        """Return 1 (and reset the potential) on a spike, 0 otherwise."""
        fired = self.P > self.Pth
        if fired or self.P < par.Pmin:
            self.P = par.Prest
        return 1 if fired else 0

    def inhibit(self):
        """Lateral inhibition: force the potential to its floor."""
        self.P = par.Pmin

    def initial(self, th):
        """Reset the neuron state and install threshold ``th``."""
        self.Pth = th
        self.t_rest = -1
        self.P = par.Prest
33 |
--------------------------------------------------------------------------------
/temp-snn/snn/parameters.py:
--------------------------------------------------------------------------------
1 | ################################################ README #########################################################
2 |
3 | # This file contains all the parameters of the network.
4 |
5 | #################################################################################################################
6 |
class param:
    """Hyper-parameters of the SNN library network."""
    scale = 1          # global scaling factor applied to potentials/weights
    T = 150            # simulation time units per image
    t_back = -20       # STDP window: how far back in time to look
    t_fore = 20        # STDP window: how far forward in time to look

    #pixel_x = 28
    pixel_x = 16       # input image side length (16x16)
    m = pixel_x*pixel_x #Number of neurons in first layer
    n = 3 #Number of neurons in second layer
    Pref = 0.          # potential immediately after a spike
    Prest = 0.         # resting membrane potential
    Pmin = -5.0*scale  # floor on membrane potential
    Pth = 50.0*scale   # firing threshold
    D = 0.75*scale     # leak per time step

    w_max = 2.0*scale  # upper synaptic weight bound
    w_min = -1.2*scale # lower synaptic weight bound
    sigma = 0.02 #0.02 # learning-rate factor for STDP updates
    A_plus = 0.8 # time difference is positive i.e negative reinforcement
    A_minus = 0.3 # 0.01 # time difference is negative i.e positive reinforcement
    tau_plus = 10      # decay constant of the depression branch
    tau_minus = 10     # decay constant of the potentiation branch

    epoch = 20         # number of training epochs


    fr_bits = 12       # fractional bits (fixed-point export)
    int_bits = 12      # integer bits (fixed-point export)
36 |
--------------------------------------------------------------------------------
/classification/neuron.py:
--------------------------------------------------------------------------------
1 | ########################### README #############################################
2 | # This is a neuron class which holds all the parameters and functions associated
3 | # with a neuron of the network.
4 | ################################################################################
5 |
6 | import numpy as np
7 |
# Module-level neuron parameters shared by every neuron instance.
global Pref, Pmin, Pth, D, Prest
Pref = 0      # potential right after a spike is emitted
Prest = 0     # resting potential
Pmin = -1     # lower bound on membrane potential
Pth = 140     # firing threshold (should be Pth = 6 for deterministic spike train)
D = 0.5       # leak per time step

class neuron:
    """Leaky integrate-and-fire neuron used by the classifier."""

    def __init__(self):
        self.Pth = Pth
        self.t_ref = 4        # refractory period length
        self.t_rest = -1      # time until which the neuron is resting
        self.P = Prest
        self.D = D
        self.Pmin = Pmin
        self.Prest = Prest

    def check(self):
        """Return 1 and reset if the membrane potential crossed threshold."""
        if self.P >= Pth:
            self.P = Pref
            return 1
        if self.P < Pmin:
            # Clamp a run-away negative potential back to rest.
            self.P = Prest
        return 0

    def inhibit(self):
        """Lateral inhibition: clamp the potential to its minimum."""
        self.P = Pmin

    def initial(self):
        """Reset the neuron to its resting state."""
        self.t_rest = -1
        self.P = Prest
40 |
--------------------------------------------------------------------------------
/neuron/neuron.py:
--------------------------------------------------------------------------------
1 | from numpy import *
2 | import random
3 | from matplotlib import pyplot as plt
4 |
#defining time scale
T = 50;            # total simulation time (time units)
dt = 0.125;        # integration step
time = arange(0, T+dt, dt)

#generating random spike train to be fed to neuron
S = []
for k in range(len(time)):
    a = random.randrange(0,2)   # Bernoulli(0.5): spike or no spike each step
    S.append(a)

#initialising membrane potential vector
Pn = zeros(len(time))

#defining other parameters
Pref = 0 #resting potential
Pmin = -1 #minimum potential
Pth = 25 #threshold
D = 0.25 #leakage factor
Pspike = 4 #spike potential (overshoot added on firing)

count = 0 #refractory counter (not used below)
t_ref = 5 #refractory period
t_rest = 0

#updating membrane potential according to simplified equations
for i, t in enumerate(time):
    if i==0:
        Pn[i] = S[i] - D
    else:
        if t<=t_rest:
            # Inside the refractory window: hold at the resting potential.
            Pn[i] = Pref
        elif t>t_rest:
            if Pn[i-1]>Pmin:
                # Integrate the input spike and apply the constant leak.
                Pn[i] = Pn[i-1] + S[i] - D
            else:
                # Potential fell to the floor: snap back to zero.
                Pn[i] = 0
        if Pn[i]>=Pth:
            # Spike: add the overshoot and start the refractory period.
            Pn[i] += Pspike
            t_rest = t + t_ref
45 |
46 |
47 |
--------------------------------------------------------------------------------
/receptive_field/receptive_field.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 |
4 | #Initializing a random input matrix. During implementation this would be a 16x16 image
# Random 16x16 "image"; during real use this is the input picture.
inp = np.random.randint(0, 255, size=(16, 16))

w = np.zeros([5, 5])        # 5x5 receptive-field window
pot = np.zeros([16, 16])    # membrane potentials of the 256 input neurons
ran = [-2, -1, 0, 1, 2]     # window offsets around the centre
ox, oy = 2, 2               # window-centre coordinates
w[ox][oy] = 1               # Manhattan distance zero -> full weight

# Weight each window cell by its Manhattan distance from the centre.
for row in range(5):
    for col in range(5):
        dist = abs(ox - row) + abs(oy - col)
        w[row][col] = 1 - 0.375 * dist

# Slide the window over the image (clipped at the borders) to obtain
# the membrane potential of each of the 256 input neurons.
for i in range(16):
    for j in range(16):
        acc = 0
        for dx in ran:
            for dy in ran:
                if 0 <= i + dx <= 15 and 0 <= j + dy <= 15:
                    acc += w[ox + dx][oy + dy] * inp[i + dx][j + dy]
        pot[i][j] = acc
29 |
--------------------------------------------------------------------------------
/neuron/README.md:
--------------------------------------------------------------------------------
1 | ## Neuron
Neuron is the basic building block of an SNN and several interconnected neurons form the input, hidden and output layers. This neuron imitates the general [integrate and fire model](http://neuronaldynamics.epfl.ch/online/Ch1.S3.html).
* In the absence of stimulus, the membrane possesses a resting potential. Every input spike from connected neurons increases or decreases its membrane potential.
4 | * When the potential crosses a threshold value, neuron enters into refractory period in which no new input is allowed and the potential remains constant.
5 | * To avoid strong negative polarization of membrane, its potential is limited by Pmin.
6 | * As long as Pn>Pmin, there is a constant leakage of potential.
7 |
8 | ## Graphs
9 |
10 |
11 |
12 |
13 |
14 | Above graph shows the membrane potential throughout the 50 time units (TU) as a result of input spike train. Below is the corresponding randomly generated input spike train.
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/temp-snn/README.md:
--------------------------------------------------------------------------------
1 | # Spiking-Neural-Network
2 |
3 | This project implements a very basic SNN library that supports a very simple image classification example.
4 |
5 | While simple, the code here implements a variety of useful and important SNN functions and produces a functional classifier.
6 |
7 | To read more look at [the docs](docs/README.md).
8 |
9 | ## Running
10 | * cd into the root where the project is cloned
11 | * run classification/classify.py
12 |
13 | it will use pregenerated weights and output the results of classifying the test images
14 |
15 | You can also run training/learning.py to generate a new weights file and output `neuron[1-3].png`
16 | which will show the reconstructed weights. One neuron should look random, the other two will produce
17 | a pattern similar to the O, and another a pattern similar to the X.
18 |
19 | To use the new weights, the weights for the X must be on the first line of weights.txt and
20 | the weights for the O on the second line. When the weights are generated the first line of weights_training.txt
21 | will corrispond to the weights shown in neuron1.png, the second line to neuron2.png and so
22 | on, reorder them if needed.
--------------------------------------------------------------------------------
/temp-snn/snn/var_th.py:
--------------------------------------------------------------------------------
1 | ############################################## README #################################################
2 |
3 | # This calculates threshold for an image depending upon its spiking activity.
4 |
5 | ########################################################################################################
6 |
7 |
8 | import numpy as np
9 | from neuron import neuron
10 | import random
11 | from matplotlib import pyplot as plt
12 | from recep_field import rf
13 | from spike_train import encode
14 | from rl import rl
15 | from rl import update
16 | from reconstruct import reconst_weights
17 | from parameters import param as par
18 | import os
19 |
20 |
def threshold(train):
    """Adaptive firing threshold for one encoded image.

    ``train`` is a 2-D spike array (neurons x time steps); the threshold
    is one third of the peak number of simultaneously active neurons,
    scaled by ``par.scale``.
    """
    time_steps = np.shape(train[0])[0]
    column_sums = [sum(train[:, step]) for step in range(time_steps)]
    thresh = max([0] + column_sums)
    return (thresh / 3) * par.scale
31 |
32 |
if __name__ == '__main__':

    # NOTE(review): ``Image`` (PIL) is never imported in this module, and
    # ``Image.open`` takes no ``0`` mode argument — this demo block raises
    # NameError as written; restore the cv2 line above or import PIL first.
    # img = cv2.imread("mnist1/" + str(1) + ".png", 0)
    img = np.array(Image.open("mnist1/" + str(1) + ".png", 0))
    print(img)
    # pot = rf(img)
    # train = np.array(encode(pot))
    # print threshold(train)
--------------------------------------------------------------------------------
/training/var_th.py:
--------------------------------------------------------------------------------
1 | ############################################## README #################################################
2 |
3 | # This calculates threshold for an image depending upon its spiking activity.
4 |
5 | ########################################################################################################
6 |
7 |
8 | import numpy as np
9 | from neuron import neuron
10 | import random
11 | from matplotlib import pyplot as plt
12 | from recep_field import rf
13 | import cv2
14 | from spike_train import encode
15 | from rl import rl
16 | from rl import update
17 | from reconstruct import reconst_weights
18 | from parameters import param as par
19 | import os
20 |
21 |
def threshold(train):
    """Compute the variable threshold for an encoded image.

    Scans the 2-D spike array (neurons x time steps) for the time step
    with the most simultaneously active neurons and returns a third of
    that peak, scaled by ``par.scale``.
    """
    tu = np.shape(train[0])[0]
    per_step = [sum(train[:, i]) for i in range(tu)]
    thresh = max([0] + per_step)
    return (thresh / 3) * par.scale
32 |
33 |
if __name__ == '__main__':

    # Fix: the previous code called PIL's ``Image.open`` without importing
    # PIL (NameError) and passed it an invalid ``0`` argument; use the
    # grayscale cv2 read instead (cv2 is already imported above).
    img = cv2.imread("mnist1/" + str(1) + ".png", 0)
    print(img)
    # pot = rf(img)
    # train = np.array(encode(pot))
    # print threshold(train)
--------------------------------------------------------------------------------
/temp-snn/snn/neuron.py:
--------------------------------------------------------------------------------
1 | ############################################################ README ##############################################################
2 |
3 | # This is neuron class which defines the dynamics of a neuron. All the parameters are initialised and methods are included to check
4 | # for spikes and apply lateral inhibition.
5 |
6 | ###################################################################################################################################
7 |
8 | import numpy as np
9 | import random
10 | from matplotlib import pyplot as plt
11 | from parameters import param as par
12 |
class neuron:
    """Leaky integrate-and-fire neuron (library version).

    NOTE: ``Pth`` is only assigned by ``initial(th)``; call it before the
    first ``check()``, otherwise ``check()`` raises AttributeError.
    """
    def __init__(self):
        self.t_ref = 30          # refractory period (time units)
        self.t_rest = -1         # resting-until time (-1 = not resting)
        self.P = par.Prest       # membrane potential starts at rest
        self.Prest = par.Prest   # cached resting potential
    def check(self):
        """Return 1 and reset the potential on a spike, else 0."""
        if self.P>= self.Pth:
            self.P = self.Prest
            return 1
        elif self.P < par.Pmin:
            # Clamp run-away negative potential back to rest.
            self.P = par.Prest
            return 0
        else:
            return 0
    def inhibit(self):
        """Lateral inhibition: drive the potential to the floor."""
        self.P = par.Pmin
    def initial(self, th):
        """(Re)initialise the neuron with firing threshold ``th``."""
        self.Pth = th
        self.t_rest = -1
        self.P = par.Prest
--------------------------------------------------------------------------------
/temp-snn/snn/weight_initialization.py:
--------------------------------------------------------------------------------
1 | # Read and return the weights produced by training.py for the X matching synapses
def learned_weights_x():
    """Load the trained weights for the X-matching synapses.

    Reads the first line of ``weights.txt`` (tab-separated floats).
    """
    with open('weights.txt', 'r') as weight_file:
        first_line = weight_file.readlines()[0]
    return [float(token) for token in first_line.split('\t')]
9 |
10 | # Read and return the weights produced by training.py for the O matching synapses
def learned_weights_o():
    """Load the trained weights for the O-matching synapses.

    Reads the second line of ``weights.txt`` (tab-separated floats).
    """
    with open('weights.txt', 'r') as weight_file:
        second_line = weight_file.readlines()[1]
    return [float(token) for token in second_line.split('\t')]
19 |
def learned_weights_synapse(id):
    """Load the trained weights on line ``id`` (0-based) of ``weights.txt``.

    Returns an empty list when the file has no such line.  (The parameter
    name shadows the builtin ``id`` but is kept for interface compatibility.)
    """
    with open('weights.txt', 'r') as weight_file:
        lines = weight_file.readlines()
    if len(lines) <= id:
        return []
    return [float(token) for token in lines[id].split('\t')]
29 |
# Smoke test: read the weights and show the parsed sequence that will be
# fed to the classification stage.
if __name__ == '__main__':
    print(learned_weights_x())
34 |
--------------------------------------------------------------------------------
/temp-snn/classification/weight_initialization.py:
--------------------------------------------------------------------------------
1 | # Read and return the weights produced by training.py for the X matching synapses
def learned_weights_x():
    """Return the trained weights for the X-detecting synapses.

    Parses the first tab-separated line of ``weights.txt`` as floats.
    """
    with open('weights.txt', 'r') as weight_file:
        lines = weight_file.readlines()
    return list(map(float, lines[0].split('\t')))
9 |
10 | # Read and return the weights produced by training.py for the O matching synapses
def learned_weights_o():
    """Return the trained weights for the O-detecting synapses.

    Parses the second tab-separated line of ``weights.txt`` as floats.
    """
    with open('weights.txt', 'r') as weight_file:
        lines = weight_file.readlines()
    return list(map(float, lines[1].split('\t')))
19 |
def learned_weights_synapse(id):
    """Return the trained weights on line ``id`` of ``weights.txt``.

    Yields an empty list when the file holds fewer lines.  (The parameter
    name shadows the builtin ``id``; kept for interface compatibility.)
    """
    with open('weights.txt', 'r') as weight_file:
        lines = weight_file.readlines()
    return list(map(float, lines[id].split('\t'))) if id < len(lines) else []
29 |
# Smoke test: confirm the weights parse into the sequence the classifier
# expects.
if __name__ == '__main__':
    parsed = learned_weights_x()
    print(parsed)
34 |
--------------------------------------------------------------------------------
/multi_layer/var_th.py:
--------------------------------------------------------------------------------
1 | ############################################## README #################################################
2 |
3 | # This calculates threshold for an image depending upon its spiking activity.
4 |
5 | ########################################################################################################
6 |
7 |
8 | import numpy as np
9 | from neuron import neuron
10 | import random
11 | from matplotlib import pyplot as plt
12 | from recep_field import rf
13 | import cv2
14 | from spike_train import encode
15 | from rl import rl
16 | from rl import update
17 | from reconstruct import reconst_weights
18 | from parameters import param as par
19 | import os
20 |
21 |
def threshold(train):
    """Variable threshold for the multi-layer network.

    Returns a tenth of the peak number of simultaneously active neurons
    across all time steps of the spike array, scaled by ``par.scale``.
    """
    tu = np.shape(train[0])[0]
    activity = [float(sum(train[:, i])) for i in range(tu)]
    thresh = max([float(0)] + activity)

    #return (thresh/3)*par.scale
    return (thresh / 10) * par.scale
33 |
34 |
if __name__ == '__main__':

    # Fix: the previous code called PIL's ``Image.open`` without importing
    # PIL (NameError) and passed it an invalid ``0`` argument; use the
    # grayscale cv2 read instead (cv2 is already imported above).
    img = cv2.imread("mnist1/" + str(1) + ".png", 0)
    print(img)
    # pot = rf(img)
    # train = np.array(encode(pot))
    # print threshold(train)
43 |
--------------------------------------------------------------------------------
/temp-snn/recreate.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# Round-trip check: encode an image into spike trains, dump them to a text
# file, then rebuild an approximate image from the per-neuron spike counts.

import numpy as np
import matplotlib.pyplot as plt
import math
import snn
import imageio
from snn.parameters import param as par
from snn.recep_field import rf
from snn.spike_train import encode, encode2

# NOTE(review): hard-coded absolute path — swap in the commented local
# sample below before running on another machine.
img = imageio.imread("/Users/johnsoni/Downloads/mnist_png/training/5/0.png")
#img = imageio.imread("data/training/0.png")

pot = rf(img)   # receptive-field potential map of the input image

# for i in pot:
# 	m.append(max(i))
# 	n.append(min(i))

# print max(m), min(n)
#train = encode2(img)
train = encode(pot)   # rate-coded spike trains, one row per input neuron
f = open('train6.txt', 'w')
print(np.shape(train))

# One line per neuron: a '0'/'1' character per time step.
for j in range(len(train)):
    for i in range(len(train[j])):
        f.write(str(int(train[j][i])))
    f.write('\n')

f.close()

# Reconstruct: brightness of each pixel ~ spike count of its neuron.
with open('train6.txt','r') as f:
    pixels = []
    for line in f:
        sum = 0   # NOTE(review): shadows the builtin ``sum`` from here on
        for i in line:
            if i == '1':
                sum += 1
        pixels.append(sum)
    pixels = np.array(pixels, dtype='float64')
    pixels = pixels/np.max(pixels) * 255.0   # normalise to 0-255 grayscale
    dim = int(math.sqrt(len(pixels)))        # assumes a square image
    img = np.array(pixels, dtype='uint8').reshape((dim, dim))
    plt.imshow(img)
    plt.show()
48 |
49 |
--------------------------------------------------------------------------------
/encoding/README.md:
--------------------------------------------------------------------------------
1 | # Generating Spike Trains
2 |
3 | Input neuron layer has to be fed with the stimulus caused by its receptive field. Stimulus calculated from sliding window is an analog value and has to be converted into a spike train so that neuron can understand it. This encoder serves as an interface between numerical data (from the physical world, digital simulations, etc) and SNNs. It makes the conversion of information to artificial neuron spikes. The type of encoding adopted here is **rate coding**. It suggests that the information is carried by the firing rate of the neuron. Hence, spike train generated has frequency proportional to the corresponding membrane potential.
4 |
5 | The average firing rate of retinal ganglion neurons lies between 1-200 Hz therefore the potential is scaled accordingly. Here are some examples of varying firing rates
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
The image used as the input is the one from the [semeion dataset](https://archive.ics.uci.edu/ml/machine-learning-databases/semeion/) of handwritten integers
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/encoding/spike_train.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import math
4 | from matplotlib import pyplot as plt
5 |
# Sliding window implementation of receptive field
w = np.zeros([5,5])       # 5x5 weight window
pot = np.zeros([16,16])   # membrane potential per input neuron
ran = [-2,-1,0,1,2]       # window offsets around the centre
ox = 2                    # window-centre x coordinate
oy = 2                    # window-centre y coordinate
w[ox][oy] = 1

# Weight each window cell by its Manhattan distance from the centre.
for i in range(5):
    for j in range(5):
        d = abs(ox-i) + abs(oy-j)
        w[i][j] = (-0.375)*d + 1

#reading dataset image (16x16)
img = cv2.imread('1.png', 0)

#calculating potential map of the image (256 input neuron potential)
for i in range(16):
    for j in range(16):
        summ = 0
        for m in ran:
            for n in ran:
                # Clip the window at the image borders.
                if (i+m)>=0 and (i+m)<=15 and (j+n)>=0 and (j+n)<=15:
                    summ = summ + w[ox+m][oy+n]*img[i+m][j+n]
        pot[i][j] = summ

#defining time frame of 1s with steps of 5ms
T = 1;
dt = 0.005
time = np.arange(0, T+dt, dt)

#initializing spike train
train = []

for l in range(16):
    for m in range(16):

        temp = np.zeros([201,])
        #calculating firing rate proportional to the membrane potential
        #(rate coding scaled into the 1-200 Hz range of retinal ganglion cells)
        freq = math.ceil(0.102*pot[l][m] + 52.02)
        # NOTE(review): assumes freq > 0 (holds for non-negative potentials);
        # a strongly negative potential would make the next line divide by <= 0.
        freq1 = math.ceil(200/freq)

        #generating spikes according to the firing rate
        k = 0
        while k<200:
            temp[k] = 1
            k = k + freq1
        train.append(temp)
54 |
--------------------------------------------------------------------------------
/multi_layer/parameters.py:
--------------------------------------------------------------------------------
1 | ################################################ README #########################################################
2 |
3 | # This file contains all the parameters of the network.
4 |
5 | #################################################################################################################
6 |
class param:
    """Hyper-parameters for the multi-layer spiking network.

    Fix: ``num_layers``/``num_layer_neurons`` were assigned three times in
    a row, so only the last pair ever took effect; the dead earlier
    assignments are now commented-out alternative topologies, matching the
    style of the 5-layer option below.  The effective topology is unchanged.
    """
    scale = 1          # global scaling factor applied to potentials/weights
    T = 200            # simulation time units per image
    t_back = -20       # STDP window: how far back in time to look
    t_fore = 20        # STDP window: how far forward in time to look

    pixel_x = 28       # input image side length (28x28)
    Prest = float(0)   # resting membrane potential
    m = pixel_x*pixel_x #Number of neurons in first layer
    n = 8 #Number of neurons in second layer
    Pmin = -500*scale  # floor on membrane potential
    # Pth = 5
    # D = 0.7
    w_max = 1.5*scale  # upper synaptic weight bound
    w_min = -1.2*scale # lower synaptic weight bound
    sigma = 0.1 #0.02  # learning-rate factor for STDP updates
    A_plus = 0.8 # time difference is positive i.e negative reinforcement
    A_minus = 0.3 # 0.01 # time difference is negative i.e positive reinforcement
    tau_plus = 8       # decay constant of the depression branch
    tau_minus = 5      # decay constant of the potentiation branch

    epoch = 12         # number of training epochs


    fr_bits = 12       # fractional bits (fixed-point export)
    int_bits = 12      # integer bits (fixed-point export)

    # Alternative topologies — uncomment exactly one pair:
    #num_layers = 2 #input layer + hidden layers + output layer
    #num_layer_neurons = [m, n]
    #num_layers = 3 #input layer + hidden layers + output layer
    #num_layer_neurons = [m, 16,n]
    num_layers = 4 #input layer + hidden layers + output layer
    num_layer_neurons = [m, 64,16,n]
    #num_layers = 5 #input layer + hidden layers + output layer
    #num_layer_neurons = [m, 256,64,16,n]
42 |
43 |
--------------------------------------------------------------------------------
/classification/recep_field.py:
--------------------------------------------------------------------------------
1 | ############################ README ############################################
2 | # This file is used to apply receptive field to the image to imitate how
3 | # retinal ganglion cells perceive in real world scenario. Here 'w' is the filter
4 | # that need to be convoluted with the image. Sophisticated python libraries for
5 | # convolution can be used for optimization.
6 | ################################################################################
7 |
8 | import numpy as np
9 |
def rf(inp):
    """Convolve a 28x28 grayscale image with an on-centre receptive-field
    kernel, imitating how retinal ganglion cells respond to stimuli.

    inp : 28x28 array-like of pixel intensities in [0, 255].
    Returns a 28x28 numpy array of membrane potentials; border pixels use
    only the in-bounds portion of the kernel.
    """
    # 5x5 on-centre / off-surround kernel.
    w = [[-0.5,-0.125, 0.25, -0.125, -0.5 ],
         [-0.125 , 0.25 , 0.625 , 0.25 , -0.125],
         [ 0.25 ,0.625 , 1. , 0.625 , 0.25 ],
         [-0.125 , 0.25 , 0.625 , 0.25, -0.125],
         [-0.5 , -0.125 , 0.25 , -0.125 ,-0.5 ]]
    pot = np.zeros([28,28])
    ran = [-2,-1,0,1,2]
    ox = 2   # kernel centre offsets
    oy = 2

    #Convolution
    for i in range(28):
        for j in range(28):
            summ = 0
            for m in ran:
                for n in ran:
                    # BUG FIX: the bound was previously 15 (leftover from a
                    # 16x16 image version) although the image is 28x28, so
                    # most pixels never saw the full kernel. Valid index
                    # range for a 28x28 image is 0..27.
                    if (i+m)>=0 and (i+m)<=27 and (j+n)>=0 and (j+n)<=27:
                        summ = summ + w[ox+m][oy+n]*inp[i+m][j+n]/255
            pot[i][j] = summ
    return pot
31 |
32 | # if __name__ == '__main__':
33 |
34 | # maxx = -1000
35 | # minn = 1000
36 |
37 | # for j in range(1,1500):
38 | # img = cv2.imread("images/" + str(j) + ".png", 0)
39 | # pot = rf(img)
40 | # for c in pot:
41 | # if max(c)>maxx:
42 | # maxx= max(c)
43 | # if min(c)=0 and (i+m)<=par.pixel_x-1 and (j+n)>=0 and (j+n)<=par.pixel_x-1:
38 | summ = summ + w[ox+m][oy+n]*inp[i+m][j+n]/255
39 | pot[i][j] = summ
40 | return pot
41 |
if __name__ == '__main__':
    # Smoke test (Python 2): convolve one MNIST digit with the receptive
    # field and report the range of resulting potentials.

    img = cv2.imread("mnist1/" + str(1) + ".png", 0)
    pot = rf(img)
    max_a = []
    min_a = []
    for i in pot:
        max_a.append(max(i))
        min_a.append(min(i))
    print "max", max(max_a)
    print "min", min(min_a)
--------------------------------------------------------------------------------
/multi_layer/recep_field.py:
--------------------------------------------------------------------------------
1 | ####################################################### README #########################################################
2 |
3 | # This file consists of function that convolves an image with a receptive field so that input to the network is
4 | # close to the form perceived by our eyes.
5 |
6 | #########################################################################################################################
7 |
8 |
9 | import numpy as np
10 | import cv2
11 | from parameters import param as par
12 |
def rf(inp):
    """Convolve `inp` (par.pixel_x x par.pixel_x, values 0-255) with an
    on-centre receptive-field kernel and return the potential map.

    Border pixels accumulate only the in-bounds part of the kernel.
    """
    ring1 = 0.625
    ring2 = 0.125
    ring3 = -0.125
    ring4 = -.5

    # 5x5 kernel, symmetric around the centre pixel (weight 1).
    w = [[ring4, ring3, ring2, ring3, ring4],
         [ring3, ring2, ring1, ring2, ring3],
         [ring2, ring1, 1,     ring1, ring2],
         [ring3, ring2, ring1, ring2, ring3],
         [ring4, ring3, ring2, ring3, ring4]]

    size = par.pixel_x
    pot = np.zeros([size, size])
    offsets = [-2, -1, 0, 1, 2]

    # Manual convolution over every pixel.
    for i in range(size):
        for j in range(size):
            acc = 0
            for di in offsets:
                for dj in offsets:
                    r = i + di
                    c = j + dj
                    if r >= 0 and r <= size - 1 and c >= 0 and c <= size - 1:
                        acc = acc + w[2 + di][2 + dj] * inp[r][c] / 255
            pot[i][j] = acc
    return pot
41 |
if __name__ == '__main__':
    # Smoke test (Python 2): run one MNIST digit through the receptive
    # field and print the min/max of the resulting potentials.

    img = cv2.imread("mnist1/" + str(1) + ".png", 0)
    pot = rf(img)
    max_a = []
    min_a = []
    for i in pot:
        max_a.append(max(i))
        min_a.append(min(i))
    print "max", max(max_a)
    print "min", min(min_a)
--------------------------------------------------------------------------------
/temp-snn/snn/reconstruct.py:
--------------------------------------------------------------------------------
1 | ###################################################### README #####################################################
2 |
3 | # This file is used to leverage the generative property of a Spiking Neural Network. reconst_weights function is used
4 | # for that purpose. Looking at the reconstructed images helps to analyse training process.
5 |
6 | ####################################################################################################################
7 |
8 |
9 | import numpy as np
10 | from numpy import interp
11 | import imageio
12 | from recep_field import rf
13 | from parameters import param as par
14 |
15 |
def reconst_weights(weights, num):
    """Visualise a learned weight vector as a grayscale image.

    Reshapes `weights` to par.pixel_x x par.pixel_x, linearly rescales
    [par.w_min, par.w_max] onto [0, 255], writes 'neuron<num>.png' and
    returns the image array.
    """
    mat = np.reshape(np.array(weights), (par.pixel_x, par.pixel_x))
    img = np.zeros((par.pixel_x, par.pixel_x))
    for r in range(par.pixel_x):
        for c in range(par.pixel_x):
            # weight -> 8-bit pixel intensity
            img[r][c] = int(interp(mat[r][c], [par.w_min, par.w_max], [0, 255]))

    imageio.imwrite('neuron' + str(num) + '.png', img)
    return img
26 |
def reconst_rf(weights, num):
    """Visualise a receptive-field potential map as a grayscale image.

    Same as reconst_weights but rescales the fixed potential range
    [-2, 3.625] onto [0, 255]; writes 'neuron<num>.png'.
    """
    mat = np.reshape(np.array(weights), (par.pixel_x, par.pixel_x))
    img = np.zeros((par.pixel_x, par.pixel_x))
    for r in range(par.pixel_x):
        for c in range(par.pixel_x):
            img[r][c] = int(interp(mat[r][c], [-2, 3.625], [0, 255]))

    imageio.imwrite('neuron' + str(num) + '.png', img)
    return img
37 |
38 |
if __name__ == '__main__':
    # Visual check: build the receptive-field potential map of one sample
    # image and dump it as a grayscale png ('neuron12.png').

    img = imageio.imread("images2/" + "69" + ".png")
    pot = rf(img)
    reconst_rf(pot, 12)
--------------------------------------------------------------------------------
/multi_layer/reconstruct.py:
--------------------------------------------------------------------------------
1 | # uncompyle6 version 3.2.4
2 | # Python bytecode 2.7 (62211)
3 | # Decompiled from: Python 2.7.15rc1 (default, Nov 12 2018, 14:31:15)
4 | # [GCC 7.3.0]
5 | # Embedded file name: /home/vonfaust/data/snn/codebase/python_imple/training/reconstruct.py
6 | # Compiled at: 2018-12-30 05:37:08
7 | import numpy as np
8 | from numpy import interp
9 | import cv2
10 | from recep_field import rf
11 | from parameters import param as par
12 |
def reconst_weights(weights, num, layer, reshape_x, reshape_y):
    """Visualise one neuron's learned weights as a grayscale image.

    `weights` is reshaped to (reshape_x, reshape_y), each weight mapped
    linearly from [par.w_min, par.w_max] onto [0, 255]. The result is
    forced to 28x28 via np.resize (repeats/truncates data) and written to
    weights/layer_<layer>_neuron_<num>.png. Returns the written image.
    """
    mat = np.reshape(np.array(weights), (reshape_x, reshape_y))
    img = np.zeros((reshape_x, reshape_y))
    for r in range(reshape_x):
        for c in range(reshape_y):
            img[r][c] = int(interp(mat[r][c], [par.w_min, par.w_max], [0, 255]))

    img = np.resize(img, (28, 28))
    cv2.imwrite('weights/layer_' + str(layer) + '_neuron_' + str(num) + '.png', img)
    return img
24 |
25 |
def reconst_rf(weights, num):
    """Visualise a receptive-field potential map as a grayscale png.

    Rescales the fixed potential range [-2, 3.625] onto [0, 255] and
    writes 'neuron<num>.png'. Returns the image array.
    """
    mat = np.reshape(np.array(weights), (par.pixel_x, par.pixel_x))
    img = np.zeros((par.pixel_x, par.pixel_x))
    for r in range(par.pixel_x):
        for c in range(par.pixel_x):
            img[r][c] = int(interp(mat[r][c], [-2, 3.625], [0, 255]))

    cv2.imwrite('neuron' + str(num) + '.png', img)
    return img
36 |
37 |
if __name__ == '__main__':
    # Visual check: receptive-field response of one image dumped as png.
    img = cv2.imread('images2/69.png', 0)
    pot = rf(img)
    reconst_rf(pot, 12)
# okay decompiling reconstruct.pyc
43 |
--------------------------------------------------------------------------------
/temp-snn/snn/recep_field.py:
--------------------------------------------------------------------------------
1 | ####################################################### README #########################################################
2 |
3 | # This file consists of function that convolves an image with a receptive field so that input to the network is
4 | # close to the form perceived by our eyes.
5 |
6 | #########################################################################################################################
7 |
8 |
9 | import numpy as np
10 | import imageio
11 | from parameters import param as par
12 |
13 | def rf(inp):
14 | sca1 = 0.625
15 | sca2 = 0.125
16 | sca3 = -0.125
17 | sca4 = -.5
18 |
19 | #Receptive field kernel
20 | w = [[ sca4 ,sca3 , sca2 ,sca3 ,sca4],
21 | [ sca3 ,sca2 , sca1 ,sca2 ,sca3],
22 | [ sca2 ,sca1 , 1 ,sca1 ,sca2],
23 | [ sca3 ,sca2 , sca1 ,sca2 ,sca3],
24 | [ sca4 ,sca3 , sca2 ,sca3 ,sca4]]
25 |
26 | pot = np.zeros([inp.shape[0],inp.shape[1]])
27 | ran = [-2,-1,0,1,2]
28 | ox = 2
29 | oy = 2
30 |
31 | #Convolution
32 | for i in range(inp.shape[0]):
33 | for j in range(inp.shape[1]):
34 | summ = 0
35 | for m in ran:
36 | for n in ran:
37 | if (i+m)>=0 and (i+m)<=inp.shape[0]-1 and (j+n)>=0 and (j+n)<=inp.shape[0]-1:
38 | summ = summ + w[ox+m][oy+n] * inp[i+m][j+n]/255
39 | pot[i][j] = summ
40 | return pot
41 |
if __name__ == '__main__':
    # Smoke test: convolve a sample image, print the top-left 16x16 patch
    # of potentials and the overall min/max.
    img = imageio.imread("images/" + str(100) + ".png")
    pot = rf(img)
    max_a = []
    min_a = []
    for i in pot:
        max_a.append(max(i))
        min_a.append(min(i))
    for i in range(16):
        temp = ''
        for j in pot[i]:
            temp += '%02d ' % int(j)
        print(temp)
    print("max", max(max_a))
    print("min", min(min_a))
--------------------------------------------------------------------------------
/training/spike_train.py:
--------------------------------------------------------------------------------
1 | ######################################################## README #############################################################
2 |
3 | # This file generates rate based spike train from the potential map.
4 |
5 | ############################################################################################################################
6 |
7 |
8 | import numpy as np
9 | from numpy import interp
10 | from neuron import neuron
11 | import random
12 | from matplotlib import pyplot as plt
13 | from recep_field import rf
14 | import cv2
15 | from rl import rl
16 | from rl import update
17 | import math
18 | from parameters import param as par
19 |
def encode(pot):
    """Rate-encode a par.pixel_x x par.pixel_x potential map into spike
    trains.

    Each pixel yields one train of length par.T+1: its potential is mapped
    from [-1.069, 2.781] onto a firing rate of [1, 20], and equally spaced
    spikes are emitted at the corresponding period. Pixels with potential
    <= 0 produce an empty train. Returns a list of par.m numpy arrays.
    """
    #initializing spike train
    train = []

    for l in range(par.pixel_x):
        for m in range(par.pixel_x):

            temp = np.zeros([(par.T+1),])

            #calculating firing rate proportional to the membrane potential
            freq = interp(pot[l][m], [-1.069,2.781], [1,20])

            # BUG FIX: was `print error`, which aborted via NameError;
            # raise an explicit exception instead.
            if freq <= 0:
                raise ValueError("non-positive firing rate: %s" % freq)

            # spike period in time-steps
            freq1 = math.ceil(600/freq)

            #generating spikes according to the firing rate
            k = freq1
            if(pot[l][m]>0):
                while k<(par.T+1):
                    # BUG FIX: math.ceil returns a float on Python 2; cast
                    # before indexing the numpy array (matches the
                    # multi_layer version of this file).
                    temp[int(k)] = 1
                    k = k + freq1
            train.append(temp)
    return train
48 |
if __name__ == '__main__':
    # Dump the spike train of one digit image to a lookup text file:
    # one line per time step, one 0/1 character per input neuron.
    # m = []
    # n = []
    img = cv2.imread("mnist1/6/" + str(15) + ".png", 0)

    pot = rf(img)

    # for i in pot:
    #     m.append(max(i))
    #     n.append(min(i))

    # print max(m), min(n)
    train = encode(pot)
    f = open('look_ups/train6.txt', 'w')
    print np.shape(train)

    for i in range(201):
        for j in range(784):
            f.write(str(int(train[j][i])))
        f.write('\n')

    f.close()
--------------------------------------------------------------------------------
/multi_layer/spike_train.py:
--------------------------------------------------------------------------------
1 | ######################################################## README #############################################################
2 |
3 | # This file generates rate based spike train from the potential map.
4 |
5 | ############################################################################################################################
6 |
7 |
8 | import numpy as np
9 | from numpy import interp
10 | from neuron import neuron
11 | import random
12 | from matplotlib import pyplot as plt
13 | from recep_field import rf
14 | import cv2
15 | from rl import rl
16 | from rl import update
17 | import math
18 | from parameters import param as par
19 |
def encode(pot):
    """Rate-encode a par.pixel_x x par.pixel_x potential map into spike
    trains.

    Potentials are mapped from [-1.069, 2.781] onto firing rates [1, 20];
    each pixel emits equally spaced spikes at the corresponding period
    over par.T+1 time-steps. Pixels with potential <= 0 emit no spikes.
    Returns a list of par.m numpy arrays.
    """
    #initializing spike train
    train = []

    for l in range(par.pixel_x):
        for m in range(par.pixel_x):

            temp = np.zeros([(par.T+1),])

            #calculating firing rate proportional to the membrane potential
            freq = interp(pot[l][m], [-1.069,2.781], [1,20])

            # BUG FIX: was `print error`, which aborted via NameError on
            # Python 2 (and is a SyntaxError on Python 3); raise instead.
            if freq <= 0:
                raise ValueError("non-positive firing rate: %s" % freq)

            # spike period in time-steps
            freq1 = math.ceil(600/freq)

            #generating spikes according to the firing rate
            k = freq1
            if(pot[l][m]>0):

                while k<(par.T+1):
                    k = int(k)   # math.ceil returns a float on Python 2
                    temp[k] = 1
                    k = k + freq1

            train.append(temp)
    return train
52 |
if __name__ == '__main__':
    # Dump the spike train of one digit image to a lookup text file:
    # one line per time step, one 0/1 character per input neuron.
    # m = []
    # n = []
    img = cv2.imread("mnist1/6/" + str(15) + ".png", 0)

    pot = rf(img)

    # for i in pot:
    #     m.append(max(i))
    #     n.append(min(i))

    # print max(m), min(n)
    train = encode(pot)
    f = open('look_ups/train6.txt', 'w')
    print np.shape(train)

    for i in range(201):
        for j in range(784):
            f.write(str(int(train[j][i])))
        f.write('\n')

    f.close()
75 |
--------------------------------------------------------------------------------
/classification/spike_train.py:
--------------------------------------------------------------------------------
1 | ########################### README ############################################
2 | # This file is used to generate spike train from potential map. There are two
3 | # methods to do so. One is deterministic where we calculate the spike frequency
4 | # directly proportional to the potential of that pixel and construct a train
5 | # with equally spaced spikes. Other one is stochastic where we calculate the
6 | # probability of the pixel to fire a spike and construct a spike train
7 | # accordingly
8 | ###############################################################################
9 |
10 | import numpy as np
11 | from numpy import interp
12 | from neuron import neuron
13 | import random
14 | from recep_field import rf
15 | import imageio
16 | import math
17 | from sklearn.preprocessing import normalize
18 |
19 | # Builds a probabilistic spike train
def encode_stochastic(img):
    """Build probabilistic spike trains from a 28x28 image.

    The image is L2-normalised; at every one of T+1 time-steps each pixel
    fires with probability equal to its normalised intensity. Returns a
    list of 784 boolean arrays of length T+1.
    """
    T = 200
    pot1 = normalize(img, norm='l2')
    train = []
    for row in range(28):
        for col in range(28):
            # one uniform draw per time-step; spike where draw < intensity
            draws = np.random.uniform(size=(T+1))
            train.append(draws < pot1[row][col])
    return train
30 |
def encode_deterministic(pot):
    """Build deterministic rate-coded spike trains from a 28x28 potential
    map.

    A pixel's potential is mapped from [-2, 5] onto a firing rate [1, 20];
    it then fires equally spaced spikes at the corresponding period over
    T+1 time-steps. Non-positive rates yield an empty train. Returns a
    list of 784 numpy arrays.
    """
    #defining time frame of 1s with steps of 5ms
    T = 200;
    train = []

    for row in range(28):
        for col in range(28):
            spikes = np.zeros([(T+1),])
            # firing rate proportional to the membrane potential
            rate = interp(pot[row][col], [-2,5], [1,20])
            if rate > 0:
                period = math.ceil(T/rate)
                tick = period
                while tick < (T+1):
                    spikes[int(tick)] = 1
                    tick = tick + period
            train.append(spikes)
    return train
53 |
54 |
if __name__ == '__main__':
    # Manual experiment hook: load a training image and build its
    # stochastic spike train (result discarded; useful under a debugger).
    m = []
    n = []
    img = imageio.imread("training_images/1.png")
    # pot = rf(img)
    # train = encode_deterministic(pot)
    # print train
    # print img
    encode_stochastic(img)
--------------------------------------------------------------------------------
/synapse/synapse.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 | from matplotlib import pyplot as plt
4 |
#constant global parameters which are same for the whole network
# NOTE(review): `global` at module level is a no-op; kept as documentation
# of the author's intent.
global Pref, Pmin, Pth, D, Pspike, time, T, dt
T = 500                 # total simulation time
dt = 0.125              # integration step
Pref = 0                # potential held during the refractory period
Pmin = -1               # floor below which the potential is reset to 0
Pth = 5                 # firing threshold
D = 1                   # leak subtracted on the very first step
Pspike = 4              # overshoot added to the potential on a spike
t_ref = 5               # refractory period length
time = np.arange(0, T+dt, dt)   # discrete time axis
16 |
17 | #neuron class which can be instantiated in the main function as many times as required. It follows the integrate and fire model.
18 | #The out function takes in the matrices of spikes and weights and returns the output train of spikes.
class neuron:
    """Leaky integrate-and-fire neuron operating on whole spike trains.

    Uses the module-level constants (Pref, Pmin, Pth, D, Pspike, t_ref,
    time). State: t_rest (end of current refractory window), Pn
    (potential per time-step), spike (emitted spikes per time-step).
    """
    def __init__(self):
        self.t_rest = 0
        self.Pn = np.zeros(len(time))
        self.spike = np.zeros(len(time))
    def out(self,S, w):
        """Run the full simulation.

        S : (num_inputs, len(time)) matrix of input spike trains.
        w : weight vector, one entry per input train.
        Returns this neuron's spike train (also stored in self.spike).
        """
        for i, t in enumerate(time):
            if i==0:
                # first step: integrate weighted input minus initial leak D
                a1 = S[:,i]
                self.Pn[i] = np.dot(w,a1) - D
                self.spike[i] = 0
            else:
                if t<=self.t_rest:
                    # refractory: clamp to the resting potential
                    self.Pn[i] = Pref
                    self.spike[i] = 0
                elif t>self.t_rest:
                    if self.Pn[i-1]>Pmin:
                        # integrate weighted input with a constant leak of
                        # 0.25 (NOTE(review): differs from D used at i==0
                        # -- confirm whether this asymmetry is intended)
                        a1 = S[:,i]
                        self.Pn[i] = self.Pn[i-1] + np.dot(w,a1) - 0.25
                        self.spike[i] = 0
                    else:
                        # potential fell to/below Pmin: reset to 0
                        self.Pn[i] = 0
                        self.spike[i] = 0
                if self.Pn[i]>=Pth:
                    # threshold crossed: add spike overshoot, start the
                    # refractory window, record the spike
                    self.Pn[i] += Pspike
                    self.t_rest = t + t_ref
                    self.spike[i] = 1

        return self.spike
48 |
49 |
if __name__=='__main__':
    # Demo: feed random binary spike trains through a 5-neuron layer wired
    # one-to-one (identity weights), then into a 3-neuron layer with
    # random integer weights.
    m = 5 #Number of neurons in first layer
    n = 3 #Number of neurons in second layer
    #creating two layers of m and n neurons
    layer1 = []
    layer2 = []

    for i in range(m):
        a = neuron()
        layer1.append(a)
    for i in range(n):
        a = neuron()
        layer2.append(a)

    #initialising synapse array with random integers
    synapse = np.random.randint(0, 5, size=(n,m))
    S_in = []

    #initialising the input spike trains (random 0/1 at every step)
    for l in range(m):
        temp = []
        for k in range(len(time)):
            a = random.randrange(0,2)
            temp.append(a)
        S_in.append(temp)

    #output of the first layer
    out_l1 = []
    w_in = np.eye(m)   # identity: each input train drives one neuron
    S_in = np.array(S_in)
    for l in range(m):
        temp = []
        temp = layer1[l].out(S_in,w_in[l])
        out_l1.append(temp)
    out_l1 = np.array(out_l1)

    #output of the second layer which was fed with the output of the first layer
    out_l2 = []
    for l in range(n):
        temp = []
        temp = layer2[l].out(out_l1,synapse[l])
        out_l2.append(temp)
92 |
--------------------------------------------------------------------------------
/temp-snn/snn/classify.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from neuron import neuron
3 | import random
4 | from recep_field import rf
5 | from spike_train import encode
6 | from parameters import param as par
7 | from weight_initialization import learned_weights_x, learned_weights_o, learned_weights_synapse
8 | import imageio
9 |
#Parameters
# NOTE(review): `global` at module level is a no-op; kept from the author.
global time, T, dt, t_back, t_fore, w_min
time = np.arange(1, par.T+1, 1)   # discrete simulation time axis

layer2 = []

# creating the hidden layer of neurons
for i in range(par.n):
    a = neuron()
    layer2.append(a)

#synapse matrix
synapse = np.zeros((par.n,par.m))

#learned weights
synapse[0] = learned_weights_x()
synapse[1] = learned_weights_o()

# NOTE(review): this loop reloads EVERY row, overwriting rows 0 and 1
# assigned just above -- confirm which initialisation is intended.
for i in range(par.n):
    synapse[i] = learned_weights_synapse(i)
    #for j in range(par.m):
    #	synapse[i][j] = random.uniform(0, 0.4*par.scale)
33 |
# Classification pass: present each test image, simulate the output layer
# for par.T steps, and report per-neuron spike counts. The first neuron to
# cross threshold laterally inhibits all others (winner-takes-all).
for k in range(1):

    for i in range(3):
        spike_count = [0,0,0,0]

        #read the image to be classified
        img = imageio.imread("test/{}.png".format(i))

        #initialize the potentials of output neurons
        for x in layer2:
            x.initial(par.Pth)

        #calculate the membrane potentials of input neurons
        pot = rf(img)

        #generate spike trains
        train = np.array(encode(pot))

        #flag for lateral inhibition
        f_spike = 0

        active_pot = [0,0,0,0]

        for t in time:
            for j, x in enumerate(layer2):
                active = []

                # BUG FIX: this span was corrupted in the dump (text
                # between '<' and '>' was stripped). Reconstructed from
                # the sibling training loops: integrate weighted input
                # spikes outside the refractory period, then leak by par.D.
                if(x.t_rest < t):
                    x.P = x.P + np.dot(synapse[j], train[:,t])
                    if(x.P > par.Prest):
                        x.P -= par.D
                active_pot[j] = x.P

            # Lateral Inhibition: the first neuron past threshold wins and
            # resets every other neuron to the resting potential.
            if(f_spike==0):
                high_pot = max(active_pot)
                if(high_pot>par.Pth):
                    f_spike = 1
                    winner = np.argmax(active_pot)
                    print(i, winner)
                    for s in range(par.n):
                        if(s!=winner):
                            layer2[s].P = par.Prest

            #Check for spikes
            for j,x in enumerate(layer2):
                s = x.check()
                if(s==1):
                    print(j, s)
                    spike_count[j] += 1
                    x.t_rest = t + x.t_ref
        print(spike_count)
87 |
--------------------------------------------------------------------------------
/temp-snn/classification/classify.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from snn.neuron import neuron
3 | import random
4 | from snn.recep_field import rf
5 | from snn.spike_train import encode
6 | from snn.parameters import param as par
7 | from classification.weight_initialization import learned_weights_x, learned_weights_o, learned_weights_synapse
8 | import imageio
9 |
#Parameters
# NOTE(review): `global` at module level is a no-op; kept from the author.
global time, T, dt, t_back, t_fore, w_min
time = np.arange(1, par.T+1, 1)   # discrete simulation time axis

layer2 = []

# creating the hidden layer of neurons
for i in range(par.n):
    a = neuron()
    layer2.append(a)

#synapse matrix
synapse = np.zeros((par.n,par.m))

#learned weights
synapse[0] = learned_weights_x()
synapse[1] = learned_weights_o()

# NOTE(review): this loop reloads EVERY row, overwriting rows 0 and 1
# assigned just above -- confirm which initialisation is intended.
for i in range(par.n):
    synapse[i] = learned_weights_synapse(i)
    #for j in range(par.m):
    #	synapse[i][j] = random.uniform(0, 0.4*par.scale)
33 |
# Classification pass: present each test image, simulate the output layer
# for par.T steps, and report per-neuron spike counts. The first neuron to
# cross threshold laterally inhibits all others (winner-takes-all).
for k in range(1):

    for i in range(3):
        spike_count = [0,0,0,0]

        #read the image to be classified
        img = imageio.imread("data/test/{}.png".format(i))

        #initialize the potentials of output neurons
        for x in layer2:
            x.initial(par.Pth)

        #calculate the membrane potentials of input neurons
        pot = rf(img)

        #generate spike trains
        train = np.array(encode(pot))

        #flag for lateral inhibition
        f_spike = 0

        active_pot = [0,0,0,0]

        for t in time:
            for j, x in enumerate(layer2):
                active = []

                # BUG FIX: this span was corrupted in the dump (text
                # between '<' and '>' was stripped). Reconstructed from
                # the sibling training loops: integrate weighted input
                # spikes outside the refractory period, then leak by par.D.
                if(x.t_rest < t):
                    x.P = x.P + np.dot(synapse[j], train[:,t])
                    if(x.P > par.Prest):
                        x.P -= par.D
                active_pot[j] = x.P

            # Lateral Inhibition: the first neuron past threshold wins and
            # resets every other neuron to the resting potential.
            if(f_spike==0):
                high_pot = max(active_pot)
                if(high_pot>par.Pth):
                    f_spike = 1
                    winner = np.argmax(active_pot)
                    print(i, winner)
                    for s in range(par.n):
                        if(s!=winner):
                            layer2[s].P = par.Prest

            #Check for spikes
            for j,x in enumerate(layer2):
                s = x.check()
                if(s==1):
                    print(j, s)
                    spike_count[j] += 1
                    x.t_rest = t + x.t_ref
        print(spike_count)
87 |
--------------------------------------------------------------------------------
/classification/classify.py:
--------------------------------------------------------------------------------
1 | ##################### README ###################################################
2 | # This file executes the classification algorithm over input testing images.
3 | # Winner neurons inhibit other neurons by a phenomenon called Lateral inhibition
4 | # Spike for each output neuron at each time stamp is monitored.
5 | ################################################################################
6 | import numpy as np
7 | from neuron import neuron
8 | import random
9 | from recep_field import rf
10 | import imageio
11 | from spike_train import *
12 | from weight_initialization import learned_weights
13 |
#Parameters
# NOTE(review): `global` at module level is a no-op; kept from the author.
global time, T, dt, t_back, t_fore, w_min
T = 200                    # simulation window (time-steps)
time = np.arange(1, T+1, 1)
t_back = -20               # STDP window bounds (unused in this script)
t_fore = 20
Pth = 150 #Should be Pth = 6 for deterministic spike train
m = 784 #Number of neurons in first layer
n = 8 #Number of neurons in second layer
epoch = 1
num_of_images = 6
w_max = 0.5                # random-init range for untrained neurons
w_min = -0.5

layer2 = []
# creating the hidden layer of neurons
for i in range(n):
    a = neuron()
    layer2.append(a)

#synapse matrix
synapse = np.zeros((n,m))
#learned weights (one trained row per training image)
weight_matrix = learned_weights()
for i in range (num_of_images):
    synapse[i] = weight_matrix[i]

#random initialization for rest of the synapses
for i in range(num_of_images,n):
    for j in range(m):
        synapse[i][j] = random.uniform(w_min,w_max)
45 |
46 | for k in range(epoch):
47 | for i in range(1,7):
48 | spike_count = np.zeros((n,1))
49 |
50 | #read the image to be classified
51 | img = imageio.imread("training_images/" + str(i) + ".png")
52 |
53 | #initialize the potentials of output neurons
54 | for x in layer2:
55 | x.initial()
56 |
57 | #calculate teh membrane potentials of input neurons
58 | pot = rf(img)
59 |
60 | #generate spike trains. Select between deterministic and stochastic
61 | # train = np.array(encode_deterministic(pot))
62 | train = np.array(encode_stochastic(img))
63 |
64 | #flag for lateral inhibition
65 | f_spike = 0
66 | active_pot = np.zeros((n,1))
67 | for t in time:
68 | for j, x in enumerate(layer2):
69 | active = []
70 |
71 | #update potential if not in refractory period
72 | if(x.t_restx.Prest):
75 | x.P -= x.D
76 | active_pot[j] = x.P
77 |
78 | # Lateral Inhibition
79 | if(f_spike==0):
80 | high_pot = max(active_pot)
81 | if(high_pot>Pth):
82 | f_spike = 1
83 | winner = np.argmax(active_pot)
84 | for s in range(n):
85 | if(s!=winner):
86 | layer2[s].P = layer2[s].Pmin
87 |
88 | #Check for spikes
89 | for j,x in enumerate(layer2):
90 | s = x.check()
91 | if(s==1):
92 | spike_count[j] += 1
93 | x.t_rest = t + x.t_ref
94 | print spike_count
95 |
--------------------------------------------------------------------------------
/temp-snn/snn/spike_train.py:
--------------------------------------------------------------------------------
1 | ######################################################## README #############################################################
2 |
3 | # This file generates rate based spike train from the potential map.
4 |
5 | ############################################################################################################################
6 |
7 |
8 | import numpy as np
9 | from numpy import interp
10 | from matplotlib import pyplot as plt
11 | import imageio
12 | import math
13 | from parameters import param as par
14 | from recep_field import rf
15 |
16 | def encode2(pixels):
17 |
18 | #initializing spike train
19 | train = []
20 |
21 | for l in range(pixels.shape[0]):
22 | for m in range(pixels.shape[1]):
23 |
24 | temp = np.zeros([(par.T+1),])
25 |
26 | #calculating firing rate proportional to the membrane potential
27 | freq = interp(pixels[l][m], [0, 255], [1,20])
28 | #print(pot[l][m], freq)
29 | # print freq
30 |
31 | assert freq > 0
32 |
33 | freq1 = math.ceil(600/freq)
34 |
35 | #generating spikes according to the firing rate
36 | k = freq1
37 | if(pixels[l][m]>0):
38 | while k<(par.T+1):
39 | temp[k] = 1
40 | k = k + freq1
41 | train.append(temp)
42 | # print sum(temp)
43 | return train
44 |
45 | def encode(pot):
46 |
47 | #initializing spike train
48 | train = []
49 |
50 | for l in range(pot.shape[0]):
51 | for m in range(pot.shape[1]):
52 |
53 | temp = np.zeros([(par.T+1),])
54 |
55 | #calculating firing rate proportional to the membrane potential
56 | freq = interp(pot[l][m], [-1.069,2.781], [1,20])
57 | #print(pot[l][m], freq)
58 | # print freq
59 |
60 | assert freq > 0
61 |
62 | freq1 = math.ceil(600/freq)
63 |
64 | #generating spikes according to the firing rate
65 | k = freq1
66 | if(pot[l][m]>0):
67 | while k<(par.T+1):
68 | temp[int(k)] = 1
69 | k = k + freq1
70 | train.append(temp)
71 | # print sum(temp)
72 | return train
73 |
74 | if __name__ == '__main__':
75 | # m = []
76 | # n = []
77 | img = imageio.imread("/Users/johnsoni/Downloads/mnist_png/training/5/0.png")
78 | #img = imageio.imread("data/training/0.png")
79 |
80 | pot = rf(img)
81 |
82 | # for i in pot:
83 | # m.append(max(i))
84 | # n.append(min(i))
85 |
86 | # print max(m), min(n)
87 | #train = encode2(img)
88 | train = encode(pot)
89 | f = open('train6.txt', 'w')
90 | print(np.shape(train))
91 |
92 | for j in range(len(train)):
93 | for i in range(len(train[j])):
94 | f.write(str(int(train[j][i])))
95 | f.write('\n')
96 |
97 | f.close()
98 |
--------------------------------------------------------------------------------
/training/learning.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | ####################################################### README ####################################################################
4 |
5 | # This is the main file which calls all the functions and trains the network by updating weights
6 |
7 |
8 | #####################################################################################################################################
9 |
10 |
11 | import numpy as np
12 | from neuron import neuron
13 | import random
14 | from matplotlib import pyplot as plt
15 | from recep_field import rf
16 | import cv2
17 | from spike_train import encode
18 | from rl import rl
19 | from rl import update
20 | from reconstruct import reconst_weights
21 | from parameters import param as par
22 | from var_th import threshold
23 | import os
24 |
#potentials of output neurons
# pot_arrays[j] records neuron j's membrane potential at every time step
# across all presented images (used for plotting after training).
pot_arrays = []
for i in range(par.n):
    pot_arrays.append([])

#time series
time = np.arange(1, par.T+1, 1)

layer2 = []

# creating the hidden layer of neurons
for i in range(par.n):
    a = neuron()
    layer2.append(a)

#synapse matrix initialization
synapse = np.zeros((par.n,par.m))

# start training from small random positive weights in [0, 0.4*scale]
for i in range(par.n):
    for j in range(par.m):
        synapse[i][j] = random.uniform(0,0.4*par.scale)
46 |
47 |
48 | for k in range(par.epoch):
49 | for i in range(322,323):
50 | print i," ",k
51 | img = cv2.imread("mnist1/" + str(i) + ".png", 0)
52 |
53 | #Convolving image with receptive field
54 | pot = rf(img)
55 |
56 | #Generating spike train
57 | train = np.array(encode(pot))
58 |
59 | #calculating threshold value for the image
60 | var_threshold = threshold(train)
61 |
62 | # print var_threshold
63 | # synapse_act = np.zeros((par.n,par.m))
64 | # var_threshold = 9
65 | # print var_threshold
66 | # var_D = (var_threshold*3)*0.07
67 |
68 | var_D = 0.15*par.scale
69 |
70 | for x in layer2:
71 | x.initial(var_threshold)
72 |
73 | #flag for lateral inhibition
74 | f_spike = 0
75 |
76 | img_win = 100
77 |
78 | active_pot = []
79 | for index1 in range(par.n):
80 | active_pot.append(0)
81 |
82 | #Leaky integrate and fire neuron dynamics
83 | for t in time:
84 | for j, x in enumerate(layer2):
85 | active = []
86 | if(x.t_restpar.Prest):
89 | x.P -= var_D
90 | active_pot[j] = x.P
91 |
92 | pot_arrays[j].append(x.P)
93 |
94 | # Lateral Inhibition
95 | if(f_spike==0):
96 | high_pot = max(active_pot)
97 | if(high_pot>var_threshold):
98 | f_spike = 1
99 | winner = np.argmax(active_pot)
100 | img_win = winner
101 | print "winner is " + str(winner)
102 | for s in range(par.n):
103 | if(s!=winner):
104 | layer2[s].P = par.Pmin
105 |
106 | #Check for spikes and update weights
107 | for j,x in enumerate(layer2):
108 | s = x.check()
109 | if(s==1):
110 | x.t_rest = t + x.t_ref
111 | x.P = par.Prest
112 | for h in range(par.m):
113 | for t1 in range(-2,par.t_back-1, -1):
114 | if 0<=t+t1par.Prest):
92 | x.P -= var_D
93 | active_pot[j] = x.P
94 |
95 | pot_arrays[j].append(x.P)
96 |
97 | # Lateral Inhibition
98 | if(f_spike==0):
99 | high_pot = max(active_pot)
100 | if(high_pot>par.Pth):
101 | f_spike = 1
102 | winner = np.argmax(active_pot)
103 | img_win = winner
104 | print("winner is " + str(winner))
105 | for s in range(par.n):
106 | if(s!=winner):
107 | layer2[s].P = -500
108 |
109 | #Check for spikes and update weights
110 | for j,x in enumerate(layer2):
111 | s = x.check()
112 | if(s==1):
113 | spike_probe[j].append((len(pot_arrays[j]), 1))
114 | x.t_rest = t + x.t_ref
115 | x.P = par.Prest
116 | for h in range(par.m):
117 | for t1 in range(-2,par.t_back-1, -1):
118 | if 0<=t+t1par.Prest):
95 | x.P -= var_D
96 | active_pot[j] = x.P
97 |
98 | pot_arrays[j].append(x.P)
99 |
100 | # Lateral Inhibition
101 | if(f_spike==0):
102 | high_pot = max(active_pot)
103 | if(high_pot>par.Pth):
104 | f_spike = 1
105 | winner = np.argmax(active_pot)
106 | img_win = winner
107 | print("winner is " + str(winner))
108 | for s in range(par.n):
109 | if(s!=winner):
110 | layer2[s].P = -500
111 |
112 | #Check for spikes and update weights
113 | for j,x in enumerate(layer2):
114 | s = x.check()
115 | if(s==1):
116 | spike_probe[j].append((len(pot_arrays[j]), 1))
117 | x.t_rest = t + x.t_ref
118 | x.P = par.Prest
119 | for h in range(par.m):
120 | for t1 in range(-2,par.t_back-1, -1):
121 | if 0<=t+t1
7 |
8 |
9 |
10 | ## Network Elements
11 | * [Neuron](neuron/)
12 | * [Synapse](synapse/)
13 | * [Receptive field](receptive_field/)
14 | * [Spike train](encoding/)
15 |
16 |
17 | ## [SNN Simulator for Classification](classification/)
18 | Assuming that we have learned the optimal weights of the network using the STDP algorithm (will be implemented next), this uses the weights to classify the input patterns into different classes. The simulator uses the 'winner-takes-all' strategy to suppress the non-firing neurons and produce distinguishable results. Steps involved while classifying the patterns are:
19 |
20 | - For each input neuron membrane potential is calculated in its [receptive field](receptive_field/) (5x5 window).
21 | - [Spike train](encoding/) is generated for each input neuron with spike frequency proportional to the membrane potential.
22 | - For each image, at each time step, the potential of the neuron is updated according to the input spikes and the associated weights.
23 | - First firing output neuron performs lateral inhibition on the rest of the output neurons.
24 | - Simulator checks for output spike.
25 |
26 | ### Results
27 | The simulator was tested upon binary classification. It can be extended upto any number of classes. The images for two classes are:
28 |
29 |
30 |
31 | Each of the classes were presented to the network for 1000 time units each. The activity of the neurons was recorded. Here are the graphs of the potential of output neurons versus time unit.
32 |
33 | First 1000 TU corresponds to class1, next 1000 to class2. Red line indicates the threshold potential.
34 |
35 |
36 |
37 | The 1st output neuron is active for class1, 2nd is active for class2, and 3rd and 4th are mute for both the classes. Hence, by recording the total spikes in output neurons, we can determine the class to which the pattern belongs.
38 |
39 |
40 | ## [Training an SNN](training)
41 | In the previous section we assumed that our network is trained i.e weights are learned using STDP and can be used to classify patterns. Here we'll see how STDP works and what all need to be taken care of while implementing this training algorithm.
42 |
43 | ### Spike Time Dependent Plasticity
44 | STDP is actually a biological process used by the brain to modify its neural connections (synapses). Since the unmatched learning efficiency of the brain has been appreciated for decades, this rule was incorporated in ANNs to train a neural network. Moulding of weights is based on the following two rules -
45 | - Any synapse that contributes to the firing of a post-synaptic neuron should be made strong i.e. its value should be increased.
46 | - Synapses that don't contribute to the firing of a post-synaptic neuron should be diminished i.e. their value should be decreased.
47 |
48 | Here is an explanation of how this algorithm works:
49 |
50 | Consider the scenario depicted in this figure
51 |
52 |
53 |
54 |
55 |
56 | Four neurons connect to a single neuron by synapse. Each pre synaptic neuron is firing at its own rate and the spikes are sent forward by the corresponding synapse. The intensity of spike translated to post synaptic neuron depends upon the strength of the connecting synapse. Now, because of the input spikes membrane potential of post synaptic neuron increases and sends out a spike after crossing the threshold. At the time when post synaptic neuron spikes, we'll monitor which all pre synaptic neurons helped it to fire. This could be done by observing which pre synaptic neurons sent out spikes before post synaptic neuron spiked. This way they helped in post synaptic spike by increasing the membrane potential and hence the corresponding synapse is strengthened. The factor by which the weight of synapse is increased is inversely proportional to the time difference between post synaptic and pre synaptic spikes given by this graph
57 |
58 |
59 |
60 |
61 |
62 | ### Generative Property of SNN
63 | This property of Spiking Neural Network is very useful in analysing training process. All the synapses connected to an output layer neuron, if scaled to proper values and rearranged in form of an image, depicts what pattern that neuron has learned and how distinctly it can classify that pattern. For an example, after training a network with MNIST dataset if we scale the weights of all the synapses connected to a particular output neuron (784 in number) and form a 28x28 image with those scaled up weights we will get a grayscale pattern learned by that neuron. This property will be used later while demonstrating the results. [This](training/reconstruct.py) file contains the function that reconstructs image from weights.
64 |
65 | ### Variable Threshold
66 | In unsupervised learning it is very difficult to train a network where patterns have varied amount of activations (white pixels in case of MNIST). Patterns with higher activations tend to win in competitive learning and hence overshadow others (this problem will be demonstrated later). Therefore this method of normalization was introduced to bring them all down to same level. Threshold for each pattern is calculated based on the number of activation it contains. Higher the number of activations, higher is the threshold value. [This](training/var_th.py) file holds function to calculate threshold for each image.
67 |
68 | ### Lateral Inhibition
69 | In neurobiology, lateral inhibition is the capacity of an excited neuron to reduce the activity of its neighbors. Lateral inhibition disables the spreading of action potentials from excited neurons to neighboring neurons in the lateral direction. This creates a contrast in stimulation that allows increased sensory perception. This property is also called Winner-Takes-All (WTA). The neuron that gets excited first inhibits (lowers down the membrane potential of) other neurons in the same layer.
70 |
71 |
72 | ## Training for 3 class dataset
73 | Here are the results after training an SNN using MNIST dataset with 3 classes (0-2) with 5 output neurons. We will leverage the generative property of SNN and reconstruct the images using trained weights connected to each output neuron to see how well the network has learned each pattern. Also, we see the membrane potential versus time plots for each output neuron to see how the training process was executed and made that neuron sensitive to a particular pattern only.
74 |
75 | **Neuron1**
76 |
77 |
78 |
79 | **Neuron2**
80 |
81 |
82 |
83 |
84 |
85 | **Neuron3**
86 |
87 |
88 |
89 |
90 |
91 | **Neuron4**
92 |
93 |
94 |
95 |
96 |
97 | Here we can clearly observe that Neuron 1 has learned pattern '1', Neuron 2 has learned '0', Neuron 3 is noise and Neuron 4 has learned '2'. Consider the plot of Neuron 1. In the beginning when the weights were randomly assigned it was firing for all the patterns. As the training proceeded, it became specific to pattern '1' only and was in inhibitory state for the rest. On observing Neuron 3 we can conclude that it reacts to all the patterns and can be considered as noise. Hence, it is advisable to have 20% more output neurons than number of classes.
98 |
99 | There is a slight overlapping of '2' and '0' which is a common problem in competitive learning. This can be eliminated by proper fine-tuning of parameters.
100 |
101 | ### Improper training
102 | If we don't use variable threshold for normalization, we will observe some patterns over shadowing others. Here is an example:
103 |
104 |
105 |
106 |
107 |
108 | Here same threshold voltage was used for both the patterns and hence resulted in overlapping. This could be avoided by either choosing a dataset where each image has more or less same number of activations or normalizing the number of activations.
109 |
110 | ## Parameters
111 | Building a Spiking Neural Network from scratch is not an easy job. There are several parameters that need to be tuned and taken care of. Combinations of so many parameters make it worse. Some of the major parameters that play an important role in the dynamics of network are -
112 | - Learning Rate
113 | - Threshold Potential
114 | - Weight Initialization
115 | - Number of Spikes Per Sample
116 | - Range of Weights
117 |
118 | I have demonstrated how some of these parameters affect the network and how they should be handled [here](https://github.com/Shikhargupta/snn-brian-mlp/tree/master/simple_demo) under the heading Parameter Analysis.
119 |
120 |
121 | ## Contributions
122 | I was helped on this project by my colleague at Indian Institute of Technology, Guwahati - Arpan Vyas. He further went on to design an architecture of hardware accelerator for this Simplified SNN and deploy it on FPGA, hence reducing the training time considerably. [Here](https://github.com/arpanvyas) is his Github profile.
123 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Spiking-Neural-Network
2 | This is the python implementation of hardware efficient spiking neural network. It includes the modified learning and prediction rules which could be realised on hardware and are energy efficient. Aim is to develop a network which could be used for on-chip learning as well as prediction.
3 |
4 | Spike-Time Dependent Plasticity (STDP) algorithm will be used to train the network.
5 |
6 |
7 |
8 |
9 |
10 | ## Network Elements
11 | * [Neuron](neuron/)
12 | * [Synapse](synapse/)
13 | * [Receptive field](receptive_field/)
14 | * [Spike train](encoding/)
15 |
16 |
17 | ## [SNN Simulator for Classification](classification/)
18 | Assuming that we have learned the optimal weights of the network using the STDP algorithm (will be implemented next), this uses the weights to classify the input patterns into different classes. The simulator uses the 'winner-takes-all' strategy to suppress the non-firing neurons and produce distinguishable results. Steps involved while classifying the patterns are:
19 |
20 | - For each input neuron membrane potential is calculated in its [receptive field](receptive_field/) (5x5 window).
21 | - [Spike train](encoding/) is generated for each input neuron with spike frequency proportional to the membrane potential.
22 | - For each image, at each time step, the potential of the neuron is updated according to the input spikes and the associated weights.
23 | - First firing output neuron performs lateral inhibition on the rest of the output neurons.
24 | - Simulator checks for output spike.
25 |
26 | ### Results
27 | The simulator was tested upon binary classification. It can be extended upto any number of classes. The images for two classes are:
28 |
29 |
30 |
31 | Each of the classes were presented to the network for 1000 time units each. The activity of the neurons was recorded. Here are the graphs of the potential of output neurons versus time unit.
32 |
33 | First 1000 TU corresponds to class1, next 1000 to class2. Red line indicates the threshold potential.
34 |
35 |
36 |
37 | The 1st output neuron is active for class1, 2nd is active for class2, and 3rd and 4th are mute for both the classes. Hence, by recording the total spikes in output neurons, we can determine the class to which the pattern belongs.
38 |
39 | Further, to demonstrate the results for multi-class classification, the simulator was tested upon the following 6 images (MNIST dataset).
40 |
41 |
42 |
43 | Each image represents a class and to each class a neuron is delegated. 2 neurons are assigned random weights. Here are the responses of each neuron to all the classes presented. X axis is the class number and Y axis is the number of spikes during each simulation. Red bar represents the class for which it spiked the most.
44 |
45 |
46 |
47 | ## [Training an SNN](training)
48 | In the previous section we assumed that our network is trained i.e weights are learned using STDP and can be used to classify patterns. Here we'll see how STDP works and what all need to be taken care of while implementing this training algorithm.
49 |
50 | ### Spike Time Dependent Plasticity
51 | STDP is actually a biological process used by the brain to modify its neural connections (synapses). Since the unmatched learning efficiency of the brain has been appreciated for decades, this rule was incorporated in ANNs to train a neural network. Moulding of weights is based on the following two rules -
52 | - Any synapse that contributes to the firing of a post-synaptic neuron should be made strong i.e. its value should be increased.
53 | - Synapses that don't contribute to the firing of a post-synaptic neuron should be diminished i.e. their value should be decreased.
54 |
55 | Here is an explanation of how this algorithm works:
56 |
57 | Consider the scenario depicted in this figure
58 |
59 |
60 |
61 |
62 |
63 | Four neurons connect to a single neuron by synapse. Each pre synaptic neuron is firing at its own rate and the spikes are sent forward by the corresponding synapse. The intensity of spike translated to post synaptic neuron depends upon the strength of the connecting synapse. Now, because of the input spikes membrane potential of post synaptic neuron increases and sends out a spike after crossing the threshold. At the time when post synaptic neuron spikes, we'll monitor which all pre synaptic neurons helped it to fire. This could be done by observing which pre synaptic neurons sent out spikes before post synaptic neuron spiked. This way they helped in post synaptic spike by increasing the membrane potential and hence the corresponding synapse is strengthened. The factor by which the weight of synapse is increased is inversely proportional to the time difference between post synaptic and pre synaptic spikes given by this graph
64 |
65 |
66 |
67 |
68 |
69 | ### Generative Property of SNN
70 | This property of Spiking Neural Network is very useful in analysing training process. All the synapses connected to an output layer neuron, if scaled to proper values and rearranged in form of an image, depicts what pattern that neuron has learned and how distinctly it can classify that pattern. For an example, after training a network with MNIST dataset if we scale the weights of all the synapses connected to a particular output neuron (784 in number) and form a 28x28 image with those scaled up weights we will get a grayscale pattern learned by that neuron. This property will be used later while demonstrating the results. [This](training/reconstruct.py) file contains the function that reconstructs image from weights.
71 |
72 | ### Variable Threshold
73 | In unsupervised learning it is very difficult to train a network where patterns have varied amount of activations (white pixels in case of MNIST). Patterns with higher activations tend to win in competitive learning and hence overshadow others (this problem will be demonstrated later). Therefore this method of normalization was introduced to bring them all down to same level. Threshold for each pattern is calculated based on the number of activation it contains. Higher the number of activations, higher is the threshold value. [This](training/var_th.py) file holds function to calculate threshold for each image.
74 |
75 | ### Lateral Inhibition
76 | In neurobiology, lateral inhibition is the capacity of an excited neuron to reduce the activity of its neighbors. Lateral inhibition disables the spreading of action potentials from excited neurons to neighboring neurons in the lateral direction. This creates a contrast in stimulation that allows increased sensory perception. This property is also called Winner-Takes-All (WTA). The neuron that gets excited first inhibits (lowers down the membrane potential of) other neurons in the same layer.
77 |
78 |
79 | ## Training for 3 class dataset
80 | Here are the results after training an SNN using MNIST dataset with 3 classes (0-2) with 5 output neurons. We will leverage the generative property of SNN and reconstruct the images using trained weights connected to each output neuron to see how well the network has learned each pattern. Also, we see the membrane potential versus time plots for each output neuron to see how the training process was executed and made that neuron sensitive to a particular pattern only.
81 |
82 | **Neuron1**
83 |
84 |
85 |
86 | **Neuron2**
87 |
88 |
89 |
90 |
91 |
92 | **Neuron3**
93 |
94 |
95 |
96 |
97 |
98 | **Neuron4**
99 |
100 |
101 |
102 |
103 |
104 | Here we can clearly observe that Neuron 1 has learned pattern '1', Neuron 2 has learned '0', Neuron 3 is noise and Neuron 4 has learned '2'. Consider the plot of Neuron 1. In the beginning when the weights were randomly assigned it was firing for all the patterns. As the training proceeded, it became specific to pattern '1' only and was in inhibitory state for the rest. On observing Neuron 3 we can conclude that it reacts to all the patterns and can be considered as noise. Hence, it is advisable to have 20% more output neurons than number of classes.
105 |
106 | There is a slight overlapping of '2' and '0' which is a common problem in competitive learning. This can be eliminated by proper fine-tuning of parameters.
107 |
108 | ### Improper training
109 | If we don't use variable threshold for normalization, we will observe some patterns over shadowing others. Here is an example:
110 |
111 |
112 |
113 |
114 |
115 | Here same threshold voltage was used for both the patterns and hence resulted in overlapping. This could be avoided by either choosing a dataset where each image has more or less same number of activations or normalizing the number of activations.
116 |
117 | ## Parameters
118 | Building a Spiking Neural Network from scratch is not an easy job. There are several parameters that need to be tuned and taken care of. Combinations of so many parameters make it worse. Some of the major parameters that play an important role in the dynamics of network are -
119 | - Learning Rate
120 | - Threshold Potential
121 | - Weight Initialization
122 | - Number of Spikes Per Sample
123 | - Range of Weights
124 |
125 | I have demonstrated how some of these parameters affect the network and how they should be handled [here](https://github.com/Shikhargupta/snn-brian-mlp/tree/master/simple_demo) under the heading Parameter Analysis.
126 |
127 |
128 | ## Contributions
129 | I was helped on this project by my colleague at Indian Institute of Technology, Guwahati - Arpan Vyas. He further went on to design an architecture of hardware accelerator for this Simplified SNN and deploy it on FPGA, hence reducing the training time considerably. [Here](https://github.com/arpanvyas) is his Github profile.
130 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/temp-snn/weights.txt:
--------------------------------------------------------------------------------
1 | 0.4268826236331701 0.6772572614212359 0.545844521146493 -1.2 -1.1671799766264856 -1.2 -1.2 -1.2 -1.125163082547708 -1.2 -1.2 -1.2 -1.2 0.8294010300653403 0.3943586481052275 0.4190363039917107 0.5030519098260273 0.6777803746223289 0.1292108011602396 0.7437308118831837 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.3499256141022244 0.5992417969515015 0.7803986759624105 0.7249679035831045 0.7283237362209319 0.65594859420894 0.880583207234925 0.4263747127795585 0.30137758638573586 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.39093396846236733 0.09756418209551329 0.47359335826676835 1.000349158412721 0.8259173801311063 -1.2 0.8589074237632343 0.16531205526263038 0.8048363793829234 0.5382987380868982 0.4757407730903223 -1.2 -1.2 -1.2 -1.2 0.4371470149800145 0.3588576787664037 0.2460306082552569 0.58788973689565 0.4507738469765536 -1.2 -1.2 -1.2 0.5485548178726358 0.6589463724715869 0.8437451235161415 0.4443807005397005 0.3241474756351216 -1.2 -1.2 0.8073660827906628 1.0048888238426756 0.8407886999782473 0.8526355949286399 0.9328846785686384 -1.2 -1.2 -1.2 -1.2 -1.2 0.3502236385198686 0.6799997722983764 0.6220144246744517 0.780568276680569 0.6962589167618025 0.1270994404266214 0.7161146517834182 0.6043831465254071 0.4583939845092264 0.1905825871202797 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.45113797986911625 0.6867954401898875 -1.2 0.7316710648635361 0.2502811867812946 -1.2 0.8037254445318606 0.9597969814390169 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.7753382303991471 0.9908100458650553 0.5527436314908564 0.7282886888785477 0.19569192260445092 0.4521257213818573 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.13526109330854805 0.5004273458353664 0.8932440318846092 0.9621700297219203 0.7112555713252801 0.5696728668597013 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.4279251619191515 0.787317772584109 -1.2 0.467236351630412 0.6547091442883289 -1.2 0.35655725640221925 0.20519326011269146 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9295701235005198 0.34547567985467836 0.8707009615224371 
0.308721700118802 0.19775084382678132 0.8797756258234073 0.03158317082267492 0.8081113399533022 0.4975736558431992 0.34448297718290943 -1.2 -1.2 -1.2 -1.2 -1.2 0.4309107051406928 0.8174562491018198 0.5866922666333919 0.3028027638138313 0.5462627468398606 -1.2 -1.2 0.043859748472128035 0.002933603982037347 0.4379624287539585 0.25413282974871976 0.6562586729571624 -1.2 -1.2 -1.2 0.8058649259756647 0.978815959312547 0.9627461812399581 0.4772356631413976 0.8593589913336976 -1.2 -1.2 -1.2 -1.2 0.6093478941824269 0.8378433187378505 0.3951945317717641 0.5930288993280501 0.26932278431513756 -1.2 0.5045595427414016 0.7163976288064228 0.3870297767983005 0.5596518215354046 0.5694452710252255 -1.2 -1.2 -1.1966673322580832 -1.2 -1.2 -1.2 0.6322114405612095 0.3453196075824272 0.19987368612688744 0.8549590019930438 0.9749670337686815 0.8004609771256138 0.4248310256249124 0.8396352862706746 0.8360702751178001 -1.1152497510764396 -1.2 -1.2 -1.2 -1.2 -1.2 -1.1422259421617387 -1.1751391175425465 0.7090915855809514 0.7093184466600787 0.7322370116530971 0.7737521193603116 0.4439729893512514 0.5322410048417153 0.4700524778053249 -1.1064362611951324 -1.2 -1.1815138344652762 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.27153838427966603 0.7040124312896578 0.36081215756104984
2 | -1.2 -1.2 -1.2 -1.2 0.7711324009577133 1.0937097071388817 1.0843281231287247 1.1096163714577567 0.958772239380259 0.5756673997863522 0.8490002291000622 0.6788064267821676 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.623014892516353 0.31479534495344985 0.9684192159449128 0.8948971016343928 0.46103882269690705 0.9491481316911172 0.9111530674893196 1.0144280844755587 0.647827323180217 0.9098604686588723 -1.2 -1.2 -1.2 -1.2 -1.2 0.2804210680240453 0.8757033547153947 0.9408540331867926 0.45121146057512324 -1.2 -1.2 -1.2 -1.2 0.42359813819651265 0.4261822278939147 0.4967636624032036 0.3183076812460812 -1.2 -1.2 -1.2 0.35540022566624596 0.5709703144997559 0.5860538612879037 1.0048473978876087 0.3655949569187045 -1.2 -1.2 -1.2 -1.2 0.8854113447412016 0.6125972134262534 0.601611930022877 0.935588706795641 0.37133866511062397 -1.2 0.8822031747658968 1.0053912366233746 0.9567356670084634 0.9644014055976513 0.0500388071256567 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9088402141274893 0.4655499612506108 0.5213436200120808 0.6122804139710204 0.621797502072878 0.9680331360045303 0.24717046994944064 1.0791841086243366 0.12681076447824566 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9281527379099377 0.5461765349697674 0.7746606851495714 0.5561979651512341 0.5771708156287161 0.9259472944883953 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.36436561229207365 0.5667398894369006 1.1823467144639894 0.8161694092669056 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0635228536835888 1.0027594145437628 0.8164713081241952 1.0180991616390247 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.6953485809666063 1.1676793891326254 0.855901337944645 0.934130150798771 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.000407190557355 0.9355932149574941 0.8225217175587972 0.4044776966194008 0.4931912308422174 0.13096966121037332 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.15268907864988812 0.8699556887999946 0.6668616661271902 0.9265332472759629 0.08517028078350042 
0.9722045341677632 0.9217035275112243 0.6213614408223606 0.20759728601922725 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.3288812397647357 0.8127646883888199 0.4843717842641796 0.5327194866574521 0.8272199460058983 -1.2 0.8434370065078339 0.3546600317275591 0.4633191054080435 1.007408393430045 0.4625844721698522 -1.2 -1.2 -1.2 -1.2 0.1468115277673942 0.8706546661718038 0.6397924962241147 0.9505600097499115 0.8522578475062019 -1.2 -1.2 -1.2 0.3174302684820056 0.862016537194896 0.8082005331606911 0.9830517210587556 -1.2 -1.2 -1.2 -1.2 0.8686188557780962 0.9658490091338701 0.38624716976224566 0.20231289244327103 -1.2 -1.2 -1.2 -1.2 -1.2 0.17616646712096198 0.6708748242278203 1.0297976635437995 0.9522121370149406 1.035317834463331 0.7978826649212105 0.6203686933284611 0.9852084738621237 0.9319019853975912 0.4131592804944111 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9586724896241113 0.9889889973017985 0.5826427909073003 1.005448983575785 0.7816619316896082 0.9798762933440557 0.3303680923606364 0.5583705357514073 -1.2 -1.2 -1.2 -1.2
3 | 0.9782660590486534 1.138200118982542 0.5906676069191522 0.5636800793229028 0.7286641968098942 0.5955163341813613 1.0913435568208218 0.8092715062613165 0.6173719346547901 0.9586194888661119 0.7155712577684015 0.6929813175380047 1.1826191759270186 1.138907770809371 1.3103425222701057 0.8153436927559722 0.9927681186475041 1.1924783884574515 0.9835175344625459 1.0514603185023472 1.1851263814348816 1.1128718846760446 0.9336647813871971 0.9964244078556841 1.1907071629137977 0.986180042230582 1.2854259538567234 0.9500949831353936 1.214930336166053 1.1015353133246202 1.0651366650303085 1.145902868362426 1.2080846833250867 1.312067770389559 0.9895342115853814 0.7838882771974905 1.1324798472995758 1.0726808468250173 1.0337743967939514 1.2072430358650619 1.201832843225723 1.00947024007639 1.0723720046427245 1.1116539996395536 1.2335935188520815 0.8801915866160144 0.7771124983044756 1.311702752242908 1.0086488248449021 1.1792765780334737 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9357898524235073 1.1456116552043638 1.2320449798124786 1.0082419087273515 1.074598737604988 1.0094571808906474 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.6869705041330026 1.174414892021021 0.9530028620386372 1.1853406606151295 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.161861827151935 1.0820505610373639 1.0636221044912184 1.1116868136878475 0.8325728570869982 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9242511574937416 0.40021102764562255 1.1871414917803043 1.3540469665284443 1.093274319688809 0.8916656336451917 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0156151307278056 1.1373595841879447 1.211301108423455 1.0424185029170503 1.3562125987131013 1.066630815962901 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9238903632672982 1.3530476478205702 1.2467727739155825 0.8796579335071146 1.1229503256501248 1.0288898542466924 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9943397102039466 1.157478379899332 1.2914099535896693 1.0523020208776694 0.9805133921794035 0.5427031972093291 
-1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.8731603404724251 1.3079811971431616 1.0403166984793748 1.3061995534220738 1.0796427698035156 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.8868642539851412 0.8600671769969637 1.1184916851617437 0.6065331297784442 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.013506081684933 1.1191155811140951 1.1445538527829697 1.3000900309576884 0.9917468165351483 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.8407887935171692 0.837499368871577 1.0835162835343186 1.21312819798668 1.2152650712132405 0.9804806253315467 1.2003650869764493 1.0110917170772782 1.0994308046793366 1.0004352541328403 0.6138504450365353 0.8042124371320347 0.6387666838556888 0.7334694450884504 0.9333819169984622 0.8696988725705512 1.271431860947665 0.8733278031539355 1.2403283920257162 1.0429262821270397 1.1197053993698627 1.039280683846366 1.2834423913213797 0.9724947928194414 1.2370915224036656 0.9717263253038677 1.09511620914064 1.1566737248691137 1.1935492916619574 1.2988808782880013 0.969883285180356 1.038347862338057 1.2265497374314436 1.1638827202360729 0.9494636938859022 1.1144566453201437 1.0013592189670724 0.655836029012163 0.6865553949845061 0.8140764446610713 1.126493726697319 0.7207746825380434 0.7243961084498384 1.1019244995991955 1.2083393486255267 1.0330684785065793 0.5709390434692276 0.9202513652971024 1.2106238805415028 0.6967116004663836
4 | 0.5203354503837498 0.5564331290827675 0.8553609991866812 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.7963510676698129 0.9337977832928134 0.6021265935538921 0.9829613151928791 0.8547996079923603 0.7063927972427266 1.1075206397887551 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.73237807228972 0.6104426226251453 0.8885912774301503 1.0626141797362922 0.865776375910179 0.7269518362273435 0.565067628340875 0.323964252020452 0.8323141727235788 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.5251929153011414 0.6871462085039233 0.7120273248617113 0.7920026213333761 0.7955763710674121 -1.2 0.4634264023749442 0.6211137363598097 0.9004151534506402 0.23227073691143718 0.9807498334675631 -1.2 -1.2 -1.2 -1.2 0.3745297937423986 0.960433194087582 0.6569399092637759 0.3308294342820544 0.8724573519837632 -1.2 -1.2 -1.2 0.3718858649818258 0.41322405647351107 0.3585962706270927 0.4902132994034316 0.5714052692878189 -1.2 -1.2 0.8686859071833747 0.6111792787885333 1.1281377072511949 0.6338540434640433 0.5280506894160355 -1.2 -1.2 -1.2 -1.2 -1.2 0.3308623312615886 0.5528294398759485 0.6979145708823373 0.516599753331464 0.8867774793419795 0.9740704051267468 1.016516013868188 0.9231497636917265 0.6779664453905696 0.588807515760539 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.21313051560233623 0.7052827012897024 -1.2 0.9067153673154744 1.0380665421740034 -1.2 1.2120452866269666 0.28124142877265035 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0259148996501317 0.960121506345809 1.0548893956839982 0.524477059935419 0.5818166021345427 0.714574304412897 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0527937959753317 0.6830950223539427 0.6862097324635527 0.9295527239808986 0.539468630640333 0.5453579557869453 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9171301458696142 0.6046531376366889 -1.2 0.5089343495423644 1.033654129568713 -1.2 0.5628970316210582 0.47189774329545087 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.1020060639354048 0.9908860063931095 1.027377245592994 0.6581487074719694 0.3529008186396173 
0.3423427853088518 0.36750542256557944 1.0482673899564525 0.9611004700397884 0.6467161894558646 -1.2 -1.2 -1.2 -1.2 -1.2 0.6161985189723909 0.84820073004758 0.4150338266974231 0.42139700317406636 0.45776341772653484 -1.2 -1.2 0.451035447276669 0.8328102745168514 0.5474022930106535 0.42710722087504993 0.5950233628264537 -1.2 -1.2 -1.2 0.6114784774972298 0.536778125300295 0.791280782348177 0.9235284461534414 0.7565509128988349 -1.2 -1.2 -1.2 -1.2 0.2597460699904908 0.4932436178554913 0.7874649790899929 0.9909514099378983 0.4226859570274106 -1.2 0.6059854144524255 0.9952827395103271 0.6202252349287716 0.4494880583893719 0.7540882284446951 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.6006252954534838 0.8925177545931595 0.4680313814294796 0.8881185002542548 0.6127668333122916 1.0776464574152587 0.6369854121376021 0.6834433989647034 0.7469337463557284 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9461372549251538 0.8208289904539121 0.755348379456946 0.5970076738804185 1.0025810075666628 0.5710439252037459 0.9186603320325837 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9918276245796538 0.6611271513687957 0.5777174421037748
5 |
--------------------------------------------------------------------------------
/temp-snn/snn/weights.txt:
--------------------------------------------------------------------------------
1 | 0.4268826236331701 0.6772572614212359 0.545844521146493 -1.2 -1.1671799766264856 -1.2 -1.2 -1.2 -1.125163082547708 -1.2 -1.2 -1.2 -1.2 0.8294010300653403 0.3943586481052275 0.4190363039917107 0.5030519098260273 0.6777803746223289 0.1292108011602396 0.7437308118831837 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.3499256141022244 0.5992417969515015 0.7803986759624105 0.7249679035831045 0.7283237362209319 0.65594859420894 0.880583207234925 0.4263747127795585 0.30137758638573586 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.39093396846236733 0.09756418209551329 0.47359335826676835 1.000349158412721 0.8259173801311063 -1.2 0.8589074237632343 0.16531205526263038 0.8048363793829234 0.5382987380868982 0.4757407730903223 -1.2 -1.2 -1.2 -1.2 0.4371470149800145 0.3588576787664037 0.2460306082552569 0.58788973689565 0.4507738469765536 -1.2 -1.2 -1.2 0.5485548178726358 0.6589463724715869 0.8437451235161415 0.4443807005397005 0.3241474756351216 -1.2 -1.2 0.8073660827906628 1.0048888238426756 0.8407886999782473 0.8526355949286399 0.9328846785686384 -1.2 -1.2 -1.2 -1.2 -1.2 0.3502236385198686 0.6799997722983764 0.6220144246744517 0.780568276680569 0.6962589167618025 0.1270994404266214 0.7161146517834182 0.6043831465254071 0.4583939845092264 0.1905825871202797 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.45113797986911625 0.6867954401898875 -1.2 0.7316710648635361 0.2502811867812946 -1.2 0.8037254445318606 0.9597969814390169 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.7753382303991471 0.9908100458650553 0.5527436314908564 0.7282886888785477 0.19569192260445092 0.4521257213818573 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.13526109330854805 0.5004273458353664 0.8932440318846092 0.9621700297219203 0.7112555713252801 0.5696728668597013 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.4279251619191515 0.787317772584109 -1.2 0.467236351630412 0.6547091442883289 -1.2 0.35655725640221925 0.20519326011269146 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9295701235005198 0.34547567985467836 0.8707009615224371 
0.308721700118802 0.19775084382678132 0.8797756258234073 0.03158317082267492 0.8081113399533022 0.4975736558431992 0.34448297718290943 -1.2 -1.2 -1.2 -1.2 -1.2 0.4309107051406928 0.8174562491018198 0.5866922666333919 0.3028027638138313 0.5462627468398606 -1.2 -1.2 0.043859748472128035 0.002933603982037347 0.4379624287539585 0.25413282974871976 0.6562586729571624 -1.2 -1.2 -1.2 0.8058649259756647 0.978815959312547 0.9627461812399581 0.4772356631413976 0.8593589913336976 -1.2 -1.2 -1.2 -1.2 0.6093478941824269 0.8378433187378505 0.3951945317717641 0.5930288993280501 0.26932278431513756 -1.2 0.5045595427414016 0.7163976288064228 0.3870297767983005 0.5596518215354046 0.5694452710252255 -1.2 -1.2 -1.1966673322580832 -1.2 -1.2 -1.2 0.6322114405612095 0.3453196075824272 0.19987368612688744 0.8549590019930438 0.9749670337686815 0.8004609771256138 0.4248310256249124 0.8396352862706746 0.8360702751178001 -1.1152497510764396 -1.2 -1.2 -1.2 -1.2 -1.2 -1.1422259421617387 -1.1751391175425465 0.7090915855809514 0.7093184466600787 0.7322370116530971 0.7737521193603116 0.4439729893512514 0.5322410048417153 0.4700524778053249 -1.1064362611951324 -1.2 -1.1815138344652762 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.27153838427966603 0.7040124312896578 0.36081215756104984
2 | -1.2 -1.2 -1.2 -1.2 0.7711324009577133 1.0937097071388817 1.0843281231287247 1.1096163714577567 0.958772239380259 0.5756673997863522 0.8490002291000622 0.6788064267821676 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.623014892516353 0.31479534495344985 0.9684192159449128 0.8948971016343928 0.46103882269690705 0.9491481316911172 0.9111530674893196 1.0144280844755587 0.647827323180217 0.9098604686588723 -1.2 -1.2 -1.2 -1.2 -1.2 0.2804210680240453 0.8757033547153947 0.9408540331867926 0.45121146057512324 -1.2 -1.2 -1.2 -1.2 0.42359813819651265 0.4261822278939147 0.4967636624032036 0.3183076812460812 -1.2 -1.2 -1.2 0.35540022566624596 0.5709703144997559 0.5860538612879037 1.0048473978876087 0.3655949569187045 -1.2 -1.2 -1.2 -1.2 0.8854113447412016 0.6125972134262534 0.601611930022877 0.935588706795641 0.37133866511062397 -1.2 0.8822031747658968 1.0053912366233746 0.9567356670084634 0.9644014055976513 0.0500388071256567 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9088402141274893 0.4655499612506108 0.5213436200120808 0.6122804139710204 0.621797502072878 0.9680331360045303 0.24717046994944064 1.0791841086243366 0.12681076447824566 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9281527379099377 0.5461765349697674 0.7746606851495714 0.5561979651512341 0.5771708156287161 0.9259472944883953 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.36436561229207365 0.5667398894369006 1.1823467144639894 0.8161694092669056 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0635228536835888 1.0027594145437628 0.8164713081241952 1.0180991616390247 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.6953485809666063 1.1676793891326254 0.855901337944645 0.934130150798771 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.000407190557355 0.9355932149574941 0.8225217175587972 0.4044776966194008 0.4931912308422174 0.13096966121037332 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.15268907864988812 0.8699556887999946 0.6668616661271902 0.9265332472759629 0.08517028078350042 
0.9722045341677632 0.9217035275112243 0.6213614408223606 0.20759728601922725 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.3288812397647357 0.8127646883888199 0.4843717842641796 0.5327194866574521 0.8272199460058983 -1.2 0.8434370065078339 0.3546600317275591 0.4633191054080435 1.007408393430045 0.4625844721698522 -1.2 -1.2 -1.2 -1.2 0.1468115277673942 0.8706546661718038 0.6397924962241147 0.9505600097499115 0.8522578475062019 -1.2 -1.2 -1.2 0.3174302684820056 0.862016537194896 0.8082005331606911 0.9830517210587556 -1.2 -1.2 -1.2 -1.2 0.8686188557780962 0.9658490091338701 0.38624716976224566 0.20231289244327103 -1.2 -1.2 -1.2 -1.2 -1.2 0.17616646712096198 0.6708748242278203 1.0297976635437995 0.9522121370149406 1.035317834463331 0.7978826649212105 0.6203686933284611 0.9852084738621237 0.9319019853975912 0.4131592804944111 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9586724896241113 0.9889889973017985 0.5826427909073003 1.005448983575785 0.7816619316896082 0.9798762933440557 0.3303680923606364 0.5583705357514073 -1.2 -1.2 -1.2 -1.2
3 | 0.9782660590486534 1.138200118982542 0.5906676069191522 0.5636800793229028 0.7286641968098942 0.5955163341813613 1.0913435568208218 0.8092715062613165 0.6173719346547901 0.9586194888661119 0.7155712577684015 0.6929813175380047 1.1826191759270186 1.138907770809371 1.3103425222701057 0.8153436927559722 0.9927681186475041 1.1924783884574515 0.9835175344625459 1.0514603185023472 1.1851263814348816 1.1128718846760446 0.9336647813871971 0.9964244078556841 1.1907071629137977 0.986180042230582 1.2854259538567234 0.9500949831353936 1.214930336166053 1.1015353133246202 1.0651366650303085 1.145902868362426 1.2080846833250867 1.312067770389559 0.9895342115853814 0.7838882771974905 1.1324798472995758 1.0726808468250173 1.0337743967939514 1.2072430358650619 1.201832843225723 1.00947024007639 1.0723720046427245 1.1116539996395536 1.2335935188520815 0.8801915866160144 0.7771124983044756 1.311702752242908 1.0086488248449021 1.1792765780334737 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9357898524235073 1.1456116552043638 1.2320449798124786 1.0082419087273515 1.074598737604988 1.0094571808906474 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.6869705041330026 1.174414892021021 0.9530028620386372 1.1853406606151295 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.161861827151935 1.0820505610373639 1.0636221044912184 1.1116868136878475 0.8325728570869982 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9242511574937416 0.40021102764562255 1.1871414917803043 1.3540469665284443 1.093274319688809 0.8916656336451917 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0156151307278056 1.1373595841879447 1.211301108423455 1.0424185029170503 1.3562125987131013 1.066630815962901 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9238903632672982 1.3530476478205702 1.2467727739155825 0.8796579335071146 1.1229503256501248 1.0288898542466924 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9943397102039466 1.157478379899332 1.2914099535896693 1.0523020208776694 0.9805133921794035 0.5427031972093291 
-1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.8731603404724251 1.3079811971431616 1.0403166984793748 1.3061995534220738 1.0796427698035156 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.8868642539851412 0.8600671769969637 1.1184916851617437 0.6065331297784442 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.013506081684933 1.1191155811140951 1.1445538527829697 1.3000900309576884 0.9917468165351483 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.8407887935171692 0.837499368871577 1.0835162835343186 1.21312819798668 1.2152650712132405 0.9804806253315467 1.2003650869764493 1.0110917170772782 1.0994308046793366 1.0004352541328403 0.6138504450365353 0.8042124371320347 0.6387666838556888 0.7334694450884504 0.9333819169984622 0.8696988725705512 1.271431860947665 0.8733278031539355 1.2403283920257162 1.0429262821270397 1.1197053993698627 1.039280683846366 1.2834423913213797 0.9724947928194414 1.2370915224036656 0.9717263253038677 1.09511620914064 1.1566737248691137 1.1935492916619574 1.2988808782880013 0.969883285180356 1.038347862338057 1.2265497374314436 1.1638827202360729 0.9494636938859022 1.1144566453201437 1.0013592189670724 0.655836029012163 0.6865553949845061 0.8140764446610713 1.126493726697319 0.7207746825380434 0.7243961084498384 1.1019244995991955 1.2083393486255267 1.0330684785065793 0.5709390434692276 0.9202513652971024 1.2106238805415028 0.6967116004663836
4 | 0.5203354503837498 0.5564331290827675 0.8553609991866812 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.7963510676698129 0.9337977832928134 0.6021265935538921 0.9829613151928791 0.8547996079923603 0.7063927972427266 1.1075206397887551 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.73237807228972 0.6104426226251453 0.8885912774301503 1.0626141797362922 0.865776375910179 0.7269518362273435 0.565067628340875 0.323964252020452 0.8323141727235788 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.5251929153011414 0.6871462085039233 0.7120273248617113 0.7920026213333761 0.7955763710674121 -1.2 0.4634264023749442 0.6211137363598097 0.9004151534506402 0.23227073691143718 0.9807498334675631 -1.2 -1.2 -1.2 -1.2 0.3745297937423986 0.960433194087582 0.6569399092637759 0.3308294342820544 0.8724573519837632 -1.2 -1.2 -1.2 0.3718858649818258 0.41322405647351107 0.3585962706270927 0.4902132994034316 0.5714052692878189 -1.2 -1.2 0.8686859071833747 0.6111792787885333 1.1281377072511949 0.6338540434640433 0.5280506894160355 -1.2 -1.2 -1.2 -1.2 -1.2 0.3308623312615886 0.5528294398759485 0.6979145708823373 0.516599753331464 0.8867774793419795 0.9740704051267468 1.016516013868188 0.9231497636917265 0.6779664453905696 0.588807515760539 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.21313051560233623 0.7052827012897024 -1.2 0.9067153673154744 1.0380665421740034 -1.2 1.2120452866269666 0.28124142877265035 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0259148996501317 0.960121506345809 1.0548893956839982 0.524477059935419 0.5818166021345427 0.714574304412897 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.0527937959753317 0.6830950223539427 0.6862097324635527 0.9295527239808986 0.539468630640333 0.5453579557869453 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9171301458696142 0.6046531376366889 -1.2 0.5089343495423644 1.033654129568713 -1.2 0.5628970316210582 0.47189774329545087 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 1.1020060639354048 0.9908860063931095 1.027377245592994 0.6581487074719694 0.3529008186396173 
0.3423427853088518 0.36750542256557944 1.0482673899564525 0.9611004700397884 0.6467161894558646 -1.2 -1.2 -1.2 -1.2 -1.2 0.6161985189723909 0.84820073004758 0.4150338266974231 0.42139700317406636 0.45776341772653484 -1.2 -1.2 0.451035447276669 0.8328102745168514 0.5474022930106535 0.42710722087504993 0.5950233628264537 -1.2 -1.2 -1.2 0.6114784774972298 0.536778125300295 0.791280782348177 0.9235284461534414 0.7565509128988349 -1.2 -1.2 -1.2 -1.2 0.2597460699904908 0.4932436178554913 0.7874649790899929 0.9909514099378983 0.4226859570274106 -1.2 0.6059854144524255 0.9952827395103271 0.6202252349287716 0.4494880583893719 0.7540882284446951 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.6006252954534838 0.8925177545931595 0.4680313814294796 0.8881185002542548 0.6127668333122916 1.0776464574152587 0.6369854121376021 0.6834433989647034 0.7469337463557284 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9461372549251538 0.8208289904539121 0.755348379456946 0.5970076738804185 1.0025810075666628 0.5710439252037459 0.9186603320325837 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 -1.2 0.9918276245796538 0.6611271513687957 0.5777174421037748
5 |
--------------------------------------------------------------------------------
/multi_layer/learning.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | ####################################################### README ####################################################################
4 |
5 | # This is the main entry point: it calls the supporting modules (receptive field, spike encoding, neuron dynamics, STDP rules) and trains the network by updating the synaptic weights.
6 |
7 |
8 | #####################################################################################################################################
9 |
10 |
11 | import numpy as np
12 | from neuron import neuron
13 | import random
14 | from matplotlib import pyplot as plt
15 | from recep_field import rf
16 | import cv2
17 | from spike_train import encode
18 | from rl import rl
19 | from rl import update
20 | from reconstruct import reconst_weights
21 | from parameters import param as par
22 | from var_th import threshold
23 | import os
24 | import pickle
25 | import sys
26 |
27 | #@profile
28 | def learning(learning_or_classify):
29 |
30 | #1 = learning, 0 = classify
31 | #learning_or_classify = 0
32 | print learning_or_classify
33 | if(learning_or_classify == 0):
34 | print "Starting classify..."
35 | elif(learning_or_classify == 1):
36 | print "Starting learning..."
37 | else:
38 | print "Error in argument, quitting"
39 | quit()
40 |
41 | if(learning_or_classify == 0):
42 | par.epoch = 1
43 |
44 | #potentials of output neurons
45 | pot_arrays = []
46 | pot_arrays.append([]) #because 0th layer do not require neuron model
47 | for i in range(1,par.num_layers):
48 | pot_arrays_this = []
49 | for j in range(0,par.num_layer_neurons[i]):
50 | pot_arrays_this.append([])
51 | pot_arrays.append(pot_arrays_this)
52 | print "created potential arrays for each layer..."
53 |
54 | Pth_array = []
55 | Pth_array.append([]) #because 0th layer do not require neuron model
56 | for i in range(1,par.num_layers):
57 | Pth_array_this = []
58 | for j in range(0,par.num_layer_neurons[i]):
59 | Pth_array_this.append([])
60 | Pth_array.append(Pth_array_this)
61 | print "created potential threshold arrays for each layer..."
62 |
63 |
64 | train_all = []
65 | for i in range(0,par.num_layers):
66 | train_this = []
67 | for j in range(0,par.num_layer_neurons[i]):
68 | train_this.append([])
69 | train_all.append(train_this)
70 | print "created spike trains for each layer..."
71 |
72 | #synapse matrix initialization
73 | synapse = [] #synapse[i] is the matrix for weights from layer i to layer i+1, assuming index from 0
74 | for i in range(0,par.num_layers-1):
75 | synapse_this = np.zeros((par.num_layer_neurons[i+1],par.num_layer_neurons[i]))
76 | synapse.append(synapse_this)
77 |
78 | if(learning_or_classify == 1):
79 | for layer in range(0,par.num_layers-1):
80 | for i in range(par.num_layer_neurons[layer+1]):
81 | for j in range(par.num_layer_neurons[layer]):
82 | synapse[layer][i][j] = random.uniform(0,0.4*par.scale)
83 | else:
84 | for layer in range(0,par.num_layers-1):
85 | for i in range(par.num_layer_neurons[layer+1]):
86 | #for j in range(par.num_layer_neurons[layer]):
87 | filename = "weights/layer_"+str(layer)+"_neuron_"+str(i)+".dat"
88 | with open(filename,"rb") as f:
89 | synapse[layer][i] = pickle.load(f)
90 |
91 |
92 | print "created synapse matrices for each layer..."
93 |
94 |
95 | #this contains neurons of all layers except first
96 | layers = [] #layers[i] is the list of neurons from layer i, assuming index from 0
97 | layer_this = []
98 | layers.append(layer_this) #0th layer is empty as input layer do not require neuron model
99 |
100 | #time series
101 | time = np.arange(1, par.T+1, 1)
102 |
103 | # creating each layer of neurons
104 | for i in range(1,par.num_layers):
105 | layer_this = []
106 | for i in range(par.num_layer_neurons[i]):
107 | a = neuron()
108 | layer_this.append(a)
109 | layers.append(layer_this)
110 | print "created neuron for each layer..."
111 |
112 |
113 | for k in range(par.epoch):
114 | for i in range(1,7):
115 | print "Epoch: ",str(k),", Image: ", str(i)
116 | if(learning_or_classify == 1):
117 | img = cv2.imread("training_images/" + str(i) + ".png", 0)
118 | else:
119 | img = cv2.imread("training_images/" + str(i) + ".png", 0)
120 |
121 |
122 | #Convolving image with receptive field
123 | pot = rf(img)
124 | #print pot
125 |
126 | #training layers i and i+1, assuming 0 indexing, thus n layers require n-1 pairs of training
127 | for layer in range(0,par.num_layers-1):
128 | print "Layer: ", str(layer)
129 |
130 | #Generating spike train when the first layer
131 | #else take the spike train from last layer
132 | if(layer == 0):
133 | train_all[layer] = np.array(encode(pot))
134 | train = np.array(encode(pot))
135 | else:
136 | train_all[layer] = np.asarray(train_this_layer)
137 | train = np.array(np.asarray(train_this_layer))
138 |
139 | #print train[1]
140 |
141 | #calculating threshold value for the image
142 | var_threshold = threshold(train)
143 | #print "var_threshold is ", str(var_threshold)
144 |
145 | # print var_threshold
146 | # synapse_act = np.zeros((par.n,par.m))
147 | # var_threshold = 9
148 | # print var_threshold
149 | # var_D = (var_threshold*3)*0.07
150 |
151 | var_D = 0.15*par.scale
152 |
153 | for x in layers[layer+1]:
154 | x.initial(var_threshold)
155 |
156 | #flag for lateral inhibition
157 | f_spike = 0
158 |
159 | img_win = 100
160 |
161 | active_pot = []
162 | train_this_layer = []
163 | for index1 in range(par.num_layer_neurons[layer+1]):
164 | active_pot.append(0)
165 | train_this_layer.append([])
166 |
167 | #print synapse[layer].shape, train.shape
168 | #Leaky integrate and fire neuron dynamics
169 | for t in time:
170 | #print "Time: ", str(t)
171 | for j, x in enumerate(layers[layer+1]):
172 | active = []
173 | if(x.t_restpar.Prest):
176 | x.P -= var_D
177 | active_pot[j] = x.P
178 |
179 | #pot_arrays[layer+1][j].append(x.P)
180 | #Pth_array[layer+1][j].append(x.Pth)
181 |
182 | # Lateral Inhibition
183 | # Occurs in the training of second last and last layer
184 | #if(f_spike==0 and layer == par.num_layers - 2 and learning_or_classify == 1):
185 | if(f_spike==0 ):
186 | high_pot = max(active_pot)
187 | if(high_pot>var_threshold):
188 | f_spike = 1
189 | winner = np.argmax(active_pot)
190 | img_win = winner
191 | #print "winner is " + str(winner)
192 | for s in range(par.num_layer_neurons[layer+1]):
193 | if(s!=winner):
194 | layers[layer+1][s].P = par.Pmin
195 |
196 | #Check for spikes and update weights
197 | for j,x in enumerate(layers[layer+1]):
198 | pot_arrays[layer+1][j].append(x.P)
199 | Pth_array[layer+1][j].append(x.Pth)
200 | s = x.check()
201 | train_this_layer[j].append(s)
202 | if(learning_or_classify == 1):
203 | if(s==1):
204 | x.t_rest = t + x.t_ref
205 | x.P = par.Prest
206 | for h in range(par.num_layer_neurons[layer]):
207 |
208 | for t1 in range(-2,par.t_back-1, -1):
209 | if 0<=t+t1