├── requirements.txt ├── MANIFEST.in ├── bio_and_art_connectomes.png ├── bio2art ├── __init__.py ├── connectomes │ ├── C_Drosophila.npy │ ├── C_Mouse_Ypma_Oh.npy │ ├── Names_Drosophila.lst │ ├── Names_Mouse_Ypma_Oh.lst │ ├── C_Macaque_Normalized.npy │ ├── C_Marmoset_Normalized.npy │ ├── ND_Macaque_Normalized.npy │ ├── ND_Marmoset_Normalized.npy │ ├── Names_Macaque_Normalized.lst │ ├── C_Human_Betzel_Normalized.npy │ ├── C_Mouse_Gamanut_Normalized.npy │ ├── Names_Marmoset_Normalized.lst │ ├── Names_Human_Betzel_Normalized.lst │ └── Names_Mouse_Gamanut_Normalized.lst ├── utils.py └── importnet.py ├── setup.py ├── LICENSE └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.18.1 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include bio2art/connectomes/* 2 | -------------------------------------------------------------------------------- /bio_and_art_connectomes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio_and_art_connectomes.png -------------------------------------------------------------------------------- /bio2art/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | from bio2art import importnet, utils 4 | -------------------------------------------------------------------------------- /bio2art/connectomes/C_Drosophila.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/C_Drosophila.npy -------------------------------------------------------------------------------- /bio2art/connectomes/C_Mouse_Ypma_Oh.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/C_Mouse_Ypma_Oh.npy -------------------------------------------------------------------------------- /bio2art/connectomes/Names_Drosophila.lst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/Names_Drosophila.lst -------------------------------------------------------------------------------- /bio2art/connectomes/Names_Mouse_Ypma_Oh.lst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/Names_Mouse_Ypma_Oh.lst -------------------------------------------------------------------------------- /bio2art/connectomes/C_Macaque_Normalized.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/C_Macaque_Normalized.npy -------------------------------------------------------------------------------- /bio2art/connectomes/C_Marmoset_Normalized.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/C_Marmoset_Normalized.npy -------------------------------------------------------------------------------- /bio2art/connectomes/ND_Macaque_Normalized.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/ND_Macaque_Normalized.npy -------------------------------------------------------------------------------- /bio2art/connectomes/ND_Marmoset_Normalized.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/ND_Marmoset_Normalized.npy -------------------------------------------------------------------------------- /bio2art/connectomes/Names_Macaque_Normalized.lst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/Names_Macaque_Normalized.lst -------------------------------------------------------------------------------- /bio2art/connectomes/C_Human_Betzel_Normalized.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/C_Human_Betzel_Normalized.npy -------------------------------------------------------------------------------- /bio2art/connectomes/C_Mouse_Gamanut_Normalized.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/C_Mouse_Gamanut_Normalized.npy -------------------------------------------------------------------------------- /bio2art/connectomes/Names_Marmoset_Normalized.lst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/Names_Marmoset_Normalized.lst -------------------------------------------------------------------------------- /bio2art/connectomes/Names_Human_Betzel_Normalized.lst: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/Names_Human_Betzel_Normalized.lst -------------------------------------------------------------------------------- /bio2art/connectomes/Names_Mouse_Gamanut_Normalized.lst: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/AlGoulas/bio2art/HEAD/bio2art/connectomes/Names_Mouse_Gamanut_Normalized.lst -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | from setuptools import setup 4 | from setuptools import find_packages 5 | 6 | setup(name='bio2art', 7 | version='0.1', 8 | description='Convert biological to artifial neural networks', 9 | author='Alexandros Goulas', 10 | author_email='agoulas227@gmail.com', 11 | classifiers=[ 12 | 'Intended Audience :: Science/Research/Any', 13 | 'Programming Language :: Python :: 3.7.3', 14 | ], 15 | packages=find_packages(), 16 | include_package_data=True, 17 | package_data={'': ['connectomes/*.npy', 'connectomes/*.lst']}, 18 | ) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 AlGoulas 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /bio2art/utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | import pickle 4 | import numpy as np 5 | from pathlib import Path 6 | import pkg_resources 7 | import random 8 | 9 | # Load the names of the regions of the indicated connectome 10 | def get_names(data_name, path_to_connectome_folder = None): 11 | ''' 12 | Load the names of the regions of the indicated dataset 13 | 14 | Input 15 | ----- 16 | data_name: str 17 | String denoting the name of the neuronal network would like to use. 18 | Currently available: 19 | 20 | 'Drosophila' 49x49 (NxN shape of the ndarray) 21 | 'Human_Betzel_Normalized' 57x57 22 | 'Macaque_Normalized' 29x29 23 | 'Marmoset_Normalized' 55x55 24 | 'Mouse_Gamanut_Normalized' 19x19 25 | 'Mouse_Ypma_Oh' 56x56 26 | 27 | path_to_connectome_folder: (optional), default None, object of class pathlib.PosixPath 28 | The path to the empirical neural network data (connectomes). 29 | The path must be a passed from the Path subclasss of 30 | pathlib: path_to_connectome_folder = Path('path_to_desired_dataset'). 31 | If not specified, the path to the packaged data will be used. 
32 | 33 | Output 34 | ------ 35 | names: list of str 36 | list has len N, with N the number of areas in the 37 | connectome, indicating the names of each brain region/node 38 | 39 | Note: Pickle is used for loading the names 40 | 41 | ''' 42 | if path_to_connectome_folder is None: 43 | path_to_connectome_folder = pkg_resources.resource_filename('bio2art', 'connectomes/') 44 | path_to_connectome_folder = Path(path_to_connectome_folder) 45 | data_name = 'Names_' + data_name + '.lst' # Prefix and suffix for the file 46 | file_to_open = path_to_connectome_folder / data_name 47 | with open(file_to_open, 'rb') as fp: 48 | names = pickle.load(fp) 49 | 50 | return names 51 | 52 | # Load the neuron density of the regions of the indicated dataset 53 | def get_neuron_density(data_name, path_to_connectome_folder = None): 54 | ''' 55 | Load the neuron density of the regions of the indicated dataset 56 | 57 | Input 58 | ----- 59 | data_name: str 60 | String denoting the name of the neuronal network would like to use. 61 | Currently available: 62 | 63 | 'Macaque_Normalized' 64 | 'Marmoset_Normalized' 65 | 66 | path_to_connectome_folder: (optional), default None, object of class pathlib.PosixPath 67 | The path to the empirical neural network data (connectomes). 68 | The path must be a passed from the Path subclasss of 69 | pathlib: path_to_connectome_folder = Path('path_to_desired_dataset'). 70 | If not specified, the path to the packaged data will be used. 71 | 72 | Output 73 | ------ 74 | neuron_density: ndarray of int of shape (N,) 75 | N is the number of nodes in the neural networks. Each entry of 76 | neuron_density denotes the neuron density (nr of neurons per mm3) 77 | for each region/node. 
78 | 79 | ''' 80 | if path_to_connectome_folder is None: 81 | path_to_connectome_folder = pkg_resources.resource_filename('bio2art', 'connectomes/') 82 | path_to_connectome_folder = Path(path_to_connectome_folder) 83 | file_conn = 'ND_' + data_name + '.npy' # Prefix and suffix for the file 84 | file_to_open = path_to_connectome_folder / file_conn 85 | neuron_density = np.load(file_to_open) 86 | 87 | return neuron_density.astype('int64') 88 | 89 | # Construct a scaled neuron_density array based on the seed_neuron 90 | def scale_neuron_density(neuron_density, 91 | seed_neuron = 1, 92 | scale_type = 'rank'): 93 | ''' 94 | Construct a scaled neuron_density ndarray based on rank ordered or ratios 95 | of neuron_density values and seed_neuron 96 | 97 | Input 98 | ----- 99 | neuron_density: ndarray of int of shape (N,) 100 | N the number of areas in the connectome with entry i denoting the 101 | neuron density of region i 102 | (returned from function get_neuron_density). 103 | 104 | seed_neuron: int, default 1 105 | specifying the number that will be multiplied by the scaled 106 | neuron_density. 107 | 108 | scale_type: str 'ratio' 'rank', default 'rank' 109 | Specifying how the neuron_density values will be scaled. 
110 | 'rank': the values are rank ordered and multiplied by seed_neuron 111 | 'ratio': the values are converted to ratios, 112 | neuron_density[i] / min(neuron_density) and multiplied by seed_neuron 113 | 114 | Output 115 | ------ 116 | scaled_neuron_density: ndarray of int of shape (N,) 117 | N the number of nodes in the neuronal network, indicating the scaled 118 | neuron_density of each brain region/node 119 | 120 | ''' 121 | # Copy and sort the neuron densities 122 | neuron_density_srt = neuron_density.copy() 123 | neuron_density_srt.sort() 124 | 125 | # Get the idx so you can go match neuron_density_srt to neuron_density 126 | # The y_ind will contain the desired idx such that: 127 | # neuron_density[y_ind[i]] == nd_sorted[i] for every i=0,1,2...N 128 | # with N=neuron_density.shape[0] 129 | xy, x_ind, y_ind = np.intersect1d(neuron_density_srt, 130 | neuron_density, 131 | return_indices=True) 132 | 133 | scaled_neuron_density = neuron_density.copy() 134 | 135 | if scale_type == 'rank': 136 | scaled_neuron_density[y_ind] = np.asarray(range(1, neuron_density.shape[0]+1) 137 | ) * seed_neuron 138 | elif scale_type == 'ratio': 139 | for i, item in enumerate(neuron_density_srt): 140 | if i == 0: 141 | scaled_neuron_density[y_ind[i]] = 1 142 | denominator = neuron_density_srt[i] 143 | scaled_neuron_density[y_ind[i]] = round((item / denominator) * seed_neuron) 144 | 145 | return scaled_neuron_density.astype('int64') 146 | 147 | # Partition integer i in n random integers that sum to i 148 | def _int_partition(i, n): 149 | ''' 150 | Partition integer i in n random integers that sum to i 151 | 152 | Input 153 | ----- 154 | i: int 155 | a positive integer that needs to be partitioned in n integers 156 | that sum to i 157 | n: int 158 | a positive integer specifying the number of partitions/integers 159 | to be generated 160 | 161 | Output 162 | ------ 163 | partitions, list of int 164 | list of n integers with the property 165 | sum(partitions)==i 166 | ''' 167 | 
partitions=[] 168 | if n > i: 169 | print('\nInteger i must be higher or equal to n\n') 170 | return 171 | if i < 0 or n < 0: 172 | print('\nIntegers i and n must be positive\n') 173 | return 174 | 175 | spectrum = (i-n)+2 176 | for k in range(n-1): 177 | if spectrum > 1: 178 | new_val = np.random.randint(1, high=spectrum) 179 | partitions.append(new_val) 180 | spectrum = spectrum-new_val 181 | else: 182 | new_val = np.random.randint(1, high=2) 183 | partitions.append(new_val) 184 | 185 | partitions.append(i-sum(partitions)) 186 | # Shuffle list so that the order of generation does not bias magnitude 187 | # of integers as they are generated 188 | random.shuffle(partitions) 189 | 190 | return partitions 191 | 192 | # Partition float f in n random floats that sum to f 193 | def _float_partition(f, n): 194 | ''' 195 | Partition float f in n random floats that sum to f 196 | 197 | Input 198 | ----- 199 | f: float 200 | a positive float that needs to be partitioned in n floats 201 | that sum to i 202 | n: int 203 | a positive integer specifying the number of partitions/integers 204 | to be generated 205 | 206 | Output 207 | ------ 208 | partitions, list of float 209 | list of n float numbers with the property 210 | sum(partitions)==f 211 | ''' 212 | partitions=[] 213 | spectrum = f 214 | for k in range(n-1): 215 | new_val = random.uniform(0., abs(spectrum)) 216 | partitions.append(new_val) 217 | spectrum = spectrum-new_val 218 | 219 | partitions.append(f-sum(partitions)) 220 | # Shuffle list so that the order of generation does not bias magnitude 221 | # of integers as they are generated 222 | random.shuffle(partitions) 223 | 224 | return partitions -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # bio2art 2 | 3 | Convert biological neural networks to recurrent neural networks based on the topology dictated by the empirical biological 
networks. 4 | 5 | ![bio_and_art_connectomes](bio_and_art_connectomes.png) 6 | 7 | # Description 8 | 9 | The bio2art offers an easy to use function to convert biological neural networks to artificial recurrent neural networks. To this end, empirical neural networks of diverse species are used. Currently, the neural networks of the following species can be used: 10 | 11 | 1. Macaque monkey (Macaca mulatta) 12 | 2. Marmoset monkey (Callithrix jacchus) 13 | 3. Mouse (Mus musculus) 14 | 4. Human (Homo sapiens) 15 | 5. Fly (Drosophila melanogaster) 16 | 17 | Note that the term "connectome" refers to a biological neural network. 18 | 19 | bio2art builds artifical recurrent neural networks by using the topology dictated by the aforementioned empirical neural networks and by extrapolating from the empirical data to scale up the artifical neural networks. 20 | 21 | For instance, if the empirical data correspond to a neural network involving 29 brain regions, then the resulting artificial recurrent neural network can be scaled up by assuming a certain number of neurons populating each region (see examples below and documentation of the importnet.py function). Thus, the output can be an artificial recurrent neural network with an arbitrary number of neurons (e.g., >>29 brain regions), but, importantly, this network obeys the topology of a desired biological neural network (or "connectome"). 22 | 23 | The constructed artificial recurrent neural network is returned as a ndarray and, thus, can be used with virtually any type of artifical recurrent network, for instance, echo state networks. 24 | 25 | # Installation 26 | 27 | Download or clone the repository. It is advisable to create a virtual environment (e.g., with conda) with the requirements.txt 28 | 29 | Open a terminal and change to the corresponding folder. Type: 30 | 31 | ``` 32 | pip install . 33 | ``` 34 | 35 | Note that the bio2art only uses numpy (tested with numpy=1.16.2). 
36 | 37 | Note that to use the examples (see below), further libraries are needed. Therefore, for executing the examples described in section "Examples of use in the context of echo state networks", create a virtual environment (e.g., with conda) with the requirements enlisted in examples/requirements.txt 38 | 39 | # Examples 40 | 41 | ## Basic use 42 | 43 | Please see the documentation of the ```importnet.from_conn_mat``` function for a detailed description of the parameters used below. The use of the parameters and their impact is highlighted in the following examples. 44 | 45 | Converting the macaque monkey neural network to a recurrent artifical neural network. 46 | 47 | ``` 48 | from bio2art import importnet 49 | from pathlib import Path 50 | 51 | # path to where the "connectomes" folder is located: 52 | path_to_connectome_folder = Path("/.../bio2art/connectomes/")#change to the folder where the desired data are located 53 | # If not specified, the folder to the packaged data will be used (as it is the case in all subsequent examples) 54 | 55 | data_name = "Macaque_Normalized"# the macaque monkey neuronal network (see importnet.from_conn_mat function for all names of available connectomes) 56 | 57 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 58 | data_name=data_name, 59 | neuron_density=None, 60 | intrinsic_conn=False, 61 | target_sparsity=0.1 62 | ) 63 | ``` 64 | 65 | The ```neuron_density``` is the recurrent neural network based on the indicated empirical monkey neuronal network. However, since ```neuron_density=None```, ```net_scaled``` is exactly the same with ```net_orig```, that is, the exact same empirical monkey neural network. Not very useful. Let's see how we can create something more meaningful and helpful. 66 | 67 | The ```neuron_density``` parameter can help us scale up the recurrent neural network while we stay faithful to the topology of the empirical neural network (here, the macaque monkey). 
68 | 69 | ``` 70 | import numpy as np 71 | neuron_density=np.zeros(29, dtype=int) 72 | neuron_density[:] = 10 73 | 74 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 75 | data_name=data_name, 76 | neuron_density=neuron_density, 77 | intrinsic_conn=False, 78 | target_sparsity=0.1 79 | ) 80 | ``` 81 | 82 | Now the ```neuron_density``` parameter is a numpy array and each entry is containing the number 10. This means that each region ```neuron_density[i]``` consists of 10 neurons. Thus, now the resulting recurrent neural network ```net_scaled``` contains 290 neurons (29 regions of the original connectome x 10 neurons per region as we indicated). These neurons are connected based on the topology of the the actual empirical neural network. Therefore, ```net_scaled``` is a bioinstantiated recurrent neural network, but scaled up to 290 neurons. 83 | 84 | If we want to assume that regions contain another number of neurons, we just simply construct ```neuron_density``` accordingly (e.g., with 20, 34, 1093 neurons, that is, arbitrary positive integers). 85 | 86 | Note that not all regions need to contain the same number of neurons. For instance, we can assume that region 5 contains 40 neurons and the rest of the regions 10 neurons: 87 | 88 | ``` 89 | neuron_density=np.zeros(29, dtype=int) 90 | neuron_density[:] = 10 91 | neuron_density[4] = 40 92 | 93 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 94 | data_name=data_name, 95 | neuron_density=neuron_density, 96 | intrinsic_conn=False, 97 | target_sparsity=0.1 98 | ) 99 | ``` 100 | 101 | This means each ```neuron_density[i]``` can contain an arbitrary positive integer. The total number of neurons, and thus, shape of net_scaled, for this example, has shape (320, 320). 102 | 103 | Note that the parameter ```target_sparsity``` is a float (0 1] and controls for each source neuron the percentage of all possible neuron-targets to form connections with. 
Note that at least 1 neuron will function as target in case the resulting percentage results in less than 1 neuron.
118 | 119 | ## Intrinsic and self connections 120 | 121 | If we want to build a neural network with intrinsic, within-region connections and self-to-self connections, then the ```intrinsic_conn``` and ```keep_diag``` parameters should be used: 122 | 123 | ``` 124 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 125 | data_name=data_name, 126 | neuron_density=neuron_density, 127 | intrinsic_conn=True, 128 | target_sparsity=0.8, 129 | keep_diag=True, 130 | ) 131 | ``` 132 | Since the parameters ```intrinsic_conn``` and ```keep_diag``` are True, ```net_scaled``` exhibits both self-to-self connections and intrinsic, within-region connections. The network now has a density=0.572 and a total number of connections=58560. 133 | 134 | Note that when the ```intrinsic_conn=True``` option is used, an additional parameter ```target_sparsity_intrinsic``` can be used to specify the sparsity of targets of the intrinsic connections (in the same way that ```target_sparsity``` works for the extrinsic connections). Default ```target_sparsity_intrinsic = 1``` 135 | 136 | Let's see an example: 137 | 138 | ``` 139 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 140 | data_name=data_name, 141 | neuron_density=neuron_density, 142 | intrinsic_conn=True, 143 | target_sparsity=0.8, 144 | target_sparsity_intrinsic=0.5, 145 | keep_diag=True 146 | ) 147 | ``` 148 | 149 | The network now has a density=0.550 and a total number of connections=56360. 150 | 151 | When ```intrinsic_conn=True```, the the parameter intrinsic_wei is relevant: ```intrinsic_wei``` is a float (0 1] denoting the percentage of the weight that will be assigned to the intrinsic weights. Default value ```intrinsic_conn=0.8```, thus, 0.8*sum(extrinsic weight), where sum(extrinsic weight) is the sum of weights of connections from region A to all other regions, except from A. Note that the default value was used in all of the examples above. 
152 | 153 | We can change the weight of intrinsic conenctions, including self-to-self connections, with the parameter intrinsic_wei, e.g., ```intrinsic_wei=0.5``` in the example below: 154 | 155 | ``` 156 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 157 | data_name=data_name, 158 | neuron_density=neuron_density, 159 | intrinsic_conn=True, 160 | target_sparsity=0.8, 161 | target_sparsity_intrinsic=0.5, 162 | keep_diag=True 163 | ) 164 | ``` 165 | 166 | ## Extrapolating connection weights from empirical neural networks 167 | 168 | In all of the above examples the parameter ```rand_partition``` is False (default values) and, thus, the neuron-to-neuron weights for each neuron m and n belonging to regions i and j respectively are computed as follows: 169 | ```network_scaled[m,n] = network_original[i,j] / (nr_source_neurons * nr_target_neurons)``` with m, n all neurons belonging to region i, j respectively. 170 | 171 | This introduces less diversity and may impact the amount of diverse transformations that are applied to the input to the network (since many connections from one source neuron to many target neurons has the exact same weight). Therefore, in orderr to introduce more diversity but also construct net_scaled based on the empirical values of the neural network, we can specify ```rand_partition=True```. This has as a result to compute heterogeneous strengths of for the neuron-to-neuron connections as follows: empirical wieight ```network_original[i,j]``` will be partitioned in k parts that sum to the original connection weight ```network_original[i,j]```, where ```k = nr_source_neurons * nr_target_neurons```, with ```nr_source_neurons``` and ```nr_target_neurons``` are the nr of neurons in the source i and target areas j as part of ```network_scaled[i,j]```. These k values will be assigned as conenction weights for the k connections between neurons inhabiting regions i and j. 
Let's see an example: 172 | 173 | ``` 174 | net_orig, net_scaled, region_neuron_ids = importnet.from_conn_mat( 175 | data_name=data_name, 176 | neuron_density=neuron_density, 177 | intrinsic_conn=True, 178 | target_sparsity=0.8, 179 | target_sparsity_intrinsic=0.5, 180 | keep_diag=True, 181 | rand_partition=True 182 | ) 183 | ``` 184 | 185 | Note that if ```neuron_density=None```, then internally this variable will be set to: ```neuron_density[i]=1```. 186 | 187 | The same syntax and parameters are used for instantiating the artifical recurrent neural network based on the topology of other empirical biological neural networks, such as the mouse brain neural network: 188 | 189 | ``` 190 | data_name = "Mouse_Ypma_Oh"# the mouse neural network 191 | 192 | neuron_density=np.zeros(56, dtype=int)# this mouse network has 56 regions (see bio2art_from_conn_mat function documentation) 193 | neuron_density[:] = 10 194 | 195 | net_orig, net_scaled, region_neuron_ids = bio2art_import.bio2art_from_conn_mat( 196 | data_name=data_name, 197 | neuron_density=neuron_density, 198 | intrinsic_conn=True, 199 | target_sparsity=0.1 200 | ) 201 | ``` 202 | 203 | This instantiation results in a recurrent neural network ```net_scaled``` that contains 560 neurons (56 regions of the original connectome x 10 neurons per region as we indicated). 204 | 205 | In all of the above examples ```net_orig``` is a ndarray that corresponds to the biological neural network that was used to construct the artificial neural network. ```region_neuron_ids``` is a list of lists. Each list in this list includes integers that are the indexes of the neurons contained within a region. For instance,```region_neuron_ids[0]``` will return the indexes of the neurons in ```net_scaled``` that correspond to region 1 in the biological neural network ```net_orig```. See section **Utilities** to see how the names of each region can be imported. 206 | 207 | Note that importnet contains also the function ```from_list```. 
This function can be used to read a csv file that represents a connectome and output the connectome as ndarray. Not used in the current examples, but useful to read neural network data in a csv form. 208 | 209 | # Utilities 210 | 211 | Certain utility functions can be used to extract more information about the neural networks that are prepackaged. Let's see how these ultility functions are used. 212 | 213 | ## Names of regions/nodes 214 | 215 | We can load the names of the regions of the biological neural networks. For instance, if we are working with the marmoset monkey neural network, we can load the names as follows: 216 | 217 | ``` 218 | names = bio2art.utils.get_names('Marmoset_Normalized') 219 | 220 | ``` 221 | names is now a list of str containing the acrobyms of the 55 brain regions of the marmoset brain. 222 | 223 | In the exact same way we can load the names of the rest of data sets. 224 | 225 | ## Neuron densities of regions/nodes 226 | 227 | We can load the neuron densities of each brain region as follows: 228 | ``` 229 | nd = bio2art.utils.get_neuron_density('Marmoset_Normalized') 230 | ``` 231 | nd is a ndarray of shape (N,) and N is the number of areas in the connectome. Each entry of ```neuron_density``` denotes the neuron density (nr of neurons per mm3) for each region/node (see Citations for experimental details). 232 | 233 | Note that neuron density measurments are available for the ```Macaque_Normalized``` and ```Marmoset_Normalized``` datasets and that ```Macaque_Normalized``` contains NaN values for some regions (no available empirical data). 234 | 235 | ## Scaling neuron densities 236 | 237 | Working with the actual neuron densities "as is" can pose computational challenges. For instance, the sum of neuron densities for the marmsoet is 4837703, so the bio-instantiated recurrent neural network would have more than 4 million neurons. 
To alleviate computational issues and take into account that for certain applications or research questions recurrent networks with this size are not necessary, we can scale the empirical measurements so that we have the desired network size. Two options are available, that is, working with ratios of neuron densities or rank ordered neuron densities. Let's see how it works: 238 | 239 | ``` 240 | nd = bio2art.utils.get_neuron_density('Marmoset_Normalized') 241 | 242 | nd_scaled = bio2art.utils.scale_neuron_density(nd, seed_neuron=2, scale_type='ratio') 243 | ``` 244 | The resulting ```nd_scaled``` is an ndarray that now expresses the nd values as ratios: 245 | ```nd_scaled = round((nd[i] / min(nd)) * seed_neuron)``` where i denotes each of the N regions. 246 | So the ratio is the actual neuron density value over the min neuron density value multiplied by ```seed_neuron```. seed_neuron allows us to scale the size of the network by multiplying the nd[i] / min(nd) ratio. 247 | 248 | We can also use the rank of the nd values: 249 | ``` 250 | nd_scaled = bio2art.utils.scale_neuron_density(nd, seed_neuron=2, scale_type='rank') 251 | ``` 252 | In this case only the rank order of the nd values is taken into account, and thus the magnituide of e.g., rank 1 and 4 is ignored. The resulting ```nd_scaled``` is simply the rank ordered nd values multiplied by ```seed_neuron```. 253 | 254 | You can use the above neuron density or scaled neuron density values as the neuron_density argument in the importnet function for generating a bio-instantiated recurrent neural network (see documentation of importnet). 
255 | 256 | # Citations 257 | 258 | ## Repository and paper 259 | 260 | ``` 261 | @misc{bio2art, 262 | author = {Goulas, Alexandros}, 263 | title = {bio2art: Convert biological neural networks to recurrent neural networks}, 264 | year = {2020}, 265 | publisher = {GitHub}, 266 | journal = {GitHub repository}, 267 | howpublished = {\url{https://github.com/AlGoulas/bio2art}}, 268 | } 269 | 270 | title = {Bio-instantiated recurrent neural networks: Integrating neurobiology-based network topology in artificial networks}, 271 | journal = {Neural Networks}, 272 | volume = {142}, 273 | pages = {608-618}, 274 | year = {2021}, 275 | issn = {0893-6080}, 276 | doi = {https://doi.org/10.1016/j.neunet.2021.07.011}, 277 | url = {https://www.sciencedirect.com/science/article/pii/S0893608021002744}, 278 | author = {Alexandros Goulas and Fabrizio Damicelli and Claus C. Hilgetag}, 279 | keywords = {Network topology, Connectomes, Artificial networks} 280 | } 281 | ``` 282 | 283 | ## Citing datasets 284 | 285 | Apart from explicitly refering to this repository and paper, certain empirical datasets are used as well. Thus, if you use a specific empirical neural network to instantiate a recurrent artifical neural network, please cite the appropriate item from the following list papers: 286 | 287 | Fly: 288 | 289 | A.-S. Chiang et al. Three-dimensional reconstruction of brain-wide wiring networks in Drosophila at single-cell resolution.Curr. Biol.21,1–11 (2011) https://doi.org/10.1016/j.cub.2010.11.056 290 | 291 | Mouse Ypma Oh: 292 | 293 | M. Rubinov, R. J. F. Ypma, et al. Wiring cost and topological participation of the mouse brain connectome.Proc. Natl. Acad. Sci. U.S.A. 112,10032–10037 (2015). https://doi.org/10.1073/pnas.1420315112 294 | 295 | S.W. Oh et al. A mesoscale connectome of the mouse brain. Nature. 508,207–214 (2014). http://dx.doi.org/10.1038/nature13186 296 | 297 | Mouse Gamanut: 298 | 299 | R. Gămănuţ et al. 
The mouse cortical connectome, characterized by an ultra-dense cortical graph, maintains specificity by distinct connectivity profiles. Neuron. 97, 698-715.e10 https://doi.org/10.1016/j.neuron.2017.12.037 300 | 301 | Macaque monkey: 302 | 303 | N. T. Markov et al. A weighted and directed interareal connectivity matrix for macaque cerebral cortex. Cereb. Cortex 24,17–36 (2014). https://doi.org/10.1093/cercor/bhs270 304 | 305 | Marmoset monkey: 306 | 307 | P. Majka et al. Towards a comprehensive atlas of cortical connections in a primate brain: Mapping tracer injection studies of the common marmoset into a reference digital template. Journal of Comparative Neurology. 524,2161–2181 (2016). https://doi.org/10.1002/cne.24023 308 | 309 | Human: 310 | 311 | R. F. Betzel, D. S. Bassett, Specificity and robustness of long-distance connections in weighted, interareal connectomes. Proc. Natl. Acad. Sci. U.S.A. 115, E4880–E4889 (2018). https://doi.org/10.1073/pnas.1720186115 312 | 313 | -------------------------------------------------------------------------------- /bio2art/importnet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | import csv 4 | import numpy as np 5 | from pathlib import Path 6 | import pkg_resources 7 | import random 8 | 9 | from bio2art import utils 10 | 11 | # A set of functions for converting biological neuronal networks to artificial 12 | # neuronal networks. The output is a conenctivity matrix (2D numpy array) that 13 | # can be used in reccurent neuronal networks (e.g., echo state networks). 
# Function that simply reads a csv file and returns the matrix that
# constitutes the neuronal network
def from_list(path_to_connectome_folder, data_name):
    """
    Generate matrix W from a csv file.

    Each complete row of the csv file describes one connection: the first
    column is the source neuron name, the second column the target neuron
    name, and the second-to-last column the connection weight. Rows with
    any empty entry are skipped.

    Input
    -----
    path_to_connectome_folder: object of class pathlib.Path
        The path to the folder with the csv file.

    data_name: str
        The name of the csv file.

    Output
    ------
    W: ndarray of shape (N,N)
        The connectivity matrix describing the connections between the N
        elements, with W[i][j] the weight of the connection from neuron i
        to neuron j. Neurons are indexed in order of first appearance in
        the file.
    """
    file_to_open = path_to_connectome_folder / data_name

    # Map each neuron name to a unique integer index (order of first
    # appearance). A dict gives O(1) lookups instead of scanning a list
    # of names for every row.
    neuron_indexes = {}

    from_indexes_list = []
    to_indexes_list = []
    value_connection_list = []

    with open(file_to_open, newline='') as f:
        reader = csv.reader(f)
        for row in reader:
            # Only proceed if the row contains all data
            if any(item == "" for item in row):
                continue

            # Strip the strings from spaces so we do not create duplicates
            from_neuron = row[0].strip()
            to_neuron = row[1].strip()

            # Register unseen neuron names with the next free index
            if from_neuron not in neuron_indexes:
                neuron_indexes[from_neuron] = len(neuron_indexes)
            from_indexes_list.append(neuron_indexes[from_neuron])

            # Do the same for the to_neuron
            if to_neuron not in neuron_indexes:
                neuron_indexes[to_neuron] = len(neuron_indexes)
            to_indexes_list.append(neuron_indexes[to_neuron])

            # Irrespective of the above conditions the value of the
            # connection is stored (second-to-last column of the row)
            value_connection_list.append(float(row[-2]))

    # Build the connectivity matrix.
    # BUGFIX: the original loop iterated range(len(to_indexes_list)-1) and
    # thus silently dropped the last connection of the file (off-by-one);
    # zipping over all recorded connections includes every row.
    W = np.zeros((len(neuron_indexes), len(neuron_indexes)))

    for source, target, value in zip(from_indexes_list,
                                     to_indexes_list,
                                     value_connection_list):
        W[source][target] = value

    return W

# Function that constructs a connectivity matrix network_scaled with the
# topology that is dictated by biological neuronal networks.
def from_conn_mat(
    data_name = None,
    path_to_connectome_folder = None,
    neuron_density = None,
    target_sparsity = 0.2,
    intrinsic_conn = True,
    target_sparsity_intrinsic = 1.,
    intrinsic_wei = 0.8,
    rand_partition = False,
    keep_diag = True
    ):
    """
    Generate network_scaled from a biological neural network (connectome).
    This operation allows the construction of artificial neural networks
    with recurrent matrices that obey the topology and weight strength
    constraints of a biological neural network.

    Input
    -----
    data_name: str
        String denoting the name of the neuronal network to use.
        Currently available:

        'Drosophila'                49x49 (NxN shape of the ndarray)
        'Human_Betzel_Normalized'   57x57
        'Macaque_Normalized'        29x29
        'Marmoset_Normalized'       55x55
        'Mouse_Gamanut_Normalized'  19x19
        'Mouse_Ypma_Oh'             56x56

    path_to_connectome_folder: (optional), default None, object of class
        pathlib.Path
        The path to the empirical neural network data (connectomes).
        If not specified, the path to the packaged data will be used.

    neuron_density: array-like of positive int with shape (N,), default None
        N corresponds to the actual biological neural network (see
        data_name). Each entry neuron_density[i] denotes the number of
        neurons that we assume to inhabit region i.
        NOTE: if None (default) then neuron_density will internally get
        populated with 1s (1 neuron per region).

    target_sparsity: float (0 1], default 0.2
        The percentage of all possible neuron-targets for each source
        neuron to form connections with. At least 1 neuron will function
        as target in case the resulting percentage is < 1. This parameter
        is meaningful only if at least one region has more than 1 neuron.

    intrinsic_conn: bool, default True
        Specify if the within-region neuron-to-neuron connectivity will
        be generated (=True) or not.

    target_sparsity_intrinsic: float (0 1], default 1.
        Same as target_sparsity, but for the within-region/intrinsic
        connections.

    intrinsic_wei: float (0 1], default .8
        The percentage of the weight that will be assigned to the
        intrinsic weights, e.g., 0.8*sum(extrinsic weight) where
        sum(extrinsic weight) is the sum of weights of connections from
        region A to all other regions, but A.
        NOTE: this parameter makes sense only if intrinsic_conn = True.

    rand_partition: bool, default False
        If True, the original weight of each connection is partitioned in
        k random parts that sum to the original connection weight, where
        k = nr_source_neurons * nr_target_neurons. If False, the weight
        is divided into k equal parts instead.

    keep_diag: bool, default True
        Specify if the diagonal entries (denoting self-to-self neuron
        connections) should be kept or not.
        NOTE: this parameter only has an effect when intrinsic_conn = True.

    Output
    ------
    network_original: ndarray of shape (N,N)
        The actual biological neural network that was used, with no
        modifications/scaling (see data_name for N).

    network_scaled: ndarray of shape (M,M)
        The rescaled neural network (M is bound to the parameter
        neuron_density).

    region_neuron_ids: list of lists of int
        List of lists for tracking the neurons of the network_scaled
        network. region_neuron_ids[i] contains the integer ids of the
        neurons of region i, so that region i corresponds to
        network_scaled[region_neuron_ids[i], region_neuron_ids[i]].

    Raises
    ------
    ValueError
        If data_name is None, or if neuron_density does not contain one
        entry per region of the chosen connectome.
        (The original implementation printed a message and returned None
        on a size mismatch, which produced a confusing TypeError at the
        caller when unpacking the result.)
    """
    if data_name is None:
        raise ValueError('data_name must be specified '
                         '(see docstring for the available datasets)')

    if path_to_connectome_folder is None:
        path_to_connectome_folder = pkg_resources.resource_filename(
            'bio2art', 'connectomes/')
        path_to_connectome_folder = Path(path_to_connectome_folder)
    file_conn = 'C_' + data_name + '.npy'  # Prefix and suffix for the file
    file_to_open = path_to_connectome_folder / file_conn

    # Read the connectivity matrix - it must be stored as a numpy array
    network_original = np.load(file_to_open)

    nr_regions = network_original.shape[0]

    # If neuron_density is not specified then populate each region with 1
    # neuron; otherwise accept any array-like (e.g., a plain list).
    if neuron_density is None:
        neuron_density = np.ones((nr_regions,), dtype=int)
    else:
        neuron_density = np.asarray(neuron_density)

    if neuron_density.shape[0] != nr_regions:
        raise ValueError(
            'Size of neuron_density must be equal to the size of the '
            'connectome: ' + str(nr_regions))

    all_neurons = int(np.sum(neuron_density))  # how many neurons do we have?

    # Create a list of lists that tracks the neuron ids that each region
    # contains
    region_neuron_ids = []
    start = 0
    for i in range(nr_regions):
        offset = int(neuron_density[i])
        region_neuron_ids.append(list(range(start, start + offset)))
        start = start + offset

    # Per-region sum across axis 0 - used for the calculation of the
    # intrinsic weights.
    # NOTE(review): np.sum(..., 0) sums the COLUMNS of network_original;
    # whether that corresponds to "outgoing" weights depends on the
    # row/column convention of the stored connectomes - confirm.
    sum_C_out = np.sum(network_original, 0)

    # Initiate the neuron-to-neuron connectivity matrix
    network_scaled = np.zeros((all_neurons, all_neurons))

    # Populate row by row of the region-to-region matrix network_original:
    # for each pair of connected regions, connect the neurons contained in
    # those regions using region_neuron_ids and the region-to-region weight.
    for i in range(nr_regions):
        # Indexes of the regions that receive incoming connections from
        # the current region i
        not_zeros = np.where(network_original[i, :] > 0)[0]

        # Get the neuron source indexes
        sources_indexes = region_neuron_ids[i]

        if intrinsic_conn is True:
            # Add an intrinsic within-region weight by interconnecting all
            # the neurons that belong to one region.

            # Intrinsic weight of within region - default 0.8
            intrinsic_weight = (intrinsic_wei * sum_C_out[i]) / (1 - intrinsic_wei)

            # Calculate the number of intrinsic targets to use
            if target_sparsity_intrinsic == 1.:
                nr_sources_to_use = len(sources_indexes)
                # Without the diagonal we need one target less
                if keep_diag is False:
                    nr_sources_to_use = nr_sources_to_use - 1
            else:
                nr_sources_to_use = target_sparsity_intrinsic * len(sources_indexes)

            # Ensure that we keep at least one target neuron
            if nr_sources_to_use < 1:
                nr_sources_to_use = 1
            else:
                nr_sources_to_use = int(np.round(nr_sources_to_use))

            # If keep_diag = False and nr_sources_to_use ==
            # len(sources_indexes), reduce nr_sources_to_use by one so
            # that excluding the self-self connection still leaves the
            # proper number of targets.
            if keep_diag is False and nr_sources_to_use == len(sources_indexes):
                nr_sources_to_use = nr_sources_to_use - 1

            # Keep at least one target neuron in case neuron_density[i]
            # is 1 and self-self connections are not desired
            if nr_sources_to_use < 1:
                nr_sources_to_use = 1

            # Process the weights of the original neural network based on
            # the rand_partition boolean parameter
            if rand_partition:
                # Random partition of the weight in parts that sum to it
                partitioned_weights = utils._float_partition(
                    intrinsic_weight,
                    len(sources_indexes) * nr_sources_to_use)
                # Indexes into partitioned_weights while connecting
                # sources to targets
                start_partitioned_weights = 0
                stop_partitioned_weights = nr_sources_to_use
            else:
                # Equal split of the weight across all formed connections
                intrinsic_weight = intrinsic_weight / (len(sources_indexes) * nr_sources_to_use)

            for sources in sources_indexes:
                # Choose random intrinsic targets for each source neuron;
                # work on a copy so region_neuron_ids stays intact
                current_sources_indexes = sources_indexes.copy()

                # Exclude the source itself so that self-self connections
                # do not occur
                if keep_diag is False:
                    current_sources_indexes.remove(sources)

                # Keep nr_sources_to_use random targets
                random.shuffle(current_sources_indexes)
                current_sources_indexes = current_sources_indexes[:nr_sources_to_use]

                if rand_partition:
                    network_scaled[sources, current_sources_indexes] = \
                        partitioned_weights[start_partitioned_weights:stop_partitioned_weights]
                    # Advance to the next chunk of partitioned weights
                    start_partitioned_weights = stop_partitioned_weights
                    stop_partitioned_weights = stop_partitioned_weights + nr_sources_to_use
                else:
                    network_scaled[sources, current_sources_indexes] = intrinsic_weight

        # Loop through the regions receiving connections from region i and
        # fetch the target neuron ids stored in region_neuron_ids
        for target in not_zeros:
            target_indexes = region_neuron_ids[target]

            # Region-to-region weight to be distributed across the
            # neuron-to-neuron connections
            current_weight = network_original[i, target]

            # Control the sparsity of connections by choosing the portion
            # of all target_indexes to be used (target_sparsity parameter)
            if target_sparsity == 1.:
                nr_targets_to_use = len(target_indexes)
            else:
                nr_targets_to_use = target_sparsity * len(target_indexes)

            # Ensure that we keep at least one target neuron
            if nr_targets_to_use < 1:
                nr_targets_to_use = 1
            else:
                nr_targets_to_use = int(np.round(nr_targets_to_use))

            if rand_partition:
                # Random partition of the weight in parts that sum to it
                partitioned_weights = utils._float_partition(
                    current_weight,
                    len(sources_indexes) * nr_targets_to_use)
                start_partitioned_weights = 0
                stop_partitioned_weights = nr_targets_to_use
            else:
                # Equal split of the weight across all formed connections
                neuron_to_neuron_weight = current_weight / (len(sources_indexes) * nr_targets_to_use)

            for sources in sources_indexes:
                # Create random targets for each source separately.
                # BUGFIX: shuffle a COPY - the original shuffled
                # region_neuron_ids[target] in place, mutating the
                # returned region_neuron_ids as a side effect.
                current_target_indexes = target_indexes.copy()
                random.shuffle(current_target_indexes)
                current_target_indexes = current_target_indexes[:nr_targets_to_use]

                if rand_partition:
                    network_scaled[sources, current_target_indexes] = \
                        partitioned_weights[start_partitioned_weights:stop_partitioned_weights]
                    # Advance to the next chunk of partitioned weights
                    start_partitioned_weights = stop_partitioned_weights
                    stop_partitioned_weights = stop_partitioned_weights + nr_targets_to_use
                else:
                    network_scaled[sources, current_target_indexes] = neuron_to_neuron_weight

    return network_original, network_scaled, region_neuron_ids