├── .DS_Store
├── .github
│   └── workflows
│       └── python-app.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── connectome_interpreter
│   ├── __init__.py
│   ├── _version.py
│   ├── activation_maximisation.py
│   ├── compress_paths.py
│   ├── data
│   │   ├── Badel2016
│   │   │   ├── Badel2016.csv
│   │   │   └── Badel_et_al_2016_raw.xls
│   │   ├── DoOR
│   │   │   ├── door_chemical_meta.csv
│   │   │   ├── door_receptor_mappings.csv
│   │   │   ├── door_response_matrix.csv
│   │   │   ├── processed_door_adult.csv
│   │   │   └── processed_door_adult_sfr_subtracted.csv
│   │   ├── Dweck2018
│   │   │   ├── 1-s2.0-S2211124718306636-mmc2.xlsx
│   │   │   ├── 1-s2.0-S2211124718306636-mmc3.xlsx
│   │   │   ├── 1-s2.0-S2211124718306636-mmc4.xlsx
│   │   │   ├── adult_chem2glom.csv
│   │   │   ├── adult_fruit2glom.csv
│   │   │   ├── larva_chem2or.csv
│   │   │   └── larva_fruit2or.csv
│   │   ├── Matsliah2024
│   │   │   └── fafb_right_vis_cols.csv
│   │   ├── Nern2024
│   │   │   └── ME-columnar-cells-hex-location.csv
│   │   └── Zhao2024
│   │       └── ucl_hex_right_20240701_tomale.csv
│   ├── external_map.py
│   ├── external_paths.py
│   ├── path_finding.py
│   └── utils.py
├── docs
│   ├── Makefile
│   ├── conf.py
│   ├── figures
│   │   ├── act_max.png
│   │   ├── column_sum.png
│   │   ├── effective_input_hist.png
│   │   ├── ei_connectivity.png
│   │   ├── matmul.pdf
│   │   ├── matmul.png
│   │   ├── path_finding.png
│   │   ├── pathfinding_comparison.png
│   │   ├── rooted_effective_input_hist.png
│   │   ├── simplified_model.png
│   │   └── sparse_matmul.png
│   ├── index.rst
│   ├── make.bat
│   ├── modules
│   │   ├── activation_maximisation.rst
│   │   ├── compress_paths.rst
│   │   ├── external_map.rst
│   │   ├── external_paths.rst
│   │   ├── path_finding.rst
│   │   ├── toc.rst
│   │   └── utils.rst
│   ├── requirements.txt
│   ├── tutorials
│   │   ├── act_max.rst
│   │   ├── ei_matmul.rst
│   │   ├── matmul.rst
│   │   ├── path_finding.rst
│   │   ├── simple_model.rst
│   │   └── toc.rst
│   └── your_own_data.rst
├── pyproject.toml
├── requirements-types.txt
├── requirements.txt
└── tests
    ├── __init__.py
    ├── test_act_max.py
    ├── test_compress_paths.py
    ├── test_path_finding.py
    └── test_utils.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/.DS_Store
--------------------------------------------------------------------------------
/.github/workflows/python-app.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
3 | 
4 | name: Python application
5 | 
6 | on:
7 |   push:
8 |     branches: [ "main" ]
9 |   pull_request:
10 |     branches: [ "main" ]
11 | 
12 | permissions:
13 |   contents: read
14 | 
15 | jobs:
16 |   build:
17 | 
18 |     runs-on: ubuntu-latest
19 | 
20 |     steps:
21 |       - uses: actions/checkout@v4
22 |         with:
23 |           fetch-depth: 0  # fetch full history so origin/main is available for the diff below
24 | 
25 |       - name: Set up Python 3.10
26 |         uses: actions/setup-python@v5
27 |         with:
28 |           python-version: "3.10"
29 |           cache: 'pip' # caching pip dependencies, https://github.com/actions/setup-python#caching-packages-dependencies
30 | 
31 |       - name: Install package and dependencies
32 |         run: |
33 |           python -m pip install --upgrade pip
34 |           pip install -r requirements.txt
35 | 
36 |       - name: Lint with flake8
37 |         run: |
38 |           # stop the build if there are Python syntax errors or undefined names
39 |           flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
40 |           # exit-zero treats all errors as warnings.
41 |           flake8 . --count --exit-zero --max-complexity=10 --max-line-length=88 --statistics
42 | 
43 |       - name: Run black on changed files
44 |         run: |
45 |           CHANGED_FILES=$(git diff --diff-filter=d --name-only origin/main | grep '\.py$' || true)
46 |           if [[ -n "$CHANGED_FILES" ]]; then
47 |             black --check -l 88 $CHANGED_FILES || (echo "Please run black" && false)
48 |           else
49 |             echo "No Python files changed."
50 |           fi
51 | 
52 |       - name: Run type checker
53 |         run: |
54 |           pip install -r requirements-types.txt
55 |           mypy --install-types --non-interactive . || true  # advisory: type errors do not fail the build
56 | 
57 |       - name: Test with pytest
58 |         run: |
59 |           pytest -v --cov=connectome_interpreter --cov-report=term
60 | 
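
These checks can be reproduced locally with the same commands (flake8, black, mypy, pytest), assuming the packages in requirements.txt and requirements-types.txt are installed.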
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | dist/
2 | build/
3 | *.egg-info/
4 | __pycache__/
5 | docs/_build/
6 | .env
7 | /venv
8 | .DS_Store
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 | 
5 | # Required
6 | version: 2
7 | 
8 | # Set the OS, Python version and other tools you might need
9 | build:
10 |   os: ubuntu-22.04
11 |   tools:
12 |     python: "3.10"
13 |     # You can also specify other tool versions:
14 |     # nodejs: "19"
15 |     # rust: "1.64"
16 |     # golang: "1.19"
17 | 
18 | # Build documentation in the "docs/" directory with Sphinx
19 | sphinx:
20 |   configuration: docs/conf.py
21 | 
22 | # Optionally build your docs in additional formats such as PDF and ePub
23 | # formats:
24 | #   - pdf
25 | #   - epub
26 | 
27 | # Optional but recommended, declare the Python requirements required
28 | # to build your documentation
29 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
30 | python:
31 |   install:
32 |     - requirements: docs/requirements.txt
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 YijieYin
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | This package provides tools for interpreting connectomics data.
2 |
3 | To install:
4 | ```
5 | pip install connectome-interpreter
6 | ```
7 | Or to install the bleeding edge development version:
8 | ```
9 | pip install git+https://github.com/YijieYin/connectome_interpreter.git
10 | ```
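
To check that the installation worked (a minimal sketch; `coin` is just a local alias):
```python
import connectome_interpreter as coin

# the package version is re-exported at the top level (from _version.py)
print(coin.__version__)
```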
11 |
12 | Documentation is [here](https://connectome-interpreter.readthedocs.io/en/latest/) (with tutorials and explanatory notes).
13 |
14 | # Example notebooks
15 | ## Full Adult Fly Brain
16 | Data obtained from [Dorkenwald et al. 2024](https://www.nature.com/articles/s41586-024-07558-y), [Schlegel et al. 2024](https://www.nature.com/articles/s41586-024-07686-5), and [Matsliah et al. 2024](https://www.nature.com/articles/s41586-024-07981-1). To visualise the neurons, you can use this URL: [https://tinyurl.com/flywire783](https://tinyurl.com/flywire783). By using the connectivity information, you agree to follow the [FlyWire citation guidelines and principles](https://codex.flywire.ai/api/download).
17 | - [central brain, single-neuron level](https://colab.research.google.com/drive/1_beqiKPX8pC7---DWepKO8dEv1sJ2vA4?usp=sharing) (recommended; shows a variety of capabilities)
18 | - [central brain, cell type level](https://colab.research.google.com/drive/1ECUagwN-r2rnKyfcYgtR1oG8Lox8m8BW?usp=sharing)
19 | - [right hemisphere optic lobe, single-neuron level](https://colab.research.google.com/drive/1SHMZ3DUTeakdh0znMmXu5g2qffx6rFGV?usp=sharing)
20 |
21 | ## MaleCNS
22 | Data obtained from [neuPrint](https://neuprint.janelia.org/?dataset=optic-lobe%3Av1.0&qt=findneurons) and [Nern et al. 2024](https://www.biorxiv.org/content/10.1101/2024.04.16.589741v2), with the help of [neuprint-python](https://connectome-neuprint.github.io/neuprint-python/docs/).
23 | - [Optic lobe, single-neuron level](https://colab.research.google.com/drive/1qEmO1tOOjSksa41OZ4_mX7KnJ8vBsvLU?usp=sharing)
24 |
25 | ## Larva
26 | Data from [Winding et al. 2023](https://www.science.org/doi/10.1126/science.add9330). You can also visualise the neurons in 3D in [CATMAID](https://catmaid.virtualflybrain.org/), for example.
27 | - [single-neuron level](https://colab.research.google.com/drive/1VIMNFBp7dCgN5XOQ9vvzPaqb80BGPZx4?usp=sharing)
28 |
29 | # Mapping known to unknown
30 | To facilitate neural circuit interpretation, we compile a [list](https://docs.google.com/spreadsheets/d/1VHCEnurOdb4FDC_NUKZX_BpBckQ9LpKxv0CsK_ObVok/edit?usp=sharing) of cell types with known, *experimentally tested*, functions. [This example notebook](https://colab.research.google.com/drive/1oETJthJbdLEBhzApEbRynGxTMrOcwsf-?usp=sharing) uses this list to query neurons' receptive fields. The list aims to serve as a quick look-up of the literature, rather than a stipulation of neural function.
31 | - **Everyone has edit access, to help keep the list comprehensive and correct, and to make sure the publications you care about are cited correctly. Your contributions are much appreciated. Please handle the list with care.**
32 | - When multiple entries are to be added in the same cell (e.g. when multiple publications are related to the same cell type), please separate the entries with `; ` (semicolon + space), to facilitate programmatic access.
33 |
34 | # Structure-function relationship
35 | Using `connectome_interpreter`, we compare the published connectomes against published experimental results:
36 | - [Taisz et al. 2023](https://colab.research.google.com/drive/1WNNnNCjTey-iSlHPkxMlr_EaLsRMs9iX?usp=drive_link): Generating parallel representations of position and identity in the olfactory system ([paper](https://www.cell.com/cell/abstract/S0092-8674(23)00472-5))
37 | - [Huoviala et al. 2020](https://colab.research.google.com/drive/1EyrGWO7MqpCZLvT2h4RyT4SaQy2fwYQT?usp=sharing): Neural circuit basis of aversive odour processing in Drosophila from sensory input to descending output ([paper](https://www.biorxiv.org/content/10.1101/394403v2))
38 | - [Frechter et al. 2019](https://colab.research.google.com/drive/1cSWNUdaU8Pll77eh4kOEz-NmrKHLnj-K?usp=sharing): Functional and anatomical specificity in a higher olfactory centre ([paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6550879/))
39 | - [Olsen et al. 2010](https://colab.research.google.com/drive/1dA5GTHg25S3Mc9CBtexplfjk1z1kM04V?usp=sharing): Divisive normalization in olfactory population codes ([paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2866644/))
40 |
41 | # Notes
42 | - Pre-processed connectomics data (and the scripts used for pre-processing) are [here](https://github.com/YijieYin/connectome_data_prep/tree/main): the adjacency matrices are `scipy.sparse` matrices saved as `.npz`, and the metadata are `.csv` files (a loading sketch follows below).
43 | - For dataset requests / feature requests / feedback, please open an issue or email me at `yy432`at`cam.ac.uk` :).
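
For example, loading one of the pre-processed datasets might look like this (a sketch: `adjacency.npz` and `meta.csv` are placeholder names; see the data-prep repository above for the actual files):
```python
import pandas as pd
from scipy import sparse

adj = sparse.load_npz("adjacency.npz")  # adjacency matrix, as a scipy sparse matrix
meta = pd.read_csv("meta.csv")  # neuron metadata
print(adj.shape, len(meta))
```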
44 |
--------------------------------------------------------------------------------
/connectome_interpreter/__init__.py:
--------------------------------------------------------------------------------
1 | from ._version import __version__
2 | from .activation_maximisation import *
3 | from .compress_paths import *
4 | from .external_map import *
5 | from .path_finding import *
6 | from .utils import *
7 | from .external_paths import *
8 |
9 | # from .activation_maximisation import MultilayeredNetwork
10 |
--------------------------------------------------------------------------------
/connectome_interpreter/_version.py:
--------------------------------------------------------------------------------
1 | __version__ = "2.8.1"
2 | # If you're making a patch or a minor bug fix, increment the patch version,
3 | # e.g., from 0.1.0 to 0.1.1.
4 | # If you're adding functionality in a backwards-compatible manner, increment
5 | # the minor version, e.g., from 0.1.0 to 0.2.0.
6 | # If you're making incompatible API changes, increment the major version, e.g.,
7 | # from 0.1.0 to 1.0.0.
8 |
--------------------------------------------------------------------------------
/connectome_interpreter/data/Badel2016/Badel_et_al_2016_raw.xls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/connectome_interpreter/data/Badel2016/Badel_et_al_2016_raw.xls
--------------------------------------------------------------------------------
/connectome_interpreter/data/DoOR/door_receptor_mappings.csv:
--------------------------------------------------------------------------------
1 | "","receptor","sensillum","OSN","glomerulus","co.receptor","coexpressing","related1","related2","related3","related4","related5","related6","Ors","sensillum.type","adult","larva","dataset.existing","comment","code","code.OSN"
2 | "1","?","?","?","VA7m","?","","","","","","","","?","",NA,NA,FALSE,"","VA7m",NA
3 | "2","Or42b","ab1","ab1A","DM1","Orco","","","","","","","","Or42b","antennal basiconic",TRUE,TRUE,TRUE,"","DM1","ab1A"
4 | "3","Or92a","ab1","ab1B","VA2","Orco","","","","","","","","Or92a","antennal basiconic",TRUE,NA,TRUE,"","VA2","ab1B"
5 | "4","Gr21a.Gr63a","ab1","ab1C","V","","Gr21a+Gr63a","","","","","","","Gr21a+Gr63a","antennal basiconic",TRUE,TRUE,TRUE,"","V","ab1C"
6 | "5","Or10a","ab1","ab1D","DL1","Orco","Gr10a","Gr10a","","","","","","Or10a+Gr10a","antennal basiconic",TRUE,NA,TRUE,"","DL1","ab1D"
7 | "6","Or67a","ab10","ab10A","DM6","Orco","","","","","","","","Or67a","antennal basiconic",TRUE,NA,TRUE,"","DM6","ab10A"
8 | "7","Or49a","ab10","ab10B","DL4","Orco","Or85f","Or85f","","","","","","Or49a+85f","antennal basiconic",TRUE,TRUE,TRUE,"",NA,NA
9 | "8","Or85f","ab10","ab10B","DL4","Orco","Or49a","Or49a","","","","","","Or49a+85f","antennal basiconic",TRUE,NA,TRUE,"","DL4","ab10B"
10 | "9","Or59b","ab2","ab2A","DM4","Orco","","","","","","","","Or59b","antennal basiconic",TRUE,NA,TRUE,"","DM4","ab2A"
11 | "10","ab2B","ab2","ab2B","DM5","Orco","Or85a+33b","Or85a","Or33b","","","","","Or85a+33b","antennal basiconic",NA,NA,TRUE,"","DM5","ab2B"
12 | "11","Or85a","ab2","ab2B","DM5","Orco","Or33b","Or33b","ab2B","","","","","Or85a+33b","antennal basiconic",TRUE,NA,TRUE,"",NA,NA
13 | "12","Or33b","ab2+ab5","ab2B+ab5B","DM5+DM3","Orco","Or85a (in ab2B) / Or47a (in ab5B)","Or85a","Or47a","ab2B","ab5B","","","Or85a+33b","antennal basiconic",TRUE,TRUE,TRUE,"",NA,NA
14 | "13","Or22a","ab3","ab3A","DM2","Orco","Or22b","Or22b","","","","","","Or22a+b","antennal basiconic",TRUE,NA,TRUE,"","DM2","ab3A"
15 | "14","Or22b","ab3","ab3A","DM2","Orco","Or22a","Or22a","","","","","","Or22a+b","antennal basiconic",TRUE,NA,TRUE,"",NA,NA
16 | "15","ab3B","ab3","ab3B","VM5d","Orco","Or85b+98b","Or85b","Or98b","","","","","Or85b+98b","antennal basiconic",NA,NA,FALSE,"",NA,NA
17 | "16","Or85b","ab3","ab3B","VM5d","Orco","Or98b","Or98b","ab3B","","","","","Or85b+98b","antennal basiconic",TRUE,NA,TRUE,"","VM5d","ab3B"
18 | "17","Or98b","ab3","ab3B","VM5d","Orco","Or85b","Or85b","ab3B","","","","","Or85b+98b","antennal basiconic",TRUE,NA,FALSE,"see Or85b for data",NA,NA
19 | "18","Or7a","ab4","ab4A","DL5","Orco","","","","","","","","Or7a","antennal basiconic",TRUE,TRUE,TRUE,"","DL5","ab4A"
20 | "19","ab4B","ab4","ab4B","DA2","Orco","Or33a+56a","Or33a","Or56a","","","","","Or33a+56a","antennal basiconic",NA,NA,TRUE,"","DA2","ab4B"
21 | "20","Or33a","ab4","ab4B","DA2","Orco","Or56a","Or56a","ab4B","","","","","Or33a+56a","antennal basiconic",TRUE,TRUE,TRUE,"",NA,NA
22 | "21","Or56a","ab4","ab4B","DA2","Orco","Or33a","Or33a","ab4B","","","","","Or33a+56a","antennal basiconic",TRUE,NA,FALSE,"see ab4B for data",NA,NA
23 | "22","Or82a","ab5","ab5A","VA6","Orco","","","","","","","","Or82a","antennal basiconic",TRUE,TRUE,TRUE,"","VA6","ab5A"
24 | "23","ab5B","ab5","ab5B","DM3","Orco","Or47a+33b","Or47a","Or33b","","","","","Or47a+33b","antennal basiconic",NA,NA,TRUE,"","DM3","ab5B"
25 | "24","Or47a","ab5","ab5B","DM3","Orco","Or33b","Or33b","ab5B","","","","","Or47a+33b","antennal basiconic",TRUE,TRUE,TRUE,"",NA,NA
26 | "25","Or13a","ab6","ab6A","DC2","Orco","","","","","","","","Or13a","antennal basiconic",TRUE,NA,TRUE,"","DC2","ab6A"
27 | "26","Or49b","ab6","ab6B","VA5","Orco","","","","","","","","Or49b","antennal basiconic",TRUE,NA,TRUE,"","VA5","ab6B"
28 | "27","Or98a","ab7","ab7A","VM5v","Orco","","","","","","","","Or98a","antennal basiconic",TRUE,NA,TRUE,"","VM5v","ab7A"
29 | "28","Or67c","ab7","ab7B","VC4","Orco","","","","","","","","Or67c","antennal basiconic",TRUE,NA,TRUE,"","VC4","ab7B"
30 | "29","Or43b","ab8","ab8A","VM2","Orco","","","","","","","","Or43b","antennal basiconic",TRUE,NA,TRUE,"","VM2","ab8A"
31 | "30","Or9a","ab8","ab8B","VM3","Orco","","","","","","","","Or9a","antennal basiconic",TRUE,TRUE,TRUE,"","VM3","ab8B"
32 | "31","Or67b","ab9","ab9","VA3","Orco","","","","","","","","Or67b","antennal basiconic",TRUE,TRUE,TRUE,"","VA3","ab9X"
33 | "32","Or69a","ab9","ab9","D","Orco","","","","","","","","Or69a","antennal basiconic",TRUE,NA,TRUE,"","D","ab9Y"
34 | "33","Ir31a","ac1","ac1","VL2p","Ir8a","","ac1","","","","","","Ir31a","antennal coeloconic",TRUE,NA,TRUE,"","VL2p","ac1X"
35 | "34","Ir92a","ac1","ac1","VM1","Ir25a, Ir76b","","ac1","","","","","","Ir92a","antennal coeloconic",TRUE,NA,TRUE,"","VM1","ac1Z"
36 | "35","ac1A","ac1","ac1A","?","","","ac1","","","","","","","antennal coeloconic",NA,NA,TRUE,"",NA,"ac1A"
37 | "36","ac1","ac1","ac1A+B+C","VL1+VM1+VL2p","","","ac1A","ac1B","ac1BC","Ir31a","Ir75d","Ir92a","","antennal coeloconic",NA,NA,TRUE,"summed responses, individual ORNs could not be separated (Silbering et al. 2011)",NA,NA
38 | "37","ac1B","ac1","ac1B","?","","","ac1BC","ac1","","","","","","antennal coeloconic",NA,NA,TRUE,"",NA,"ac1B"
39 | "38","ac1BC","ac1","ac1B+C","?","","","ac1B","ac1","","","","","","antennal coeloconic",NA,NA,TRUE,"summed responses from ac1B and C ORNs, only A neuron could be separated (Marshall et al. 2010)",NA,NA
40 | "39","Ir75d","ac1, ac2, ac4","","VL1","Ir25a","","ac1","ac2","ac4","","","","Ir75d","antennal coeloconic",TRUE,NA,TRUE,"Ir75a is expressed in one ORN of each of the ac1, ac2 and the ac4 sensilla","VL1",NA
41 | "40","Ir41a","ac2","ac2","VC5","Ir76b","","ac2","","","","","","Ir41a","antennal coeloconic",TRUE,NA,TRUE,"","VC5","ac2Z"
42 | "41","Ir75a","ac2","ac2","DP1l","Ir8a","","Ir75b","Ir75c","ac2","ac3A","","","Ir75a","antennal coeloconic",TRUE,NA,TRUE,"Ir75a is the sole receptor expressed in some ac2 neurons projecting to glomerulus DP1l,
43 | Ir75a is also expressed in ac3A neurons projecting to glomerulus DL2d+v, here along with Ir75b and Ir75c (see data set ac3A for responses)","DP1l","ac2X"
44 | "42","ac2A","ac2","ac2A","?","","","ac2","","","","","","","antennal coeloconic",NA,NA,TRUE,"",NA,"ac2A"
45 | "43","ac2","ac2","ac2A+B+C","VL1+DP1l+VC5","","","ac2A","ac2B","ac2BC","Ir41a","Ir75a","Ir75d","","antennal coeloconic",NA,NA,TRUE,"summed responses, individual ORNs could not be separated (Silbering et al. 2011)",NA,NA
46 | "44","ac2B","ac2","ac2B","?","","","ac2","ac2BC","","","","","","antennal coeloconic",NA,NA,TRUE,"",NA,"ac2B"
47 | "45","ac2BC","ac2","ac2B+C","?","","","ac2","ac2B","","","","","","antennal coeloconic",NA,NA,TRUE,"summed responses from ac2B and C ORNs, only A neuron could be separated (Marshall et al. 2010)",NA,NA
48 | "46","ac3A","ac3","ac3A","DL2d/v","Ir8a","Ir75a+b+c","Ir75a","Ir75b","Ir75c","","","","Ir75a+b+c","antennal coeloconic",NA,NA,TRUE,"mapping to DL2d&v, separation unclear","DL2d","ac3A"
49 | "47","ac3A","ac3","ac3A","DL2d/v","Ir8a","Ir75a+b+c","Ir75a","Ir75b","Ir75c","","","","Ir75a+b+c","antennal coeloconic",NA,NA,TRUE,"mapping to DL2d&v, separation unclear","DL2v",NA
50 | "48","Ir75b","ac3","ac3A","DL2d/v","Ir8a","","Ir75a","Ir75c","ac3A","","","","Ir75a+b+c","antennal coeloconic",TRUE,NA,FALSE,"",NA,NA
51 | "49","Ir75c","ac3","ac3A","DL2d/v","Ir8a","","Ir75a","Ir75b","","","","","Ir75a+b+c","antennal coeloconic",TRUE,NA,FALSE,"",NA,NA
52 | "50","ac3_noOr35a","ac3","ac3A+B","DL2d/v+VC3","","","","","","","","","","antennal coeloconic",NA,NA,TRUE,"summed responses from the ac3 sensillum, Or35a was knocked down (Silbering et al. 2011)",NA,NA
53 | "51","ac3B","ac3","ac3B","VC3","Orco, Ir76b","Or35a","Or35a","","","","","","Or35a","antennal coeloconic",NA,NA,TRUE,"","VC3","ac3B"
54 | "52","Or35a","ac3","ac3B","VC3","Orco, Ir76b","","ac3B","","","","","","Or35a","antennal coeloconic",TRUE,TRUE,TRUE,"",NA,NA
55 | "53","ac4","ac4","ac4","?","","","","","","","","","","antennal coeloconic",NA,NA,TRUE,"summed responses, individual ORNs could not be separated (Silbering et al. 2011)",NA,NA
56 | "54","Ir76a","ac4","ac4","VM4","Ir25a, Ir76b","","","","","","","","Ir76a","antennal coeloconic",TRUE,NA,TRUE,"","VM4","ac4Z"
57 | "55","Ir84a","ac4","ac4","VL2a","Ir8a","","","","","","","","Ir84a","antennal coeloconic",TRUE,NA,TRUE,"","VL2a","ac4X"
58 | "56","Or67d","at1","at1A","DA1","Orco","","","","","","","","Or67d","antennal trichoid",TRUE,NA,TRUE,"","DA1","at1A"
59 | "57","Or83c","at2","at2A","DC3","Orco","","","","","","","","Or83c","antennal trichoid / intermediate",TRUE,NA,TRUE,"named ai2A in Ronderos et al. 2014","DC3","at2A"
60 | "58","Or23a","at2","at2B","DA3","Orco","","","","","","","","Or23a","antennal trichoid / intermediate",TRUE,NA,TRUE,"named ai2B in Ronderos et al. 2014","DA3","at2B"
61 | "59","Or19a","at3","at3","DC1","Orco","Or19b","Or19b","","","","","","Or19a+b","antennal trichoid / intermediate",TRUE,NA,TRUE,"named ai2A in Dweck et al. 2013","DC1","at3A"
62 | "60","Or19b","at3","at3","DC1","Orco","Or19a","Or19a","","","","","","Or19a+b","antennal trichoid / intermediate",TRUE,NA,FALSE,"named ai2A in Dweck et al. 2013; see Or19a for data",NA,NA
63 | "61","Or2a","at3","at3","DA4m","Orco","","","","","","","","Or2a","antennal trichoid / intermediate",TRUE,TRUE,TRUE,"named ai2 in Dweck et al. 2013","DA4m","at3Y"
64 | "62","Or43a","at3","at3","DA4l","Orco","","","","","","","","Or43a","antennal trichoid / intermediate",TRUE,NA,TRUE,"named ai2 in Dweck et al. 2013","DA4l","at3Z"
65 | "63","Or47b","at4","at4A","VA1v","Orco","","","","","","","","Or47b","antennal trichoid",TRUE,NA,TRUE,"","VA1v","at4A"
66 | "64","Or65a","at4","at4B","DL3","Orco","Or65b+c","Or65b","Or65c","","","","","Or65a+b+c","antennal trichoid",TRUE,NA,TRUE,"","DL3","at4B"
67 | "65","Or65b","at4","at4B","DL3","Orco","Or65a+c","Or65a","Or65c","","","","","Or65a+b+c","antennal trichoid",TRUE,NA,FALSE,"see aOr65a for data",NA,NA
68 | "66","Or65c","at4","at4B","DL3","Orco","Or65a+b","Or65a","Or65b","","","","","Or65a+b+c","antennal trichoid",TRUE,NA,FALSE,"see aOr65a for data",NA,NA
69 | "67","Or88a","at4","at4C","VA1d","Orco","","","","","","","","Or88a","antennal trichoid",TRUE,NA,TRUE,"","VA1d","at4C"
70 | "68","Or42a","pb1","pb1A","VM7d","Orco","","","","","","","","Or42a","maxillary palp",TRUE,TRUE,TRUE,"","VM7d","pb1A"
71 | "69","Or71a","pb1","pb1B","VC2","Orco","","","","","","","","Or71a","maxillary palp",TRUE,NA,TRUE,"","VC2","pb1B"
72 | "70","Or33c","pb2","pb2A","VC1","Orco","Or85e","Or85e","","","","","","Or33c+85e","maxillary palp",TRUE,NA,TRUE,"",NA,NA
73 | "71","Or85e","pb2","pb2A","VC1","Orco","Or33c","Or33c","","","","","","Or33c+85e","maxillary palp",TRUE,NA,TRUE,"",NA,NA
74 | "72","pb2A","pb2","pb2A","VC1","Orco","Or33c+85e","Or33c","Or85e","","","","","Or33c+85e","maxillary palp",NA,NA,TRUE,"","VC1","pb2A"
75 | "73","Or46a","pb2","pb2B","VA7l","Orco","","","","","","","","Or46a","maxillary palp",TRUE,NA,TRUE,"","VA7l","pb2B"
76 | "74","Or59c","pb3","pb3A","VM7v","Orco","","","","","","","","Or59c","maxillary palp",TRUE,NA,TRUE,"","VM7v","pb3A"
77 | "75","Or85d","pb3","pb3B","VA4","Orco","","","","","","","","Or85d","maxillary palp",TRUE,NA,TRUE,"","VA4","pb3B"
78 | "76","Ir40a.VP1","sacI","sacI","VP1","Ir25a","","","","","","","","Ir40a","sacculus",TRUE,NA,FALSE,"formerly VM6 (Grabe et al. 2014","VP1","sacIX"
79 | "77","Ir40a.VP4","sacI","sacI","VP4","Ir25a","","","","","","","","Ir40a","sacculus",TRUE,NA,FALSE,"","VP4","sacIY"
80 | "78","Ir64a.DC4","sacIII","sacII","DC4","Ir8a","","","","","","","","Ir64a","sacculus",TRUE,NA,TRUE,"","DC4","sacIIX"
81 | "79","Ir64a.DP1m","sacIII","sacII","DP1m","Ir8a","","","","","","","","Ir64a","sacculus",TRUE,NA,TRUE,"","DP1m","sacIIY"
82 | "80","Ir25a","","","","-","","","","","","","","","antennal coeloconic",TRUE,NA,FALSE,"the Ir25a co-receptor",NA,""
83 | "81","Ir76b","","","","-","","","","","","","","","antennal coeloconic",TRUE,NA,FALSE,"the Ir76b co-receptor (?)",NA,""
84 | "82","Ir8a","","","","-","","","","","","","","","antennal coeloconic",TRUE,NA,FALSE,"the Ir8a co-receptor",NA,""
85 | "83","Or1a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
86 | "84","Or22c","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
87 | "85","Or24a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
88 | "86","Or30a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
89 | "87","Or45a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
90 | "88","Or45b","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
91 | "89","Or59a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
92 | "90","Or63a","","","","Orco","","","","","","","","","",FALSE,TRUE,FALSE,"",NA,""
93 | "91","Or74a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
94 | "92","Or83a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
95 | "93","Or83b","","","","-","","","","","","","","","",TRUE,TRUE,FALSE,"the Orco co-receptor expressed together with all Ors",NA,""
96 | "94","Or85c","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
97 | "95","Or94a","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
98 | "96","Or94b","","","","Orco","","","","","","","","","",FALSE,TRUE,TRUE,"",NA,""
99 |
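
A sketch of loading this table with pandas (the unnamed first column is a row index):
```python
import pandas as pd

mappings = pd.read_csv(
    "connectome_interpreter/data/DoOR/door_receptor_mappings.csv", index_col=0
)
print(mappings[["receptor", "sensillum", "glomerulus"]].head())
```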
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/1-s2.0-S2211124718306636-mmc2.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/connectome_interpreter/data/Dweck2018/1-s2.0-S2211124718306636-mmc2.xlsx
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/1-s2.0-S2211124718306636-mmc3.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/connectome_interpreter/data/Dweck2018/1-s2.0-S2211124718306636-mmc3.xlsx
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/1-s2.0-S2211124718306636-mmc4.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/connectome_interpreter/data/Dweck2018/1-s2.0-S2211124718306636-mmc4.xlsx
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/adult_chem2glom.csv:
--------------------------------------------------------------------------------
1 | glomerulus,(-)-(E)-Caryophyllene,(-)-a-Copaene,(-)-Camphor,(-)-Menthone,(-)-Terpinen-4-ol,(+)-Limonene oxide,(±)-6-Methyl-5-hepten-2-ol,(±)-Ethyl 3-acetoxy butyrate,(±)-Sabinene,(E)-2-Hexen-1-yl propionate,(E)-2-Hexenal,(E)-2-Hexenyl acetate,(R)-(-)-Linalool,(R)-(+)-Limonene,(S)-(-)-Limonene,(S)-(+)-Linalool,(Z)-3-Hexenyl acetate,(Z)-3-Hexenyl butyrate,1-Heptanol,1-Hexanol,1-Nonanol,1-Octanol,1-Octen-3-ol,2-Ethyl-1-hexanol,2-Heptanol,2-Heptanone,2-Heptyl acetate,2-Heptyl butyrate,2-Heptyl hexanoate,2-Nonanone,2-Pentyl butyrate,2-Phenethyl acetate,2-Phenylethanol,"2,3-Butanediol diacetate",3-Hexanone,3-Octanol,3-Octanone,4-Ethylguaiacol,4-Ethylveratrole,4-Methyl veratrole,6-Methyl-5-hepten-2-one,a-Farnesene,a-Humulene,a-Ionone,a-Terpineol,Acetoin,Acetoin acetate,Acetophenone,Benzyl acetate,Benzyl butyrate,Butyl acetate,Butyl butyrate,Butyl hexanoate,Butyl isovalerate,Butyl valerate,Citronellyl acetate,Creosol,Ethyl (E)-2-octenoate,Ethyl (E)-3-hexenoate,Ethyl (methylthio)acetate,Ethyl 2-methylbutyrate,Ethyl 3-(methylthio)propionate,Ethyl 3-hydroxybutyrate,Ethyl 3-hydroxyhexanoate,Ethyl acetate,Ethyl benzoate,Ethyl butyrate,Ethyl crotonate,Ethyl heptanoate,Ethyl hexanoate,Ethyl nonanoate,Ethyl octanoate,Ethyl pentanoate,Ethyl phenylacetate,Ethyl propionate,Ethyl salicylate,Eugenol,Eugenol methyl ether,Farnesol,Furaneol methylether,g-Hexalactone,g-Terpinene,Geranyl acetate,Geranyl acetone,Guaiacol,Heptyl acetate,Hexanal,Hexyl 2-methylbutanoate,Hexyl acetate,Hexyl butyrate,Hexyl hexanoate,Hexyl isobutyrate,Isoamyl butyrate,Isobutyl acetate,Isopentyl acetate,Methyl (E)-2-octenoate,Methyl 5-hexenoate,Methyl acetate,Methyl benzoate,Methyl hexanoate,Methyl octanoate,Methyl salicylate,Nonyl acetate,Octyl acetate,p-Tolyl acetate,Pentyl acetate,Phenethyl isobutyrate,Phenethyl propionate,Prenyl acetate,Terpinolene,Valencene,Veratrole,β-Citronellol,β-Elemene,β-Ionone,β-Myrcene
2 | DA4m,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
3 | DL5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.28342245989304815,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2192513368983957,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
4 | VM3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5911602209944752,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.30386740331491713,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
5 | DL1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9803921568627451,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6862745098039216,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
6 | DC2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.36231884057971014,0.0,0.0,0.3864734299516908,0.0,0.0,1.0,0.0,0.42995169082125606,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.391304347826087,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.3671497584541063,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
7 | DC1,1.0,0.8732394366197183,0.0,0.8450704225352113,0.5915492957746479,0.8309859154929577,0.0,0.0,0.49295774647887325,0.0,0.0,0.0,0.5352112676056338,0.5070422535211268,0.0,0.5352112676056338,0.0,0.0,0.0,0.0,0.0,0.39436619718309857,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7323943661971831,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4225352112676056,0.352112676056338,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5352112676056338,0.0,0.28169014084507044,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4788732394366197,0.0,0.0,0.0,0.0,0.0,0.0,0.4225352112676056,0.0,0.0,0.0,0.49295774647887325,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.29577464788732394,0.5211267605633803,0.6338028169014085,0.0,0.0,0.6056338028169014,0.0,0.28169014084507044
8 | DM2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.22777777777777777,0.0,0.0,0.0,0.0,0.0,0.17777777777777778,0.0,0.37222222222222223,1.0,0.0,0.0,0.48333333333333334,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.0,0.0,0.0,0.2,0.0,0.5833333333333334,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
9 | DA3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
10 | VC1,0.0,0.0,0.33620689655172414,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.646551724137931,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4396551724137931,0.0
11 | VC3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9411764705882353,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.9647058823529412,0.0,0.8352941176470589,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7058823529411765,0.6352941176470588,0.0,0.8,0.7411764705882353,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
12 | VM7d,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4825174825174825,0.0,0.0,0.0,0.0,0.0,0.0,0.34265734265734266,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.48951048951048953,0.0,0.0,0.0,0.0,0.0,0.7972027972027972,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
13 | DM1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
14 | DA4l,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
15 | VM2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.926605504587156,0.0,0.6972477064220184,0.0,0.0,0.1926605504587156,0.5137614678899083,0.0,0.1834862385321101,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.21100917431192662,0.27522935779816515,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
16 | VA7l,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
17 | DM3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.575,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5,0.0,0.0,0.0,0.0,0.0,0.625,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
18 | VA1v,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
19 | VA5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9322916666666666,0.0,0.0,0.0,0.0
20 | DA2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
21 | DM4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4576271186440678,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5084745762711864,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
22 | VM7v,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9375,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
23 | DL3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
24 | DM6,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.4,0.3121951219512195,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8682926829268293,0.9804878048780488,0.0,0.5121951219512195,0.2146341463414634,0.5024390243902439,0.4878048780487805,0.0,0.0,0.0,0.35609756097560974,0.0,0.0,0.0,0.0,0.0,0.0,0.375609756097561,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6878048780487804,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.3073170731707317,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2780487804878049,0.0,0.0,0.0,0.0,0.0,0.0,0.0
25 | VA3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4344262295081967,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.639344262295082,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7868852459016393,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
26 | VC4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
27 | DA1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
28 | D,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.42105263157894735,0.0,0.0,0.0,0.7763157894736842,0.0,0.39473684210526316,0.7763157894736842,0.3684210526315789,1.0,0.0,0.0,0.0,0.881578947368421,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8157894736842105,0.0,0.0,0.0,0.4605263157894737,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.3815789473684211,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.32894736842105265,0.0,0.0,0.0,0.34210526315789475,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5921052631578947,0.0,0.0,0.0
29 | VC2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.3817204301075269,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6989247311827957,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6182795698924731,0.8548387096774194,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.40860215053763443,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
30 | VA6,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6470588235294118,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9411764705882353,1.0,0.0,0.0,0.0,0.0,0.49411764705882355,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
31 | DC3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4090909090909091,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
32 | DM5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8111111111111111,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.20555555555555555,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
33 | VM5d,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7438423645320197,1.0,0.21674876847290642,0.19704433497536947,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.22660098522167488,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.24630541871921183,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.18719211822660098,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
34 | VA4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.30851063829787234,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.14361702127659576,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.17553191489361702,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7659574468085106,0.8723404255319149,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
35 | VA1d,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
36 | VA2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
37 | VM5v,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.17703349282296652,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.22488038277511962,0.0,0.0,0.0,0.3062200956937799,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9234449760765551,0.0,0.0,0.0,0.0,0.0,0.0,0.0
38 | DM2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.22777777777777777,0.0,0.0,0.0,0.0,0.0,0.17777777777777778,0.0,0.37222222222222223,1.0,0.0,0.0,0.48333333333333334,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2,0.0,0.0,0.0,0.2,0.0,0.5833333333333334,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
39 | VC1,0.0,0.0,0.33620689655172414,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.646551724137931,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4396551724137931,0.0
40 |
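
A sketch of finding the strongest odorant for each glomerulus in this matrix (assuming, from the 0-1 range of the values, that they are normalised responses):
```python
import pandas as pd

chem = pd.read_csv(
    "connectome_interpreter/data/Dweck2018/adult_chem2glom.csv",
    index_col="glomerulus",
)
# column label (odorant) with the largest value in each row
print(chem.idxmax(axis=1).head())
```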
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/adult_fruit2glom.csv:
--------------------------------------------------------------------------------
1 | glomerulus,African breadfruit,African mango,African nutmeg,Apple,Apricot,Avocado,Banana,Blackberry,Blueberry,Cactus fig,Currant,Fig,Galiamelon,Grape,Grapefruit,Honeymelon,Java plum,Kiwi,Mandarina,Mango,Monkey fruit,Napoleons's button,Orange,Papaya,Passionfruit,Peach,Pear,Physalis,Pineapple,Plums,Pomegranate,Raspberry,Strawberry,Watermelon
2 | D,0.0,0.0,0.2222222222222222,0.3333333333333333,0.0,0.0,0.1111111111111111,0.0,0.0,0.0,0.0,0.0,0.1111111111111111,0.0,0.8888888888888888,0.1111111111111111,0.1111111111111111,0.0,0.0,0.0,0.5555555555555556,0.1111111111111111,0.6666666666666666,0.0,1.0,0.4444444444444444,0.2222222222222222,0.1111111111111111,0.4444444444444444,0.3333333333333333,0.0,0.0,0.4444444444444444,0.1111111111111111
3 | DA1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
4 | DA2,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
5 | DA3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
6 | DA4l,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
7 | DA4m,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
8 | DC1,0.25,0.3125,0.75,0.0625,0.0,0.1875,0.125,0.0,0.0,0.0,0.0,0.0625,0.0,0.0,1.0,0.0,0.25,0.0,0.375,0.125,0.3125,0.4375,0.8125,0.0,0.375,0.0,0.0625,0.375,0.0,0.125,0.0625,0.25,0.0625,0.1875
9 | DC2,1.0,0.25,0.0,0.25,0.0,0.0,0.25,0.5,0.0,0.25,0.25,0.0,0.75,0.0,0.0,0.0,0.25,0.0,0.0,0.0,0.0,0.0,0.0,0.25,0.5,0.25,0.5,0.25,0.25,0.0,0.25,0.25,0.0,0.0
10 | DC3,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5,0.0,0.5,0.0,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0
11 | DL1,0.0,1.0,0.75,0.0,0.25,0.0,0.0,0.0,0.0,0.25,0.0,0.0,0.25,0.25,0.5,0.25,0.25,0.0,0.0,0.0,0.5,0.25,0.0,0.0,0.75,0.0,0.0,0.5,0.25,0.0,0.0,0.0,0.0,0.0
12 | DL2v,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
13 | DL3,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
14 | DL4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
15 | DL5,0.8,0.2,0.2,1.0,0.0,0.0,0.2,0.0,0.2,0.2,0.2,0.2,0.8,0.2,0.2,0.4,0.2,0.2,0.0,0.2,0.6,0.6,0.4,0.0,0.0,0.4,1.0,0.2,0.0,0.0,0.0,0.4,0.2,0.4
16 | DM1,1.0,1.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,1.0,0.0
17 | DM2,1.0,0.7,0.3,0.8,0.0,0.0,0.2,0.6,0.3,0.7,0.0,0.1,0.9,0.0,0.1,0.7,0.2,0.2,0.0,0.0,0.6,0.1,0.2,0.1,0.4,0.3,0.3,0.4,0.7,0.0,0.0,0.3,0.7,0.0
18 | DM3,0.8,0.2,0.0,0.0,0.0,0.0,0.2,0.2,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.8,0.0,0.4,0.0,0.2,0.4,0.4,0.0,0.0,0.2,0.6,0.4,0.2,0.4,0.0,0.0,0.4,0.6,0.0
19 | DM4,0.6666666666666666,0.0,0.0,0.0,0.6666666666666666,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.6666666666666666,1.0,0.0,0.0,0.0,0.3333333333333333,0.3333333333333333,0.0,0.6666666666666666,0.0,0.0,1.0,1.0,0.0
20 | DM5,0.5,0.3333333333333333,0.0,0.5,0.0,0.0,0.3333333333333333,0.16666666666666666,0.0,0.16666666666666666,0.0,0.0,0.8333333333333334,0.0,0.16666666666666666,0.3333333333333333,0.16666666666666666,0.16666666666666666,0.0,0.0,0.5,0.5,0.16666666666666666,0.0,1.0,0.0,0.16666666666666666,0.8333333333333334,0.3333333333333333,0.0,0.0,0.16666666666666666,0.5,0.0
21 | DM6,0.625,0.6666666666666666,0.25,1.0,0.375,0.0,0.5833333333333334,0.08333333333333333,0.041666666666666664,0.4166666666666667,0.0,0.0,0.4166666666666667,0.041666666666666664,0.4583333333333333,0.20833333333333334,0.0,0.08333333333333333,0.16666666666666666,0.125,0.16666666666666666,0.375,0.5833333333333334,0.3333333333333333,0.6666666666666666,0.041666666666666664,0.3333333333333333,0.4166666666666667,0.5416666666666666,0.20833333333333334,0.0,0.08333333333333333,0.4583333333333333,0.041666666666666664
22 | DP1l,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
23 | V,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
24 | VA1d,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
25 | VA1v,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
26 | VA2,0.0,0.0,0.0,1.0,1.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0
27 | VA3,0.6666666666666666,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.0,0.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.0,0.3333333333333333,1.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,1.0,0.0,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.0,0.3333333333333333,0.6666666666666666,0.3333333333333333
28 | VA4,0.5,0.25,0.25,0.5,0.25,0.25,0.5,0.25,0.25,0.5,0.25,0.25,1.0,0.25,0.25,0.25,0.5,0.25,0.0,0.0,0.5,0.5,0.25,0.25,1.0,1.0,0.75,0.25,1.0,0.25,0.25,0.25,0.25,0.25
29 | VA5,0.5,0.0,0.5,0.0,0.0,0.0,0.5,0.0,0.5,0.5,0.5,1.0,1.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.5,0.5,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0
30 | VA6,1.0,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.0,0.0,0.3333333333333333,0.0,0.6666666666666666,0.0,0.0,0.6666666666666666,0.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.6666666666666666,0.3333333333333333,1.0,1.0,1.0,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333
31 | VA7l,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0
32 | VC1,0.5,0.3333333333333333,1.0,0.3333333333333333,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.3333333333333333,0.0,0.3333333333333333,0.16666666666666666,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.16666666666666666,0.6666666666666666,0.6666666666666666,0.3333333333333333,0.0,0.8333333333333334,0.5,0.0,0.8333333333333334,0.3333333333333333,0.0,0.0,0.8333333333333334,0.3333333333333333,0.3333333333333333
33 | VC2,1.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.16666666666666666,0.0,0.0,0.16666666666666666,0.0,0.0,1.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.8333333333333334,0.3333333333333333,0.16666666666666666,0.0,0.3333333333333333,0.0,0.16666666666666666,0.16666666666666666,0.3333333333333333,0.0,0.0,0.0,0.16666666666666666,0.0
34 | VC3,0.25,0.125,0.0,1.0,0.25,0.125,0.5,0.125,0.125,0.375,0.125,0.0,0.125,0.125,0.75,0.375,0.0,0.0,0.0,0.0,0.125,0.125,0.625,0.0,0.0,0.25,0.375,0.375,0.0,0.375,0.0,0.25,0.625,0.25
35 | VC4,1.0,1.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0,0.0,0.0,1.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0
36 | VC5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
37 | VL1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
38 | VL2a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
39 | VL2p,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
40 | VM1,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
41 | VM2,0.18181818181818182,0.8181818181818182,0.18181818181818182,0.5454545454545454,0.09090909090909091,0.0,0.45454545454545453,0.09090909090909091,0.09090909090909091,0.18181818181818182,0.0,0.09090909090909091,0.6363636363636364,0.09090909090909091,0.0,0.09090909090909091,0.09090909090909091,0.18181818181818182,0.09090909090909091,0.18181818181818182,0.5454545454545454,0.45454545454545453,0.18181818181818182,0.09090909090909091,0.7272727272727273,0.18181818181818182,0.36363636363636365,0.45454545454545453,1.0,0.36363636363636365,0.09090909090909091,0.0,0.7272727272727273,0.0
42 | VM3,0.5,0.5,0.25,0.25,0.25,0.0,0.75,0.25,0.5,0.75,0.25,0.25,0.75,0.25,0.0,0.5,0.5,0.5,0.0,0.25,0.75,0.75,0.5,0.5,1.0,0.25,0.25,0.75,0.75,0.5,0.25,0.5,0.5,0.25
43 | VM4,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
44 | VM5d,0.5,0.3333333333333333,0.16666666666666666,0.8333333333333334,0.5,0.0,0.8333333333333334,0.3333333333333333,0.16666666666666666,0.6666666666666666,0.16666666666666666,0.3333333333333333,0.5,0.16666666666666666,0.16666666666666666,0.6666666666666666,0.16666666666666666,0.3333333333333333,0.0,0.0,0.3333333333333333,0.3333333333333333,0.16666666666666666,0.0,1.0,0.16666666666666666,0.3333333333333333,0.3333333333333333,0.16666666666666666,0.3333333333333333,0.0,0.16666666666666666,0.5,0.16666666666666666
45 | VM5v,1.0,0.4,0.4,0.9,0.2,0.0,0.2,0.1,0.1,0.1,0.0,0.2,0.4,0.3,0.2,0.2,0.9,0.3,0.2,0.0,0.6,0.8,0.2,0.2,0.9,0.2,1.0,0.5,0.7,0.2,0.1,0.3,0.4,0.2
46 | VM7d,0.6666666666666666,0.16666666666666666,0.6666666666666666,0.6666666666666666,0.3333333333333333,0.0,0.3333333333333333,0.0,0.0,0.16666666666666666,0.16666666666666666,0.5,1.0,0.0,0.3333333333333333,0.6666666666666666,0.16666666666666666,0.3333333333333333,0.16666666666666666,0.3333333333333333,0.5,0.3333333333333333,0.6666666666666666,0.16666666666666666,0.8333333333333334,0.16666666666666666,0.5,0.5,0.5,0.0,0.16666666666666666,0.16666666666666666,0.5,0.16666666666666666
47 | VM7v,0.2,0.2,0.2,1.0,0.2,0.2,0.6,0.0,0.2,0.6,0.2,0.2,0.4,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.4,0.6,0.4,0.2,0.0,0.2,0.2,0.2,0.4,0.2
48 | DL2d,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
49 |
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/larva_chem2or.csv:
--------------------------------------------------------------------------------
1 | receptor,(-)-(E)-Caryophyllene,(-)-a-Copaene,(-)-Camphor,(-)-Menthone,(-)-Terpinen-4-ol,(+)-Limonene oxide,(±)-6-Methyl-5-hepten-2-ol,(±)-Ethyl 3-acetoxy butyrate,(±)-Sabinene,(E)-2-Hexen-1-yl propionate,(E)-2-Hexenal,(E)-2-Hexenyl acetate,(R)-(-)-Linalool,(R)-(+)-Limonene,(S)-(-)-Limonene,(S)-(+)-Linalool,(Z)-3-Hexenyl acetate,(Z)-3-Hexenyl butyrate,1-Heptanol,1-Hexanol,1-Nonanol,1-Octanol,1-Octen-3-ol,2-Ethyl-1-hexanol,2-Heptanol,2-Heptanone,2-Heptyl acetate,2-Heptyl butyrate,2-Heptyl hexanoate,2-Nonanone,2-Pentyl butyrate,2-Phenethyl acetate,2-Phenylethanol,"2,3-Butanediol diacetate",3-Hexanone,3-Octanol,3-Octanone,4-Ethylguaiacol,4-Ethylveratrole,4-Methyl veratrole,6-Methyl-5-hepten-2-one,a-Farnesene,a-Humulene,a-Ionone,a-Terpineol,Acetoin,Acetoin acetate,Acetophenone,Benzyl acetate,Benzyl butyrate,Butyl acetate,Butyl butyrate,Butyl hexanoate,Butyl isovalerate,Butyl valerate,Citronellyl acetate,Creosol,Ethyl (E)-2-octenoate,Ethyl (E)-3-hexenoate,Ethyl (methylthio)acetate,Ethyl 2-methylbutyrate,Ethyl 3-(methylthio)propionate,Ethyl 3-hydroxybutyrate,Ethyl 3-hydroxyhexanoate,Ethyl acetate,Ethyl benzoate,Ethyl butyrate,Ethyl crotonate,Ethyl heptanoate,Ethyl hexanoate,Ethyl nonanoate,Ethyl octanoate,Ethyl pentanoate,Ethyl phenylacetate,Ethyl propionate,Ethyl salicylate,Eugenol,Eugenol methyl ether,Farnesol,Furaneol methylether,g-Hexalactone,g-Terpinene,Geranyl acetate,Geranyl acetone,Guaiacol,Heptyl acetate,Hexanal,Hexyl 2-methylbutanoate,Hexyl acetate,Hexyl butyrate,Hexyl hexanoate,Hexyl isobutyrate,Isoamyl butyrate,Isobutyl acetate,Isopentyl acetate,Methyl (E)-2-octenoate,Methyl 5-hexenoate,Methyl acetate,Methyl benzoate,Methyl hexanoate,Methyl octanoate,Methyl salicylate,Nonyl acetate,Octyl acetate,p-Tolyl acetate,Pentyl acetate,Phenethyl isobutyrate,Phenethyl propionate,Prenyl acetate,Terpinolene,Valencene,Veratrole,β-Citronellol,β-Elemene,β-Ionone,β-Myrcene
2 | Or2a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
3 | Or7a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.28342245989304815,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.2192513368983957,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
4 | Or9a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5911602209944752,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.30386740331491713,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
5 | Or22c,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.39901477832512317,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8916256157635468,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7044334975369458,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
6 | Or24a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9558823529411765,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
7 | Or30a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
8 | Or33b,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
9 | Or35a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9411764705882353,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.9647058823529412,0.0,0.8352941176470589,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7058823529411765,0.6352941176470588,0.0,0.8,0.7411764705882353,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
10 | Or42a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4825174825174825,0.0,0.0,0.0,0.0,0.0,0.0,0.34265734265734266,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.48951048951048953,0.0,0.0,0.0,0.0,0.0,0.7972027972027972,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
11 | Or42b,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
12 | Or45a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.40641711229946526,0.3315508021390374,0.0,0.0,0.0,0.16042780748663102,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5668449197860963,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.3850267379679144,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.10695187165775401,0.20855614973262032,0.25133689839572193,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8074866310160428,0.0,0.0,0.3850267379679144,0.33689839572192515,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.28342245989304815,0.0,0.5454545454545454,0.6417112299465241,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
13 | Or45b,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
14 | Or47a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.575,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.5,0.0,0.0,0.0,0.0,0.0,0.625,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
15 | Or59a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.75,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.95,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9375,0.0,0.0,0.0,0.0
16 | Or67b,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.4344262295081967,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.639344262295082,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.7868852459016393,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
17 | Or74a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.925,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
18 | Or82a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.6470588235294118,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9411764705882353,1.0,0.0,0.0,0.0,0.0,0.49411764705882355,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
19 | Or85c,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.9411764705882353,1.0,0.35294117647058826,0.38235294117647056,0.30392156862745096,0.0,0.0,0.0,0.0,0.0,0.0,0.5392156862745098,0.8627450980392157,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.29411764705882354,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
20 | Or94a,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.8738317757009346,0.0,0.0,0.0,0.0
21 | Or94b,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
22 |
--------------------------------------------------------------------------------
/connectome_interpreter/data/Dweck2018/larva_fruit2or.csv:
--------------------------------------------------------------------------------
1 | receptor,African mango,African breadfruit,African nutmeg,Apple,Apricot,Avocado,Banana,Blackberry,Blueberry,Cactus fig,Currant,Fig,Galiamelon,Grape,Grapefruit,Honeymelon,Java plum,Kiwi,Mandarina,Mango,Monkey fruit,Napoleons's button,Orange,Papaya,Passionfruit,Peach,Pear,Physalis,Pineapple,Plums,Pomegranate,Raspberry,Strawberry,Watermelon
2 | Or2a,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
3 | Or7a,0.2,0.8,0.2,1.0,0.0,0.0,0.2,0.0,0.2,0.2,0.2,0.2,0.8,0.2,0.2,0.4,0.2,0.2,0.0,0.2,0.6,0.6,0.4,0.0,0.0,0.4,1.0,0.2,0.0,0.0,0.0,0.4,0.2,0.4
4 | Or13a,0.25,1.0,0.0,0.25,0.0,0.0,0.25,0.5,0.0,0.25,0.25,0.0,0.75,0.0,0.0,0.0,0.25,0.0,0.0,0.0,0.0,0.0,0.0,0.25,0.5,0.25,0.5,0.25,0.25,0.0,0.25,0.25,0.0,0.0
5 | Or22c,0.8,0.6,0.4,0.6,0.2,0.0,0.2,0.4,0.0,0.4,0.2,0.2,0.6,0.2,0.4,0.6,0.2,0.4,0.2,0.0,0.8,0.4,0.0,0.2,1.0,0.0,0.4,0.6,0.6,0.2,0.0,0.2,0.8,0.4
6 | Or24a,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.0,0.0,0.5,0.0,0.0,0.5,0.0,0.5,0.5,0.5,0.5,0.0,0.5,1.0,0.5,0.5,0.0,0.5,0.5,0.5,1.0,0.5,0.5,0.5,0.5,0.5,0.5
7 | Or30a,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0
8 | Or33b,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0
9 | Or35a,0.125,0.25,0.0,1.0,0.25,0.125,0.5,0.125,0.125,0.375,0.125,0.0,0.125,0.125,0.75,0.375,0.0,0.0,0.0,0.0,0.125,0.125,0.625,0.0,0.0,0.25,0.375,0.375,0.0,0.375,0.0,0.25,0.625,0.25
10 | Or42a,0.16666666666666666,0.6666666666666666,0.6666666666666666,0.6666666666666666,0.3333333333333333,0.0,0.3333333333333333,0.0,0.0,0.16666666666666666,0.16666666666666666,0.5,1.0,0.0,0.3333333333333333,0.6666666666666666,0.16666666666666666,0.3333333333333333,0.16666666666666666,0.3333333333333333,0.5,0.3333333333333333,0.6666666666666666,0.16666666666666666,0.8333333333333334,0.16666666666666666,0.5,0.5,0.5,0.0,0.16666666666666666,0.16666666666666666,0.5,0.16666666666666666
11 | Or42b,1.0,1.0,0.0,1.0,1.0,0.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,0.0,1.0,0.0,0.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,1.0,1.0,1.0,0.0,1.0,1.0,0.0
12 | Or45a,0.3181818181818182,0.7727272727272727,0.22727272727272727,0.5454545454545454,0.18181818181818182,0.0,0.6818181818181818,0.045454545454545456,0.045454545454545456,0.3181818181818182,0.0,0.09090909090909091,0.5,0.09090909090909091,0.45454545454545453,0.36363636363636365,0.2727272727272727,0.13636363636363635,0.09090909090909091,0.045454545454545456,0.4090909090909091,0.36363636363636365,0.4090909090909091,0.0,1.0,0.5,0.4090909090909091,0.6363636363636364,0.6363636363636364,0.4090909090909091,0.0,0.45454545454545453,0.5454545454545454,0.2727272727272727
13 | Or45b,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0
14 | Or47a,0.2,0.8,0.0,0.0,0.0,0.0,0.2,0.2,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.8,0.0,0.4,0.0,0.2,0.4,0.4,0.0,0.0,0.2,0.6,0.4,0.2,0.4,0.0,0.0,0.4,0.6,0.0
15 | Or49a,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
16 | Or59a,0.75,0.75,0.75,0.5,0.0,0.0,1.0,0.25,0.0,0.75,0.0,0.25,1.0,0.0,0.0,0.25,1.0,0.25,0.0,0.5,1.0,0.5,0.5,0.0,0.5,0.0,0.5,0.5,0.75,0.0,0.25,0.25,0.5,0.25
17 | Or67b,0.6666666666666666,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.0,0.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.0,0.3333333333333333,1.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,1.0,0.0,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.0,0.3333333333333333,0.6666666666666666,0.3333333333333333
18 | Or74a,0.0,0.5,0.25,0.0,0.0,0.5,0.0,0.0,0.0,0.4166666666666667,0.0,0.25,0.8333333333333334,0.08333333333333333,1.0,0.6666666666666666,0.0,0.4166666666666667,0.8333333333333334,0.25,0.25,0.0,0.75,0.5,0.5,0.5,1.0,0.5833333333333334,0.5833333333333334,0.6666666666666666,0.0,0.25,0.25,0.4166666666666667
19 | Or82a,0.3333333333333333,1.0,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.0,0.0,0.3333333333333333,0.0,0.6666666666666666,0.0,0.0,0.6666666666666666,0.0,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.6666666666666666,0.3333333333333333,1.0,1.0,1.0,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333,0.3333333333333333
20 | Or85c,0.2857142857142857,0.7142857142857143,0.14285714285714285,0.8571428571428571,0.2857142857142857,0.0,0.42857142857142855,0.42857142857142855,0.14285714285714285,0.42857142857142855,0.14285714285714285,0.42857142857142855,0.5714285714285714,0.2857142857142857,0.0,0.42857142857142855,0.42857142857142855,0.2857142857142857,0.0,0.14285714285714285,0.5714285714285714,0.5714285714285714,0.2857142857142857,0.0,1.0,0.5714285714285714,0.42857142857142855,0.42857142857142855,0.7142857142857143,0.14285714285714285,0.14285714285714285,0.42857142857142855,0.5714285714285714,0.2857142857142857
21 | Or94a,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.6666666666666666,0.6666666666666666,0.0,0.3333333333333333,0.6666666666666666,0.0,0.3333333333333333,0.0,0.0,0.6666666666666666,0.0,0.3333333333333333,0.3333333333333333,0.6666666666666666,0.3333333333333333,0.0,0.0,1.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.3333333333333333,0.6666666666666666,0.0,0.0,0.0,0.3333333333333333,0.0
22 | Or94b,0.3333333333333333,0.6666666666666666,0.0,0.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.6666666666666666,0.0,0.0,0.0,0.3333333333333333,0.0,0.0,0.0,0.6666666666666666,0.3333333333333333,0.0,0.0,0.0,0.0,0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.3333333333333333
23 |
--------------------------------------------------------------------------------
/connectome_interpreter/external_map.py:
--------------------------------------------------------------------------------
1 | import io
2 | import pkgutil
3 | import os
4 |
5 | import numpy as np
6 | import pandas as pd
7 | import plotly.graph_objects as go
8 | import matplotlib.pyplot as plt
9 |
10 | DATA_SOURCES: dict[str, str] = {
11 | "DoOR_adult": "data/DoOR/processed_door_adult.csv",
12 | "DoOR_adult_sfr_subtracted": "data/DoOR/processed_door_adult_sfr_subtracted.csv",
13 | "Dweck_adult_chem": "data/Dweck2018/adult_chem2glom.csv",
14 | "Dweck_adult_fruit": "data/Dweck2018/adult_fruit2glom.csv",
15 | "Dweck_larva_chem": "data/Dweck2018/larva_chem2or.csv",
16 | "Dweck_larva_fruit": "data/Dweck2018/larva_fruit2or.csv",
17 | "Nern2024": "data/Nern2024/ME-columnar-cells-hex-location.csv",
18 | "Matsliah2024": "data/Matsliah2024/fafb_right_vis_cols.csv",
19 | "Badel2016_PN": "data/Badel2016/Badel2016.csv",
20 | "Zhao2024": "data/Zhao2024/ucl_hex_right_20240701_tomale.csv",
21 | }
22 |
23 |
24 | def load_dataset(dataset: str) -> pd.DataFrame:
25 | """
26 | Load the dataset from the package data folder. These datasets have been
27 | preprocessed to work with connectomics data. The preprocessing scripts are
28 | in this repository: https://github.com/YijieYin/interpret_connectome.
29 |
30 | Args:
31 | dataset: (str) The name of the dataset to load. Options are:
32 |
33 | - 'DoOR_adult': mapping from glomeruli to chemicals, from Munch and Galizia DoOR dataset (https://www.nature.com/articles/srep21841).
34 | - 'DoOR_adult_sfr_subtracted': mapping from glomeruli to chemicals, with spontaneous firing rate subtracted. There are therefore negative values.
35 | - 'Dweck_adult_chem': mapping from glomeruli to chemicals extracted from fruits, from Dweck et al. 2018 (https://www.cell.com/cell-reports/abstract/S2211-1247(18)30663-6). Firing rates normalised to between 0 and 1.
36 | - 'Dweck_adult_fruit': mapping from glomeruli to fruits, from Dweck et al. 2018. Number of responses normalised to between 0 and 1.
37 | - 'Dweck_larva_chem': mapping from olfactory receptors to chemicals, from Dweck et al. 2018. Firing rates normalised to between 0 and 1.
38 | - 'Dweck_larva_fruit': mapping from olfactory receptors to fruits from Dweck et al. 2018. Number of responses normalised to between 0 and 1.
39 | - 'Nern2024': columnar coordinates of individual cells from a collection of columnar cell types within the medulla of the right optic lobe, from Nern et al. 2024 (https://www.biorxiv.org/content/10.1101/2024.04.16.589741v2).
40 | - 'Matsliah2024': columnar coordinates of individual cells from a collection of columnar cell types in the right optic lobe from FAFB, from Matsliah et al. 2024 (https://www.nature.com/articles/s41586-024-07981-1).
41 | - 'Badel2016_PN': mapping from olfactory projection neurons to odours, from Badel et al. 2016 (https://www.cell.com/neuron/fulltext/S0896-6273(16)30201-X).
42 |             - 'Zhao2024': mapping from hexagonal coordinates to 3D coordinates, an update of Zhao et al. 2022 (https://www.biorxiv.org/content/10.1101/2022.12.14.520178v1).
43 |
44 | Returns:
45 | pd.DataFrame: The dataset as a pandas DataFrame. For the adult, the glomeruli
46 | are in the rows. For the larva, receptors are in the rows.
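47 |
        Example:
            A minimal usage sketch (dataset name as listed above)::

                door = load_dataset("DoOR_adult")
                door.head()  # glomeruli in rows, chemicals in columns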
47 | """
48 |
49 | try:
50 | data = pkgutil.get_data("connectome_interpreter", DATA_SOURCES[dataset])
51 | except KeyError as exc:
52 | raise ValueError(
53 | "Dataset not recognized. Please choose from {}".format(
54 | list(DATA_SOURCES.keys())
55 | )
56 | ) from exc
57 |
58 | return pd.read_csv(io.BytesIO(data), index_col=0)
59 |
60 |
61 | def map_to_experiment(df, dataset=None, custom_experiment=None):
62 | """
63 | Map the connectomics data to experimental data. For example, if odour1
64 |     excites neuron1 at 0.5 and neuron2 at 0.6, and both neuron1 and neuron2 output to
65 |     neuron3 (with weights 0.7 and 0.8 respectively), then the response of neuron3 to odour1
66 |     is 0.5*0.7 + 0.6*0.8 = 0.83. The result would only be 1 if a stimulus
67 |     excites neurons fully (at 1.0), and those neurons constitute 100% of the downstream
68 |     neuron's input.
69 |
70 | Args:
71 | df (pd.DataFrame): The connectivity data. Standardised input (e.g. glomeruli,
72 | receptors) in rows, observations (target neurons) in columns.
73 | dataset (str): The name of the dataset to load. Options are:
74 |
75 | - 'DoOR_adult': mapping from glomeruli to chemicals, from Munch and Galizia DoOR dataset (https://www.nature.com/articles/srep21841).
76 | - 'DoOR_adult_sfr_subtracted': mapping from glomeruli to chemicals, with spontaneous firing rate subtracted. There are therefore negative values.
77 | - 'Dweck_adult_chem': mapping from glomeruli to chemicals extracted from fruits, from Dweck et al. 2018 (https://www.cell.com/cell-reports/abstract/S2211-1247(18)30663-6). Firing rates normalised to between 0 and 1.
78 | - 'Dweck_adult_fruit': mapping from glomeruli to fruits, from Dweck et al. 2018. Number of responses normalised to between 0 and 1.
79 | - 'Dweck_larva_chem': mapping from olfactory receptors to chemicals, from Dweck et al. 2018. Firing rates normalised to between 0 and 1.
80 | - 'Dweck_larva_fruit': mapping from olfactory receptors to fruits, from Dweck et al. 2018. Number of responses normalised to between 0 and 1.
81 | - 'Nern2024': columnar coordinates of individual cells from a collection of columnar cell types within the medulla of the right optic lobe, from Nern et al. 2024.
82 | - 'Badel2016_PN': mapping from olfactory projection neurons to odours, from Badel et al. 2016 (https://www.cell.com/neuron/fulltext/S0896-6273(16)30201-X).
83 |
84 | custom_experiment (pd.DataFrame): A custom experimental dataset to compare the
85 | connectomics data to. The row indices of this dataframe must match the row
86 | indices of df. They are the units of comparison (e.g. glomeruli).
87 |
88 | Returns:
89 | pd.DataFrame: The similarity between the connectomics data and the experimental
90 |             data. Rows are neurons, columns are external stimuli.
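
    Example:
        A minimal sketch reproducing the worked example above, with invented
        glomerulus and neuron names::

            import pandas as pd

            conn = pd.DataFrame({"neuron3": [0.7, 0.8]}, index=["glom1", "glom2"])
            odours = pd.DataFrame({"odour1": [0.5, 0.6]}, index=["glom1", "glom2"])
            map_to_experiment(conn, custom_experiment=odours)
            # single entry for (neuron3, odour1): 0.5*0.7 + 0.6*0.8 = 0.83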
91 | """
92 |
93 | # try:
94 | # from sklearn.metrics.pairwise import cosine_similarity
95 | # except ImportError as e:
96 | # raise ImportError(
97 | # "To use this function, please install scikit-learn. You can
98 | # install it with 'pip install scikit-learn'.") from e
99 | if dataset is not None and custom_experiment is not None:
100 | raise ValueError(
101 | "Please provide either a dataset or a custom_experiment, not both."
102 | )
103 | if dataset is None and custom_experiment is None:
104 | raise ValueError("Please provide either a dataset or a custom_experiment.")
105 | if dataset is not None:
106 | data = load_dataset(dataset)
107 | else:
108 | data = custom_experiment
109 |
110 | # take the intersection of glomeruli
111 | data = data[data.index.isin(df.index)]
112 | df_intersect = df[df.index.isin(data.index)]
113 | df_intersect = df_intersect.reindex(data.index)
114 |
115 |     # multiply the corresponding values using matmul
116 | target2chem = np.dot(df_intersect.values.T, data.values)
117 | # Assign appropriate column names
118 | target2chem = pd.DataFrame(
119 | target2chem, index=df_intersect.columns, columns=data.columns
120 | )
121 | return target2chem
122 |
123 |
124 | def hex_heatmap(
125 | df: pd.Series | pd.DataFrame,
126 | style: dict | None = None,
127 | sizing: dict | None = None,
128 | dpi: int = 72,
129 | custom_colorscale: list | None = None,
130 | global_min: float | None = None,
131 | global_max: float | None = None,
132 | dataset: str | None = "mcns_right",
133 | ) -> go.Figure:
134 | """
135 | Generate a hexagonal heat map plot of the data. The index of the data
136 | should be formatted as strings of the form '-12,34', where the first
137 | number is the x-coordinate and the second number is the y-coordinate.
138 |
139 | Args:
140 | df : pd.Series | pd.DataFrame
141 | The data to plot. Each column will generate a separate frame in
142 | the plot.
143 | style : dict, default=None
144 | Dict containing styling formatting variables. Possible keys are:
145 |
146 | - 'font_type': str, default='arial'
147 | - 'linecolor': str, default='black'
148 |         - 'papercolor': str, default='rgba(255,255,255,255)' (white)
        - 'markerlinecolor': str, default='rgba(0,0,0,0)' (transparent)
149 |
150 | sizing : dict, default=None
151 | Dict containing size formatting variables. Possible keys are:
152 |
153 | - 'fig_width': int, default=260 (mm)
154 | - 'fig_height': int, default=220 (mm)
155 | - 'fig_margin': int, default=0 (mm)
156 | - 'fsize_ticks_pt': int, default=20 (points)
157 | - 'fsize_title_pt': int, default=20 (points)
158 | - 'markersize': int, default=18 if dataset='mcns_right', 20 if dataset='fafb_right'
159 | - 'ticklen': int, default=15
160 | - 'tickwidth': int, default=5
161 | - 'axislinewidth': int, default=3
162 |         - 'markerlinewidth': float, default=0.5
163 | - 'cbar_thickness': int, default=20
164 | - 'cbar_len': float, default=0.75
165 |
166 | dpi : int, default=72
167 | Dots per inch for the output figure. Standard is 72 for screen/SVG/PDF.
168 | Use higher values (e.g., 300) for print-quality output.
169 | custom_colorscale : list, default=None
170 | Custom colorscale for the heatmap. If None, defaults to white-to-blue
171 | colorscale [[0, "rgb(255, 255, 255)"], [1, "rgb(0, 20, 200)"]].
172 | global_min : float, default=None
173 | Global minimum value for the color scale.
174 |         If None, the smaller of 0 and the minimum value of the data is used.
175 | global_max : float, default=None
176 | Global maximum value for the color scale.
177 | If None, the maximum value of the data is used.
178 | dataset : str, default='mcns_right'
179 | The dataset to use for the hexagon locations. Options are:
180 |
181 | - 'mcns_right': columnar coordinates of individual cells from columnar cell types: L1, L2, L3, L5, Mi1, Mi4, Mi9, C2, C3, Tm1, Tm2, Tm4, Tm9, Tm20, T1, within the medulla of the right optic lobe, from Nern et al. 2024.
182 | - 'fafb_right': columnar coordinates of individual cells from columnar cell types, in the right optic lobe of FAFB, from Matsliah et al. 2024.
183 |
184 | Returns:
185 | fig : go.Figure
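186 |
    Example:
        A minimal sketch with made-up hex coordinates and values::

            import pandas as pd

            vals = pd.Series({"18,6": 0.2, "19,7": 0.8, "20,8": 1.0})
            fig = hex_heatmap(vals, dataset="mcns_right")
            fig.show()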
186 | """
187 |
188 | def bg_hex():
189 | """
190 |         Generate a scatter plot of the background hexagons.
191 | """
192 | goscatter = go.Scatter(
193 | x=background_hex["x"],
194 | y=background_hex["y"],
195 | mode="markers",
196 | marker_symbol=symbol_number,
197 | marker={
198 | "size": sizing["markersize"],
199 | "color": "white",
200 | "line": {
201 | "width": sizing["markerlinewidth"],
202 | "color": "lightgrey",
203 | },
204 | },
205 | showlegend=False,
206 | )
207 | return goscatter
208 |
209 | def data_hex(aseries):
210 | """
211 |         Generate a scatter plot of the data hexagons.
212 | """
213 | goscatter = go.Scatter(
214 | x=x_vals,
215 | y=y_vals,
216 | mode="markers",
217 | marker_symbol=symbol_number,
218 | customdata=np.stack([x_vals, y_vals, aseries.values], axis=-1),
219 |             hovertemplate="x: %{customdata[0]}<br>y: %{customdata[1]}<br>value: %{customdata[2]}",
220 | marker={
221 | "cmin": global_min,
222 | "cmax": global_max,
223 | "size": sizing["markersize"],
224 | "color": aseries.values,
225 | "line": {
226 | "width": sizing["markerlinewidth"],
227 | "color": "lightgrey",
228 | },
229 | "colorbar": {
230 | "orientation": "v",
231 | "outlinecolor": style["linecolor"],
232 | "outlinewidth": sizing["axislinewidth"],
233 | "thickness": sizing["cbar_thickness"],
234 | "len": sizing["cbar_len"],
235 | "tickmode": "array",
236 | "ticklen": sizing["ticklen"],
237 | "tickwidth": sizing["tickwidth"],
238 | "tickcolor": style["linecolor"],
239 | "tickfont": {
240 | "size": fsize_ticks_px,
241 | "family": style["font_type"],
242 | "color": style["linecolor"],
243 | },
244 | "tickformat": ".5f",
245 | "title": {
246 | "font": {
247 | "family": style["font_type"],
248 | "size": fsize_title_px,
249 | "color": style["linecolor"],
250 | },
251 | "side": "right",
252 | },
253 | },
254 | "colorscale": custom_colorscale,
255 | },
256 | showlegend=False,
257 | )
258 | return goscatter
259 |
260 | # Default styling and sizing parameters to use if not specified.
261 | default_style = {
262 | "font_type": "arial",
263 | "markerlinecolor": "rgba(0,0,0,0)", # transparent
264 | "linecolor": "black",
265 | "papercolor": "rgba(255,255,255,255)",
266 | }
267 |
268 | if dataset == "mcns_right":
269 | markersize = 18
270 | elif dataset == "fafb_right":
271 | markersize = 20
272 | else:
273 | # raise error
274 | raise ValueError(
275 | "Dataset not recognized. Currently available datasets are 'mcns_right', "
276 | "'fafb_right'."
277 | )
278 |
279 | default_sizing = {
280 | "fig_width": 260, # units = mm
281 | "fig_height": 220, # units = mm
282 | "fig_margin": 0,
283 | "fsize_ticks_pt": 20,
284 | "fsize_title_pt": 20,
285 | "markersize": markersize,
286 | "ticklen": 15,
287 | "tickwidth": 5,
288 | "axislinewidth": 3,
289 | "markerlinewidth": 0.5, # 0.9,
290 | "cbar_thickness": 20,
291 | "cbar_len": 0.75,
292 | }
293 |
294 | # If style is provided, update default_style with user values
295 | if style is not None:
296 | default_style.update(style)
297 | style = default_style
298 |
299 | if sizing is not None:
300 | default_sizing.update(sizing)
301 | sizing = default_sizing
302 |
303 | # Constants for unit conversion
304 | POINTS_PER_INCH = 72 # Typography standard: 1 point = 1/72 inch
305 | MM_PER_INCH = 25.4 # Standard conversion: 1 inch = 25.4 mm
306 |
307 | # sizing of the figure and font
308 | pixelsperinch = dpi # Use the provided DPI value
309 | pixelspermm = pixelsperinch / MM_PER_INCH
310 |
311 | # Default colorscale
312 | if custom_colorscale is None:
313 | custom_colorscale = [[0, "rgb(255, 255, 255)"], [1, "rgb(0, 20, 200)"]]
314 |
315 | area_width = (sizing["fig_width"] - sizing["fig_margin"]) * pixelspermm
316 | area_height = (sizing["fig_height"] - sizing["fig_margin"]) * pixelspermm
317 |
318 | fsize_ticks_px = sizing["fsize_ticks_pt"] * (1 / POINTS_PER_INCH) * pixelsperinch
319 | fsize_title_px = sizing["fsize_title_pt"] * (1 / POINTS_PER_INCH) * pixelsperinch
320 |
321 | # Get global min and max for consistent color scale
322 | # minimum of 0 and df.values.min()
323 | vals = df.to_numpy()
324 | if global_min is None:
325 | global_min = min(0, vals.min())
326 | if global_max is None:
327 | global_max = vals.max()
328 |
329 |     # Plotly marker symbol number used to draw the hexagons
330 | symbol_number = 15
331 |
332 | # load all hex coordinates
333 | if dataset == "mcns_right":
334 | background_hex = load_dataset("Nern2024")
335 | elif dataset == "fafb_right":
336 | background_hex = load_dataset("Matsliah2024")
337 | else:
338 | # raise error
339 | raise ValueError(
340 | "Dataset not recognized. Currently available datasets are 'mcns_right', "
341 | "'fafb_right'."
342 | )
343 | # only get the unique combination of 'x' and 'y' columns
344 | background_hex = background_hex.drop_duplicates(subset=["x", "y"])
345 |
346 | # initiate plot
347 | fig = go.Figure()
348 | fig.update_layout(
349 | autosize=False,
350 | height=area_height,
351 | width=area_width,
352 | margin={"l": 0, "r": 0, "b": 0, "t": 0, "pad": 0},
353 | paper_bgcolor=style["papercolor"],
354 | plot_bgcolor=style["papercolor"],
355 | )
356 | fig.update_xaxes(
357 | showgrid=False, showticklabels=False, showline=False, visible=False
358 | )
359 | fig.update_yaxes(
360 | showgrid=False, showticklabels=False, showline=False, visible=False
361 | )
362 |
363 | # Convert index values (formatted as '-12,34') into separate x and y coordinates
364 | df = df[(df.index != "nan") & (~df.index.isnull())]
365 | coords = [tuple(map(float, idx.split(","))) for idx in df.index]
366 | x_vals, y_vals = zip(*coords) # Separate into x and y lists
367 |
368 | if isinstance(df, pd.Series) or len(df.columns) == 1:
369 | if isinstance(df, pd.DataFrame):
370 | df = df.iloc[:, 0]
371 | fig.add_trace(bg_hex())
372 | fig.add_trace(data_hex(df))
373 |
374 | elif isinstance(df, pd.DataFrame):
375 | # Adjust figure size - add extra height for slider
376 | slider_height = 100 # pixels
377 | area_height += slider_height
378 |
379 | # Create frames for slider
380 | frames = []
381 | slider_steps = []
382 |
383 | # Add base layout
384 | fig.update_layout(
385 | autosize=False,
386 | height=area_height,
387 | width=area_width,
388 | margin={
389 | "l": 0,
390 | "r": 0,
391 | "b": slider_height,
392 | "t": 0,
393 | "pad": 0,
394 | }, # Add bottom margin for slider
395 | paper_bgcolor=style["papercolor"],
396 | plot_bgcolor=style["papercolor"],
397 | sliders=[
398 | {
399 | "active": 0,
400 | "currentvalue": {
401 | "font": {"size": 16},
402 | "visible": True,
403 | "xanchor": "right",
404 | },
405 | "pad": {"b": 10, "t": 0}, # Adjusted padding
406 | "len": 0.9,
407 | "x": 0.1,
408 | "y": 0, # Move slider below plot
409 | "steps": [],
410 | }
411 | ],
412 | )
413 |
414 | # Create frames for each column
415 | for i, col_name in enumerate(df.columns):
416 | series = df[col_name]
417 | frame_data = [
418 | bg_hex(),
419 | data_hex(series),
420 | ]
421 |
422 | frames.append(go.Frame(data=frame_data, name=str(i)))
423 |
424 | # Add to slider
425 | slider_steps.append(
426 | {
427 | "args": [
428 | [str(i)],
429 | {"frame": {"duration": 0, "redraw": True}, "mode": "immediate"},
430 | ],
431 | "label": col_name,
432 | "method": "animate",
433 | }
434 | )
435 |
436 | # Set initial display to first column
437 | if i == 0:
438 | fig.add_traces(frame_data)
439 |
440 | # Update slider with all steps
441 | fig.layout.sliders[0].steps = slider_steps
442 | fig.frames = frames
443 |
444 | # Update axes
445 | fig.update_xaxes(
446 | showgrid=False, showticklabels=False, showline=False, visible=False
447 | )
448 | fig.update_yaxes(
449 | showgrid=False, showticklabels=False, showline=False, visible=False
450 | )
451 |
452 | else:
453 | # raise error
454 | raise ValueError("df must be a pd.Series or pd.DataFrame")
455 |
456 | return fig
457 |
458 |
459 | def looming_stimulus(start_coords, all_coords, n_time=4):
460 | """
461 | Generate a list of lists of coordinates for a looming stimulus. The stimulus starts
462 | at the start_coords and expands outwards in a hexagonal pattern. The stimulus
463 | expands for n_time steps. Currently the expansion happens one layer at a time.
464 |
465 | Args:
466 | start_coords (list): List of strings of the form 'x,y' where x and y are the
467 | coordinates of the starting hexes for the stimulus.
468 | all_coords (list): List of strings of the form 'x,y' where x and y are the
469 | coordinates of all hexes in the grid.
470 | n_time (int): Default=4. Number of time steps for the stimulus to expand.
471 |
472 | Returns:
473 | stim_str (list): List of lists of strings of the form 'x,y' where x and y are
474 | the coordinates of the hexes that are stimulated at each time step.
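475 |
    Example:
        A minimal sketch on a small invented grid::

            grid = [f"{x},{y}" for x in range(-3, 4) for y in range(-3, 4)]
            frames = looming_stimulus(["0,0"], grid, n_time=2)
            # frames[0] == ['0,0']; each later frame adds a ring of neighbouring hexes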
475 | """
476 | coords = [tuple(map(float, idx.split(","))) for idx in all_coords]
477 | x_vals, y_vals = zip(*coords) # Separate into x and y lists
478 |
479 | # sort and rank x_vals
480 | x_sorted = sorted(list(set(x_vals)))
481 | x_to_rank = {x: rank for rank, x in enumerate(x_sorted)}
482 | rank_to_x = {rank: x for rank, x in enumerate(x_sorted)}
483 | y_sorted = sorted(list(set(y_vals)))
484 | y_to_rank = {y: rank for rank, y in enumerate(y_sorted)}
485 | rank_to_y = {rank: y for rank, y in enumerate(y_sorted)}
486 |
487 | start = [tuple(map(float, idx.split(","))) for idx in start_coords]
488 | stimulus = []
489 | stimulus.append(start)
490 | for atime in range(n_time):
491 |         # copy first, so neighbours of every active hex accumulate this time step
        start_copy = start.copy()
492 |         for x, y in start:
493 | # hexes above and below x
494 | if y_to_rank[y] + 2 in rank_to_y:
495 | start_copy.append((x, rank_to_y[y_to_rank[y] + 2]))
496 | if y_to_rank[y] - 2 in rank_to_y:
497 | start_copy.append((x, rank_to_y[y_to_rank[y] - 2]))
498 | # hexes to the left
499 | if x_to_rank[x] + 1 in rank_to_x:
500 | if y_to_rank[y] + 1 in rank_to_y:
501 | start_copy.append(
502 | (rank_to_x[x_to_rank[x] + 1], rank_to_y[y_to_rank[y] + 1])
503 | )
504 | if y_to_rank[y] - 1 in rank_to_y:
505 | start_copy.append(
506 | (rank_to_x[x_to_rank[x] + 1], rank_to_y[y_to_rank[y] - 1])
507 | )
508 | # hexes to the right
509 | if x_to_rank[x] - 1 in rank_to_x:
510 | if y_to_rank[y] + 1 in rank_to_y:
511 | start_copy.append(
512 | (rank_to_x[x_to_rank[x] - 1], rank_to_y[y_to_rank[y] + 1])
513 | )
514 | if y_to_rank[y] - 1 in rank_to_y:
515 | start_copy.append(
516 | (rank_to_x[x_to_rank[x] - 1], rank_to_y[y_to_rank[y] - 1])
517 | )
518 |
519 | start = list(set(start_copy))
520 | stimulus.append(start)
521 |
522 | stim_str = []
523 | for atime in range(n_time):
524 | stim_atime = []
525 | for x, y in stimulus[atime]:
526 | # Format x and y to remove .0 if they're integers
527 | x_str = str(int(x)) if x == int(x) else str(x)
528 | y_str = str(int(y)) if y == int(y) else str(y)
529 | stim_atime.append(f"{x_str},{y_str}")
530 | stim_str.append(stim_atime)
531 | return stim_str
532 |
533 |
534 | def make_sine_stim(phase=0, amplitude=1, n=8):
535 | """
536 | Generate a dictionary of values representing a sine wave stimulus with a given phase
537 | and amplitude. The sine wave is defined over n points, starting from the given phase.
538 |
539 | Args:
540 | phase (int): Phase of the sine wave in degrees. Default is 0.
541 | amplitude (float): Amplitude of the sine wave. Default is 1.
542 | n (int): Number of points in the sine wave. Default is 8.
543 |
544 | Returns:
545 | dict: A dictionary where keys are indices from 1 to n, and values are the
546 | corresponding sine wave values.
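547 |
    Example:
        A minimal sketch::

            stim = make_sine_stim(phase=90, amplitude=2, n=4)
            # approximately {1: 2.0, 2: 1.0, 3: 1.0, 4: 2.0}:
            # 2 * |sin(x)| sampled over half a period starting at x = pi/2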
547 | """
548 | x = (phase % 180) / 180 * np.pi
549 | x = np.linspace(x, x + np.pi, n)
550 | y = amplitude * abs(np.sin(x))
551 | return dict(zip(range(1, n + 1), y))
552 |
553 |
554 | def plot_mollweide_projection(
555 | data: pd.Series | pd.DataFrame,
556 | fig_size: tuple = (900, 700),
557 | custom_colorscale: str = "Viridis",
558 | global_min: float | None = None,
559 | global_max: float | None = None,
560 | dataset: str = "Zhao2024",
561 | marker_size: int = 8,
562 | ) -> go.Figure:
563 | """
564 |     Generates a heatmap to visualize the value of features per column using the
565 |     Mollweide projection.
566 |
567 | Args:
568 | data (pd.Series | pd.DataFrame): Data with index formatted as strings of the
569 | form '-12,34', where the first number is the x-coordinate and the second
570 | number is the y-coordinate. The data to plot. Each column will generate a
571 | separate frame in the plot.
572 | fig_size (tuple): Size of the figure in pixels (width, height).
573 | custom_colorscale (str): Name of the Plotly colorscale to use.
574 |         global_min (float | None): Global minimum value for the color scale. If
575 |             None, the smaller of 0 and the minimum of the data is used.
576 | global_max (float | None): Global maximum value for the color scale. If None,
577 | the maximum value of the data is used.
578 | dataset (str): The dataset to use for the hexagon locations. Options are:
579 |
580 |             - 'Zhao2024': mapping from hexagonal coordinates to 3D coordinates, an update of Zhao et al. 2022 (https://www.biorxiv.org/content/10.1101/2022.12.14.520178v1).
581 |
582 | marker_size (int): Size of markers in the plot.
583 |
584 | Returns:
585 |         go.Figure: A Plotly figure object containing the Mollweide projection heatmap.
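
    Example:
        A minimal sketch with made-up hex coordinates (only hexes present in the
        Zhao2024 eyemap get plotted coordinates)::

            import pandas as pd

            vals = pd.Series({"0,0": 0.1, "1,1": 0.5, "-1,1": 1.0})
            fig = plot_mollweide_projection(vals)
            fig.show()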
586 | """
587 |
588 |     def cart2sph(xyz: np.ndarray) -> np.ndarray:
589 | """
590 | Convert Cartesian to spherical coordinates.
591 | Theta is polar angle (from +z), phi is angle from +x to +y.
592 | """
593 | r = np.sqrt((xyz**2).sum(1))
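        # note: theta is computed from z directly (not z / r), so the input
        # vectors are assumed to be unit length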
594 | theta = np.arccos(xyz[:, 2])
595 | phi = np.arctan2(xyz[:, 1], xyz[:, 0])
596 | phi[phi < 0] = phi[phi < 0] + 2 * np.pi
597 | return np.stack((r, theta, phi), axis=1)
598 |
599 |     def sph2Mollweide(thetaphi: np.ndarray) -> np.ndarray:
600 | """
601 | Spherical (viewed from outside) to Mollweide,
602 | cf. https://mathworld.wolfram.com/MollweideProjection.html
603 | """
604 | azim = thetaphi[:, 1]
605 | azim[azim > np.pi] = azim[azim > np.pi] - 2 * np.pi # longitude/azimuth
606 |         elev = np.pi / 2 - thetaphi[:, 0]  # latitude/elevation in radians
607 |
608 | N = len(azim) # number of points
609 | xy = np.zeros((N, 2)) # output
610 | for i in range(N):
611 | theta = np.arcsin(2 * elev[i] / np.pi)
612 | if np.abs(np.abs(theta) - np.pi / 2) < 0.001:
613 | xy[i,] = [
614 | 2 * np.sqrt(2) / np.pi * azim[i] * np.cos(theta),
615 | np.sqrt(2) * np.sin(theta),
616 | ]
617 | else:
618 | # to calculate theta
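                    # Newton's method on the Mollweide auxiliary-angle equation:
                    # solve 2*theta + sin(2*theta) = pi*sin(elev) for theta; then
                    # x = (2*sqrt(2)/pi)*azim*cos(theta), y = sqrt(2)*sin(theta)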
619 | dtheta = 1
620 | while dtheta > 1e-3:
621 | theta_new = theta - (
622 | 2 * theta + np.sin(2 * theta) - np.pi * np.sin(elev[i])
623 | ) / (2 + 2 * np.cos(2 * theta))
624 | dtheta = np.abs(theta_new - theta)
625 | theta = theta_new
626 | xy[i,] = [
627 | 2 * np.sqrt(2) / np.pi * azim[i] * np.cos(theta),
628 | np.sqrt(2) * np.sin(theta),
629 | ]
630 | return xy
631 |
632 | def create_mollweide_guidelines():
633 | """
634 | Create Mollweide projection guidelines as plotly traces
635 | """
636 | traces = []
637 |
638 | # Create meridians
639 | ww = np.stack((np.linspace(0, 180, 19), np.repeat(-180, 19)), axis=1)
640 | w = np.stack((np.linspace(180, 0, 19), np.repeat(-90, 19)), axis=1)
641 | m = np.stack((np.linspace(0, 180, 19), np.repeat(0, 19)), axis=1)
642 | e = np.stack((np.linspace(180, 0, 19), np.repeat(90, 19)), axis=1)
643 | ee = np.stack((np.linspace(0, 180, 19), np.repeat(180, 19)), axis=1)
644 | pts = np.vstack((ww, w, m, e, ee))
645 | rtp = np.insert(pts / 180 * np.pi, 0, np.repeat(1, pts.shape[0]), axis=1)
646 | meridians_xy = sph2Mollweide(rtp[:, 1:3])
647 |
648 | traces.append(
649 | go.Scatter(
650 | x=meridians_xy[:, 0],
651 | y=meridians_xy[:, 1],
652 | mode="lines",
653 | line=dict(color="lightgrey", width=0.5),
654 | showlegend=False,
655 | hoverinfo="skip",
656 | )
657 | )
658 |
659 | # Create parallels
660 | for lat in [45, 90, 135]:
661 | pts = np.stack((np.repeat(lat, 37), np.linspace(-180, 180, 37)), axis=1)
662 | rtp = np.insert(pts / 180 * np.pi, 0, np.repeat(1, pts.shape[0]), axis=1)
663 | parallel_xy = sph2Mollweide(rtp[:, 1:3])
664 |
665 | traces.append(
666 | go.Scatter(
667 | x=parallel_xy[:, 0],
668 | y=parallel_xy[:, 1],
669 | mode="lines",
670 | line=dict(color="lightgrey", width=0.5),
671 | showlegend=False,
672 | hoverinfo="skip",
673 | )
674 | )
675 |
676 | return traces
677 |
678 | def create_data_scatter(series_data, x_coords, y_coords, column_name=None):
679 | """Create scatter plot for data points"""
680 | return go.Scatter(
681 | x=x_coords,
682 | y=y_coords,
683 | mode="markers",
684 | marker=dict(
685 | color=series_data.values,
686 | colorscale=custom_colorscale,
687 | cmin=global_min,
688 | cmax=global_max,
689 | size=marker_size,
690 | colorbar=dict(
691 | title=column_name if column_name else "Value", titleside="right"
692 | ),
693 | ),
694 | customdata=np.stack([x_coords, y_coords, series_data.values], axis=-1),
695 |             hovertemplate="x: %{customdata[0]}<br>y: %{customdata[1]}<br>value: %{customdata[2]}",
696 | showlegend=False,
697 | )
698 |
699 | # Clean data - remove NaN indices
700 | data = data[(data.index != "nan") & (~data.index.isnull())]
701 |
702 | # Convert string indices to coordinate arrays
703 | coords = [tuple(map(float, idx.split(","))) for idx in data.index]
704 | coord_array = np.array(coords)
705 |
706 | # Get global min and max for consistent color scale
707 | vals = data.to_numpy() if isinstance(data, pd.DataFrame) else data.values
708 | if global_min is None:
709 | global_min = min(0, vals.min())
710 | if global_max is None:
711 | global_max = vals.max()
712 |
713 | # Load eyemap data and convert coordinates
714 | ucl_hex = load_dataset(dataset)
715 | rtp2 = cart2sph(ucl_hex[["x", "y", "z"]].values)
716 | xy = sph2Mollweide(rtp2[:, 1:3])
717 | xy[:, 0] = -xy[:, 0] # flip x axis
718 | xypq_moll = np.concatenate((xy, ucl_hex[["p", "q"]].values), axis=1)
719 | xypq_moll = pd.DataFrame(xypq_moll, columns=["x", "y", "p", "q"])
720 | xypq_moll[["p", "q"]] = xypq_moll[["p", "q"]].astype(int)
721 |
722 | # Convert data coordinates to Mollweide
723 | hex1_id = (coord_array[:, 1] - coord_array[:, 0]) / 2
724 | hex2_id = (coord_array[:, 1] + coord_array[:, 0]) / 2
725 |
726 | coord_df = pd.DataFrame({"hex1_id": hex1_id, "hex2_id": hex2_id}, index=data.index)
727 |
728 | merged_coords = coord_df.merge(
729 | xypq_moll, left_on=["hex1_id", "hex2_id"], right_on=["q", "p"], how="left"
730 | )
731 |
732 | x_mollweide = merged_coords["x"].values
733 | y_mollweide = merged_coords["y"].values
734 |
735 | # Create figure
736 | fig = go.Figure()
737 |
738 | # Add guidelines
739 | guidelines = create_mollweide_guidelines()
740 | for trace in guidelines:
741 | fig.add_trace(trace)
742 |
743 | # Handle single series vs DataFrame
744 | if isinstance(data, pd.Series) or (
745 | isinstance(data, pd.DataFrame) and len(data.columns) == 1
746 | ):
747 | if isinstance(data, pd.DataFrame):
748 | data = data.iloc[:, 0]
749 |
750 | # Single plot
751 | fig.add_trace(create_data_scatter(data, x_mollweide, y_mollweide))
752 |
753 | elif isinstance(data, pd.DataFrame):
754 | # Multiple columns - create frames for slider
755 | frames = []
756 | slider_steps = []
757 |
758 | # Create frames for each column
759 | for i, col_name in enumerate(data.columns):
760 | series = data[col_name]
761 |
762 | # Create frame data (guidelines + data scatter)
763 | frame_traces = guidelines + [
764 | create_data_scatter(series, x_mollweide, y_mollweide, col_name)
765 | ]
766 | frames.append(go.Frame(data=frame_traces, name=str(i)))
767 |
768 | # Add slider step
769 | slider_steps.append(
770 | {
771 | "args": [
772 | [str(i)],
773 | {"frame": {"duration": 0, "redraw": True}, "mode": "immediate"},
774 | ],
775 | "label": col_name,
776 | "method": "animate",
777 | }
778 | )
779 |
780 | # Set initial display to first column
781 | if i == 0:
782 | fig.add_trace(
783 | create_data_scatter(series, x_mollweide, y_mollweide, col_name)
784 | )
785 |
786 | # Add frames to figure
787 | fig.frames = frames
788 |
789 | # Add slider
790 | fig.update_layout(
791 | sliders=[
792 | {
793 | "active": 0,
794 | "currentvalue": {
795 | "font": {"size": 16},
796 | "visible": True,
797 | "xanchor": "right",
798 | },
799 | "pad": {"b": 10, "t": 50},
800 | "len": 0.9,
801 | "x": 0.1,
802 | "y": 0,
803 | "steps": slider_steps,
804 | }
805 | ]
806 | )
807 |
808 | # Update layout
809 | fig.update_layout(
810 | width=fig_size[0],
811 | height=fig_size[1],
812 | xaxis=dict(
813 | range=[-np.pi, np.pi],
814 | scaleanchor="y",
815 | scaleratio=1,
816 | showgrid=False,
817 | showticklabels=False,
818 | showline=False,
819 | visible=False,
820 | ),
821 | yaxis=dict(
822 | range=[-np.pi / 2, np.pi / 2],
823 | showgrid=False,
824 | showticklabels=False,
825 | showline=False,
826 | visible=False,
827 | ),
828 | plot_bgcolor="white",
829 | paper_bgcolor="white",
830 | margin=dict(
831 | l=0,
832 | r=0,
833 | t=50,
834 | b=50 if isinstance(data, pd.DataFrame) and len(data.columns) > 1 else 0,
835 | ),
836 | )
837 |
838 | return fig
839 |
--------------------------------------------------------------------------------
/connectome_interpreter/external_paths.py:
--------------------------------------------------------------------------------
1 | # Standard library imports
2 | import os
3 | import pickle as pkl
4 |
5 | # Third-party package imports
6 | import numpy as np
7 | import pandas as pd
8 | from scipy.sparse import csc_matrix, csr_matrix
9 | from scipy.sparse.csgraph import shortest_path
10 |
11 |
12 | def compute_flow_hitting_time(
13 | conn_df: pd.DataFrame,
14 | flow_seed_idx: np.ndarray[int],
15 | flow_steps: int,
16 | flow_thre: float,
17 | ):
18 | """
19 | Compute hitting time for all cells in conn_df.
20 | Hitting time is the average number of hops required to reach a cell from a set of seed cells.
21 | The main algorithm is implemented in the 'navis' library.
22 |
23 | Args:
24 | conn_df (pd.DataFrame): DataFrame containing the connections with columns 'idx_pre', 'idx_post', and 'rel_in_weight'.
25 | flow_seed_idx (np.ndarray): Array of seed cell indices.
26 | flow_steps (int): Number of steps for flow calculation.
27 | flow_thre (float): Threshold for activation in flow calculation.
28 |
29 | Returns:
30 | pd.DataFrame: DataFrame with columns 'idx' and 'hitting_time',
31 | where 'idx' is the cell index and 'hitting_time' is the computed hitting time.
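32 |
    Example:
        A minimal sketch with invented indices (requires the optional 'navis'
        dependency)::

            import numpy as np
            import pandas as pd

            conn_df = pd.DataFrame(
                {"idx_pre": [0, 0, 1], "idx_post": [1, 2, 2], "rel_in_weight": [0.5, 0.2, 0.4]}
            )
            flow_df = compute_flow_hitting_time(
                conn_df, flow_seed_idx=np.array([0]), flow_steps=5, flow_thre=0.1
            )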
32 | """
33 |
34 | try:
35 | from navis.models import BayesianTraversalModel, linear_activation_p
36 | except ImportError as e:
37 | raise ImportError(
38 | "The 'navis' library is required for computing information flow."
39 | ) from e
40 |
41 | # choose threshold for flow activation and define model
42 | def my_act(x):
43 | return linear_activation_p(x, min_w=0, max_w=flow_thre)
44 |
45 |     edges = conn_df[["idx_pre", "idx_post", "rel_in_weight"]].copy()
46 | edges.columns = ["source", "target", "weight"]
47 | model = BayesianTraversalModel(
48 | edges, flow_seed_idx, max_steps=flow_steps + 1, traversal_func=my_act
49 | )
50 |
51 | res = model.run()
52 |
53 | # compute hitting times from cmf (cumulative mass function)
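    # P(first reached at step t) = cmf[t] - cmf[t-1] for t >= 1; the hitting
    # time is then the expectation over steps: sum_t t * P(first reached at t)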
54 | cmf_arr = np.stack(res["cmf"].values)
55 | hitting_prob = np.zeros(cmf_arr.shape)
56 | hitting_prob[:, 1:] = np.diff(cmf_arr, axis=1)
57 | hitting_time = np.dot(hitting_prob, np.arange(0, flow_steps + 1))
58 |
59 | # cmf of unreached and seed cell types is 0
60 | # change hitting time of unreached cell types to flow_steps
61 | idx_unreached = np.where(
62 | (hitting_time < 0.1) & (~np.isin(res["node"], flow_seed_idx))
63 | )[0]
64 | if len(idx_unreached) > 0:
65 | hitting_time[idx_unreached] = flow_steps
66 |
67 | flow_df = pd.DataFrame({"idx": res["node"].values, "hitting_time": hitting_time})
68 |
69 | return flow_df
70 |
71 |
72 | def trim_inprop_by_flow(
73 | inprop: csc_matrix,
74 | meta: pd.DataFrame,
75 |     file_path: str | None = None,
76 | save_prefix: str = "flow_",
77 | flow_seed_types: list[str] = [
78 | "L1",
79 | "L2",
80 | "L3",
81 | "R7p",
82 | "R8p",
83 | "R7y",
84 | "R8y",
85 | "R7d",
86 | "R8d",
87 | "HBeyelet",
88 | ],
89 | flow_steps: int = 20,
90 | flow_thre: float = 0.1,
91 | flow_diff_min: float = 0.5,
92 | flow_diff_max: float = 20,
93 | ):
94 | """
95 |     Trim connections based on the hitting time assigned by the information flow algorithm (navis).
96 |
97 | Args:
98 | inprop (csc_matrix): Input sparse matrix representing connections.
99 |         meta (pd.DataFrame): DataFrame containing metadata with 'idx' and 'cell_type' columns.
100 |         file_path (str): Path to the directory for loading/saving the hitting time data.
        save_prefix (str): Prefix for the saved hitting time file name.
        flow_seed_types (list[str]): Cell types used as seed cells for the flow calculation.
101 | flow_steps (int): Number of steps for flow calculation.
102 | flow_thre (float): Threshold for flow calculation.
103 | flow_diff_min (float): Minimum difference in hitting time for connection retention.
104 | flow_diff_max (float): Maximum difference in hitting time for connection retention.
105 |
106 | Returns:
107 |         csc_matrix: sparse matrix retaining only the connections whose pre- to
108 |             post-synaptic difference in hitting time falls within the specified range.
109 | """
110 |
111 | if file_path is None:
112 | # Check if running in Google Colab
113 | if "COLAB_GPU" in os.environ:
114 | # Running in Colab
115 | file_path = "/content/"
116 | else:
117 | # Running locally
118 | file_path = ""
119 |
120 | # convert inprop to dataframe
121 | coo = inprop.tocoo()
122 | conn_df = pd.DataFrame(
123 | {"idx_pre": coo.row, "idx_post": coo.col, "rel_in_weight": coo.data}
124 | )
125 |
126 | # load hitting time or compute
127 | flow_file_name = os.path.join(
128 | file_path, f"{save_prefix}{flow_steps}step_{flow_thre}thre_hit.csv"
129 | )
130 | if os.path.exists(flow_file_name):
131 | flow_df = pd.read_csv(flow_file_name)
132 | else:
133 | flow_seed_idx = meta[np.isin(meta["cell_type"], flow_seed_types)]["idx"].values
134 | flow_df = compute_flow_hitting_time(
135 | conn_df, flow_seed_idx, flow_steps, flow_thre
136 | )
137 | flow_df = flow_df.merge(meta, how="inner", on="idx")
138 | flow_df.to_csv(flow_file_name, index=False)
139 |
140 | # add hitting time to conn_df
141 | conn_flow_df = (
142 | conn_df.merge(
143 | flow_df[["idx", "hitting_time"]],
144 | how="inner",
145 | left_on="idx_post",
146 | right_on="idx",
147 | )
148 | .drop(columns=["idx"])
149 | .rename(columns={"hitting_time": "hitting_time_post"})
150 | )
151 | conn_flow_df = (
152 | conn_flow_df.merge(
153 | flow_df[["idx", "hitting_time"]],
154 | how="inner",
155 | left_on="idx_pre",
156 | right_on="idx",
157 | )
158 | .drop(columns=["idx"])
159 | .rename(columns={"hitting_time": "hitting_time_pre"})
160 | )
161 |
162 | # remove connections for which difference in hitting time is too small or too large
163 | conn_flow_df = conn_flow_df[
164 | (
165 | conn_flow_df["hitting_time_post"] - conn_flow_df["hitting_time_pre"]
166 | > flow_diff_min
167 | )
168 | & (
169 | conn_flow_df["hitting_time_post"] - conn_flow_df["hitting_time_pre"]
170 | <= flow_diff_max
171 | )
172 | ]
173 | inprop_flow = csc_matrix(
174 | (
175 | conn_flow_df["rel_in_weight"],
176 | (conn_flow_df["idx_pre"], conn_flow_df["idx_post"]),
177 | ),
178 | shape=inprop.shape,
179 | )
180 |
181 | return inprop_flow
182 |
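183 | # Example usage (a sketch; `inprop` and `meta` prepared as described in
184 | # docs/your_own_data.rst -- `meta` needs 'idx' and 'cell_type' columns):
185 | #
186 | #     trimmed = trim_inprop_by_flow(inprop, meta, flow_steps=20, flow_thre=0.1)
187 | #     print(f"kept {trimmed.nnz} of {inprop.nnz} connections")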
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 |
6 | import os
7 | import sys
8 |
9 | sys.path.insert(0, os.path.abspath(".."))
10 |
11 | # -- Project information -----------------------------------------------------
12 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
13 |
14 | project = "Connectome Interpreter"
15 | copyright = "2024, Yijie Yin"
16 | author = "Yijie Yin"
17 | release = "0.2.0"
18 |
19 | # -- General configuration ---------------------------------------------------
20 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
21 |
22 | extensions = [
23 | "sphinx.ext.autodoc",
24 | "sphinx.ext.napoleon",
25 | "sphinx.ext.viewcode",
26 | "sphinx_rtd_theme",
27 | ]
28 |
29 | templates_path = ["_templates"]
30 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
31 |
32 |
33 | # -- Options for HTML output -------------------------------------------------
34 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
35 |
36 | html_theme = "sphinx_rtd_theme"
37 | html_static_path = ["_static"]
38 |
--------------------------------------------------------------------------------
/docs/figures/act_max.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/act_max.png
--------------------------------------------------------------------------------
/docs/figures/column_sum.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/column_sum.png
--------------------------------------------------------------------------------
/docs/figures/effective_input_hist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/effective_input_hist.png
--------------------------------------------------------------------------------
/docs/figures/ei_connectivity.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/ei_connectivity.png
--------------------------------------------------------------------------------
/docs/figures/matmul.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/matmul.pdf
--------------------------------------------------------------------------------
/docs/figures/matmul.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/matmul.png
--------------------------------------------------------------------------------
/docs/figures/path_finding.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/path_finding.png
--------------------------------------------------------------------------------
/docs/figures/pathfinding_comparison.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/pathfinding_comparison.png
--------------------------------------------------------------------------------
/docs/figures/rooted_effective_input_hist.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/rooted_effective_input_hist.png
--------------------------------------------------------------------------------
/docs/figures/simplified_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/simplified_model.png
--------------------------------------------------------------------------------
/docs/figures/sparse_matmul.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/docs/figures/sparse_matmul.png
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. Connectome Interpreter documentation master file, created by
2 | sphinx-quickstart on Thu Feb 22 08:46:53 2024.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Connectome Interpreter's documentation
7 | ==================================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 | tutorials/toc
14 | modules/toc
15 | your_own_data
16 |
17 |
18 |
19 | Indices and tables
20 | ==================
21 |
22 | * :ref:`genindex`
23 | * :ref:`modindex`
24 | * :ref:`search`
25 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/modules/activation_maximisation.rst:
--------------------------------------------------------------------------------
1 | Activation maximisation
2 | =======================
3 |
4 | .. automodule:: connectome_interpreter.activation_maximisation
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/modules/compress_paths.rst:
--------------------------------------------------------------------------------
1 | Compress paths
2 | =====================
3 |
4 | .. automodule:: connectome_interpreter.compress_paths
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/modules/external_map.rst:
--------------------------------------------------------------------------------
1 | Map to external data
2 | =====================
3 |
4 | .. automodule:: connectome_interpreter.external_map
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/modules/external_paths.rst:
--------------------------------------------------------------------------------
1 | External paths
2 | =====================
3 |
4 | .. automodule:: connectome_interpreter.external_paths
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/modules/path_finding.rst:
--------------------------------------------------------------------------------
1 | Path-finding
2 | =====================
3 |
4 | .. automodule:: connectome_interpreter.path_finding
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/modules/toc.rst:
--------------------------------------------------------------------------------
1 | Function documentation
2 | =======================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | activation_maximisation
8 | compress_paths
9 | path_finding
10 | external_map
11 | utils
12 | external_paths
--------------------------------------------------------------------------------
/docs/modules/utils.rst:
--------------------------------------------------------------------------------
1 | Utils
2 | ==============
3 |
4 | .. automodule:: connectome_interpreter.utils
5 | :members:
6 | :undoc-members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | # Documentation build dependencies
2 | Sphinx>=7.2
3 | sphinx_rtd_theme
4 |
5 | # Project dependencies
6 | numpy>=1.26
7 | pandas>=2.1
8 | scipy>=1.11
9 | torch>=1.10
10 | tqdm>=4.65
11 | plotly>=5.19
12 | matplotlib>=3.4
13 | networkx>3.0
14 | ipywidgets>=7.6
15 | seaborn>=0.11
16 | IPython>=7.23
--------------------------------------------------------------------------------
/docs/tutorials/act_max.rst:
--------------------------------------------------------------------------------
1 | Activation maximisation
2 | ========================
3 |
4 | There are now models of the connectome (e.g. `Shiu et al. 2023 `_ or :doc:`our implementation`). But what stimulus should you give to activate your neurons of interest?
5 |
6 | If you don't know anything about the target neuron(s) *a priori*, it is hard to answer this question without searching through all possible stimuli. Even assuming each input neuron has only two states (on/off), with (say) 500 input neurons, that is :math:`2^{500}` combinations to try. We don't want to do this.
7 |
8 | Fortunately, there is already a technique in explainable AI research that addresses this problem: **activation maximisation**. Very cool blogs in this field: `Olah et al. 2017 `_, `Olah et al. 2018 `_, and `Goh et al. 2021 `_.
9 |
10 | Our model of the connectome uses the "multi-layer perceptron" architecture from machine learning. This makes it possible to apply this technique, using `gradient descent` to search for the *optimal* stimulus for the neuron(s) of interest.
11 |
12 | .. figure:: ../figures/act_max.png
13 | :width: 100%
14 | :align: left
15 | :alt: Activation maximisation
16 |
17 | This illustration follows the one in the :doc:`model implementation`. Additionally, the fill colour of each circle indicates the activation of the neuron. As illustrated in the lower panel, the goal is to find the optimal input pattern that maximally activates the target neuron(s) at the specified timepoints.
18 |
19 | Gradient descent
20 | -----------------
21 | There are many blogs/articles online explaining this, but briefly: it's like being lost on a mountain, trying to find the lowest point in the landscape (the foot of the mountain). At every step, you look around and move in the direction that takes you lower than where you were. If you keep doing this, you will end up at *a* foot of the mountain (a local minimum).
22 |
23 |
24 | Loss function
25 | --------------
26 | What are we optimising for? The activation of the target neuron(s), you say.
27 |
28 | But we want more than that. To make the results as interpretable as possible, we want as few as possible (input and non-input) neurons activated, while the target neuron(s) are activated, to extract a *minimal* circuit for maximal activation. So the loss function (what we are trying to minimise, the distance from the foot of the mountain) is:
29 |
30 | .. math::
31 |
32 | \text{Loss} = -\ell_{\text{act}} + \ell_{\text{de-act}} + \ell_{\text{in-reg}} + \ell_{\text{out-reg}}
33 |
34 | where :math:`\ell_{\text{act}}` is the activation loss, :math:`\ell_{\text{de-act}}` the de-activation loss, :math:`\ell_{\text{in-reg}}` the input regularisation loss (the general activation of input neurons), and :math:`\ell_{\text{out-reg}}` the non-input regularisation loss (the general activation of non-input neurons).
35 |
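36 | As a minimal PyTorch sketch (hypothetical tensor and index names; in the package, the regularisation terms are weighted by the `in_reg_lambda` and `out_reg_lambda` arguments of :py:func:`activation_maximisation`):
37 |
38 | .. code-block:: python
39 |
40 |     import torch
41 |
42 |     def loss_sketch(input_tensor, output, act_idx, deact_idx,
43 |                     in_reg_lambda=1e-3, out_reg_lambda=1e-3):
44 |         l_act = output[act_idx].mean()       # activation of the target neurons
45 |         l_deact = output[deact_idx].mean()   # activation of neurons to silence
46 |         l_in = input_tensor.abs().sum()      # overall input-neuron activation
47 |         l_out = output.abs().sum()           # overall non-input activation
48 |         return -l_act + l_deact + in_reg_lambda * l_in + out_reg_lambda * l_out
49 |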
36 | Getting fancy
37 | --------------
38 | Using `TargetActivation`, you can specify the target activation by batch, timepoint, neuron index, and activation value. This means you can mix and match neurons you wish to (de)activate at each timestep. Some interesting questions include:
39 |
40 | - What's the *difference* in the optimal stimuli between two sets of neurons?
41 | - What's the best stimulus pattern for a time-varying *pattern* of target neuron activation?
42 |
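43 | For example (shapes as used in this package's tests; the dictionary form maps layer to `{neuron index: target activation}` and applies to every batch, while the dataframe form specifies each batch explicitly):
44 |
45 | .. code-block:: python
46 |
47 |     import pandas as pd
48 |     from connectome_interpreter.activation_maximisation import TargetActivation
49 |
50 |     # layer 0: neurons 1 and 2; layer 1: neuron 0 -- same targets in both batches
51 |     targets = TargetActivation(targets={0: {1: 0.5, 2: 0.8}, 1: {0: 0.3}}, batch_size=2)
52 |
53 |     # or per batch, as a dataframe:
54 |     targets = TargetActivation(
55 |         targets=pd.DataFrame(
56 |             [
57 |                 {"batch": 0, "layer": 0, "neuron": 1, "value": 0.5},
58 |                 {"batch": 0, "layer": 0, "neuron": 2, "value": 0.8},
59 |                 {"batch": 1, "layer": 1, "neuron": 0, "value": 0.3},
60 |             ]
61 |         )
62 |     )
63 |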
43 | Have fun!
--------------------------------------------------------------------------------
/docs/tutorials/ei_matmul.rst:
--------------------------------------------------------------------------------
1 | Excitatory/Inhibitory "effective connectivity"
2 | ==============================================
3 | The :doc:`path compression method` quantifies the influence one neuron has on another, but does not account for the sign of the connections. Using predicted neurotransmitter information (`Eckstein et al. 2024 `_), we present a method to calculate the excitatory/inhibitory influence.
4 |
5 | We assume that an even number of inhibitory steps in a path is roughly equivalent to excitation. This is a common neuroscience conceptualisation: two inhibitory steps in a row are together termed '**disinhibition**', which in principle only fails to excite the target neuron if the intermediary neuron was not active in the first place (in which case the inhibition from the source has no effect).
6 |
7 | **Naively**, calculating excitatory/inhibitory influence of path length `n` requires:
8 |
9 | 1. enumerating all possible combinations of excitatory/inhibitory steps (e.g. for path length 3, there are :math:`2^3` combinations: e-e-e, e-e-i, e-i-e...);
10 | 2. counting inhibitory steps, and
11 | 3. summing paths with an even number of inhibitory steps.
12 |
13 | This approach is memory-intensive, and the number of possible combinations grows exponentially with path length.
14 |
15 | .. figure:: ../figures/ei_connectivity.png
16 | :width: 100%
17 | :align: left
18 | :alt: Illustration of calculation of excitatory/inhibitory effective connectivity.
19 |
20 | Figure 1. Calculation of excitatory/inhibitory connectivity. Presynaptic neurons are in the rows. Suppose the first two are excitatory (red). Excitation two hops away consists of excitatory-excitatory paths and inhibitory-inhibitory paths (i.e. disinhibition). Similarly, inhibition two hops away consists of excitatory-inhibitory and inhibitory-excitatory paths. The resulting effective excitation and inhibition matrices can be used to calculate the next excitatory/inhibitory step.
21 |
22 |
23 | **Our method** avoids this by summarising 'effective excitation' and 'effective inhibition' at each step. The connectivity matrix (presynaptic neurons in rows, postsynaptic neurons in columns) is split into excitatory and inhibitory parts based on the presynaptic neuron's neurotransmitter type (Figure 1).
24 |
25 | To calculate the weights through two steps of excitation, the matrix with excitatory output rows is multiplied by the matrix with excitatory input columns. The total excitation is the sum of excitatory-excitatory and inhibitory-inhibitory paths. The total inhibition is the sum of excitatory-inhibitory and inhibitory-excitatory paths. These new matrices are then used for subsequent steps of matrix multiplication.
26 |
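27 | A minimal NumPy sketch of this update rule (toy matrix and sign assignments; the package applies the same idea to the real connectome):
28 |
29 | .. code-block:: python
30 |
31 |     import numpy as np
32 |
33 |     rng = np.random.default_rng(0)
34 |     n = 5
35 |     W = rng.random((n, n))
36 |     W /= W.sum(axis=0)  # columns sum to 1 (input proportions)
37 |
38 |     is_exc = np.array([True, True, False, False, False])  # presynaptic signs
39 |     exc = np.where(is_exc[:, None], W, 0)    # rows of excitatory neurons
40 |     inh = np.where(~is_exc[:, None], W, 0)   # rows of inhibitory neurons
41 |
42 |     exc_eff, inh_eff = exc, inh
43 |     for _ in range(2):  # extend the paths by two more steps
44 |         # disinhibition (inh @ inh) counts towards excitation
45 |         exc_eff, inh_eff = exc_eff @ exc + inh_eff @ inh, exc_eff @ inh + inh_eff @ exc
46 |
47 |     # sanity check: their sum is the unsigned effective connectivity
48 |     assert np.allclose(exc_eff + inh_eff, np.linalg.matrix_power(W, 3))
49 |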
--------------------------------------------------------------------------------
/docs/tutorials/matmul.rst:
--------------------------------------------------------------------------------
1 | Paths compression / "effective connectivity" calculation
2 | =========================================================
3 |
4 | All upstream partners of a neuron, by definition, occupy 100% of that neuron's input. Each upstream partner has its own upstream partners, which in turn occupy 100% of *its* input.
5 |
6 | We can, therefore, calculate the contribution from neurons two hops upstream to the target neuron (Figure 1C). For example, for the following path: :math:`A\xrightarrow[]{0.4}B\xrightarrow[]{0.6} C` (i.e. A occupies 40% of B's input, and B occupies 60% of C's input), A's contribution to C is :math:`0.4\times0.6 = 0.24`.
7 |
8 | .. figure:: ../figures/matmul.png
9 | :width: 100%
10 | :align: left
11 | :alt: Paths compression
12 |
13 | Figure 1. Paths compression.
14 |
15 | **A.** Demonstrating the equivalence of two ways of representing the connectivity in a toy circuit.
16 |
17 | **B.** Effective connectivity as matrix multiplication: for instance, the connectivity from :math:`a` to :math:`a`, two hops away (blue outline), is calculated as follows: :math:`0\times0 + 0.3\times0.2 + 0.5\times0.8 = 0.46`.
18 |
19 | :math:`a` reaches :math:`a` through :math:`b` and :math:`c`. The amount of influence :math:`a` has on :math:`a`, two hops away, is the dot product of the source :math:`a`'s output (first matrix, row) and the target :math:`a`'s input (second matrix, column).
20 |
21 | Another way to look at this: :math:`a`'s connectivity from :math:`a`, :math:`b` and :math:`c`, two hops away, can be understood as follows: 0.2 of :math:`a`'s direct input comes from :math:`b`, and 0.8 from :math:`c`. :math:`a`'s two-hop input is therefore :math:`0.2\times \text{column}_b + 0.8\times \text{column}_c`, where :math:`\text{column}_x` is :math:`x`'s direct-input column. This corresponds to the 'column picture' of matrix multiplication.
22 |
23 | **C.** An alternative schematic focusing on one target neuron (i.e. one column in the resulting matrix).
24 |
25 | The calculation of effective connectivity is therefore implemented via matrix multiplications: as illustrated in Figure 1A and 1B, calculating connectivity two hops away, from all neurons to all neurons, is equivalent to multiplying the adjacency matrix (where each column sums to 1) by itself once, similar to a Markov chain.
26 |
27 | Implementation
28 | --------------
29 | We showed above that calculating all-to-all indirect connectivity can be done with matrix multiplications. We also explained :doc:`here<../your_own_data>` why sparse matrices should be used to reduce memory consumption.
30 |
31 | This is however not enough: given the vast divergence and convergence in the nervous system, the sparse matrix quickly becomes dense as it is multiplied with itself, again consuming more memory than we can afford.
32 |
33 | We address this problem here with chunking:
34 |
35 | .. figure:: ../figures/sparse_matmul.png
36 | :width: 100%
37 | :align: left
38 | :alt: Sparse matmul
39 |
40 | Figure 2.
41 |
42 | We split the last matrix to be multiplied into column chunks (orange boxes). Each chunk is a dense matrix. The chunk is then multiplied with all previous sparse full matrices, resulting in a dense matrix the same size as the chunk. This is repeated for all chunks. Thresholding is optional, both during matrix multiplication and when saving the results.
43 |
44 | This way, the memory consumption is the size of the sparse all-to-all connectivity matrix (generally a few hundred MB), and a few times the size of one chunk, whose size is chosen by the user.
45 |
46 | Since matrix multiplication works better on GPUs, this function is a lot faster on GPU, but runs on CPU too.
47 |
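48 | A minimal NumPy/SciPy sketch of the chunking idea (toy sizes and thresholds; not the package's actual implementation, which `compress_paths` provides, with optional GPU support):
49 |
50 | .. code-block:: python
51 |
52 |     import numpy as np
53 |     from scipy.sparse import random as sparse_random
54 |
55 |     W = sparse_random(1000, 1000, density=0.01, format="csr")  # direct connectivity
56 |     n_hops, chunk_size, threshold = 3, 200, 1e-4
57 |
58 |     chunks = []
59 |     for start in range(0, W.shape[1], chunk_size):
60 |         dense_chunk = W[:, start:start + chunk_size].toarray()  # one column chunk
61 |         for _ in range(n_hops - 1):
62 |             dense_chunk = W @ dense_chunk              # sparse @ dense -> dense
63 |             dense_chunk[dense_chunk < threshold] = 0   # optional thresholding
64 |         chunks.append(dense_chunk)
65 |
66 |     effective = np.hstack(chunks)  # all-to-all connectivity exactly n_hops away
67 |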
48 | Glitches
49 | --------
50 | `colsum<=1`:
51 | ++++++++++++
52 | In theory, since all neurons directly upstream occupy 100% of the target neuron's input, all neurons *exactly* `n` hops away should also occupy 100% (Figure 1C). Therefore, to get a simplified measure of the influence from neuron A to neuron B *within*, e.g., 5 hops, you can sum the results of the five matrix multiplications. This value would be less than 5, while B's summed input from *all* neurons would be exactly :math:`5`.
53 | To show this:
54 |
55 | .. code-block:: python
56 |
57 | import numpy as np
58 |
59 | # Define the size of the matrix
60 | n = 5 # You can change this to any desired size
61 |
62 | # Generate a random matrix with values between 0 and 1
63 | matrix = np.random.rand(n, n)
64 |
65 | # Normalize the columns to sum to 1
66 | matrix = matrix / matrix.sum(axis=0)
67 | matrix
68 |
69 | .. code-block:: python
70 |
71 | # you get e.g.
72 | > array([[0.12637487, 0.09282855, 0.22877125, 0.10338055, 0.38200873],
73 | [0.20048119, 0.25219522, 0.21829739, 0.07612923, 0.05527061],
74 | [0.20128841, 0.26499372, 0.21952733, 0.1025118 , 0.3651178 ],
75 | [0.23524778, 0.25096706, 0.19189627, 0.35302955, 0.04821095],
76 | [0.23660774, 0.13901546, 0.14150776, 0.36494887, 0.14939192]])
77 |
78 | .. code-block:: python
79 |
80 | (matrix @ matrix @ matrix).sum(axis = 0)
81 |
82 | .. code-block:: python
83 |
84 | > array([1., 1., 1., 1., 1.])
85 |
86 | However, some sensory neurons have no upstream partners. This means that some paths cannot be further extended upstream. The sum of inputs exactly `n` steps away is therefore :math:`\leq 1`. For instance, using the larval connectome from `Winding et al. 2023 `_ (`code here `_):
87 |
88 | .. figure:: ../figures/column_sum.png
89 | :width: 100%
90 | :align: left
91 | :alt: Column sum <= 1
92 |
93 | Figure 3. Column sum <= 1.
94 |
95 |
96 | Small numbers:
97 | ++++++++++++++
98 | When calculating this effective input proportion, one is essentially multiplying numbers between 0 and 1, which is bound to return smaller numbers the more multiplications take place. (Note this is not why the column sums decrease. Also, the connectivity matrix of the connectome is sparse; with repeated matrix multiplications the resulting matrix becomes increasingly dense, so the total input is spread across more, smaller entries.) Again illustrated using the larval connectome (`code here (scroll downwards a little) `_):
99 |
100 | .. figure:: ../figures/effective_input_hist.png
101 | :width: 100%
102 | :align: left
103 | :alt: Values of effective connectivity get smaller with more matrix multiplications
104 |
105 | Figure 4. Values of effective connectivity get smaller with more matrix multiplications.
106 |
107 | Given the vast amount of convergence and divergence in the connectome, it is reasonable for neurons further away to have a smaller influence (i.e. one neuron can be influenced by many neurons when the path length is long), but it can make the number hard to interpret.
108 |
109 | An alternative implementation is to **`n`-root the products** after `n` matrix multiplications. In the example of :math:`A\xrightarrow[]{0.4}B\xrightarrow[]{0.6} C`, the influence from A to C is :math:`\sqrt{0.4 \times 0.6} \approx 0.49`, which can be thought of as *"the equivalent direct connection strength in a path between A and C, where there is only one neuron in each layer"*, instead of *"A's relative contribution among all other upstream partners exactly two steps away from C"*.
110 |
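111 | Concretely (a small sketch; in the package this corresponds to `compress_paths(root=True)`):
112 |
113 | .. code-block:: python
114 |
115 |     n = 2                  # path length
116 |     effective = 0.4 * 0.6  # A -> B -> C
117 |     print(effective ** (1 / n))  # ~0.49, the "equivalent direct strength"
118 |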
111 | Since there is much convergence and divergence, a neuron can reach another through multiple neurons in the middle. The equivalent connection strength for "where there is only one neuron in the middle" can therefore be big. So n-rooting longer paths results in bigger connection strengths. As illustrated by the larval connectome (`code `_):
112 |
113 | .. figure:: ../figures/rooted_effective_input_hist.png
114 | :width: 100%
115 | :align: left
116 | :alt: Values of rooted effective connectivity get bigger with more matrix multiplications
117 |
118 | Figure 5. Values of *rooted* effective connectivity get bigger with more matrix multiplications.
119 |
120 | Whether the original or the `n`-rooted number is used (`compress_paths(root=True)`) depends on the specific needs of the user. The original gives a rough estimate of the connection strength *relative to other neurons* the same distance away from the target neuron; the `n`-rooted number is perhaps more informative for cross-path-length comparisons.
121 |
--------------------------------------------------------------------------------
/docs/tutorials/path_finding.rst:
--------------------------------------------------------------------------------
1 | Path finding
2 | ============
3 |
4 | After calculating the overall influence from source neurons to target neurons, the next natural question is: *what are the paths between them?*
5 |
6 | First, we need to determine the **distances** over which source neurons exert their influence. The :py:func:`contribution_by_path_lengths` function answers this, with :py:func:`contribution_by_path_lengths_heatmap` providing a heatmap visualisation.
7 |
8 | Once significant path lengths are identified, the :py:func:`find_path_iteratively` function locates paths between source and target neurons (**Figure 1**): starting from the target neurons at the end of the path (of length `n`), we trace one step upstream (with a threshold) using direct connections in the connectome. We then check whether these upstream partners are also "effectively" connected to the source neurons at path length `n-1`. We intersect these two sets:
9 |
10 | 1. neurons directly connected to the target neurons, and
11 | 2. neurons connected to the source neurons through compressed paths.
12 |
13 | This process is repeated until we reach the source neurons.
14 |
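15 | A toy sketch of this intersection idea (hypothetical variable names; the package's :py:func:`find_path_iteratively` handles the thresholds and bookkeeping for you):
16 |
17 | .. code-block:: python
18 |
19 |     import numpy as np
20 |
21 |     def find_paths(W, eff, sources, targets, n, threshold=0.01):
22 |         # W: direct connectivity (pre in rows); eff[k]: effective connectivity
23 |         # over exactly k hops, e.g. np.linalg.matrix_power(W, k)
24 |         edges, current_targets = [], set(targets)
25 |         for step in range(n, 0, -1):
26 |             upstream = set()
27 |             for t in current_targets:
28 |                 for pre in np.flatnonzero(W[:, t] > threshold):  # direct partners
29 |                     # keep pre only if the sources reach it in step-1 hops
30 |                     if step == 1:
31 |                         keep = pre in sources
32 |                     else:
33 |                         keep = (eff[step - 1][sources, pre] > threshold).any()
34 |                     if keep:
35 |                         edges.append((pre, t, W[pre, t]))
36 |                         upstream.add(pre)
37 |             current_targets = upstream
38 |         return edges
39 |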
15 | The :py:func:`find_path_iteratively` function returns a dataframe of edges, which can be
16 |
17 | 1. grouped with :py:func:`group_paths`,
18 | 2. filtered using :py:func:`filter_paths` based on thresholds or necessary intermediate neurons, and
19 | 3. plotted with :py:func:`plot_layered_paths`.
20 |
21 | .. figure:: ../figures/path_finding.png
22 | :width: 100%
23 | :align: left
24 | :alt: Path finding
25 |
26 | Figure 1. Path finding. To find paths between source and target neurons `n` steps away, we start from the target neuron(s) (blue outline), and trace one step upstream (blue edges). Using the effective connectivity matrices, we identify neurons effectively connected to the source neurons (orange fill). We intersect these sets and use the new "target neurons" to continue upstream until reaching the source neurons.
27 |
28 | In the context of **activation maximisation**, while it effectively links (the known) input to (the unknown) target neuron receptive fields, one might still want to identify significant intermediate neurons: Given the output of activation maximisation, :py:func:`activations_to_df` converts the data into a path-like dataframe, with optional activation/connectivity thresholds, grouped by variables of interest (e.g., cell type). To address "off-target" neuron activations (neurons that are active but do not participate in paths from source to target neurons), the :py:func:`remove_excess_neurons` function allows specifying target neurons and identifying only those neurons linking source to target neurons. The results can be plotted with :py:func:`plot_layered_paths`, which can also display neuron activation by colouring the nodes in the plot.
29 |
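30 | For instance (a sketch; array shapes follow this package's tests):
31 |
32 | .. code-block:: python
33 |
34 |     from connectome_interpreter.activation_maximisation import activations_to_df
35 |
36 |     # weights: (n, n) connectivity; input_act: (n_sensory, n_layers);
37 |     # output_act: (n, n_layers); sensory_indices: indices of the input neurons
38 |     paths = activations_to_df(weights, input_act, output_act, sensory_indices)
39 |     # columns: pre, post, weight, layer, pre_activation, post_activation
40 |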
30 | Benchmarking:
31 | ++++++++++++++
32 | To illustrate the speed of this algorithm, we use the FAFB central brain connectivity matrix and look for paths between sensory neurons and descending neurons. Most of the code is `here `_ (though you will likely run out of Colab runtime with `networkx` pathfinding). The data was generated by running similar code (available on request) on HPC.
33 |
34 | .. figure:: ../figures/pathfinding_comparison.png
35 | :width: 100%
36 | :align: left
37 | :alt: Path finding benchmark
38 |
39 | Figure 2. Comparing connectome interpreter with `networkx `_.
40 |
--------------------------------------------------------------------------------
/docs/tutorials/simple_model.rst:
--------------------------------------------------------------------------------
1 | A simplified model of the connectome
2 | =====================================
3 |
4 | Following the thinking process of a "typical neuroscientist", we construct the model with the following assumptions:
5 |
6 | - Signals pass in a stepwise manner from one neuron to another through the synapses. So target neurons multiple synaptic hops away are reached later than those one synaptic hop away.
7 | - Excitation and inhibition take the same time to propagate (that is, one step).
8 | - The activation of a neuron ranges from "not active at all" (0) to "somewhat active" (0~1), to "as active as it can be" (1).
9 |
10 | Unlike a "typical neuroscientist", we also make the following assumptions:
11 |
12 | - Neurons are "points". That is, we disregard synapse location, ion channel composition, cable radius, etc.
13 | - We disregard neuromodulation for now (unless you know what a specific instance of neuromodulation *should* do, in which case you could either model it by modifying the connection weights, or ask me to incorporate new features in the package (`yy432[at]cam.ac.uk `_)).
14 |
15 | With these assumptions, we construct the following model, aiming to provide "connectome-based hypotheses" for your circuit of interest:
16 |
17 | .. figure:: ../figures/simplified_model.png
18 | :width: 100%
19 | :align: left
20 | :alt: Simplified model
21 |
22 |
23 | **Panel A** shows the implementation: *all* neurons are in *each* layer. Signed weights between adjacent layers are defined by the connectome. Each layer is therefore like a timepoint.
24 |
25 | Users can define a set of source neurons (blue/brown circles), which could be e.g. input to the central brain (sensory neurons, visual projection neurons, ascending neurons). External input is provided by activating the source neurons (brown / **Panel B**). The network is silent before any external input is fed in.
26 |
27 | **Panel C** shows the activation function of each neuron: the (signed) weighted sum of the upstream neurons' activity (x) is passed into a Rectified Linear Unit (`ReLU()`), scaled by `excitability`, and then passed into `tanh()`, to keep the activation of each neuron between 0 and 1.
28 |
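29 | In code, the activation function of **Panel C** amounts to something like (a sketch; the `excitability` value is illustrative):
30 |
31 | .. code-block:: python
32 |
33 |     import torch
34 |
35 |     def activation(x, excitability=5.0):
36 |         # weighted input -> ReLU -> scale -> tanh, keeping activation in [0, 1]
37 |         return torch.tanh(excitability * torch.relu(x))
38 |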
29 | An example implementation can be found `here `_, which uses the :py:class:`MultilayeredNetwork` class.
30 |
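31 | As a minimal sketch of using it (toy weights; shapes follow this package's tests):
32 |
33 | .. code-block:: python
34 |
35 |     import numpy as np
36 |     import torch
37 |     from scipy.sparse import csr_matrix
38 |     from connectome_interpreter.activation_maximisation import MultilayeredNetwork
39 |
40 |     n_neurons, n_layers = 10, 3
41 |     weights = np.random.rand(n_neurons, n_neurons)
42 |     weights /= weights.sum(axis=1, keepdims=True)  # toy normalisation
43 |     model = MultilayeredNetwork(csr_matrix(weights), sensory_indices=[0, 1, 2, 3],
44 |                                 num_layers=n_layers)
45 |
46 |     stimulus = torch.rand(4, n_layers)  # one row per sensory neuron, one column per timepoint
47 |     activity = model(stimulus)          # activation of all neurons: (n_neurons, n_layers)
48 |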
31 | Comparison with :doc:`"effective connectivity"`
32 | --------------------------------------------------------
33 | Pros
34 | +++++
35 | - nonlinearity (i.e. the curvature in **panel C**) - a bit more similar to real neurons;
36 | - users can see directly the response from a user-defined input pattern (**panel B**);
37 | - cheaper to compute than "effective connectivity";
38 | - neuron activations don't diminish with the increase in layers / time points, which does happen in the "effective connectivity" calculation;
39 | - almost forces users not to cherry-pick neurons/connections for interpretation in the densely-connected connectome.
40 |
41 | Cons
42 | +++++
43 | - a bit more complicated.
44 |
45 |
46 | Plasticity
47 | -----------
48 | The connectivity in the connectome between some neurons, e.g. ring neurons and compass neurons, is only a *scaffold* for, rather than a direct reflection of, functional connectivity (`Fisher et al. 2022 `_). We therefore implemented a (third-party-dependent) change in weights ("plasticity"), based on the activation similarity of two groups of neurons (:py:func:`change_model_weights`).
--------------------------------------------------------------------------------
/docs/tutorials/toc.rst:
--------------------------------------------------------------------------------
1 | How is it done?
2 | ================
3 |
4 | .. toctree::
5 | :maxdepth: 1
6 |
7 | matmul
8 | ei_matmul
9 | path_finding
10 | simple_model
11 | act_max
--------------------------------------------------------------------------------
/docs/your_own_data.rst:
--------------------------------------------------------------------------------
1 | Using your own dataset
2 | ======================
3 |
4 | Essentially, you need the following two components:
5 |
6 | - a connectivity matrix in sparse form (pre in rows, post in columns), and
7 | - a dataframe containing meta-information (e.g. neuron id, cell type etc.) corresponding to the indices in the connectivity matrix.
8 |
9 | For examples of making these, see the ipython notebooks with "prepare_connectome" in the file name, `here `_.
10 |
11 | Values in the connectivity matrix
12 | ---------------------------------
13 | So far, I have been using input proportion. For instance, the connection weight from A to B is:
14 |
15 | .. math::
16 | \frac{\text{the number of synapses from A to B}}{\text{the total number of post-synapses B has}}
17 |
18 | This means that, except for incoming neurons with no input in the brain, the sum across rows for each column (postsynaptic neuron) is 1. See more discussion on this in the :doc:`effective connectivity calculation tutorial `.
19 |
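19 | For example, with a toy synapse-count matrix:
20 |
21 | .. code-block:: python
22 |
23 |     import numpy as np
24 |
25 |     counts = np.array([[0, 5, 1],
26 |                        [3, 0, 9],
27 |                        [2, 5, 0]])          # synapse counts, pre in rows
28 |     inprop = counts / counts.sum(axis=0)   # each column now sums to 1
29 |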
20 | **Note:** calculating the effective connectivity is essentially getting all the paths of a certain length from source to target neurons, and summing the weights across the paths. This means that if your data is partially reconstructed, or if you are only taking a part of the connectome, you should be cautious, and, if possible, use *all* post-synapses a neuron has as the denominator.
21 |
22 | Why does it need to be sparse?
23 | --------------------------------
24 | Storing big, dense matrices can be memory-intensive. For example, if there are 50,000 neurons, a dense matrix would require:
25 |
26 | .. math::
27 | 50,000^2 \times 32 \text{ bits} / 8 \text{ bits per byte} = 10^{10} \text{ bytes} = 10 \text{ GB}.
28 |
29 | However, if the connectivity matrix is sparse, i.e. only a small fraction of the entries are non-zero, the same connectivity information is better stored as an edgelist (pre, post and connection weight). This is essentially what's happening when you use the `scipy.sparse.coo_matrix `_ format.
30 |
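31 | A sketch of building such a sparse matrix from an edgelist:
32 |
33 | .. code-block:: python
34 |
35 |     import numpy as np
36 |     from scipy.sparse import coo_matrix
37 |
38 |     pre = np.array([0, 0, 1])                             # presynaptic indices
39 |     post = np.array([1, 2, 2])                            # postsynaptic indices
40 |     weight = np.array([0.4, 0.1, 0.6], dtype=np.float32)  # input proportions
41 |
42 |     inprop = coo_matrix((weight, (pre, post)), shape=(3, 3))
43 |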
31 | I'm using `float32` (i.e. can be a decimal number, needs 32 bits to represent) for the connection weights, to balance memory usage and precision (other options include `float16`, `float64` etc.).
32 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools>=42", "wheel"]
3 | build-backend = "setuptools.build_meta"
4 |
5 | [project]
6 | name = "connectome_interpreter"
7 | dynamic = ["version"]
8 |
9 | description = "A tool for connectomics data interpretation"
10 | authors = [{name = "Yijie Yin", email = "yy432@cam.ac.uk"}]
11 | keywords = ["connectomics", "neural network"]
12 | readme = "README.md"
13 | license = {file="LICENSE"}
14 | urls = {"Source & example notebooks" = "https://github.com/YijieYin/connectome_interpreter", "Documentation" = "https://connectome-interpreter.readthedocs.io/en/latest/"}
15 |
16 | dependencies = [
17 | "numpy",
18 | "pandas",
19 | "scipy",
20 | "tqdm",
21 | "plotly",
22 | "matplotlib",
23 | "networkx",
24 | "seaborn",
25 | "ipywidgets",
26 | "IPython",
27 | "torch",
28 | ]
29 |
30 | [project.optional-dependencies]
31 | get_ngl_link = ["nglscenes"]
32 | wandb = ["wandb"]
33 | compute_flow_hitting_time = ["navis"]
34 | plot_layered_paths = ["pyvis"]
35 |
36 | [tool.setuptools.dynamic]
37 | version = {attr = "connectome_interpreter._version.__version__"}
38 |
39 | [tool.setuptools.package-data]
40 | "connectome_interpreter" = ["data/*/*"]
41 |
42 | [tool.setuptools.packages.find]
43 | where = ["."]
44 |
45 |
46 | [tool.pytest.ini_options]
47 | addopts = "" # Add arguments here
48 |
49 | [tool.mypy]
50 | packages = ["connectome_interpreter", "tests"]
51 | install_types = true
52 | ignore_missing_imports = true
53 | follow_untyped_imports = false
54 | pretty = true
55 | show_error_context = true
56 | show_column_numbers = true
57 | show_error_code_links = true
58 |
--------------------------------------------------------------------------------
/requirements-types.txt:
--------------------------------------------------------------------------------
1 | # Packages for the type checker
2 | pandas-stubs
3 | scipy-stubs
4 | types-tqdm
5 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e . # install this package, see pyproject.toml
2 |
3 | # https://stackoverflow.com/questions/43658870/requirements-txt-vs-setup-py
4 |
5 | # Additional developer requirements here
6 | black
7 | flake8
8 | mypy
9 | pytest
10 | pytest-cov
11 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/YijieYin/connectome_interpreter/184cf89330274e3b8b1e6f21c33cf553e24b3ce6/tests/__init__.py
--------------------------------------------------------------------------------
/tests/test_act_max.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import numpy as np
4 | import pandas as pd
5 | import torch
6 | from scipy.sparse import csr_matrix
8 |
9 | from connectome_interpreter.activation_maximisation import (
10 | MultilayeredNetwork,
11 | TargetActivation,
12 | activation_maximisation,
13 | activations_to_df,
14 | activations_to_df_batched,
15 | get_neuron_activation,
16 | )
17 |
18 |
19 | class TestMultilayeredNetwork(unittest.TestCase):
20 | def setUp(self):
21 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
22 | self.num_neurons = 10
23 | self.num_sensory = 4
24 | self.num_layers = 3
25 | self.batch_size = 2
26 |
27 | # Create a dense matrix and convert it to a scipy sparse matrix
28 | dense_weights = np.random.rand(self.num_neurons, self.num_neurons)
29 | dense_weights = dense_weights / dense_weights.sum(axis=1, keepdims=True)
30 | dense_weights[:, :3] = -dense_weights[:, :3]
31 | self.all_weights = csr_matrix(dense_weights) # Convert to scipy sparse matrix
32 | self.sensory_indices = list(range(self.num_sensory))
33 |
34 | self.model = MultilayeredNetwork(
35 | self.all_weights, self.sensory_indices, num_layers=self.num_layers
36 | ).to(self.device)
37 |
38 | def test_initialization(self):
47 | self.assertEqual(self.model.num_layers, self.num_layers)
48 | self.assertEqual(len(self.model.sensory_indices), self.num_sensory)
49 |
50 | # Convert both to numpy arrays for comparison
51 | model_weights = self.model.all_weights.to_dense().cpu().numpy()
52 | expected_weights = self.all_weights.toarray()
53 |
54 | # Use numpy's allclose for a more tolerant comparison
55 | self.assertTrue(
56 | np.allclose(model_weights, expected_weights, rtol=1e-5, atol=1e-5),
57 | "Weights matrices are not equal within tolerance",
58 | )
59 |
60 | def test_forward_pass_2d(self):
61 | print("testing forward pass 2d")
62 | input_tensor = torch.rand(self.num_sensory, self.num_layers).to(self.device)
63 | output = self.model(input_tensor)
64 |
65 | expected_shape = (self.num_neurons, self.num_layers)
66 | self.assertEqual(output.shape, expected_shape)
67 | self.assertTrue(torch.all(output >= -1) and torch.all(output <= 1))
68 |
69 | def test_forward_pass_3d(self):
70 | print("testing forward pass 3d")
71 | input_tensor = torch.rand(
72 | self.batch_size, self.num_sensory, self.num_layers
73 | ).to(self.device)
74 | output = self.model(input_tensor)
75 |
76 | expected_shape = (self.batch_size, self.num_neurons, self.num_layers)
77 | self.assertEqual(output.shape, expected_shape)
78 | self.assertTrue(torch.all(output >= -1) and torch.all(output <= 1))
79 |
80 |
81 | class TestTargetActivation(unittest.TestCase):
82 | def setUp(self):
83 | self.dict_targets = {0: {1: 0.5, 2: 0.8}, 1: {0: 0.3}}
84 | self.df_targets = pd.DataFrame(
85 | [
86 | {"batch": 0, "layer": 0, "neuron": 1, "value": 0.5},
87 | {"batch": 0, "layer": 0, "neuron": 2, "value": 0.8},
88 | {"batch": 1, "layer": 1, "neuron": 0, "value": 0.3},
89 | ]
90 | )
91 |
92 | def test_dict_initialization(self):
93 | target = TargetActivation(targets=self.dict_targets, batch_size=2)
94 | self.assertEqual(target.batch_size, 2)
95 |
96 | batch_targets = target.get_batch_targets(0)
97 | self.assertEqual(batch_targets[0][1], 0.5)
98 |
99 | def test_df_initialization(self):
100 | target = TargetActivation(targets=self.df_targets)
101 | self.assertEqual(target.batch_size, 2)
102 |
103 | batch_targets = target.get_batch_targets(1)
104 | self.assertEqual(batch_targets[1][0], 0.3)
105 |
106 |
107 | class TestActivationMaximisation(unittest.TestCase):
108 | def setUp(self):
109 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
110 |
111 | # Create a dense matrix and convert it to a scipy sparse matrix
112 | dense_weights = np.random.rand(10, 10)
113 | dense_weights = dense_weights / dense_weights.sum(axis=1, keepdims=True)
114 | dense_weights[:, :3] = -dense_weights[:, :3]
115 | self.all_weights = csr_matrix(dense_weights) # Convert to scipy sparse matrix
116 |
117 | self.model = MultilayeredNetwork(
118 | self.all_weights,
119 | sensory_indices=[0, 1, 2, 3],
120 | num_layers=3,
121 | ).to(self.device)
122 |
123 | self.targets = TargetActivation(
124 | {0: {0: 0.5, 1: 0.8}, 1: {2: 0.3}}, batch_size=2
125 | )
126 |
127 | def test_basic_optimization(self):
128 | print("testing basic optimization")
129 |
130 | result = activation_maximisation(
131 | self.model,
132 | self.targets,
133 | num_iterations=10,
134 | in_reg_lambda=1e-3,
135 | out_reg_lambda=1e-3,
136 | wandb=False,
137 | device=self.device,
138 | )
139 |
140 | input_tensor, output, act_losses, *_ = result
141 | expected_shape = (2, 4, 3) # (batch_size, num_sensory, num_layers)
142 | self.assertEqual(input_tensor.shape, expected_shape)
143 | self.assertTrue(act_losses[-1] <= act_losses[0])
144 |
145 | def test_custom_regularization(self):
146 | print("testing custom regularization")
147 |
148 | custom_reg = {
149 | "in": lambda x: torch.sum(torch.abs(x)),
150 | "out": lambda x: torch.sum(torch.abs(x)),
151 | }
152 |
153 | result = activation_maximisation(
154 | self.model,
155 | self.targets,
156 | custom_reg_functions=custom_reg,
157 | num_iterations=10,
158 | wandb=False,
159 | device=self.device,
160 | )
161 |
162 | _, _, _, out_reg_losses, in_reg_losses, _ = result
163 | self.assertTrue(len(in_reg_losses) > 0 and len(out_reg_losses) > 0)
164 |
165 |
166 | class TestActivationsToDF(unittest.TestCase):
167 | def setUp(self):
168 | self.weights = np.array([[0.5, 0.3, 0.0], [0.0, 0.4, 0.2], [0.0, 0.0, 0.6]])
169 | self.input_act = np.array([[0.8, 0.6], [0.7, 0.5]])
170 | self.output_act = np.array([[0.8, 0.6], [0.7, 0.5], [0.6, 0.4]])
171 | self.sensory_indices = [0, 1]
172 |
173 | def test_basic_functionality(self):
174 | paths = activations_to_df(
175 | self.weights, self.input_act, self.output_act, self.sensory_indices
176 | )
177 |
178 | expected_columns = [
179 | "pre",
180 | "post",
181 | "weight",
182 | "layer",
183 | "pre_activation",
184 | "post_activation",
185 | ]
186 | self.assertTrue(all(col in paths.columns for col in expected_columns))
187 | self.assertEqual(paths["layer"].nunique(), self.output_act.shape[1])
188 |
189 | def test_sparse_input(self):
190 | sparse_weights = csr_matrix(self.weights)
191 |
192 | paths_dense = activations_to_df(
193 | self.weights, self.input_act, self.output_act, self.sensory_indices
194 | )
195 | paths_sparse = activations_to_df(
196 | sparse_weights,
197 | self.input_act,
198 | self.output_act,
199 | self.sensory_indices,
200 | )
201 |
202 | pd.testing.assert_frame_equal(paths_dense, paths_sparse)
203 |
204 |
205 | class TestActivationsToDFBatched(unittest.TestCase):
206 | def setUp(self):
207 | self.weights = np.array([[0.5, 0.3, 0.0], [0.0, 0.4, 0.2], [0.0, 0.0, 0.6]])
208 | self.batched_input = np.array(
209 | [[[0.8, 0.6], [0.7, 0.5]], [[0.6, 0.4], [0.5, 0.3]]]
210 | )
211 | self.batched_output = np.array(
212 | [
213 | [[0.8, 0.6], [0.7, 0.5], [0.6, 0.4]],
214 | [[0.6, 0.4], [0.5, 0.3], [0.4, 0.2]],
215 | ]
216 | )
217 | self.sensory_indices = [0, 1]
218 |
219 | def test_batched_processing(self):
220 | print("testing batched processing")
221 | paths = activations_to_df_batched(
222 | self.weights,
223 | self.batched_input,
224 | self.batched_output,
225 | self.sensory_indices,
226 | )
227 |
228 | self.assertTrue("batch" in paths.columns)
229 | self.assertEqual(paths["batch"].nunique(), self.batched_input.shape[0])
230 |
231 |
232 | class TestGetNeuronActivation(unittest.TestCase):
233 |
234 | def test_2d_output_with_groups(self):
235 | output = torch.tensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]])
236 | neuron_indices = [0, 2]
237 | idx_to_group = {0: "A", 2: "B"}
238 |
239 | df = get_neuron_activation(output, neuron_indices, idx_to_group=idx_to_group)
240 |
241 | expected_df = pd.DataFrame(
242 | {
243 | "group": ["A", "B"],
244 | "time_0": [0.1, 0.7],
245 | "time_1": [0.2, 0.8],
246 | "time_2": [0.3, 0.9],
247 | }
248 | )
249 | pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
250 |
251 | def test_2d_output_without_groups(self):
252 | output = torch.tensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]])
253 | neuron_indices = [1]
254 |
255 | df = get_neuron_activation(output, neuron_indices)
256 |
257 | expected_df = pd.DataFrame(
258 | {"group": [1], "time_0": [0.4], "time_1": [0.5], "time_2": [0.6]}
259 | )
260 | pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
261 |
262 | def test_3d_output_with_batch_names_and_groups(self):
263 | output = torch.tensor([[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6], [0.7, 0.8]]])
264 | neuron_indices = [0]
265 | batch_names = ["batch_1", "batch_2"]
266 | idx_to_group = {0: "A"}
267 |
268 | df = get_neuron_activation(
269 | output,
270 | neuron_indices,
271 | batch_names=batch_names,
272 | idx_to_group=idx_to_group,
273 | )
274 |
275 | expected_df = pd.DataFrame(
276 | {
277 | "batch_name": ["batch_1", "batch_2"],
278 | "group": ["A", "A"],
279 | "time_0": [0.1, 0.5],
280 | "time_1": [0.2, 0.6],
281 | }
282 | )
283 | pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
284 |
285 | def test_3d_output_without_batch_names(self):
286 | output = torch.tensor([[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6], [0.7, 0.8]]])
287 | neuron_indices = [1]
288 |
289 | df = get_neuron_activation(output, neuron_indices)
290 |
291 | expected_df = pd.DataFrame(
292 | {
293 | "batch_name": ["batch_0", "batch_1"],
294 | "group": [1, 1],
295 | "time_0": [0.3, 0.7],
296 | "time_1": [0.4, 0.8],
297 | }
298 | )
299 | pd.testing.assert_frame_equal(df, expected_df, check_dtype=False)
300 |
301 | def test_batch_names_mismatch(self):
302 | output = torch.tensor([[[0.1, 0.2], [0.3, 0.4]], [[0.5, 0.6], [0.7, 0.8]]])
303 | neuron_indices = [1]
304 | batch_names = ["batch_1"]
305 |
306 | with self.assertRaises(ValueError):
307 | get_neuron_activation(output, neuron_indices, batch_names=batch_names)
308 |
309 |
312 |
313 | class TestMultilayeredNetworkEnhanced(unittest.TestCase):
314 | def setUp(self):
315 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
316 | self.num_neurons = 10
317 | self.num_sensory = 4
318 | self.num_layers = 3
319 |
320 | # Create weights and mappings
321 | dense_weights = np.random.rand(self.num_neurons, self.num_neurons)
322 | dense_weights = dense_weights / dense_weights.sum(axis=1, keepdims=True)
323 | self.all_weights = csr_matrix(dense_weights)
324 | self.sensory_indices = list(range(self.num_sensory))
325 |
326 | # Create idx_to_group mapping
327 | self.idx_to_group = {i: f"type_{i//3}" for i in range(self.num_neurons)}
328 |
329 | def test_trainable_parameters_initialization(self):
330 | """Test initialization with trainable parameters"""
331 | model = MultilayeredNetwork(
332 | self.all_weights,
333 | self.sensory_indices,
334 | idx_to_group=self.idx_to_group,
335 | default_bias=0.2,
336 | tanh_steepness=3.0,
337 | ).to(self.device)
338 |
339 | # Check that parameters exist
340 | self.assertIsNotNone(model.slope)
341 | self.assertIsNotNone(model.biases)
342 | self.assertIsNotNone(model.indices)
343 |
344 | # Check parameter shapes
345 | num_types = len(set(self.idx_to_group.values()))
346 | self.assertEqual(model.slope.shape[0], num_types)
347 | self.assertEqual(model.biases.shape[0], num_types)
348 |
349 | def test_dict_parameter_values(self):
350 | """Test initialization with dictionary parameter values"""
351 | bias_dict = {"type_0": 0.1, "type_1": 0.2, "type_2": 0.3}
352 | slope_dict = {"type_0": 2.0, "type_1": 4.0, "type_2": 6.0}
353 |
354 | model = MultilayeredNetwork(
355 | self.all_weights,
356 | self.sensory_indices,
357 | idx_to_group=self.idx_to_group,
358 | bias_dict=bias_dict,
359 | slope_dict=slope_dict,
360 | ).to(self.device)
361 |
362 | # Check that parameters were set correctly
363 | # The order depends on set() ordering, so check individual values
364 | unique_types = sorted(set(self.idx_to_group.values()))
365 | for i, type_name in enumerate(unique_types):
366 | if type_name in bias_dict:
367 | expected_bias = bias_dict[type_name]
368 | else:
369 | expected_bias = 0 # default
370 | self.assertAlmostEqual(model.raw_biases[i].item(), expected_bias, places=6)
371 |
372 | def test_custom_activation_function(self):
373 | """Test custom activation function"""
374 |
375 | def custom_activation(x):
376 | return torch.sigmoid(x)
377 |
378 | model = MultilayeredNetwork(
379 | self.all_weights,
380 | self.sensory_indices,
381 | activation_function=custom_activation,
382 | ).to(self.device)
383 |
384 | input_tensor = torch.rand(self.num_sensory, self.num_layers).to(self.device)
385 | output = model(input_tensor)
386 |
387 | # Check that output is in sigmoid range [0, 1]
388 | self.assertTrue(torch.all(output >= 0) and torch.all(output <= 1))
389 |
390 | def test_backward_compatibility(self):
391 | """Test that model works without new parameters (backward compatibility)"""
392 | model = MultilayeredNetwork(
393 | self.all_weights, self.sensory_indices, num_layers=self.num_layers
394 | ).to(self.device)
395 |
396 | # Check that trainable parameters are None
397 | self.assertIsNone(model.slope)
398 | self.assertIsNone(model.biases)
399 | self.assertIsNone(model.indices)
400 |
401 | # Check that forward pass still works
402 | input_tensor = torch.rand(self.num_sensory, self.num_layers).to(self.device)
403 | output = model(input_tensor)
404 | self.assertEqual(output.shape, (self.num_neurons, self.num_layers))
405 |
406 | def test_parameter_gradient_control(self):
407 | """Test set_param_grads method"""
408 | model = MultilayeredNetwork(
409 | self.all_weights, self.sensory_indices, idx_to_group=self.idx_to_group
410 | ).to(self.device)
411 |
412 | # Initially parameters shouldn't require grad
413 | self.assertFalse(model.slope.requires_grad)
414 | self.assertFalse(model.raw_biases.requires_grad)
415 |
416 | # Enable gradients
417 | model.set_param_grads(slopes=True, raw_biases=True)
418 | self.assertTrue(model.slope.requires_grad)
419 | self.assertTrue(model.raw_biases.requires_grad)
420 |
421 | # Disable gradients
422 | model.set_param_grads(slopes=False, raw_biases=False)
423 | self.assertFalse(model.slope.requires_grad)
424 | self.assertFalse(model.raw_biases.requires_grad)
425 |
426 |
427 | class TestContextManagers(unittest.TestCase):
428 | def setUp(self):
429 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
430 | dense_weights = np.random.rand(5, 5)
431 | self.all_weights = csr_matrix(dense_weights)
432 | self.sensory_indices = [0, 1]
433 | self.idx_to_group = {i: f"type_{i}" for i in range(5)}
434 |
435 | def test_training_mode_context(self):
436 | """Test training_mode context manager"""
437 | from connectome_interpreter.activation_maximisation import training_mode
438 |
439 | model = MultilayeredNetwork(
440 | self.all_weights, self.sensory_indices, idx_to_group=self.idx_to_group
441 | ).to(self.device)
442 |
443 | # Initially no gradients
444 | self.assertFalse(model.slope.requires_grad)
445 | self.assertFalse(model.raw_biases.requires_grad)
446 |
447 | # Inside context, gradients should be enabled
448 | with training_mode(model):
449 | self.assertTrue(model.slope.requires_grad)
450 | self.assertTrue(model.raw_biases.requires_grad)
451 |
452 | # After context, gradients should be disabled
453 | self.assertFalse(model.slope.requires_grad)
454 | self.assertFalse(model.raw_biases.requires_grad)
455 |
456 |
457 | class TestTrainModelEnhanced(unittest.TestCase):
458 | def setUp(self):
459 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
460 | dense_weights = np.random.rand(6, 6) * 0.1
461 | self.all_weights = csr_matrix(dense_weights)
462 | self.sensory_indices = [0, 1]
463 | self.idx_to_group = {i: f"type_{i//2}" for i in range(6)}
464 |
465 | self.model = MultilayeredNetwork(
466 | self.all_weights,
467 | self.sensory_indices,
468 | num_layers=2,
469 | idx_to_group=self.idx_to_group,
470 | ).to(self.device)
471 |
472 | def test_time_series_targets(self):
473 |         """Test training with time-series targets (rows carry a 'layer' column)"""
474 | from connectome_interpreter.activation_maximisation import train_model
475 |
476 |         # Create inputs and time-series targets
477 | inputs = torch.rand(4, 2, 2).to(self.device)
478 | targets = pd.DataFrame(
479 | [
480 | {"batch": 0, "neuron_idx": 2, "layer": 0, "value": 0.5},
481 | {"batch": 0, "neuron_idx": 2, "layer": 1, "value": 0.8},
482 | {"batch": 1, "neuron_idx": 3, "layer": 0, "value": 0.3},
483 | {"batch": 2, "neuron_idx": 4, "layer": 1, "value": 0.7},
484 | {"batch": 3, "neuron_idx": 5, "layer": 0, "value": 0.4},
485 | ]
486 | )
487 |
488 | # Train model
489 | result = train_model(self.model, inputs, targets, num_epochs=5, wandb=False)
490 |
491 | model, history, *_ = result
492 |
493 | # Check that training occurred
494 | self.assertTrue(len(history["loss"]) > 0)
495 | self.assertIsInstance(history["loss"][0], float)
496 |
497 | def test_backward_compatible_targets(self):
498 |         """Test training with old-format targets (no 'layer' column)"""
499 | from connectome_interpreter.activation_maximisation import train_model
500 |
501 | inputs = torch.rand(4, 2, 2).to(self.device)
502 | targets = pd.DataFrame(
503 | [
504 | {"batch": 0, "neuron_idx": 2, "value": 0.5},
505 | {"batch": 1, "neuron_idx": 3, "value": 0.3},
506 | {"batch": 2, "neuron_idx": 4, "value": 0.7},
507 | {"batch": 3, "neuron_idx": 5, "value": 0.4},
508 | ]
509 | )
510 |
511 | # Should work without error
512 | result = train_model(self.model, inputs, targets, num_epochs=3, wandb=False)
513 |
514 | model, history, *_ = result
515 | self.assertTrue(len(history["loss"]) > 0)
516 |
517 |
518 | class TestSaliencyEnhanced(unittest.TestCase):
519 | def setUp(self):
520 | self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
521 | dense_weights = np.random.rand(8, 8) * 0.1
522 | self.all_weights = csr_matrix(dense_weights)
523 | self.sensory_indices = [0, 1, 2]
524 | self.idx_to_group = {i: f"type_{i//2}" for i in range(8)}
525 |
526 | def test_saliency_with_trainable_model(self):
527 | """Test saliency computation with trainable parameters"""
528 | from connectome_interpreter.activation_maximisation import saliency
529 |
530 | model = MultilayeredNetwork(
531 | self.all_weights,
532 | self.sensory_indices,
533 | num_layers=2,
534 | idx_to_group=self.idx_to_group,
535 | ).to(self.device)
536 |
537 | input_tensor = torch.rand(3, 2).to(self.device)
538 | neurons_of_interest = {0: [4, 5], 1: [6, 7]}
539 |
540 | saliency_maps = saliency(
541 | model, input_tensor, neurons_of_interest, device=self.device
542 | )
543 |
544 | # Check output shape and that gradients were computed
545 | self.assertEqual(saliency_maps.shape, input_tensor.shape)
546 | self.assertFalse(torch.allclose(saliency_maps, torch.zeros_like(saliency_maps)))
547 |
548 | def test_saliency_methods(self):
549 | """Test different saliency methods"""
550 | from connectome_interpreter.activation_maximisation import saliency
551 |
552 | model = MultilayeredNetwork(
553 | self.all_weights, self.sensory_indices, num_layers=2
554 | ).to(self.device)
555 |
556 | input_tensor = torch.rand(3, 2).to(self.device)
557 | neurons_of_interest = {0: [4], 1: [5]}
558 |
559 | # Test vanilla saliency
560 | vanilla_sal = saliency(
561 | model,
562 | input_tensor,
563 | neurons_of_interest,
564 | method="vanilla",
565 | device=self.device,
566 | )
567 |
568 | # Test input_x_gradient saliency
569 | ixg_sal = saliency(
570 | model,
571 | input_tensor,
572 | neurons_of_interest,
573 | method="input_x_gradient",
574 | device=self.device,
575 | )
576 |
577 | # Should produce different results
578 | self.assertFalse(torch.allclose(vanilla_sal, ixg_sal))
579 |
580 | def test_saliency_invalid_method(self):
581 | """Test saliency with invalid method"""
582 | from connectome_interpreter.activation_maximisation import saliency
583 |
584 | model = MultilayeredNetwork(
585 | self.all_weights, self.sensory_indices, num_layers=2
586 | ).to(self.device)
587 |
588 | input_tensor = torch.rand(3, 2).to(self.device)
589 | neurons_of_interest = {0: [4]}
590 |
591 | with self.assertRaises(ValueError):
592 | saliency(
593 | model,
594 | input_tensor,
595 | neurons_of_interest,
596 | method="invalid_method",
597 | device=self.device,
598 | )
599 |
600 |
601 | class TestTargetActivationEnhanced(unittest.TestCase):
602 | def test_time_series_targets_dict(self):
603 |         """Test TargetActivation with a {layer: {neuron_idx: value}} targets dict"""
604 |         targets_dict = {0: {1: 0.5, 2: 0.8}, 1: {0: 0.3, 3: 0.7}}
605 | target = TargetActivation(targets=targets_dict, batch_size=3)
606 |
607 | # Check that all batches have the same targets
608 | batch_0_targets = target.get_batch_targets(0)
609 | batch_2_targets = target.get_batch_targets(2)
610 | self.assertEqual(batch_0_targets, batch_2_targets)
611 |
612 | # Check layer structure
613 | self.assertIn(0, batch_0_targets) # layer 0
614 | self.assertIn(1, batch_0_targets) # layer 1
615 |
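616 | 
617 | # A minimal end-to-end sketch (not a test case) of how the pieces exercised
618 | # above compose: build a model, enable its trainable parameters with
619 | # training_mode, fit towards a handful of targets, then ask for a saliency
620 | # map. Only the API already seen in these tests is used; the toy sizes,
621 | # random data and epoch count are illustrative assumptions, not
622 | # recommendations.
623 | def _example_training_run():  # illustrative sketch, never run by the suite
624 |     from connectome_interpreter.activation_maximisation import (
625 |         saliency,
626 |         train_model,
627 |         training_mode,
628 |     )
629 | 
630 |     weights = csr_matrix(np.random.rand(6, 6) * 0.1)
631 |     model = MultilayeredNetwork(
632 |         weights,
633 |         [0, 1],  # sensory indices
634 |         num_layers=2,
635 |         idx_to_group={i: f"type_{i // 2}" for i in range(6)},
636 |     )
637 |     inputs = torch.rand(4, 2, 2)  # (batch, sensory neurons, layers)
638 |     targets = pd.DataFrame(
639 |         [{"batch": 0, "neuron_idx": 2, "layer": 0, "value": 0.5}]
640 |     )
641 |     # gradients for slope / raw_biases are only enabled inside the context
642 |     with training_mode(model):
643 |         model, history, *_ = train_model(
644 |             model, inputs, targets, num_epochs=2, wandb=False
645 |         )
646 |     print(f"final loss: {history['loss'][-1]:.4f}")
647 |     # the saliency map has the same shape as its input tensor
648 |     return saliency(
649 |         model,
650 |         torch.rand(2, 2),
651 |         {0: [4], 1: [5]},  # {layer: [neuron indices]}
652 |         method="vanilla",
653 |         device=torch.device("cpu"),
654 |     )
655 | 
656 | 
657 | if __name__ == "__main__":
658 |     unittest.main()
659 | 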
--------------------------------------------------------------------------------
/tests/test_path_finding.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | import random
3 | import unittest
4 |
5 | import pandas as pd
6 |
7 | from connectome_interpreter.path_finding import find_xor
8 |
9 |
10 | class TestFindXOR(unittest.TestCase):
11 |
12 | def create_test_df(self, edges):
13 | """Helper function to create test DataFrame from edge list"""
14 | return pd.DataFrame(edges, columns=["pre", "post", "sign", "layer"])
15 |
16 | def test_basic_xor_circuit(self):
17 | """Test basic XOR circuit with single inputs/outputs"""
18 | edges = [
19 | ("a", "c", 1, 1),
20 | ("b", "d", 1, 1),
21 | ("a", "e", 1, 1),
22 | ("b", "e", 1, 1),
23 | ("c", "f", 1, 2),
24 | ("d", "f", 1, 2),
25 | ("e", "f", -1, 2),
26 | ]
27 | df = self.create_test_df(edges)
28 | circuits = find_xor(df)
29 | self.assertEqual(len(circuits), 1)
30 | circuit = circuits[0]
31 | self.assertEqual(circuit.input1, ["a"])
32 | self.assertEqual(circuit.input2, ["b"])
33 | self.assertEqual({"c", "d"}, {circuit.exciter1, circuit.exciter2})
34 | self.assertEqual(circuit.inhibitor, "e")
35 | self.assertEqual(circuit.output, ["f"])
36 |
37 | def test_multiple_inputs_outputs(self):
38 | """Test XOR circuit with multiple inputs and outputs"""
39 | edges = [
40 | ("a1", "c", 1, 1),
41 | ("a2", "c", 1, 1),
42 | ("b1", "d", 1, 1),
43 | ("b2", "d", 1, 1),
44 | ("a1", "e", 1, 1),
45 | ("a2", "e", 1, 1),
46 | ("b1", "e", 1, 1),
47 | ("b2", "e", 1, 1),
48 | ("c", "f1", 1, 2),
49 | ("c", "f2", 1, 2),
50 | ("d", "f1", 1, 2),
51 | ("d", "f2", 1, 2),
52 | ("e", "f1", -1, 2),
53 | ("e", "f2", -1, 2),
54 | ]
55 | df = self.create_test_df(edges)
56 | circuits = find_xor(df)
57 | self.assertEqual(len(circuits), 1)
58 | circuit = circuits[0]
59 | self.assertEqual(set(circuit.input1), {"a1", "a2"})
60 | self.assertEqual(set(circuit.input2), {"b1", "b2"})
61 | self.assertEqual({"c", "d"}, {circuit.exciter1, circuit.exciter2})
62 | self.assertEqual(circuit.inhibitor, "e")
63 | self.assertEqual(set(circuit.output), {"f1", "f2"})
64 |
65 | def test_no_xor_circuit(self):
66 | """Test case where no XOR circuit exists"""
67 | edges = [
68 | ("a", "c", 1, 1),
69 | ("b", "d", 1, 1),
70 | ("c", "f", 1, 2),
71 | ("d", "f", 1, 2),
72 | ]
73 | df = self.create_test_df(edges)
74 | circuits = find_xor(df)
75 | self.assertEqual(len(circuits), 0)
76 |
77 | def test_invalid_layer_numbers(self):
78 | """Test error handling for invalid layer numbers"""
79 | edges = [("a", "b", 1, 1), ("b", "c", 1, 3)]
80 | df = self.create_test_df(edges)
81 | with self.assertRaises(ValueError):
82 | find_xor(df)
83 |
84 | def test_multiple_xor_circuits(self):
85 | """Test detection of multiple XOR circuits"""
86 | edges = [
87 | ("a1", "c1", 1, 1),
88 | ("b1", "d1", 1, 1),
89 | ("a1", "e1", 1, 1),
90 | ("b1", "e1", 1, 1),
91 | ("c1", "f1", 1, 2),
92 | ("d1", "f1", 1, 2),
93 | ("e1", "f1", -1, 2),
94 | ("a2", "c2", 1, 1),
95 | ("b2", "d2", 1, 1),
96 | ("a2", "e2", 1, 1),
97 | ("b2", "e2", 1, 1),
98 | ("c2", "f2", 1, 2),
99 | ("d2", "f2", 1, 2),
100 | ("e2", "f2", -1, 2),
101 | ]
102 | df = self.create_test_df(edges)
103 | circuits = find_xor(df)
104 | self.assertEqual(len(circuits), 2)
105 | for circuit in circuits:
106 | self.assertEqual(len(circuit.input1), 1)
107 | self.assertEqual(len(circuit.input2), 1)
108 | self.assertIn(circuit.inhibitor, ["e1", "e2"])
109 | self.assertEqual(len(circuit.output), 1)
110 |
111 | def test_error_conditions(self):
112 | """Test various error conditions and edge cases"""
113 | with self.assertRaises(ValueError):
114 | find_xor(pd.DataFrame(columns=["pre", "post", "sign", "layer"]))
115 | incomplete_df = pd.DataFrame(
116 | {"pre": ["a", "b"], "post": ["c", "d"], "layer": [1, 2]}
117 | )
118 | with self.assertRaises(ValueError):
119 | find_xor(incomplete_df)
120 | invalid_signs = [
121 | ("a", "c", 2, 1),
122 | ("b", "d", 1, 1),
123 | ("c", "f", 1, 2),
124 | ("d", "f", -1, 2),
125 | ]
126 | df = self.create_test_df(invalid_signs)
127 | with self.assertRaises(ValueError):
128 | find_xor(df)
129 | disconnected = [("a", "c", 1, 1), ("b", "d", 1, 1), ("e", "f", -1, 2)]
130 | df = self.create_test_df(disconnected)
131 | circuits = find_xor(df)
132 | self.assertEqual(len(circuits), 0)
133 |
134 | def test_overlapping_xor_circuits(self):
135 | """Test detection of XOR circuits that share nodes"""
136 | edges = [
137 | ("a", "c", 1, 1),
138 | ("b1", "d1", 1, 1),
139 | ("b2", "d2", 1, 1),
140 | ("a", "e1", 1, 1),
141 | ("b1", "e1", 1, 1),
142 | ("a", "e2", 1, 1),
143 | ("b2", "e2", 1, 1),
144 | ("c", "f1", 1, 2),
145 | ("c", "f2", 1, 2),
146 | ("d1", "f1", 1, 2),
147 | ("d2", "f2", 1, 2),
148 | ("e1", "f1", -1, 2),
149 | ("e2", "f2", -1, 2),
150 | ]
151 | df = self.create_test_df(edges)
152 | circuits = find_xor(df)
153 | self.assertGreater(len(circuits), 0)
154 | input1_nodes = set()
155 | for circuit in circuits:
156 | input1_nodes.update(circuit.input1)
157 | self.assertIn("a", input1_nodes)
158 |
159 | def test_specific_topologies(self):
160 | """Test specific circuit topologies"""
161 | convergent = [
162 | ("a1", "c", 1, 1),
163 | ("a2", "c", 1, 1),
164 | ("b1", "d", 1, 1),
165 | ("b2", "d", 1, 1),
166 | ("a1", "e", 1, 1),
167 | ("a2", "e", 1, 1),
168 | ("b1", "e", 1, 1),
169 | ("b2", "e", 1, 1),
170 | ("c", "f", 1, 2),
171 | ("d", "f", 1, 2),
172 | ("e", "f", -1, 2),
173 | ]
174 | df = self.create_test_df(convergent)
175 | circuits = find_xor(df)
176 | self.assertGreater(len(circuits), 0)
177 | self.assertGreater(len(circuits[0].input1), 1)
178 |
179 | divergent = [
180 | ("a", "c", 1, 1),
181 | ("b", "d", 1, 1),
182 | ("a", "e", 1, 1),
183 | ("b", "e", 1, 1),
184 | ("c", "f1", 1, 2),
185 | ("c", "f2", 1, 2),
186 | ("d", "f1", 1, 2),
187 | ("d", "f2", 1, 2),
188 | ("e", "f1", -1, 2),
189 | ("e", "f2", -1, 2),
190 | ]
191 | df = self.create_test_df(divergent)
192 | circuits = find_xor(df)
193 | self.assertGreater(len(circuits), 0)
194 | self.assertGreater(len(circuits[0].output), 1)
195 |
196 | def test_fully_connected_network(self):
197 |         """Test a fully connected two-layer network with random output signs"""
198 | edges = []
199 | inputs = ["a", "b"]
200 | middles = ["c", "d", "e"]
201 | outputs = ["f"]
202 | for inp in inputs:
203 | for mid in middles:
204 | edges.append((inp, mid, 1, 1))
205 | for mid in middles:
206 | for out in outputs:
207 | edges.append((mid, out, random.choice([1, -1]), 2))
208 | df = self.create_test_df(edges)
209 | circuits = find_xor(df)
210 | max_possible = len(list(itertools.combinations(middles, 3)))
211 | self.assertLessEqual(len(circuits), max_possible)
212 |
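213 | 
214 | # A minimal usage sketch (not a test case). find_xor takes an edge list with
215 | # pre/post/sign/layer columns and returns circuit objects exposing input1,
216 | # input2, exciter1, exciter2, inhibitor and output, exactly as exercised by
217 | # the tests above; the edge values below are illustrative only.
218 | def _example_find_xor():  # illustrative sketch, never run by the suite
219 |     edges = [
220 |         ("a", "c", 1, 1),  # input a excites c
221 |         ("b", "d", 1, 1),  # input b excites d
222 |         ("a", "e", 1, 1),  # both inputs excite
223 |         ("b", "e", 1, 1),  #   the shared inhibitor e
224 |         ("c", "f", 1, 2),
225 |         ("d", "f", 1, 2),
226 |         ("e", "f", -1, 2),  # e inhibits the output f
227 |     ]
228 |     df = pd.DataFrame(edges, columns=["pre", "post", "sign", "layer"])
229 |     for circuit in find_xor(df):
230 |         print(circuit.input1, circuit.input2, circuit.inhibitor, circuit.output)
231 | 
232 | 
233 | if __name__ == "__main__":
234 |     unittest.main()
235 | 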
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import numpy as np
4 | import pandas as pd
5 | from scipy.sparse import coo_matrix
6 |
7 | from connectome_interpreter.utils import modify_coo_matrix
8 |
9 |
10 | class TestModifyCooMatrix(unittest.TestCase):
11 |
12 | def setUp(self):
13 | # Create a small sparse matrix with float data type and values between 0 and 1
14 | self.example_coo = coo_matrix(
15 | np.array([[0.1, 0.2, 0], [0, 0, 0.3], [0.4, 0, 0.5]], dtype=np.float32)
16 | ) # Ensure dtype is float32
17 |
18 | # DataFrame for batch updates with float values
19 | self.updates_df = pd.DataFrame(
20 | {
21 | "input_idx": [0, 2],
22 | "output_idx": [2, 1],
23 | "value": [0.25, 0.75], # Use values between 0 and 1
24 | }
25 | )
26 |
27 | def test_single_update(self):
28 | # Test updating a single value with a float value between 0 and 1
29 | result = modify_coo_matrix(
30 | self.example_coo, input_idx=1, output_idx=2, value=0.99, re_normalize=False
31 | )
32 | self.assertAlmostEqual(result.toarray()[1, 2], 0.99)
33 |
34 | def test_batch_update(self):
35 | # Test updating using a DataFrame of updates with float values
36 | result = modify_coo_matrix(
37 | self.example_coo, updates_df=self.updates_df, re_normalize=False
38 | )
39 | self.assertAlmostEqual(result.toarray()[0, 2], 0.25)
40 | self.assertAlmostEqual(result.toarray()[2, 1], 0.75)
41 |
42 | def test_renormalize(self):
43 | # Test the re-normalization functionality with float values
44 | result = modify_coo_matrix(
45 | self.example_coo, updates_df=self.updates_df, re_normalize=True
46 | )
47 | updated_cols = self.updates_df["output_idx"].unique()
48 |         # Updated columns should sum to (approximately) 1
49 | column_sums = np.array(result.sum(axis=0))[0]
50 | for col_sum in column_sums[updated_cols]:
51 |             # allow a small margin for floating-point arithmetic
52 | self.assertTrue(0.99 <= col_sum <= 1.01)
53 |
54 | def test_handle_zero_colsums(self):
55 |         # Columns whose sums become zero after the update should stay zero
56 | zero_updates_df = pd.DataFrame(
57 | {
58 | "input_idx": [1, 0],
59 | "output_idx": [2, 1],
60 |                 "value": [0.0, 0.0],  # float zeros keep the dtype consistent
61 | }
62 | )
63 | result = modify_coo_matrix(
64 | self.example_coo, updates_df=zero_updates_df, re_normalize=True
65 | )
66 | column_sums = np.array(result.sum(axis=0))[0]
67 | self.assertAlmostEqual(column_sums[1], 0.0)
68 |
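69 | 
70 | # A minimal usage sketch (not a test case). modify_coo_matrix accepts either
71 | # a single (input_idx, output_idx, value) update or a batch via updates_df;
72 | # with re_normalize=True the updated columns are rescaled to sum to 1. Only
73 | # the arguments exercised by the tests above are assumed here.
74 | def _example_modify():  # illustrative sketch, never run by the suite
75 |     mat = coo_matrix(np.eye(3, dtype=np.float32) * 0.5)
76 |     updates = pd.DataFrame({"input_idx": [0], "output_idx": [1], "value": [0.25]})
77 |     return modify_coo_matrix(mat, updates_df=updates, re_normalize=True)
78 | 
79 | 
80 | if __name__ == "__main__":
81 |     unittest.main()
82 | 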
--------------------------------------------------------------------------------