├── .github └── workflows │ └── test_and_deploy.yml ├── .gitignore ├── .pre-commit-config.yaml ├── CITATION.cff ├── LICENSE ├── MANIFEST.in ├── README.md ├── brainrender ├── __init__.py ├── _colors.py ├── _io.py ├── _jupyter.py ├── _utils.py ├── _video.py ├── actor.py ├── actors │ ├── __init__.py │ ├── cylinder.py │ ├── line.py │ ├── neurons.py │ ├── points.py │ ├── ruler.py │ ├── streamlines.py │ └── volume.py ├── atlas.py ├── atlas_specific │ ├── __init__.py │ └── allen_brain_atlas │ │ ├── gene_expression │ │ ├── __init__.py │ │ ├── api.py │ │ └── ge_utils.py │ │ └── streamlines.py ├── camera.py ├── cameras.py ├── render.py ├── scene.py ├── settings.py └── video.py ├── examples ├── __init__.py ├── add_cells.py ├── add_cylinder.py ├── add_labels.py ├── add_mesh_from_file.py ├── animation.py ├── animation_callback.py ├── brain_regions.py ├── brainglobe_atlases.py ├── brainmapper.py ├── brainmapper_regions.py ├── cell_density.py ├── custom_camera.py ├── gene_expression.py ├── line.py ├── mirror_actors.py ├── neurons.py ├── notebook_workflow.ipynb ├── probe_tracks.py ├── regions_single_hemisphere.py ├── ruler.py ├── screenshot.py ├── settings.py ├── slice.py ├── streamlines.py ├── user_volumetric_data.py ├── video.py ├── volumetric_data.py └── web_export.py ├── imgs ├── cellfinder_cells_3.png ├── gene_expression.png ├── human_regions.png ├── injection_2.png ├── mouse_neurons_2.png ├── probes.png ├── three_atlases.png ├── zfish_functional_clusters_2.png ├── zfish_gene_expression.png ├── zfish_neurons.png └── zfish_regions.png ├── pyproject.toml ├── resources ├── CC_134_1_ch1inj.obj ├── CC_134_2_ch1inj.obj ├── neuron1.swc ├── points.npy ├── probe_1_striatum.npy ├── probe_2_RSP.npy ├── random_cells.h5 ├── random_cells.npy └── volume.npy └── tests ├── __init__.py ├── data ├── screenshot.jpg └── screenshot.png ├── test_01_atlas_download.py ├── test_aba_gene.py ├── test_actor.py ├── test_atlas.py ├── test_camera.py ├── test_colors.py ├── test_cylinder.py ├── 
test_examples.py ├── test_export_html.py ├── test_integration.py ├── test_line.py ├── test_neuron.py ├── test_points.py ├── test_ruler.py ├── test_scene.py ├── test_screenshot.py ├── test_streamlines.py ├── test_utils.py ├── test_video.py └── test_volume.py /.github/workflows/test_and_deploy.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'main' 7 | tags: 8 | - 'v**' 9 | pull_request: 10 | schedule: 11 | # Runs at 6:10am UTC on Monday 12 | - cron: '10 6 * * 1' 13 | workflow_dispatch: 14 | 15 | concurrency: 16 | # Cancel this workflow if it is running, 17 | # and then changes are applied on top of the HEAD of the branch, 18 | # triggering another run of the workflow 19 | group: ${{ github.workflow }}-${{ github.ref }} 20 | cancel-in-progress: true 21 | 22 | jobs: 23 | linting: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: neuroinformatics-unit/actions/lint@v2 27 | 28 | manifest: 29 | name: Check Manifest 30 | runs-on: ubuntu-latest 31 | steps: 32 | - uses: neuroinformatics-unit/actions/check_manifest@v2 33 | 34 | test: 35 | needs: [linting, manifest] 36 | name: ${{ matrix.os }} py${{ matrix.python-version }} 37 | runs-on: ${{ matrix.os }} 38 | strategy: 39 | fail-fast: false 40 | matrix: 41 | # Run all supported Python versions on linux 42 | os: [ubuntu-latest] 43 | python-version: ["3.11", "3.12", "3.13"] 44 | include: 45 | - os: macos-latest 46 | python-version: "3.13" 47 | - os: windows-latest 48 | python-version: "3.13" 49 | 50 | steps: 51 | - name: Cache brainglobe directory 52 | uses: actions/cache@v3 53 | with: 54 | path: | # ensure we don't cache any interrupted atlas download and extraction, if e.g. 
we cancel the workflow manually 55 | ~/.brainglobe 56 | !~/.brainglobe/atlas.tar.gz 57 | key: brainglobe 58 | 59 | - name: Install hdf5 libs for Mac 60 | if: runner.os == 'macOS' 61 | run: brew install hdf5 62 | 63 | # Helps set up VTK with a headless display 64 | - uses: pyvista/setup-headless-display-action@v3 65 | 66 | # Sets up ffmpeg to we can run video tests on CI 67 | - uses: FedericoCarboni/setup-ffmpeg@v2 68 | if: matrix.os != 'macos-latest' 69 | id: setup-ffmpeg 70 | 71 | - name: setup ffmpeg on latest Mac with brew 72 | if: matrix.os == 'macos-latest' 73 | run: brew install ffmpeg 74 | 75 | # Run tests 76 | - uses: neuroinformatics-unit/actions/test@v2 77 | with: 78 | python-version: ${{ matrix.python-version }} 79 | secret-codecov-token: ${{ secrets.CODECOV_TOKEN }} 80 | 81 | 82 | - name: Notify slack on scheduled failure 83 | if: failure() && github.event_name == 'schedule' 84 | uses: ravsamhq/notify-slack-action@v2 85 | with: 86 | status: ${{ job.status }} # required 87 | notify_when: 'failure' 88 | env: 89 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_NOTIFYBOT_WEBHOOK_URL }} # required 90 | 91 | 92 | build_sdist_wheels: 93 | name: Build source distribution 94 | needs: [test] 95 | if: github.event_name == 'push' && github.ref_type == 'tag' 96 | runs-on: ubuntu-latest 97 | steps: 98 | - uses: neuroinformatics-unit/actions/build_sdist_wheels@v2 99 | 100 | 101 | upload_all: 102 | name: Publish build distributions 103 | needs: [build_sdist_wheels] 104 | if: github.event_name == 'push' && github.ref_type == 'tag' 105 | runs-on: ubuntu-latest 106 | steps: 107 | - uses: actions/download-artifact@v4 108 | with: 109 | name: artifact 110 | path: dist 111 | - uses: pypa/gh-action-pypi-publish@release/v1 112 | with: 113 | user: __token__ 114 | password: ${{ secrets.TWINE_API_KEY }} 115 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # 
Ignored file types 2 | .DS_Store 3 | examples/*.mp4 4 | tests/*.mp4 5 | 6 | .pytest_cache 7 | __pycache__ 8 | *.pyc 9 | .ipynb_checkpoints 10 | .pytest_cache/* 11 | .pytest_cache/ 12 | __pycache__/** 13 | __pycache__/ 14 | **/__pycache__ 15 | brainrender/atlas_specific/gene_expression/__pycache__ 16 | brainrender/atlas_specific/gene_expression/__pycache__/* 17 | brainrender/atlas_specific/__pycache__ 18 | 19 | example_brainrender_shot* 20 | brain_regions.html 21 | workspace.py 22 | workspace.ipynb 23 | brexport.html 24 | *.ckpt 25 | *.vscode 26 | *.json 27 | *.xml 28 | *.idea 29 | *.pyc 30 | *.vtk 31 | *.nrrd 32 | *.tdms 33 | *.mp4 34 | 35 | 36 | screenshots/* 37 | atlas_images/* 38 | 39 | # Output folder where screnshots etx. are saved 40 | Output/Data/* 41 | Output/Scenes/* 42 | Output/Screenshots/* 43 | Output/Videos/* 44 | Data/ABA/* 45 | 46 | # User folder where they can save stuff without it being tracked by git 47 | User/* 48 | 49 | # This folder is sometimes created when the incorrect paths are passed 50 | mouse_connectivity 51 | 52 | # Visual studio stuff 53 | */tempCodeRunnerFile.py 54 | */.ipynb_checkpoints 55 | .ipynb_checkpoints 56 | */__pycache__ 57 | __pycache__ 58 | .DS_Store 59 | Users/ 60 | 61 | # Test scripts and hidden files 62 | workspace.py 63 | workspace.ipynb 64 | playground.py 65 | secrets 66 | 67 | # Custom config files 68 | *.conf.custom 69 | 70 | # Byte-compiled / optimized / DLL files 71 | __pycache__/ 72 | *.py[cod] 73 | *$py.class 74 | 75 | # C extensions 76 | *.so 77 | 78 | # Distribution / packaging 79 | .Python 80 | build/ 81 | develop-eggs/ 82 | dist/ 83 | downloads/ 84 | eggs/ 85 | .eggs/ 86 | lib/ 87 | lib64/ 88 | parts/ 89 | sdist/ 90 | var/ 91 | wheels/ 92 | *.egg-info/ 93 | .installed.cfg 94 | *.egg 95 | MANIFEST 96 | 97 | # PyInstaller 98 | # Usually these files are written by a python script from a template 99 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
100 | *.manifest 101 | *.spec 102 | 103 | # Installer logs 104 | pip-log.txt 105 | pip-delete-this-directory.txt 106 | 107 | # Unit test / coverage reports 108 | htmlcov/ 109 | .tox/ 110 | .coverage 111 | .coverage.* 112 | .cache 113 | nosetests.xml 114 | coverage.xml 115 | *.cover 116 | .hypothesis/ 117 | .pytest_cache/ 118 | 119 | # Translations 120 | *.mo 121 | *.pot 122 | 123 | # Django stuff: 124 | *.log 125 | local_settings.py 126 | db.sqlite3 127 | 128 | # Flask stuff: 129 | instance/ 130 | .webassets-cache 131 | 132 | # Scrapy stuff: 133 | .scrapy 134 | 135 | # Sphinx documentation 136 | doc/build/ 137 | 138 | # pydocmd 139 | _build/ 140 | mkdocs.yml 141 | 142 | # PyBuilder 143 | target/ 144 | 145 | # pyenv 146 | .python-version 147 | 148 | # celery beat schedule file 149 | celerybeat-schedule 150 | 151 | # SageMath parsed files 152 | *.sage.py 153 | 154 | # Environments 155 | .env 156 | .venv 157 | env/ 158 | venv/ 159 | ENV/ 160 | env.bak/ 161 | venv.bak/ 162 | 163 | # Spyder project settings 164 | .spyderproject 165 | .spyproject 166 | 167 | # Rope project settings 168 | .ropeproject 169 | 170 | # mkdocs documentation 171 | /site 172 | 173 | # mypy 174 | .mypy_cache/ 175 | 176 | .idea/ 177 | 178 | *.~lock.* 179 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # Configuring https://pre-commit.ci/ bot 2 | ci: 3 | autoupdate_schedule: monthly 4 | 5 | repos: 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v5.0.0 8 | hooks: 9 | - id: check-docstring-first 10 | - id: check-executables-have-shebangs 11 | - id: check-merge-conflict 12 | - id: check-toml 13 | - id: end-of-file-fixer 14 | - id: mixed-line-ending 15 | args: [--fix=lf] 16 | - id: requirements-txt-fixer 17 | - id: trailing-whitespace 18 | - repo: https://github.com/astral-sh/ruff-pre-commit 19 | rev: v0.11.12 20 | hooks: 21 | - id: ruff 
22 | - repo: https://github.com/psf/black 23 | rev: 25.1.0 24 | hooks: 25 | - id: black 26 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # This CITATION.cff file was generated with cffinit. 2 | # Visit https://bit.ly/cffinit to generate yours today! 3 | 4 | cff-version: 1.2.0 5 | title: brainrender 6 | message: >- 7 | If you use this software, please cite it using the 8 | metadata from this file. 9 | type: software 10 | authors: 11 | - given-names: Federico 12 | family-names: Claudi 13 | - given-names: Adam 14 | family-names: Tyson 15 | email: hello@brainglobe.info 16 | - given-names: Luigi 17 | family-names: Petrucco 18 | - given-names: BrainGlobe 19 | family-names: Developers 20 | email: hello@brainglobe.info 21 | repository-code: 'https://github.com/brainglobe/brainrender' 22 | url: 'https://brainglobe.info' 23 | abstract: >- 24 | Visualisation and exploration of brain atlases and other 25 | anatomical data. 26 | license: BSD-3-Clause 27 | date-released: '2024-01-25' 28 | year: 2024 29 | preferred-citation: 30 | type: article 31 | title: "Visualizing anatomically registered data with brainrender" 32 | authors: 33 | - given-names: Federico 34 | family-names: Claudi 35 | - given-names: Adam 36 | family-names: Tyson 37 | - given-names: Luigi 38 | family-names: Petrucco 39 | - given-names: Troy W. 
40 | family-names: Margrie 41 | - given-names: Ruben 42 | family-names: Portugues 43 | - given-names: Tiago 44 | family-names: Branco 45 | year: 2021 46 | month: 3 47 | doi: 10.7554/eLife.65751 48 | url: https://doi.org/10.7554/eLife.65751 49 | journal: eLife 50 | volume: 10 51 | issn: "2050-084X" 52 | pages: e65761 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020, University College London 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | exclude .pre-commit-config.yaml 2 | include CITATION.cff 3 | 4 | global-include *.svg 5 | 6 | prune tests 7 | prune examples 8 | prune imgs 9 | prune videos 10 | prune resources 11 | 12 | graft brainrender 13 | 14 | global-exclude */__pycache__/* 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # brainrender 2 | 3 | *A user-friendly python library to create high-quality, 3D neuro-anatomical renderings combining data from publicly available brain atlases with user-generated experimental data.* 4 | 5 | [![Python Version](https://img.shields.io/pypi/pyversions/brainrender.svg)](https://pypi.org/project/brainrender) 6 | [![PyPI](https://img.shields.io/pypi/v/brainrender.svg)](https://pypi.org/project/brainrender) 7 | [![tests](https://github.com/brainglobe/brainrender/workflows/tests/badge.svg)](https://github.com/brainglobe/brainrender/actions) 8 | [![codecov](https://codecov.io/gh/brainglobe/brainrender/graph/badge.svg)](https://codecov.io/gh/brainglobe/brainrender) 9 | [![Downloads](https://static.pepy.tech/badge/brainrender)](https://pepy.tech/project/brainrender) 10 | 11 |   12 |   13 | 14 | ![Example 
gallery](https://iiif.elifesciences.org/lax/65751%2Felife-65751-fig3-v3.tif/full/,1500/0/default.jpg) 15 | 16 | From: Claudi et al. (2021) Visualizing anatomically registered data with brainrender. eLife 17 | 18 | 19 | ## Documentation 20 | 21 | brainrender is a project of the BrainGlobe Initiative, which is a collaborative effort to develop a suite of Python-based software tools for computational neuroanatomy. A comprehensive online documentation for brainrender can be found on the BrainGlobe website [here](https://brainglobe.info/documentation/brainrender/index.html). 22 | 23 | Furthermore, an open-access journal article describing BrainRender has been published in eLife, available [here](https://doi.org/10.7554/eLife.65751). 24 | 25 | 26 | ## Installation 27 | 28 | From PyPI: 29 | 30 | ``` 31 | pip install brainrender 32 | ``` 33 | 34 | ## Quickstart 35 | 36 | ``` python 37 | import random 38 | 39 | import numpy as np 40 | 41 | from brainrender import Scene 42 | from brainrender.actors import Points 43 | 44 | def get_n_random_points_in_region(region, N): 45 | """ 46 | Gets N random points inside (or on the surface) of a mesh 47 | """ 48 | 49 | region_bounds = region.mesh.bounds() 50 | X = np.random.randint(region_bounds[0], region_bounds[1], size=10000) 51 | Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000) 52 | Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000) 53 | pts = [[x, y, z] for x, y, z in zip(X, Y, Z)] 54 | 55 | ipts = region.mesh.inside_points(pts).coordinates 56 | return np.vstack(random.choices(ipts, k=N)) 57 | 58 | 59 | # Display the Allen Brain mouse atlas. 
60 | scene = Scene(atlas_name="allen_mouse_25um", title="Cells in primary visual cortex") 61 | 62 | # Display a brain region 63 | primary_visual = scene.add_brain_region("VISp", alpha=0.2) 64 | 65 | # Get a numpy array with (fake) coordinates of some labelled cells 66 | coordinates = get_n_random_points_in_region(primary_visual, 2000) 67 | 68 | # Create a Points actor 69 | cells = Points(coordinates) 70 | 71 | # Add to scene 72 | scene.add(cells) 73 | 74 | # Add label to the brain region 75 | scene.add_label(primary_visual, "Primary visual cortex") 76 | 77 | # Display the figure. 78 | scene.render() 79 | 80 | ``` 81 | 82 | ## Seeking help or contributing 83 | We are always happy to help users of our tools, and welcome any contributions. If you would like to get in contact with us for any reason, please see the [contact page of our website](https://brainglobe.info/contact.html). 84 | 85 | ## Citing brainrender 86 | 87 | If you use brainrender in your scientific work, please cite: 88 | ``` 89 | Claudi, F., Tyson, A. L., Petrucco, L., Margrie, T.W., Portugues, R., Branco, T. (2021) "Visualizing anatomically registered data with Brainrender" eLife 2021;10:e65751 [doi.org/10.7554/eLife.65751](https://doi.org/10.7554/eLife.65751) 90 | ``` 91 | 92 | BibTeX: 93 | 94 | ``` bibtex 95 | @article{Claudi2021, 96 | author = {Claudi, Federico and Tyson, Adam L. and Petrucco, Luigi and Margrie, Troy W. 
and Portugues, Ruben and Branco, Tiago}, 97 | doi = {10.7554/eLife.65751}, 98 | issn = {2050084X}, 99 | journal = {eLife}, 100 | pages = {1--16}, 101 | pmid = {33739286}, 102 | title = {{Visualizing anatomically registered data with brainrender}}, 103 | volume = {10}, 104 | year = {2021} 105 | } 106 | 107 | ``` 108 | -------------------------------------------------------------------------------- /brainrender/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | from loguru import logger 3 | from pathlib import Path 4 | from rich.logging import RichHandler 5 | from importlib.metadata import PackageNotFoundError, version 6 | from brainrender import settings 7 | 8 | try: 9 | from pyinspect import install_traceback 10 | 11 | install_traceback(hide_locals=not settings.DEBUG) 12 | except ImportError: 13 | pass # fails in notebooks 14 | 15 | from brainrender.scene import Scene 16 | import brainrender.actors 17 | from brainrender.video import VideoMaker, Animation 18 | from brainrender.atlas import Atlas 19 | 20 | 21 | try: 22 | __version__ = version("brainrender") 23 | except PackageNotFoundError: 24 | # package is not installed 25 | pass 26 | 27 | base_dir = Path.home() / ".brainglobe" / "brainrender" 28 | base_dir.mkdir(parents=True, exist_ok=True) 29 | 30 | 31 | # set logger level 32 | def set_logging(level="INFO", path=None): 33 | """ 34 | Sets loguru to save all logs to a file i 35 | brainrender's base directory and to print 36 | to stdout only logs >= to a given level 37 | """ 38 | logger.remove() 39 | 40 | path = path or str(base_dir / "log.log") 41 | if Path(path).exists(): 42 | Path(path).unlink() 43 | logger.add(path, level="DEBUG") 44 | 45 | if level == "DEBUG": 46 | logger.configure( 47 | handlers=[ 48 | { 49 | "sink": RichHandler(level="WARNING", markup=True), 50 | "format": "{message}", 51 | } 52 | ] 53 | ) 54 | 55 | 56 | if not settings.DEBUG: 57 | set_logging() 58 | else: 59 | 
set_logging(level="DEBUG") 60 | -------------------------------------------------------------------------------- /brainrender/_colors.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import matplotlib as mpl 4 | import numpy as np 5 | from vedo.colors import colors as vcolors 6 | from vedo.colors import get_color as getColor 7 | 8 | 9 | def map_color(value, name="jet", vmin=None, vmax=None): 10 | """Map a real value in range [vmin, vmax] to a (r,g,b) color scale. 11 | 12 | :param value: scalar value to transform into a color 13 | :type value: float, list 14 | :param name: color map name (Default value = "jet") 15 | :type name: str, matplotlib.colors.LinearSegmentedColorMap 16 | :param vmin: (Default value = None) 17 | :param vmax: (Default value = None) 18 | :returns: return: (r,g,b) color, or a list of (r,g,b) colors. 19 | """ 20 | if vmax < vmin: 21 | raise ValueError("vmax should be larger than vmin") 22 | 23 | mp = mpl.colormaps.get_cmap(name) 24 | 25 | value -= vmin 26 | value /= vmax - vmin 27 | if value > 0.999: 28 | value = 0.999 29 | elif value < 0: 30 | value = 0 31 | return mp(value)[0:3] 32 | 33 | 34 | def make_palette(N, *colors): 35 | """Generate N colors starting from `color1` to `color2` 36 | by linear interpolation HSV in or RGB spaces. 37 | Adapted from vedo make_palette function 38 | 39 | :param int: N: number of output colors. 40 | :param colors: input colors, any number of colors with 0 < ncolors <= N is okay. 
41 | """ 42 | N = int(N) 43 | 44 | N_input_colors = len(colors) 45 | if not N_input_colors: 46 | raise ValueError("No colors where passed to make_palette") 47 | if N_input_colors > N: 48 | raise ValueError( 49 | "More input colors than out colors (N) where passed to make_palette" 50 | ) 51 | 52 | if N_input_colors == N: 53 | return colors 54 | else: 55 | # Get how many colors for each pair of colors we are interpolating over 56 | fractions = [ 57 | N // N_input_colors + (1 if x < N % N_input_colors else 0) 58 | for x in range(N_input_colors) 59 | ] 60 | 61 | # Get pairs of colors 62 | cs = [np.array(getColor(col)) for col in colors] 63 | cs += [cs[-1]] 64 | 65 | output = [] 66 | for n, (c1, c2) in enumerate(zip(cs, cs[1:])): 67 | cols = [] 68 | for f in np.linspace(0, 1, fractions[n], endpoint=True): 69 | c = c1 * (1 - f) + c2 * f 70 | cols.append(c) 71 | output.extend(cols) 72 | return output 73 | 74 | 75 | def get_random_colors(n_colors=1): 76 | """ 77 | :param n_colors: (Default value = 1) 78 | """ 79 | col_names = list(vcolors.keys()) 80 | if n_colors == 1: 81 | return random.choice(col_names) 82 | else: 83 | return list(random.choices(col_names, k=n_colors)) 84 | -------------------------------------------------------------------------------- /brainrender/_io.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import requests 4 | from vedo import load 5 | 6 | 7 | def connected_to_internet(url="http://www.google.com/", timeout=5): 8 | """ 9 | Check that there is an internet connection 10 | 11 | :param url: url to use for testing (Default value = 'http://www.google.com/') 12 | :param timeout: timeout to wait for [in seconds] (Default value = 5) 13 | """ 14 | 15 | try: 16 | _ = requests.get(url, timeout=timeout) 17 | return True 18 | except requests.ConnectionError: # pragma: no cover 19 | print("No internet connection available.") # pragma: no cover 20 | return False 21 | 22 | 23 | def 
fail_on_no_connection(func): 24 | """ 25 | Decorator that throws an error if no internet connection is available 26 | """ 27 | if not connected_to_internet(): # pragma: no cover 28 | raise ConnectionError( 29 | "No internet connection found." 30 | ) # pragma: no cover 31 | 32 | def inner(*args, **kwargs): 33 | return func(*args, **kwargs) 34 | 35 | return inner 36 | 37 | 38 | def request(url): 39 | """ 40 | Sends a request to a url 41 | 42 | :param url: 43 | 44 | """ 45 | if not connected_to_internet(): # pragma: no cover 46 | raise ConnectionError( 47 | "No internet connection found." 48 | ) # pragma: no cover 49 | 50 | response = requests.get(url) 51 | if response.ok: 52 | return response 53 | else: # pragma: no cover 54 | exception_string = "URL request failed: {}".format( 55 | response.reason 56 | ) # pragma: no cover 57 | raise ValueError(exception_string) 58 | 59 | 60 | def check_file_exists(func): # pragma: no cover 61 | """ 62 | Decorator that throws an error if a function;s first argument 63 | is not a path to an existing file. 64 | """ 65 | 66 | def inner(*args, **kwargs): 67 | if not Path(args[0]).exists(): 68 | raise FileNotFoundError( 69 | f"File {args[0]} not found" 70 | ) # pragma: no cover 71 | return func(*args, **kwargs) 72 | 73 | return inner 74 | 75 | 76 | @check_file_exists 77 | def load_mesh_from_file(filepath, color=None, alpha=None): 78 | """ 79 | Load a a mesh or volume from files like .obj, .stl, ... 
80 | 81 | :param filepath: path to file 82 | :param **kwargs: 83 | 84 | """ 85 | actor = load(str(filepath)) 86 | actor.c(color).alpha(alpha) 87 | return actor 88 | -------------------------------------------------------------------------------- /brainrender/_jupyter.py: -------------------------------------------------------------------------------- 1 | from functools import partial, update_wrapper 2 | 3 | import vedo 4 | from myterial import orange_dark, salmon 5 | from rich import print 6 | from rich.syntax import Syntax 7 | 8 | from brainrender import settings 9 | 10 | 11 | class JupyterMixIn: # pragma: no cover 12 | def __init__(self): # pragma: no cover 13 | # keep track if we are in a jupyter notebook 14 | if vedo.settings.default_backend == "k3d": 15 | self.backend = "k3d" 16 | elif vedo.settings.default_backend == "itkwidgets": 17 | self.backend = "itkwidgets" 18 | else: 19 | self.backend = False 20 | 21 | # Can't use cartoon shader in a notebook 22 | if self.backend == "k3d" or self.backend == "itkwidgets": 23 | if settings.SHADER_STYLE == "cartoon": 24 | settings.SHADER_STYLE = "plastic" 25 | 26 | 27 | class not_on_jupyter: # pragma: no cover 28 | def __init__(self, func): # pragma: no cover 29 | """ 30 | A decorator to block some methods from 31 | running in jupyter notebooks 32 | """ 33 | update_wrapper(self, func) 34 | self.func = func 35 | 36 | def __get__(self, obj, objtype): # pragma: no cover 37 | """Support instance methods.""" 38 | return partial(self.__call__, obj) 39 | 40 | def __call__(self, obj, *args, **kwargs): # pragma: no cover 41 | backend = JupyterMixIn().backend 42 | if not backend: 43 | return self.func(obj, *args, **kwargs) 44 | else: 45 | print( 46 | f"[{orange_dark}]Cannot run function [bold {salmon}]{self.func.__name__}[/ bold {salmon}] in a jupyter notebook", 47 | f"[{orange_dark}]Try setting the correct backend before creating your scene:\n", 48 | Syntax("from vedo import embedWindow", lexer="python"), 49 | 
Syntax("embedWindow(None)", lexer="python"), 50 | ) 51 | return None 52 | -------------------------------------------------------------------------------- /brainrender/_utils.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | 4 | def listdir(fld): 5 | """ 6 | List the files into a folder with the complete file path instead of the relative file path like os.listdir. 7 | 8 | :param fld: string, folder path 9 | 10 | """ 11 | return [str(f) for f in Path(fld).glob("**/*") if f.is_file()] 12 | 13 | 14 | def get_subdirs(folderpath): 15 | """ 16 | Returns the subfolders in a given folder 17 | """ 18 | return [str(f) for f in Path(folderpath).glob("**/*") if f.is_dir()] 19 | 20 | 21 | def listify(obj): 22 | """ 23 | Makes sure that the obj is a list 24 | """ 25 | if isinstance(obj, list): 26 | return obj 27 | elif isinstance(obj, tuple): 28 | return list(obj) 29 | else: 30 | return [obj] 31 | 32 | 33 | def return_list_smart(lst): 34 | """ 35 | If the list has length > 1 returns the list 36 | if it has length == 1 it returns the element 37 | if it has length == 0 it returns None 38 | """ 39 | if len(lst) > 1: 40 | return lst 41 | elif len(lst) == 1: 42 | return lst[0] 43 | else: 44 | return None 45 | -------------------------------------------------------------------------------- /brainrender/_video.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from loguru import logger 4 | from myterial import amber_light 5 | from rich import print 6 | from vedo import Video as VtkVideo 7 | 8 | 9 | class Video(VtkVideo): 10 | # Redefine vedo.Video close method 11 | def __init__(self, *args, fmt="mp4", size="1620x1050", **kwargs): 12 | """ 13 | Video class, takes care of storing screenshots (frames) 14 | as images in a temporary folder and then merging these into a 15 | single video file when the video is closed. 
16 | """ 17 | super().__init__(*args, **kwargs) 18 | self.format = fmt 19 | self.size = size 20 | 21 | def close(self): 22 | """Render the video and write to file.""" 23 | print(f"[{amber_light}]Saving video") 24 | logger.debug(f"[{amber_light}]Saving video") 25 | 26 | fld = os.path.join(self.tmp_dir.name, "%d.png") 27 | fps = int(self.fps) 28 | name = f"{self.name}.{self.format}" 29 | fmt = "-vcodec libx264 -crf 28 -pix_fmt yuv420p" 30 | if self.size: 31 | fmt += f" -s {self.size}" 32 | 33 | command = f"ffmpeg -hide_banner -loglevel panic -y -r {fps} -start_number 0 -i {fld} {fmt} {name}" 34 | out = os.system(command) 35 | self.tmp_dir.cleanup() 36 | return out, command 37 | -------------------------------------------------------------------------------- /brainrender/actor.py: -------------------------------------------------------------------------------- 1 | from io import StringIO 2 | from typing import Optional 3 | 4 | import numpy as np 5 | import numpy.typing as npt 6 | import pyinspect as pi 7 | from brainglobe_atlasapi import BrainGlobeAtlas 8 | from brainglobe_space import AnatomicalSpace 9 | from myterial import amber, orange, salmon 10 | from rich.console import Console 11 | from vedo import Sphere, Text3D 12 | 13 | from brainrender._utils import listify 14 | 15 | # transform matrix to fix labels orientation 16 | label_mtx = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]] 17 | 18 | 19 | def make_actor_label( 20 | atlas, 21 | actors, 22 | labels, 23 | size=300, 24 | color=None, 25 | radius=100, 26 | xoffset=0, 27 | yoffset=-500, 28 | zoffset=0, 29 | ): 30 | """ 31 | Adds a 2D text anchored to a point on the actor's mesh 32 | to label what the actor is 33 | 34 | :param kwargs: keyword arguments can be passed to determine 35 | text appearance and location: 36 | - size: int, text size. Default 300 37 | - color: str, text color. A list of colors can be passed 38 | if None a gray color is used. Default None. 
39 | - xoffset, yoffset, zoffset: integers that shift the label position 40 | - radius: radius of sphere used to denote label anchor. Set to 0 or None to hide. 41 | """ 42 | offset = [-yoffset, -zoffset, xoffset] 43 | default_offset = np.array([0, -200, 100]) 44 | 45 | new_actors = [] 46 | for _, (actor, label) in enumerate(zip(listify(actors), listify(labels))): 47 | # Get label color 48 | if color is None: 49 | color = [0.2, 0.2, 0.2] 50 | 51 | # Get mesh's highest point 52 | points = actor.mesh.vertices.copy() 53 | point = points[np.argmin(points[:, 1]), :] 54 | point += np.array(offset) + default_offset 55 | point[2] = -point[2] 56 | 57 | try: 58 | if atlas.hemisphere_from_coords(point, as_string=True) == "left": 59 | point = atlas.mirror_point_across_hemispheres(point) 60 | except IndexError: 61 | pass 62 | 63 | # Create label 64 | txt = Text3D( 65 | label, point * np.array([-1, -1, -1]), s=size, c=color, depth=0.1 66 | ) 67 | new_actors.append(txt.rotate_x(180).rotate_y(180)) 68 | 69 | # Mark a point on Mesh that corresponds to the label location 70 | if radius is not None: 71 | pt = actor.closest_point(point) 72 | pt[2] = -pt[2] 73 | sphere = Sphere(pt, r=radius, c=color, res=8) 74 | sphere.ancor = pt 75 | new_actors.append(sphere) 76 | sphere.compute_normals() 77 | 78 | return new_actors 79 | 80 | 81 | class Actor: 82 | _needs_label = False # needs to make a label 83 | _needs_silhouette = False # needs to make a silhouette 84 | _is_transformed = False # has been transformed to correct axes orientation 85 | _is_added = False # has the actor been added to the scene already 86 | 87 | labels = [] 88 | silhouette = None 89 | 90 | def __init__( 91 | self, 92 | mesh, 93 | name=None, 94 | br_class=None, 95 | is_text=False, 96 | color=None, 97 | alpha=None, 98 | ): 99 | """ 100 | Actor class representing anythng shown in a brainrender scene. 101 | Methods in brainrender.actors are used to creates actors specific 102 | for different data types. 
class Actor:
    # Lazy-creation flags, read by the scene/render machinery
    _needs_label = False  # needs to make a label
    _needs_silhouette = False  # needs to make a silhouette
    _is_transformed = False  # has been transformed to correct axes orientation
    _is_added = False  # has the actor been added to the scene already

    # NOTE(review): class-level mutable default; make_label rebinds
    # self.labels so instances don't share it in practice — confirm
    labels = []
    silhouette = None

    def __init__(
        self,
        mesh,
        name=None,
        br_class=None,
        is_text=False,
        color=None,
        alpha=None,
    ):
        """
        Actor class representing anything shown in a brainrender scene.
        Methods in brainrender.actors are used to create actors specific
        for different data types.

        An actor has a mesh, a name and a brainrender class type.
        It also has methods to create a silhouette or a label.

        :param mesh: instance of vedo.Mesh
        :param name: str, actor name
        :param br_class: str, name of brainrender actors class
        :param is_text: bool, is it a 2d text or annotation?
        :param color: str, name or hex code of color to assign to actor's mesh
        :param alpha: float, transparency to assign to actor's mesh
        """
        self.mesh = mesh
        self.name = name or "Actor"
        self.br_class = br_class or "None"
        self.is_text = is_text

        if color:
            self.mesh.c(color)
        if alpha:
            self.mesh.alpha(alpha)

    def __getattr__(self, attr):
        """
        If an unknown attribute is called, try `self.mesh.attr`
        to get the mesh's attribute.

        Only called when normal lookup fails, so this delegates unknown
        names to the wrapped vedo mesh (`._mesh` when present, else `.mesh`).
        """
        # guard against recursion during unpickling/partial construction,
        # when `mesh` may not be in __dict__ yet
        if "mesh" not in self.__dict__.keys():
            raise AttributeError(
                f"Actor does not have attribute {attr}"
            )  # pragma: no cover

        # some attributes should be from .mesh, others from ._mesh
        mesh_attributes = ("center_of_mass",)
        if attr in mesh_attributes:
            if hasattr(self.__dict__["mesh"], attr):
                return getattr(self.__dict__["mesh"], attr)
        else:
            try:
                # `._mesh` is presumably the transformed copy set by the
                # renderer — TODO confirm where it is assigned
                return getattr(self.__dict__["_mesh"], attr)
            except KeyError:
                # no ._mesh, use .mesh
                if hasattr(self.__dict__["mesh"], attr):
                    return getattr(self.__dict__["mesh"], attr)

        raise AttributeError(
            f"Actor does not have attribute {attr}"
        )  # pragma: no cover

    def __repr__(self):  # pragma: no cover
        return f"brainrender.Actor: {self.name}-{self.br_class}"

    def __str__(self):
        # render the rich representation (see __rich_console__) to a string
        buf = StringIO()
        _console = Console(file=buf, force_jupyter=False)
        _console.print(self)

        return buf.getvalue()

    @property
    def center(self):
        """
        Returns the coordinates of the mesh's center
        """
        return self.mesh.center_of_mass()

    @classmethod
    def make_actor(cls, mesh, name, br_class):
        """
        Make an actor from a given mesh

        :param mesh: vedo mesh to wrap
        :param name: str, actor name
        :param br_class: str, brainrender class name
        """
        return cls(mesh, name=name, br_class=br_class)

    def make_label(self, atlas):
        """
        Create a new Actor with a sphere and a text
        labelling this actor.

        Relies on `self._label_str` / `self._label_kwargs` having been
        set beforehand (presumably by the scene's labelling API —
        TODO confirm).
        """
        labels = make_actor_label(
            atlas, self, self._label_str, **self._label_kwargs
        )
        self._needs_label = False

        lbls = [
            Actor.make_actor(label, self.name, "label") for label in labels
        ]
        self.labels = lbls
        return lbls

    def make_silhouette(self):
        """
        Create a new silhouette actor outlining this actor.

        Relies on `self._silhouette_kwargs` ("lw", "color") having been
        set beforehand — TODO confirm by which caller.
        """
        lw = self._silhouette_kwargs["lw"]
        color = self._silhouette_kwargs["color"]
        sil = self._mesh.silhouette().lw(lw).c(color)

        name = f"{self.name} silhouette"
        sil = Actor.make_actor(sil, name, "silhouette")
        sil._is_transformed = True

        self._needs_silhouette = False
        self.silhouette = sil

        return sil

    def mirror(
        self,
        axis: str,
        origin: Optional[npt.NDArray] = None,
        atlas: Optional[BrainGlobeAtlas] = None,
    ):
        """
        Mirror the actor's mesh around the specified axis, using the
        parent_center as the center of the mirroring operation. The axes can
        be specified using an abbreviation, e.g. 'x' for the x-axis, or anatomical
        convention e.g. 'sagittal'. If an atlas is provided, then the anatomical
        space of the atlas is used, otherwise `asr` is assumed.

        :param axis: str, axis around which to mirror the mesh
        :param origin: np.ndarray, center of the mirroring operation
        :param atlas: BrainGlobeAtlas, atlas object to use for anatomical space
        """
        if axis in ["sagittal", "vertical", "frontal"]:
            anatomical_space = atlas.space if atlas else AnatomicalSpace("asr")

            # translate the anatomical plane name into the x/y/z letter
            # that vedo's mirror() expects
            axis_ind = anatomical_space.get_axis_idx(axis)
            axis = "x" if axis_ind == 0 else "y" if axis_ind == 1 else "z"

        self.mesh = self.mesh.mirror(axis, origin)

    def __rich_console__(self, *args):
        """
        Print some useful characteristics to console.
        """
        rep = pi.Report(
            title="[b]brainrender.Actor: ",
            color=salmon,
            accent=orange,
        )

        rep.add(f"[b {orange}]name:[/b {orange}][{amber}] {self.name}")
        rep.add(f"[b {orange}]type:[/b {orange}][{amber}] {self.br_class}")
        rep.line()
        rep.add(
            f"[{orange}]center of mass:[/{orange}][{amber}] {self.mesh.center_of_mass().astype(np.int32)}"
        )
        rep.add(
            f"[{orange}]number of vertices:[/{orange}][{amber}] {self.mesh.npoints}"
        )
        rep.add(
            f"[{orange}]dimensions:[/{orange}][{amber}] {np.array(self.mesh.bounds()).astype(np.int32)}"
        )
        rep.add(f"[{orange}]color:[/{orange}][{amber}] {self.mesh.color()}")

        yield "\n"
        yield rep
class Cylinder(Actor):
    def __init__(self, pos, root, color="powderblue", alpha=1, radius=350):
        """
        Cylinder actor spanning from a given point up to the brain's
        upper surface.

        :param pos: list, np.array of ap, dv, ml coordinates.
            If an actor is passed, gets the center of mass instead
        :param root: brain root Actor or mesh object
        :param color: str, color
        :param alpha: float, transparency
        :param radius: float, cylinder radius
        """

        # Resolve coordinates when a mesh/actor was passed instead of a point
        if isinstance(pos, Mesh):
            anchor = pos.center_of_mass()
        elif isinstance(pos, Actor):
            anchor = pos.center
        else:
            anchor = pos
        logger.debug(f"Creating Cylinder actor at: {anchor}")

        # Top endpoint: same ap/ml, dv just above the root's upper bound
        surface_point = anchor.copy()
        surface_point[1] = root.bounds()[2] - 500

        cylinder_mesh = shapes.Cylinder(
            pos=[surface_point, anchor], c=color, r=radius, alpha=alpha
        )
        Actor.__init__(self, cylinder_mesh, name="Cylinder", br_class="Cylinder")
"white", "#ffffff", or (255, 255, 255) 15 | :param alpha: float in range 0.0 to 1.0 16 | :param linewidth: float 17 | :param name: str 18 | """ 19 | 20 | # Create mesh and Actor 21 | mesh = shapes.Line(p0=coordinates, lw=linewidth, c=color, alpha=alpha) 22 | Actor.__init__(self, mesh, name=name, br_class="Line") 23 | -------------------------------------------------------------------------------- /brainrender/actors/neurons.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from loguru import logger 4 | from morphapi.morphology.morphology import Neuron as MorphoNeuron 5 | from pyinspect.utils import _class_name 6 | from vedo import Mesh 7 | 8 | from brainrender.actor import Actor 9 | 10 | 11 | def make_neurons( 12 | *neurons, alpha=1, color=None, neurite_radius=8, soma_radius=15, name=None 13 | ): 14 | """ 15 | Returns a list of Neurons given a variable number of inputs 16 | :param neurons: any accepted data input for Neuron 17 | :param alpha: float 18 | :param color: str 19 | :param neurite_radius: float, radius of axon/dendrites 20 | :param soma_radius: float, radius of soma 21 | :param name: str, actor name 22 | """ 23 | return [ 24 | Neuron( 25 | n, 26 | alpha=alpha, 27 | color=color, 28 | neurite_radius=neurite_radius, 29 | soma_radius=soma_radius, 30 | name=name, 31 | ) 32 | for n in neurons 33 | ] 34 | 35 | 36 | class Neuron(Actor): 37 | def __init__( 38 | self, 39 | neuron, 40 | color=None, 41 | alpha=1, 42 | neurite_radius=8, 43 | soma_radius=15, 44 | invert_dims=True, 45 | name=None, 46 | ): 47 | """ 48 | Creates an Actor representing a single neuron's morphology 49 | :param neuron: path to .swc file, Mesh, Actor or Neuron from morphapi.morphology 50 | :param alpha: float 51 | :param color: str, 52 | :param neuron_radius: float, radius of axon/dendrites 53 | :param soma_radius: float, radius of soma 54 | :param invert_dims: bool, exchange the first and last dimension coordinates 55 | 
class Neuron(Actor):
    def __init__(
        self,
        neuron,
        color=None,
        alpha=1,
        neurite_radius=8,
        soma_radius=15,
        invert_dims=True,
        name=None,
    ):
        """
        Creates an Actor representing a single neuron's morphology.

        :param neuron: path to .swc file, Mesh, Actor or Neuron from morphapi.morphology
        :param alpha: float, transparency
        :param color: str, mesh color (defaults to "blackboard")
        :param neurite_radius: float, radius of axon/dendrites
        :param soma_radius: float, radius of soma
        :param invert_dims: bool, exchange the first and last dimension coordinates
            when loading from a .swc file. e.g going from (x, y, z) to (z, y, x).
        :param name: str, actor name (falls back to the file name when
            loading from a .swc file, else to the Actor default)
        """
        logger.debug("Creating a Neuron actor")
        if color is None:
            color = "blackboard"
        self.neurite_radius = neurite_radius
        self.soma_radius = soma_radius
        # BUGFIX: previously `self.name` was hard-coded to None, silently
        # discarding the user-supplied `name` argument; keep it so
        # _from_file can still fall back to the file name when unset.
        self.name = name

        if isinstance(neuron, (str, Path)):
            mesh = self._from_file(neuron, invert_dims)
        elif isinstance(neuron, Mesh):
            mesh = neuron
        elif isinstance(neuron, Actor):
            mesh = neuron.mesh
        elif isinstance(neuron, MorphoNeuron):
            mesh = self._from_morphapi_neuron(neuron)
        else:
            raise ValueError(
                f'Argument "neuron" is not in a recognized format: {_class_name(neuron)}'
            )

        Actor.__init__(self, mesh, name=self.name, br_class="Neuron")
        self.mesh.c(color).alpha(alpha)

    def _from_morphapi_neuron(self, neuron: MorphoNeuron):
        """Create a mesh from a morphapi Neuron instance."""
        # Caching is deliberately disabled: cached meshes were being
        # corrupted on second load.
        mesh = neuron.create_mesh(
            neurite_radius=self.neurite_radius,
            soma_radius=self.soma_radius,
            use_cache=False,
        )[1]
        return mesh

    def _from_file(self, neuron, invert_dims):
        """
        Load a neuron's morphology from a .swc file.

        :param neuron: str or Path to the .swc file
        :param invert_dims: bool, forwarded to morphapi's Neuron
        :raises FileExistsError: if the file does not exist
            (NOTE: kept for backwards compatibility, FileNotFoundError
            would be the conventional type)
        :raises NotImplementedError: for non-.swc files
        """
        path = Path(neuron)
        if not path.exists():
            raise FileExistsError(f"Neuron file doesn't exist: {path}")

        if not path.suffix == ".swc":
            raise NotImplementedError(
                "Neuron can load morphology only from brainrender.swc files"
            )

        self.name = self.name or path.name

        return self._from_morphapi_neuron(
            MorphoNeuron(data_file=neuron, invert_dims=invert_dims)
        )
class Point(Actor):
    def __init__(
        self, pos, radius=100, color="blackboard", alpha=1, res=25, name=None
    ):
        """
        Actor representing a single point, rendered as a sphere.

        :param pos: list or np.ndarray with coordinates
        :param radius: float, sphere radius
        :param color: str, sphere color
        :param alpha: float, transparency
        :param res: int, resolution of mesh
        :param name: str, actor name (defaults to "Point")
        """
        logger.debug(f"Creating a point actor at: {pos}")
        sphere = Sphere(pos=pos, r=radius, c=color, alpha=alpha, res=res)
        Actor.__init__(self, sphere, name=name or "Point", br_class="Point")
class Points(PointsBase, Actor):
    def __init__(
        self, data, name=None, colors="salmon", alpha=1, radius=20, res=8
    ):
        """
        Actor representing many points at once (more efficient than
        creating an individual Point per coordinate).

        :param data: np.ndarray, Nx3 array or path to .npy file with coords data
        :param name: str, actor name
        :param colors: str, or list of str with color names or hex codes
        :param alpha: float, transparency
        :param radius: float, sphere radius
        :param res: int. Resolution of sphere actors
        """
        PointsBase.__init__(self)
        logger.debug("Creating a Points actor")

        # store appearance settings; _from_numpy/_from_file read these
        self.name = name
        self.res = res
        self.radius = radius
        self.colors = colors
        self.alpha = alpha

        if isinstance(data, np.ndarray):
            mesh = self._from_numpy(data)
        elif isinstance(data, (str, Path)):
            mesh = self._from_file(data)
        else:  # pragma: no cover
            raise TypeError(  # pragma: no cover
                f"Input data should be either a numpy array or a file path, not: {_class_name(data)}"  # pragma: no cover
            )  # pragma: no cover

        Actor.__init__(self, mesh, name=self.name, br_class="Points")
class PointsDensity(Actor):
    def __init__(
        self,
        data,
        name=None,
        dims=(40, 40, 40),
        radius=None,
        colors="Dark2",
        **kwargs,
    ):
        """
        Creates a Volume actor showing the 3d density of a set
        of points.

        :param data: np.ndarray, Nx3 array with cell coordinates
        :param name: str, actor name
        :param dims: int or 3-tuple, number of voxels in x, y and z of the
            output Volume (from vedo's density())
        :param radius: float, the local neighborhood radius around each
            voxel; density is the number of points within it
        :param colors: str, matplotlib colormap
        :param kwargs: forwarded to vedo's ``Points.density``
        """
        logger.debug("Creating a PointsDensity actor")

        # flip coordinates on XY axis to match brainrender coordinates system.
        # BUGFIX: operate on a copy — previously the caller's array was
        # mutated in place as a side effect of constructing this actor.
        data = data.copy()
        data[:, 2] = -data[:, 2]

        # create volume and then actor
        volume = (
            vPoints(data)
            .density(dims=dims, radius=radius, **kwargs)
            .cmap(colors)
            .alpha([0, 0.9])
            .mode(1)
        )  # returns a vedo Volume

        Actor.__init__(self, volume, name=name, br_class="density")
def ruler(p1, p2, unit_scale=1, units=None, s=50):
    """
    Creates a ruler showing the distance between two points.
    The ruler is composed of a line between the points and
    a text indicating the distance.

    :param p1: list, np.ndarray with coordinates of first point
    :param p2: list, np.ndarray with coordinates of second point
    :param unit_scale: float. To scale the units (e.g. show mm instead of µm)
    :param units: str, name of unit (e.g. 'mm')
    :param s: float size of text
    """
    logger.debug(f"Creating a ruler actor between {p1} and {p2}")

    # BUGFIX: the docstring allows plain lists, but the arithmetic below
    # (e.g. `p2 - p1`) needs arrays; coerce once up front.
    p1 = np.asarray(p1, dtype=float)
    p2 = np.asarray(p2, dtype=float)

    actors = []

    # Make two line segments, leaving a gap in the middle for the label
    midpoint = np.array([(x + y) / 2 for x, y in zip(p1, p2)])
    gap1 = ((midpoint - p1) * 0.8) + p1
    gap2 = ((midpoint - p2) * 0.8) + p2

    actors.append(Line(p1, gap1, lw=200))
    actors.append(Line(gap2, p2, lw=200))

    # Add label
    if units is None:  # pragma: no cover
        units = ""  # pragma: no cover
    dist = mag(p2 - p1) * unit_scale
    label = precision(dist, 3) + " " + units
    lbl = Text3D(label, pos=midpoint, s=s + 100, justify="center")
    lbl.rotate_z(180, around=midpoint)
    actors.append(lbl)

    # Add spheres at each end
    actors.append(Sphere(p1, r=s, c=[0.3, 0.3, 0.3]))
    actors.append(Sphere(p2, r=s, c=[0.3, 0.3, 0.3]))

    act = Actor(merge(*actors), name="Ruler", br_class="Ruler")
    act.c((0.3, 0.3, 0.3)).alpha(1).lw(2)

    return act
def ruler_from_surface(
    p1, root, unit_scale=1, axis=1, units=None, s=50
) -> Actor:
    """
    Creates a ruler between a point and the brain's surface.

    :param p1: list, np.ndarray with coordinates of point
    :param root: mesh or actor with brain's root
    :param axis: int, index of axis along which distance is computed
    :param unit_scale: float. To scale the units (e.g. show mm instead of µm)
    :param units: str, name of unit (e.g. 'mm')
    :param s: float size of text
    """
    logger.debug(f"Creating a ruler actor between {p1} and brain surface")

    # BUGFIX: the docstring allows list input, but tuples have no .copy()
    # and item assignment below requires a mutable sequence; coerce to array.
    p1 = np.asarray(p1, dtype=float)

    # Get point on brain surface
    p2 = p1.copy()
    p2[axis] = 0  # zero the chosen coordinate

    pts = root.mesh.intersect_with_line(p1, p2)
    # NOTE(review): assumes the segment p1->p2 crosses the surface; an
    # IndexError is raised here when there is no intersection — confirm
    # whether a clearer error is wanted upstream.
    surface_point = pts[0]

    return ruler(p1, surface_point, unit_scale=unit_scale, units=units, s=s)
class Streamlines(Actor):
    """
    Streamlines actor class.
    Creates an actor from streamlines data (from a json file parsed with: get_streamlines_data)
    """

    def __init__(
        self,
        data,
        radius=10,
        color="salmon",
        alpha=1,
        show_injection=True,
        name=None,
    ):
        """
        Turns streamlines data to a mesh.

        :param data: pd.DataFrame with streamlines points data, or a
            path to a json file that pandas can parse into one
        :param radius: float. Radius of the Tube mesh used to render streamlines
        :param color: str, name of the color to be used
        :param alpha: float, transparency
        :param name: str, name of the actor.
        :param show_injection: bool. If true spheres mark the injection sites
        :raises TypeError: if data is neither a path nor a DataFrame
        """
        logger.debug("Creating a streamlines actor")
        if isinstance(data, (str, Path)):
            data = pd.read_json(data)
        elif not isinstance(data, pd.DataFrame):
            raise TypeError("Input data should be a dataframe")

        self.radius = radius
        mesh = (
            self._make_mesh(data, show_injection=show_injection)
            .c(color)
            .alpha(alpha)
            .clean()
        )

        name = name or "Streamlines"
        # NOTE(review): "Streamliness" looks like a typo for
        # "Streamlines"; kept unchanged in case other code matches on
        # this exact br_class string.
        Actor.__init__(self, mesh, name=name, br_class="Streamliness")

    def _make_mesh(self, data, show_injection=True):
        """
        Build one merged mesh: a Tube per streamline and, optionally,
        Spheres marking the injection sites.
        """
        lines = []
        # a single-entry "lines" column may be keyed by int 0 or str "0"
        # depending on how the json was parsed
        if len(data["lines"]) == 1:
            try:
                lines_data = data["lines"][0]
            except KeyError:  # pragma: no cover
                lines_data = data["lines"]["0"]  # pragma: no cover
        else:
            lines_data = data["lines"]

        for line in lines_data:
            # each line is a sequence of {x, y, z} point dicts
            points = [[lin["x"], lin["y"], lin["z"]] for lin in line]
            lines.append(
                Tube(
                    points,
                    r=self.radius,
                    res=8,
                )
            )

        if show_injection:
            # injection sites are coordinate dicts; mark each with a
            # sphere 10x the tube radius
            coords = np.vstack(
                [
                    list(point.values())
                    for point in data.injection_sites.iloc[0]
                ]
            )
            lines.append(
                Spheres(
                    coords,
                    r=self.radius * 10,
                    res=8,
                )
            )

        return merge(*lines)
cmap="bwr", 16 | min_quantile=None, 17 | min_value=None, 18 | name=None, 19 | br_class=None, 20 | as_surface=True, 21 | **volume_kwargs, 22 | ): 23 | """ 24 | Takes a 3d numpy array with volumetric data 25 | and returns an Actor with mesh: vedo.Volume.isosurface or a vedo.Volume. 26 | BY default the volume is represented as a surface 27 | 28 | To extract the surface: 29 | The isosurface needs a lower bound threshold, this can be 30 | either a user defined hard value (min_value) or the value 31 | corresponding to some percentile of the grid data. 32 | 33 | :param griddata: np.ndarray, 3d array with grid data. Can also be a vedo Volume 34 | or a file path pointing to a .npy file 35 | :param griddata: np.ndarray, 3d array with grid data 36 | :param voxel_size: int, size of each voxel in microns 37 | :param min_quantile: float, percentile for threshold 38 | :param min_value: float, value for threshold 39 | :param cmap: str, name of colormap to use 40 | :param as_surface, bool. default True. 
If True 41 | a surface mesh is returned instead of the whole volume 42 | :param volume_kwargs: keyword arguments for vedo's Volume class 43 | """ 44 | logger.debug("Creating a Volume actor") 45 | # Create mesh 46 | color = volume_kwargs.pop("c", "viridis") 47 | if isinstance(griddata, np.ndarray): 48 | # create volume from data 49 | mesh = self._from_numpy( 50 | griddata, voxel_size, color, **volume_kwargs 51 | ) 52 | elif isinstance(griddata, (str, Path)): 53 | # create from .npy file 54 | mesh = self._from_file( 55 | griddata, voxel_size, color, **volume_kwargs 56 | ) 57 | else: 58 | mesh = griddata # assume a vedo Volume was passed 59 | 60 | if as_surface: 61 | # Get threshold 62 | if min_quantile is None and min_value is None: 63 | th = 0 64 | elif min_value is not None: 65 | th = min_value 66 | else: 67 | th = np.percentile(griddata.ravel(), min_quantile) 68 | 69 | mesh = mesh.legosurface(vmin=th) 70 | mesh.cmap(cmap) 71 | 72 | Actor.__init__( 73 | self, mesh, name=name or "Volume", br_class=br_class or "Volume" 74 | ) 75 | 76 | def _from_numpy(self, griddata, voxel_size, color, **volume_kwargs): 77 | """ 78 | Creates a vedo.Volume actor from a 3D numpy array with volume data. 79 | """ 80 | vvol = VedoVolume( 81 | griddata, 82 | spacing=[voxel_size, voxel_size, voxel_size], 83 | **volume_kwargs, 84 | ) 85 | vvol.cmap(color) 86 | # The transformation below is ALREADY applied 87 | # to vedo.Volume instances in render.py 88 | # so we should not apply it here. 89 | # Flip volume so that it's oriented as in the atlas 90 | # vvol.permute_axes(2, 1, 0) 91 | # mtx = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]] 92 | # vvol.apply_transform(mtx) 93 | return vvol 94 | 95 | def _from_file(self, filepath, voxel_size, color, **volume_kwargs): 96 | """ 97 | Loads a .npy file and returns a vedo Volume actor. 
98 | """ 99 | filepath = Path(filepath) 100 | if not filepath.exists(): 101 | raise FileExistsError( 102 | f"Loading volume from file, file not found: {filepath}" 103 | ) 104 | if not filepath.suffix == ".npy": 105 | raise ValueError( 106 | "Loading volume from file only accepts .npy files" 107 | ) 108 | 109 | return self._from_numpy( 110 | np.load(str(filepath)), voxel_size, color, **volume_kwargs 111 | ) 112 | -------------------------------------------------------------------------------- /brainrender/atlas.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from brainglobe_atlasapi.bg_atlas import BrainGlobeAtlas 3 | from loguru import logger 4 | from vedo import Plane 5 | 6 | from brainrender import settings 7 | from brainrender._io import load_mesh_from_file 8 | from brainrender._utils import return_list_smart 9 | from brainrender.actor import Actor 10 | 11 | 12 | class Atlas(BrainGlobeAtlas): 13 | def __init__(self, atlas_name=None, check_latest=True): 14 | """ 15 | Brainrender's Atlas class subclasses BrainGlobeAtlas 16 | to add methods to get regions meshes as Actors 17 | and to get a plane at a given point and normal. 
class Atlas(BrainGlobeAtlas):
    def __init__(self, atlas_name=None, check_latest=True):
        """
        Brainrender's Atlas class subclasses BrainGlobeAtlas
        to add methods to get regions meshes as Actors
        and to get a plane at a given point and normal.

        :param atlas_name: str, atlas name from brainglobe's atlas API atlases.
            Defaults to settings.DEFAULT_ATLAS when None.
        :param check_latest: bool, if True checks that the atlas is the latest version
        """
        atlas_name = atlas_name or settings.DEFAULT_ATLAS
        self.atlas_name = atlas_name
        logger.debug(f"Generating ATLAS: {atlas_name}")

        try:
            super().__init__(atlas_name=atlas_name, print_authors=False)
        except TypeError:
            # The latest version of BGatlas has no print_authors argument
            super().__init__(atlas_name=atlas_name, check_latest=check_latest)

    @property
    def zoom(self):
        """
        Returns the best camera zoom given the atlas resolution
        """
        res = np.max(self.metadata["resolution"])

        if self.atlas_name == "allen_human_500um":
            logger.debug(
                "ATLAS: setting zoom manually for human atlas, atlas needs fixing"
            )
            return 350
        else:
            return 40 / res

    def _get_region_color(self, region):
        """
        Gets the rgb color of a region in the atlas,
        converted from the atlas' 0-255 triplet to 0-1 floats.
        """
        return [
            x / 255 for x in self._get_from_structure(region, "rgb_triplet")
        ]

    def get_region(self, *regions, alpha=1, color=None):
        """
        Get brain regions meshes as Actors.

        Regions not found in the atlas (or missing a mesh file) are
        skipped with a printed warning rather than raising.

        :param regions: str with names of brain regions in the atlas
        :param alpha: float, transparency
        :param color: str; when None, each region gets its atlas color
        :return: a single Actor, a list of Actors, or None for no input
        """
        if not regions:
            return None

        # remember the user's choice so per-region atlas colors don't leak
        # from one region to the next
        _color = color
        actors = []
        for region in regions:
            if (
                region not in self.lookup_df.acronym.values
                and region not in self.lookup_df["id"].values
            ):
                print(
                    f"The region {region} doesn't seem to belong to the atlas being used: {self.atlas_name}. Skipping"
                )
                continue

            # Get mesh
            obj_file = str(self.meshfile_from_structure(region))
            try:
                mesh = load_mesh_from_file(obj_file, color=color, alpha=alpha)
            except FileNotFoundError:
                print(
                    f"The region {region} is in the onthology but does not have a corresponding volume in the atlas being used: {self.atlas_name}. Skipping"
                )
                continue

            # Get color
            color = color or self._get_region_color(region)

            # Make actor
            actor = Actor(mesh, name=region, br_class="brain region")
            actor.c(color).alpha(alpha)
            actors.append(actor)

            # reset color to input
            color = _color

        return return_list_smart(actors)

    def get_plane(
        self,
        pos=None,
        norm=None,
        plane=None,
        sx=None,
        sy=None,
        color="lightgray",
        alpha=0.25,
        **kwargs,
    ):
        """
        Returns a plane going through a point at pos, oriented
        orthogonally to the vector norm and of width and height
        sx, sy.

        :param pos: 3-tuple or list with x,y,z, coords of point the plane goes through.
            Defaults to the root's center of mass.
        :param norm: 3-tuple with plane's normal vector (optional)
        :param sx, sy: int, width and height of the plane.
            Default to the root's extent along the plane's axes.
        :param plane: "sagittal", "horizontal", or "frontal"
        :param color, alpha: plane color and transparency
        """
        # for each named plane, the two root-bounds axes used to size it
        axes_pairs = dict(sagittal=(0, 1), horizontal=(2, 0), frontal=(2, 1))

        if pos is None:
            pos = self.root._mesh.center_of_mass()

        try:
            norm = norm or self.space.plane_normals[plane]
        except KeyError:  # pragma: no cover
            raise ValueError(  # pragma: no cover
                f"Could not find normals for plane {plane}. Atlas space provides these normals: {self.space.plane_normals}"  # pragma: no cover
            )

        # Get plane width and height
        idx_pair = (
            axes_pairs[plane]
            if plane is not None
            else axes_pairs["horizontal"]
        )

        bounds = self.root.bounds()
        root_bounds = [
            [bounds[0], bounds[1]],
            [bounds[2], bounds[3]],
            [bounds[4], bounds[5]],
        ]

        wh = [float(np.diff(root_bounds[i])) for i in idx_pair]
        if sx is None:
            sx = wh[0]
        if sy is None:
            sy = wh[1]

        # return plane
        return Actor(
            Plane(pos=pos, normal=norm, s=(sx, sy), c=color, alpha=alpha),
            name=f"Plane at {pos} norm: {norm}",
            br_class="plane",
        )
from brainrender.actors import Volume
from brainrender.atlas_specific.allen_brain_atlas.gene_expression.ge_utils import (
    check_gene_cached,
    download_and_cache,
    load_cached_gene,
)


class GeneExpressionAPI:
    """
    Interface to the Allen Institute 3-D gene expression (ISH) grid data.

    Downloads gene metadata and per-experiment expression grids from the
    Allen API, caches them on disk and converts them to renderable volumes.
    """

    # Geometry of the Allen 3-D expression grid.
    voxel_size = 200  # um
    grid_size = [58, 41, 67]  # number of voxels along each direction

    # RMA query listing every gene available in the dataset.
    all_genes_url = (
        "http://api.brain-map.org/api/v2/data/query.json?criteria="
        + "model::Gene,"
        + "rma::criteria,products[abbreviation$eq'DevMouse'],"
        + "rma::options,[tabular$eq'genes.id','genes.acronym+as+gene_symbol','genes.name+as+gene_name',"
        + "'genes.entrez_id+as+entrez_gene_id','genes.homologene_id+as+homologene_group_id'],"
        + "[order$eq'genes.acronym']"
        + "&num_rows=all&start_row=0"
    )

    # RMA query listing the ISH experiments for one gene
    # ('-GENE_SYMBOL-' is substituted with the actual symbol).
    gene_experiments_url = (
        "http://api.brain-map.org/api/v2/data/query.json?criteria=model::SectionDataSet,"
        + "rma::criteria,[failed$eq'false'],products[abbreviation$eq'Mouse'],genes[acronym$eq'-GENE_SYMBOL-']"
    )

    download_url = "http://api.brain-map.org/grid_data/download/EXP_ID?include=energy,intensity,density"

    gene_expression_cache = base_dir / "GeneExpressionCache"
    gene_name = None  # symbol of the first gene queried; used to label volumes

    def __init__(self):
        # Gene metadata is downloaded lazily with self.get_all_genes
        self.genes = None
        self.gene_expression_cache.mkdir(exist_ok=True)

    @fail_on_no_connection
    def get_all_genes(self):
        """
        Download metadata about all the genes available in the Allen gene expression dataset

        :return: pd.DataFrame with one row per gene
        """
        res = request(self.all_genes_url)
        return pd.DataFrame(res.json()["msg"])

    def get_gene_id_by_name(self, gene_name):
        """
        Look up the numerical gene id for a gene symbol.

        :param gene_name: str, gene symbol (e.g. "Gpr161")
        :return: int gene id, or None if the symbol is not in the dataset
        """
        self.gene_name = self.gene_name or gene_name
        if self.genes is None:
            self.genes = self.get_all_genes()

        if gene_name not in self.genes.gene_symbol.values:
            # Typo fix in user-facing message: "for you gene" -> "for your gene"
            print(
                f"Gene name {gene_name} doesn't appear in the genes dataset, nothing to return\n"
                + "You can search for your gene here: https://mouse.brain-map.org/"
            )
            return None
        else:
            return int(
                self.genes.loc[self.genes.gene_symbol == gene_name].id.values[
                    0
                ]
            )

    def get_gene_symbol_by_id(self, gene_id):
        """
        Inverse lookup: gene symbol for a numerical gene id.
        Ids are compared as strings, matching the API metadata format.

        :param gene_id: int or str, gene id
        :return: str gene symbol
        """
        if self.genes is None:
            self.genes = self.get_all_genes()

        return self.genes.loc[
            self.genes.id == str(gene_id)
        ].gene_symbol.values[0]

    @fail_on_no_connection
    def get_gene_experiments(self, gene):
        """
        Given a gene_symbol it returns the list of ISH
        experiments for this gene

        :param gene: str, gene symbol
        :return: list of experiment ids, or None if none were found
        """
        url = self.gene_experiments_url.replace("-GENE_SYMBOL-", gene)
        max_retries = 8
        delay = 4
        data = None

        for _ in range(max_retries):
            try:
                data = request(url).json()["msg"]
                break
            except requests.exceptions.JSONDecodeError:
                print(f"Unable to connect to Allen API, retrying in {delay}")
                sleep(delay)
                delay *= 2

        # BUG FIX: previously `if not len(data)` raised TypeError when every
        # retry failed and `data` was still None; `not data` covers both the
        # None and the empty-list case.
        if not data:
            print(f"No experiment found for gene {gene}")
            return None
        else:
            return [d["id"] for d in data]

    @fail_on_no_connection
    def download_gene_data(self, gene):
        """
        Downloads a gene's data from the Allen Institute
        Gene Expression dataset and saves to cache.
        See: http://help.brain-map.org/display/api/Downloading+3-D+Expression+Grid+Data

        :param gene: str, gene symbol for the gene being downloaded
            (doc fix: the previous docstring claimed an int gene_id, but the
            value is substituted into a symbol-based experiment query).
        """
        # Get the gene's experiment id
        exp_ids = self.get_gene_experiments(gene)

        if exp_ids is None:
            return

        # download experiment data
        for eid in exp_ids:
            print(f"Downloading data for {gene} - experiment: {eid}")
            url = self.download_url.replace("EXP_ID", str(eid))
            download_and_cache(
                url, os.path.join(self.gene_expression_cache, f"{gene}-{eid}")
            )

    def get_gene_data(self, gene, exp_id, use_cache=True, metric="energy"):
        """
        Return the volumetric expression grid for one gene/experiment,
        downloading and caching it first if needed.

        :param gene: str, gene symbol
        :param exp_id: int, experiment id
        :param use_cache: bool, if True previously cached data are reused
        :param metric: str, one of "energy", "intensity", "density"
        """
        logger.debug(f"Getting gene data for gene: {gene} experiment {exp_id}")
        self.gene_name = self.gene_name or gene

        # Check if gene-experiment cached
        if use_cache:
            cache = check_gene_cached(self.gene_expression_cache, gene, exp_id)
        else:
            cache = False

        if not cache:  # then download it
            self.download_gene_data(gene)
            cache = check_gene_cached(self.gene_expression_cache, gene, exp_id)
            if not cache:
                raise ValueError(  # pragma: no cover
                    "Something went wrong and data were not cached"
                )

        # Load from cache
        data = load_cached_gene(cache, metric, self.grid_size)

        # NOTE(review): ge_utils.read_raw already transposes on macOS, so this
        # second transpose undoes it there — confirm which one is intended.
        if sys.platform == "darwin":
            data = data.T

        return data

    def griddata_to_volume(
        self,
        griddata,
        min_quantile=None,
        min_value=None,
        cmap="bwr",
    ):
        """
        Takes a 3d numpy array with volumetric gene expression
        and returns a vedo.Volume.isosurface actor.
        The isosurface needs a lower bound threshold, this can be
        either a user defined hard value (min_value) or the value
        corresponding to some percentile of the gene expression data.

        :param griddata: np.ndarray, 3d array with gene expression data
        :param min_quantile: float, percentile for threshold
        :param min_value: float, value for threshold
        :param cmap: str, name of the colormap applied to the volume
        """
        return Volume(
            griddata,
            min_quantile=min_quantile,
            voxel_size=self.voxel_size,
            min_value=min_value,
            cmap=cmap,
            name=self.gene_name,
            br_class="Gene Data",
        )


# ---------------------------------------------------------------------------- #
# brainrender/atlas_specific/allen_brain_atlas/gene_expression/ge_utils.py
# ---------------------------------------------------------------------------- #
import io
import os
import sys
import zipfile

import numpy as np

from brainrender._io import check_file_exists, request
from brainrender._utils import get_subdirs, listdir

# ----------------------------------- Cache ---------------------------------- #
# (def check_gene_cached(cache_folder, gene_id, exp_id): starts here in the
#  original dump; the function continues on the following lines.)
def check_gene_cached(cache_folder, gene_id, exp_id):
    """
    A gene is saved in a folder in cache_folder
    with gene_id-exp_id as name. If the folder doesn't
    exist the gene is not cached.

    :param cache_folder: str, path to general cache folder for all data
    :param gene_id: str name of gene
    :param exp_id: id of experiment
    :return: path to the cached folder, or False if not cached
    """
    cache = [
        sub
        for sub in get_subdirs(cache_folder)
        if f"{gene_id}-{exp_id}" == os.path.basename(sub)
    ]
    if not cache:
        return False
    elif len(cache) > 1:
        raise ValueError("Found too many folders")
    else:
        return cache[0]


def download_and_cache(url, cachedir):
    """
    Given a url to download a gene's ISH experiment data,
    this function download and unzips the data

    :param url: str, url to download data
    :param cachedir: str, path to folder where data will be downloaded
    """
    # Get data
    req = request(url)

    # Create cache dir. makedirs(exist_ok=True) also creates missing parents
    # and is race-free, unlike the previous isdir()+mkdir() pair.
    os.makedirs(cachedir, exist_ok=True)

    # Unzip to cache dir; the context manager guarantees the archive handle
    # is closed even if extraction fails (it previously leaked).
    with zipfile.ZipFile(io.BytesIO(req.content)) as z:
        z.extractall(cachedir)


def load_cached_gene(cache, metric, grid_size):
    """
    Loads a gene's data from cache

    :param cache: path to the cached gene folder
    :param metric: str, which metric file to load (e.g. "energy")
    :param grid_size: list, shape of the expression grid
    :return: np.ndarray, or None if no matching file is cached
    """
    files = [
        f for f in listdir(cache) if metric in f and not f.endswith(".mhd")
    ]
    if not files:
        return None
    if len(files) > 1:
        raise NotImplementedError("Deal with more than one file found")
    else:
        return read_raw(files[0], grid_size)


# --------------------------------- Open .raw -------------------------------- #
@check_file_exists
def read_raw(filepath, grid_size):
    """
    reads a .raw file with gene expression data
    downloaded from the Allen atlas and returns
    a numpy array with the correct grid_size.
    See as reference:
    http://help.brain-map.org/display/mousebrain/API#API-Expression3DGridsz

    :param filepath: str or Path object
    :param grid_size: list, shape the flat buffer is reshaped to
    """
    filepath = str(filepath)

    # Read bytes
    with open(filepath, "rb") as raw_file:
        content = raw_file.read()

    # Create np array and return
    data = np.frombuffer(content, dtype="float32").reshape(grid_size)

    if sys.platform == "darwin":
        data = data.T  # TODO figure out why this is necessary on Mac OS?

    return data


# ---------------------------------------------------------------------------- #
# brainrender/atlas_specific/allen_brain_atlas/streamlines.py
# ---------------------------------------------------------------------------- #
import pandas as pd
from loguru import logger
from myterial import orange
from rich import print
from rich.progress import track

try:
    from allensdk.api.queries.mouse_connectivity_api import (
        MouseConnectivityApi,
    )

    mca = MouseConnectivityApi()
    allen_sdk_installed = True
except ModuleNotFoundError:  # pragma: no cover
    allen_sdk_installed = False  # pragma: no cover


from brainrender import base_dir
from brainrender._io import request
from brainrender._utils import listify

streamlines_folder = base_dir / "streamlines"
streamlines_folder.mkdir(exist_ok=True)


def experiments_source_search(SOI):
    """
    Returns data about experiments whose injection was in the SOI, structure of interest

    :param SOI: str, structure of interest. Acronym of structure to use as seed for the search
    :return: pd.DataFrame of experiments, or None if the AllenSDK is missing
    """

    transgenic_id = 0  # id = 0 means use only wild type
    primary_structure_only = True

    if not allen_sdk_installed:
        # Grammar fix in user-facing message: "cannot be download" -> "cannot be downloaded"
        print(
            f"[{orange}]Streamlines cannot be downloaded because the AllenSDK package is not installed. "
            "Please install `allensdk` with `pip install allensdk`"
        )
        return None

    return pd.DataFrame(
        mca.experiment_source_search(
            injection_structures=listify(SOI),
            target_domain=None,
            transgenic_lines=transgenic_id,
            primary_structure_only=primary_structure_only,
        )
    )
" 39 | "Please install `allensdk` with `pip install allensdk`" 40 | ) 41 | return None 42 | 43 | return pd.DataFrame( 44 | mca.experiment_source_search( 45 | injection_structures=listify(SOI), 46 | target_domain=None, 47 | transgenic_lines=transgenic_id, 48 | primary_structure_only=primary_structure_only, 49 | ) 50 | ) 51 | 52 | 53 | def get_streamlines_data(eids, force_download=False): 54 | """ 55 | Given a list of expeirmental IDs, it downloads the streamline data 56 | from the https://neuroinformatics.nl cache and saves them as 57 | json files. 58 | 59 | :param eids: list of integers with experiments IDs 60 | """ 61 | data = [] 62 | for eid in track(eids, total=len(eids), description="downloading"): 63 | url = "https://neuroinformatics.nl/HBP/allen-connectivity-viewer/json/streamlines_{}.json.gz".format( 64 | eid 65 | ) 66 | 67 | jsonpath = streamlines_folder / f"{eid}.json" 68 | 69 | if not jsonpath.exists() or force_download: 70 | response = request(url) 71 | 72 | # Write the response content as a temporary compressed file 73 | temp_path = streamlines_folder / "temp.gz" 74 | with open(str(temp_path), "wb") as temp: 75 | temp.write(response.content) 76 | 77 | # Open in pandas and delete temp 78 | url_data = pd.read_json( 79 | str(temp_path), lines=True, compression="gzip" 80 | ) 81 | temp_path.unlink() 82 | 83 | # save json 84 | url_data.to_json(str(jsonpath)) 85 | 86 | # append to lists and return 87 | data.append(url_data) 88 | else: 89 | data.append(pd.read_json(str(jsonpath))) 90 | return data 91 | 92 | 93 | def get_streamlines_for_region(region, force_download=False): 94 | """ 95 | Using the Allen Mouse Connectivity data and corresponding API, this function finds experiments whose injections 96 | were targeted to the region of interest and downloads the corresponding streamlines data. By default, experiments 97 | are selected for only WT mice and only when the region was the primary injection target. 
98 | 99 | :param region: str with region to use for research 100 | 101 | """ 102 | logger.debug(f"Getting streamlines data for region: {region}") 103 | # Get experiments whose injections were targeted to the region 104 | region_experiments = experiments_source_search(region) 105 | if region_experiments is None or region_experiments.empty: 106 | logger.debug("No experiments found from allen data") 107 | return None 108 | 109 | return get_streamlines_data( 110 | region_experiments.id.values, force_download=force_download 111 | ) 112 | -------------------------------------------------------------------------------- /brainrender/camera.py: -------------------------------------------------------------------------------- 1 | from loguru import logger 2 | from vtkmodules.vtkRenderingCore import vtkCamera 3 | 4 | from brainrender.cameras import cameras 5 | 6 | 7 | def get_camera(camera): 8 | """ 9 | Returns the parameters for a pre-defined camera 10 | 11 | :param camera: str 12 | """ 13 | return cameras[camera] 14 | 15 | 16 | def check_camera_param(camera): 17 | """ 18 | Check that a dictionary of camera parameters 19 | is complete. 
def check_camera_param(camera):
    """
    Validate a camera specification.

    A string is resolved through the pre-defined cameras; a dict must
    contain at least the entries ["pos", "viewup", "clipping_range"].

    :param camera: str, dict
    :raises ValueError: if a required key is missing from the dict
    """
    if isinstance(camera, str):
        return get_camera(camera)

    required = ["pos", "viewup", "clipping_range"]
    for key in required:
        if key not in list(camera.keys()):
            raise ValueError(
                f"Camera parameters dict should include the following keys: {required}, missing: {key}"
            )
    # make sure an (optional) focal point entry is always present
    camera.setdefault("focal_point", None)
    return camera


def set_camera_params(camera, params):
    """
    Apply a dictionary of camera parameters to a camera object.

    :param camera: camera obj
    :param params: dictionary of camera parameters
    """
    logger.debug(f"Setting camera parameters: {params}")
    camera.SetPosition(params["pos"])
    camera.SetViewUp(params["viewup"])
    camera.SetClippingRange(params["clipping_range"])

    focal = params.get("focal_point")
    if focal is not None:
        camera.SetFocalPoint(focal)
    if "distance" in params:
        camera.SetDistance(params["distance"])


def set_camera(scene, camera):
    """
    Sets the position of the camera of a brainrender scene.

    :param scene: instance of Scene
    :param camera: either a string with the name of one of the pre-defined cameras, or
        a dictionary of camera parameters, or a vtkCamera instance.
    """
    if camera is None:
        return None

    # a ready-made vtk camera is used directly
    if isinstance(camera, vtkCamera):
        scene.plotter.camera = camera
        return camera

    # otherwise resolve/validate the specification and apply it
    camera = check_camera_param(camera)
    try:
        set_camera_params(scene.plotter.camera, camera)
    except AttributeError:
        return None
    return camera


def get_camera_params(scene=None, camera=None):
    """
    Given an active brainrender scene or a camera, it return
    the camera parameters.

    :param scene: instance of Scene whose camera is to be used
    :param camera: camera obj
    """

    def _rounded(value):
        # round every coordinate so the parameters are easy to copy/paste
        if isinstance(value, tuple):
            return tuple(round(v) for v in value)
        return round(value)

    if scene is not None:
        if not scene.is_rendered:
            scene.render(interactive=False)
        cam = scene.plotter.camera
    else:
        cam = camera

    return dict(
        pos=_rounded(cam.GetPosition()),
        focal_point=_rounded(cam.GetFocalPoint()),
        viewup=_rounded(cam.GetViewUp()),
        distance=_rounded(cam.GetDistance()),
        clipping_range=_rounded(cam.GetClippingRange()),
    )


# ---------------------------------------------------------------------------- #
# brainrender/cameras.py
# ---------------------------------------------------------------------------- #
# Pre-defined camera settings (position, view-up vector, clipping range)
# for standard views of the brain.

sagittal_camera = {
    "pos": (6514, -34, 36854),
    "viewup": (0, -1, 0),
    "clipping_range": (24098, 49971),
}

sagittal_camera2 = {
    "pos": (9782, 1795, -40999),
    "viewup": (0, -1, 0),
    "clipping_range": (23256, 51031),
}


frontal_camera = {
    "pos": (-19199, -1428, -5763),
    "viewup": (0, -1, 0),
    "clipping_range": (19531, 40903),
}

top_camera = {
    "pos": (7760, -31645, -5943),
    "viewup": (-1, 0, 0),
    "clipping_range": (27262, 45988),
}

top_side_camera = {
    "pos": (4405, -31597, -5411),
    "viewup": (0, 0, -1),
    "clipping_range": (26892, 46454),
}

three_quarters_camera = {
    "pos": (-20169, -7298, 14832),
    "viewup": (0, -1, 0),
    "clipping_range": (16955, 58963),
}

# registry mapping a view name to its camera settings
cameras = {
    "sagittal": sagittal_camera,
    "sagittal2": sagittal_camera2,
    "frontal": frontal_camera,
    "top": top_camera,
    "top_side": top_side_camera,
    "three_quarters": three_quarters_camera,
}
# ---------------------------------------------------------------------------- #
# brainrender/render.py
# ---------------------------------------------------------------------------- #
from datetime import datetime
from pathlib import Path

import numpy as np
from loguru import logger
from myterial import teal
from rich import print
from rich.syntax import Syntax
from vedo import Plotter
from vedo import Volume as VedoVolume
from vedo import settings as vsettings

from brainrender import settings
from brainrender.actors.points import PointsDensity
from brainrender.camera import (
    check_camera_param,
    get_camera,
    set_camera,
)

# mtx used to transform meshes to sort axes orientation
mtx = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
mtx_swap_x_z = [[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]]


class Render:
    """
    Backend for Scene: owns the vedo Plotter and handles all rendering,
    exporting and screenshot related tasks.
    """

    # class-level defaults shared by all scenes
    is_rendered = False
    plotter = None

    axes_names = ("AP", "DV", "LR")
    axes_lookup = {"x": "AP", "y": "DV", "z": "LR"}
    axes_indices = {"AP": 0, "DV": 1, "LR": 2}

    def __init__(self, plotter=None):
        """
        Backend for Scene, handles all rendering and exporting
        related tasks.

        :param plotter: optional, an existing vedo Plotter to render into.
            If None a new one is created.
        """
        if plotter is None:
            self._get_plotter()
        else:
            self.plotter = plotter
            self.plotter.keyPressFunction = self.keypress

    def _get_plotter(self):
        """
        Make a vedo plotter with
        fancy axes and all
        """
        self.plotter = Plotter(
            axes=self._make_axes() if settings.SHOW_AXES else None,
            pos=(0, 0),
            title="brainrender",
            bg=settings.BACKGROUND_COLOR,
            offscreen=settings.OFFSCREEN,
            size="full" if settings.WHOLE_SCREEN else (1600, 1200),
        )

        self.plotter.keyPressFunction = self.keypress

    def _make_axes(self):
        """
        Returns a dictionary with axes
        parameters for the vedo plotter
        """
        ax_idx = self.atlas.space.axes_order.index("frontal")

        # make a custom axes dict: extent in microns = shape * resolution
        atlas_shape = np.array(self.atlas.metadata["shape"]) * np.array(
            self.atlas.metadata["resolution"]
        )
        z_range = np.array([-atlas_shape[2], 0])
        z_ticks = [
            (-v, str(np.abs(v).astype(np.int32)))
            for v in np.linspace(
                0,
                atlas_shape[ax_idx],
                10,
            )
        ]

        if self.atlas.atlas_name == "allen_human_500um":
            z_range = None
            z_ticks = None
            logger.debug(
                "RENDER: manually forcing axes size for human atlas, atlas needs fixing"
            )

        # make custom axes dict
        axes = dict(
            axes_linewidth=3,
            tip_size=0,
            xtitle="AP (μm)",
            ytitle="DV (μm)",
            ztitle="LR (μm)",
            text_scale=0.8,
            xtitle_rotation=180,
            zrange=z_range,
            z_values_and_labels=z_ticks,
            xygrid=False,
            yzgrid=False,
            zxgrid=False,
            x_use_bounds=True,
            y_use_bounds=True,
            z_use_bounds=True,
            xlabel_rotation=180,
            ylabel_rotation=180,
            zlabel_rotation=90,
        )

        return axes

    def _prepare_actor(self, actor):
        """
        When an actor is first rendered, a transform matrix
        is applied to its points to correct axes orientation
        mismatches: https://github.com/brainglobe/brainglobe-atlasapi/issues/73

        Once an actor is 'corrected' it spawns labels and silhouettes as needed
        """
        # don't apply transforms to points density actors
        if isinstance(actor, PointsDensity):
            logger.debug(
                f'Not transforming actor "{actor.name} (type: {actor.br_class})"'
            )
            actor._is_transformed = True

        # Flip every actor's orientation
        if not actor._is_transformed:
            try:
                actor._mesh = actor.mesh.clone()

                if isinstance(actor._mesh, VedoVolume):
                    actor._mesh.permute_axes(2, 1, 0)
                    actor._mesh.apply_transform(mtx, True)
                    actor._mesh.transform = (
                        None  # otherwise it gets applied twice
                    )
                elif actor.br_class in ["None", "Gene Data"]:
                    actor._mesh.apply_transform(mtx_swap_x_z)
                    actor._mesh.apply_transform(mtx)
                else:
                    actor._mesh.apply_transform(mtx)

            except AttributeError:  # some types of actors don't transform
                logger.debug(
                    f'Failed to transform actor: "{actor.name} (type: {actor.br_class})"'
                )
            actor._is_transformed = True
        else:
            try:
                actor.mesh.reverse()
            except AttributeError:  # Volumes don't have reverse
                logger.debug(
                    f'Failed to reverse actor: "{actor.name} (type: {actor.br_class})"'
                )
            actor._is_transformed = True

        # Add silhouette and labels
        if actor._needs_silhouette and not self.backend:
            self.plotter.add(actor.make_silhouette().mesh)

        if actor._needs_label and not self.backend:
            self.labels.extend(actor.make_label(self.atlas))

    def _apply_style(self):
        """
        Sets the rendering style for each mesh
        """
        for actor in self.clean_actors:
            if settings.SHADER_STYLE != "cartoon":
                style = settings.SHADER_STYLE
            else:
                if self.backend:  # notebook backend
                    print(
                        'Shader style "cartoon" cannot be used in a notebook'
                    )
                style = "off"

            try:
                actor.mesh.reverse()  # flip normals
                actor.mesh.lighting(style=style)

                actor._mesh.reverse()
                actor._mesh.lighting(style=style)
            except AttributeError:
                pass

    def render(
        self,
        interactive=None,
        camera=None,
        zoom=None,
        resetcam=False,
        **kwargs,
    ):
        """
        Renders the scene.

        :param interactive: bool. If none settings.INTERACTIVE is used.
            If true the program's execution is stopped and users
            can interact with scene.
        :param camera: str, dict. If none the default camera is used.
            Pass a valid camera input to specify the camera position when
            the scene is rendered.
        :param zoom: float, if None atlas default is used
        :param resetcam: bool, if True the camera is reset between renders
        :param kwargs: additional arguments to pass to self.plotter.show
        """
        logger.debug(
            f"Rendering scene. Interactive: {interactive}, camera: {camera}, zoom: {zoom}"
        )
        # get zoom
        zoom = zoom or self.atlas.zoom

        # get vedo plotter
        if self.plotter is None:
            self._get_plotter()

        # Get camera
        camera = camera or settings.DEFAULT_CAMERA
        if isinstance(camera, str):
            camera = get_camera(camera)
        else:
            camera = check_camera_param(camera)

        if "focal_point" not in camera.keys() or camera["focal_point"] is None:
            camera["focal_point"] = self.root._mesh.center_of_mass()

        if not self.backend and camera is not None:
            _ = set_camera(self, camera)

        # Apply axes correction
        for actor in self.clean_actors:
            if not actor._is_transformed:
                self._prepare_actor(actor)
                self.plotter.add(actor.mesh)

            if actor._needs_silhouette or actor._needs_label:
                self._prepare_actor(actor)

        # add labels to the scene
        for label in self.labels:
            if label._is_added:
                continue
            else:
                label._mesh = label.mesh.clone()
                self._prepare_actor(label)
                self.plotter.add(label._mesh.reverse())
                label._is_added = True

        # Apply style
        self._apply_style()

        if self.inset and not self.is_rendered:
            self._get_inset()

        # render
        self.is_rendered = True
        if not self.backend:  # not running in a python script
            if interactive is None:
                interactive = settings.INTERACTIVE

            self.plotter.show(
                interactive=interactive,
                zoom=zoom,
                bg=settings.BACKGROUND_COLOR,
                rate=40,
                axes=self.plotter.axes,
                resetcam=resetcam,
            )
        elif self.backend == "k3d":  # pragma: no cover
            # Remove silhouettes
            self.remove(*self.get_actors(br_class="silhouette"))
            print(
                f"[{teal}]Your scene is ready for rendering, use:\n",
                Syntax("from vedo import show", lexer="python"),
                Syntax("vedo.show(*scene.renderables)", lexer="python"),
                sep="\n",
            )
        else:  # pragma: no cover
            print(
                f"[{teal}]Your scene is ready for rendering, use:\n",
                Syntax("from itkwidgets import view", lexer="python"),
                Syntax(
                    "view(scene.plotter.show(*scene.renderables))",
                    lexer="python",
                ),
                sep="\n",
            )

    def close(self):
        """Close the vedo plotter window."""
        self.plotter.close()

    def export(self, savepath, **kwargs):
        """
        Exports the scene to a .html
        file for online renderings.

        :param savepath: str, Path to a .html file to save the export
        :raises ValueError: if savepath does not end in .html
        """
        logger.debug(f"Exporting scene to {savepath}")
        _backend = self.backend
        _default_backend = vsettings.default_backend

        if not self.is_rendered:
            self.render(interactive=False, **kwargs)

        path = Path(savepath)
        if path.suffix != ".html":
            raise ValueError("Savepath should point to a .html file")

        # prepare settings
        vsettings.default_backend = "k3d"

        # Create new plotter and save to file
        plt = Plotter()
        plt.add(self.clean_renderables).render()
        plt = plt.show(interactive=False)

        with open(path, "w") as fp:
            fp.write(plt.get_snapshot())

        print(
            f"The brainrender scene has been exported for web. The results are saved at {path}"
        )

        # Reset settings
        vsettings.default_backend = _default_backend
        self.backend = _backend

        return str(path)

    def screenshot(self, name=None, scale=None, **kwargs):
        """
        Takes a screenshot of the current view
        and save it to file.
        Screenshots are saved in `screenshots_folder`
        (see Scene)

        :param name: str, name of png file
        :param scale: float, >1 for higher resolution
        """

        if not self.is_rendered:
            self.render(interactive=False, **kwargs)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        name = Path(name or f"brainrender_screenshot_{timestamp}")

        # If no suffix is provided or it an unsupported format, default to .png
        if name.suffix not in [".png", ".eps", ".pdf", ".svg", ".jpg"]:
            name = name.with_suffix(".png")

        scale = scale or settings.SCREENSHOT_SCALE

        print(f"\nSaving new screenshot at {name}\n")

        savepath = str(self.screenshots_folder / name)
        logger.debug(f"Saving scene at {savepath}")
        self.plotter.screenshot(filename=savepath, scale=scale)
        return savepath

    def keypress(self, key):  # pragma: no cover
        """
        Handles key presses for interactive view
        -s: takes a screenshot
        -q/Esc: closes the window
        """
        if key == "s":
            self.screenshot()

        elif key in ("q", "Esc"):
            self.close()


# ---------------------------------------------------------------------------- #
# brainrender/scene.py
# ---------------------------------------------------------------------------- #
"""
Scene
    - Create a scene, add root and inset if necessary
    - add actor method
    - special methods

"""

import sys
from pathlib import Path

import pyinspect as pi
from loguru import logger
from myterial import amber, orange, orange_darker, salmon
from rich import print
from vedo import Assembly, Mesh, Text2D

from brainrender import settings
from brainrender._io import load_mesh_from_file
from brainrender._jupyter import JupyterMixIn, not_on_jupyter
from brainrender._utils import listify, return_list_smart
# (end of the import statement `from brainrender._utils import listify,
#  return_list_smart` split across the dump boundary above)
from brainrender.actor import Actor
from brainrender.actors import Volume
from brainrender.atlas import Atlas
from brainrender.render import Render


class Scene(JupyterMixIn, Render):
    def __init__(
        self,
        root=True,
        atlas_name=None,
        check_latest=True,
        inset=True,
        title=None,
        screenshots_folder=None,
        plotter=None,
        title_color: str = "k",
    ):
        """
        Main scene in brainrender.
        It coordinates what should be rendered and how should it look like.

        :param root: bool. If true the brain root mesh is added
        :param atlas_name: str, name of the brainglobe atlas to be used
        :param check_latest: bool, if True checks that the atlas is the latest version
        :param inset: bool. If true an inset is shown with the brain's outline
        :param title: str. If true a title is added to the top of the window
        :param screenshots_folder: str, Path. Where the screenshots will be saved
        :param plotter: optional vedo Plotter to render into
        :param title_color: str, color of the title text
        """
        logger.debug(
            f"Creating scene with parameters: root: {root}, atlas_name: '{atlas_name}'', inset: {inset}, screenshots_folder: {screenshots_folder}"
        )
        JupyterMixIn.__init__(self)

        self.actors = []  # stores all actors in the scene
        self.labels = []  # stores all `labels` actors in scene

        self.atlas = Atlas(atlas_name=atlas_name, check_latest=check_latest)

        self.screenshots_folder = (
            Path(screenshots_folder)
            if screenshots_folder is not None
            else Path().cwd()
        )
        self.screenshots_folder.mkdir(exist_ok=True)

        # Initialise render class
        Render.__init__(self, plotter)

        # Get root mesh
        self.root = self.add_brain_region(
            "root",
            alpha=settings.ROOT_ALPHA,
            color=settings.ROOT_COLOR,
            silhouette=bool(root and settings.SHADER_STYLE == "cartoon"),
        )
        self.atlas.root = self.root  # give atlas access to root
        self._root_mesh = self.root.mesh.clone()
        if not root:
            self.remove(self.root)

        # keep track if we need to make an inset
        self.inset = inset

        # add title
        if title:
            self.add(
                Text2D(title, pos="top-center", s=2.5, c=title_color, alpha=1),
                names="title",
                classes="title",
            )

    def __str__(self):
        return f"A `brainrender.scene.Scene` with {len(self.actors)} actors."

    def __repr__(self):  # pragma: no cover
        return f"A `brainrender.scene.Scene` with {len(self.actors)} actors."

    # NOTE(review): Jupyter's rich-display hook is spelled `_repr_html_`;
    # this name (`__repr_html__`) is never invoked by IPython — confirm intent
    # before renaming (kept as-is for interface compatibility).
    def __repr_html__(self):  # pragma: no cover
        return f"A `brainrender.scene.Scene` with {len(self.actors)} actors."

    def __del__(self):
        # BUG FIX: guard the cleanup call. If __init__ failed part-way or the
        # interpreter is shutting down, self.close()/self.plotter may be
        # unavailable and an unguarded call produced
        # "Exception ignored in __del__" noise.
        try:
            self.close()
        except Exception:
            pass

    @not_on_jupyter
    def _get_inset(self):
        """
        Creates a small inset showing the brain's orientation
        """
        if settings.OFFSCREEN:
            return

        inset = self._root_mesh.clone()
        inset.alpha(1)  # scale(0.5)
        self.plotter.add_inset(inset, pos=(0.95, 0.1), draggable=False)

        if settings.SHADER_STYLE == "cartoon":
            inset.lighting("off")

    def add(self, *items, names=None, classes=None, transform=True, **kwargs):
        """
        General method to add Actors to the scene.

        :param items: vedo.Mesh, Actor, (str, Path).
            If str/path it should be a path to a .obj or .stl file.
            Whatever the input it's turned into an instance of Actor
            before adding it to the scene
        :param names: names to be assigned to the Actors
        :param classes: br_classes to be assigned to the Actors
        :param transform: bool, if True the axes-correction transform is applied
        :param **kwargs: parameters to be passed to the individual
            loading functions (e.g. to load from file and specify the color)
        """
        names = names or [None for a in items]
        classes = classes or [None for a in items]

        # turn items into Actors
        actors = []
        for item, name, _class in zip(items, listify(names), listify(classes)):
            if item is None:
                continue

            if isinstance(item, (Mesh, Assembly)):
                actors.append(Actor(item, name=name, br_class=_class))

            elif isinstance(item, Text2D):
                # Mark text actors differently because they don't behave like
                # other 3d actors
                actors.append(
                    Actor(
                        item,
                        name=name,
                        br_class=_class,
                        is_text=True,
                        **kwargs,
                    )
                )
            elif pi.utils._class_name(item) == "Volume" and not isinstance(
                item, Volume
            ):
                actors.append(
                    Volume(item, name=name, br_class=_class, **kwargs)
                )
            elif isinstance(item, Actor):
                actors.append(item)

            elif isinstance(item, (str, Path)):
                mesh = load_mesh_from_file(item, **kwargs)
                name = name or Path(item).name
                _class = _class or "from file"
                actors.append(Actor(mesh, name=name, br_class=_class))

            else:
                raise ValueError(
                    f"Unrecognized argument: {item} [{pi.utils._class_name(item)}]"
                )

        # transform actors
        if transform:
            for actor in actors:
                self._prepare_actor(actor)

        # add actors to plotter
        for actor in actors:
            try:
                self.plotter.add(actor._mesh)
            except AttributeError:  # e.g. for titles
                self.plotter.add(actor.mesh)

        # Add to the lists actors
        self.actors.extend(actors)
        return return_list_smart(actors)

    def remove(self, *actors):
        """
        Removes actors from the scene.
        """
        logger.debug(f"Removing {len(actors)} actors from scene")
        for act in actors:
            try:
                self.actors.pop(self.actors.index(act))
            except Exception:
                print(
                    f"Could not remove ({act}, {pi.utils._class_name(act)}) from actors"
                )
            else:
                # remove from plotter
                try:
                    self.plotter.remove(act._mesh)
                except AttributeError:
                    pass

                if act.silhouette is not None:
                    self.plotter.remove(act.silhouette.mesh)

                for label in act.labels:
                    self.plotter.remove(label.mesh)

    def get_actors(self, name=None, br_class=None):
        """
        Return's the scene's actors that match some search criteria.

        :param name: str, int or list of str/int, actors' names
        :param br_class: str or list of str, actors br classes
        """
        matches = self.actors
        if name is not None:
            name = listify(name)
            matches = [m for m in matches if m.name in name]
        if br_class is not None:
            br_class = listify(br_class)
            matches = [m for m in matches if m.br_class in br_class]
        return matches

    def add_brain_region(
        self,
        *regions,
        alpha=1,
        color=None,
        silhouette=None,
        hemisphere="both",
        force=False,
    ):
        """
        Dedicated method to add brain regions to render

        :param regions: str. String of regions names
        :param alpha: float. How opaque the regions are rendered.
        :param color: str. If None the atlas default color is used
        :param silhouette: bool. If true regions Actors will have
            a silhouette
        :param hemisphere: str.
            - if "both" the complete mesh is returned
            - if "left"/"right" only the corresponding half
                of the mesh is returned
        :param force: bool. If true force adding of region even
            if already rendered
        """
        if silhouette is None:
            # Simplified from `silhouette or True if ... else False`:
            # silhouette is None here, so the result is exactly whether the
            # cartoon shader style is active.
            silhouette = settings.SHADER_STYLE == "cartoon"

        # avoid adding regions already rendered
        if not force:
            already_in = [
                r.name for r in self.get_actors(br_class="brain region")
            ]
            regions = [r for r in regions if r not in already_in]

        if not regions:  # they were all already rendered
            logger.debug(
                "Not adding any region because they are all already in the scene"
            )
            return None

        logger.debug(
            f"SCENE: Adding {len(regions)} brain regions to scene: {regions}"
        )

        # get regions actors from atlas
        regions = self.atlas.get_region(*regions, alpha=alpha, color=color)
        regions = listify(regions) or []

        # add actors
        actors = self.add(*regions)

        # slice to keep only one hemisphere
        if hemisphere in ("left", "right"):
            if self.atlas.metadata["symmetric"]:
                mesh_center = (
                    self.root._mesh.bounds().reshape((3, 2)).mean(axis=1)
                )
            else:
                mesh_center = self.root._mesh.center_of_mass()

            normal = (0, 0, 1) if hemisphere == "right" else (0, 0, -1)
            plane = self.atlas.get_plane(pos=mesh_center, norm=normal)

            if not isinstance(actors, list):
                actors._mesh.cut_with_plane(
                    origin=plane.center,
                    normal=plane.normal,
                )
                actors.cap()
            else:
                for actor in actors:
                    actor._mesh.cut_with_plane(
                        origin=plane.center,
                        normal=plane.normal,
                    )
                    actor.cap()

        # make silhouettes
        if silhouette and regions and alpha:
            self.add_silhouette(*regions, lw=2)

        return actors

    # @not_on_jupyter
    # def add_silhouette(self, *actors, lw=1, color="k"):
    #     """
    #     Dedicated method to add silhouette to actors
    #
    #     :param actors: ...
    # (definition truncated at the end of the visible source chunk; preserved
    #  as-is, not reconstructed.)
Actors 329 | :param lw: float. Line weight 330 | :param color: str, silhouette color 331 | """ 332 | for actor in actors: 333 | if actor is None: 334 | continue 335 | actor._needs_silhouette = True 336 | actor._silhouette_kwargs = dict( 337 | lw=lw or settings.LW, 338 | color=color, 339 | ) 340 | 341 | @not_on_jupyter 342 | def add_label(self, actor, label, **kwargs): 343 | """ 344 | Dedicated method to add labels to actors 345 | 346 | :param actor: Actors 347 | :param label: str. Text of label 348 | :param **kwargs: see brainrender._actor.make_actor_label for kwargs 349 | """ 350 | actor._needs_label = True 351 | actor._label_str = label 352 | actor._label_kwargs = kwargs 353 | 354 | def slice(self, plane, actors=None, close_actors=False, invert=False): 355 | """ 356 | Slices actors with a plane. 357 | 358 | :param plane: str, Plane. If a string it needs to be 359 | a supported plane from brainglobe's atlas api (e.g. 'frontal') 360 | otherwise it should be a vedo.Plane mesh 361 | :param actors: list of actors to be sliced. If None all actors 362 | will be sliced 363 | :param close_actors: If true the openings in the actors meshes 364 | caused by the cut will be closed. 365 | :param invert: Invert the slice direction. 
366 | """ 367 | if isinstance(plane, str): 368 | if invert is False: 369 | norm = self.atlas.space.plane_normals[plane] 370 | elif invert is True: 371 | norm = tuple( 372 | x * -1 for x in self.atlas.space.plane_normals[plane] 373 | ) 374 | plane = self.atlas.get_plane(plane=plane, norm=norm) 375 | 376 | if not actors or actors is None: 377 | actors = self.clean_actors.copy() 378 | 379 | for actor in listify(actors): 380 | actor._mesh = actor._mesh.cut_with_plane( 381 | origin=plane.center, 382 | normal=plane.normal, 383 | ) 384 | if close_actors: 385 | actor.cap() 386 | 387 | if actor.silhouette is not None: 388 | self.plotter.remove(actor.silhouette.mesh) 389 | self.plotter.add(actor.make_silhouette().mesh) 390 | 391 | @property 392 | def content(self): 393 | """ 394 | Prints an overview of the Actors in the scene. 395 | """ 396 | 397 | actors = pi.Report( 398 | "Scene actors", accent=salmon, dim=orange, color=orange 399 | ) 400 | 401 | for act in self.actors: 402 | actors.add( 403 | f"[bold][{amber}]- {act.name}[/bold][{orange_darker}] (type: [{orange}]{act.br_class}[/{orange}])" 404 | ) 405 | 406 | if sys.platform != "win32": 407 | actors.print() 408 | else: 409 | print(pi.utils.stringify(actors, maxlen=-1)) 410 | 411 | @property 412 | def renderables(self): 413 | """ 414 | Returns the meshes for all actors. 415 | """ 416 | if not self.backend: 417 | return [a.mesh for a in self.actors + self.labels] 418 | else: 419 | return [a.mesh for a in self.actors if not a.is_text] 420 | 421 | @property 422 | def clean_actors(self): 423 | """ 424 | returns only actors that are not Text objects and similar 425 | """ 426 | return [a for a in self.actors if not a.is_text] 427 | 428 | @property 429 | def clean_renderables(self): 430 | """ 431 | Returns meshes only for 'clean actors' (i.e. not text). 432 | _mesh is returned to account for internal rotations. 
433 | """ 434 | return [a._mesh for a in self.actors if not a.is_text] 435 | -------------------------------------------------------------------------------- /brainrender/settings.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from vedo import settings as vsettings 4 | 5 | DEBUG = False # set to True to see more detailed logs 6 | 7 | # ------------------------------- vedo settings ------------------------------ # 8 | 9 | vsettings.point_smoothing = False 10 | vsettings.line_smoothing = False 11 | vsettings.polygon_smoothing = False 12 | vsettings.immediate_rendering = False 13 | 14 | vsettings.use_depth_peeling = True 15 | vsettings.alpha_bit_planes = 1 16 | vsettings.max_number_of_peels = 12 17 | vsettings.occlusion_ratio = 0.1 18 | vsettings.multi_samples = 0 if sys.platform == "darwin" else 8 19 | 20 | # For transparent background with screenshots 21 | vsettings.screenshot_transparent_background = False # vedo for transparent bg 22 | vsettings.use_fxaa = False 23 | 24 | 25 | # --------------------------- brainrender settings --------------------------- # 26 | 27 | BACKGROUND_COLOR = "white" 28 | DEFAULT_ATLAS = "allen_mouse_25um" # default atlas 29 | DEFAULT_CAMERA = "three_quarters" # Default camera settings (orientation etc. see brainrender.camera.py) 30 | INTERACTIVE = True # rendering interactive ? 31 | LW = 2 # e.g. 
for silhouettes 32 | ROOT_COLOR = [0.8, 0.8, 0.8] # color of the overall brain model's actor 33 | ROOT_ALPHA = 0.2 # transparency of the overall brain model's actor' 34 | SCREENSHOT_SCALE = 1 35 | SHADER_STYLE = "cartoon" # affects the look of rendered brain regions: [metallic, plastic, shiny, glossy, cartoon] 36 | SHOW_AXES = True 37 | WHOLE_SCREEN = False # If true render window is full screen 38 | OFFSCREEN = False 39 | -------------------------------------------------------------------------------- /brainrender/video.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import numpy as np 5 | from loguru import logger 6 | from myterial import amber, orange, salmon 7 | from rich import print 8 | from rich.progress import track 9 | 10 | import brainrender as br 11 | from brainrender._jupyter import not_on_jupyter 12 | from brainrender._video import Video 13 | from brainrender.camera import check_camera_param, get_camera_params 14 | 15 | 16 | class VideoMaker: 17 | def __init__( 18 | self, 19 | scene, 20 | save_fld, 21 | name, 22 | fmt="mp4", 23 | size="1620x1050", 24 | make_frame_func=None, 25 | ): 26 | """ 27 | Creates a video by animating a scene and saving a sequence 28 | of screenshots. 29 | 30 | :param scene: the instance of Scene to be animated 31 | :param save_fld: str, Path. Where the video will be savd 32 | :param save_name: str, name of the video 33 | :param fmt: str. Video format (e.g. 'mp4') 34 | :param make_frame_func: None, optional. If passed it should be a 35 | function that takes the Scene to be animated as the fist argument abd 36 | the current frame number as second. At every frame this function 37 | can do what's needed to animate the scene 38 | :param size: str, size of video's frames in pixels 39 | """ 40 | logger.debug( 41 | f"Creating video with name {name}. 
Format: {fmt}, size: {size}, save folder: {save_fld}" 42 | ) 43 | 44 | self.scene = scene 45 | 46 | self.save_fld = Path(save_fld) 47 | self.save_fld.mkdir(exist_ok=True) 48 | self.save_name = name 49 | self.video_format = fmt 50 | self.size = size 51 | if "mp4" not in self.video_format: 52 | raise NotImplementedError( 53 | "Video creation can only output mp4 videos for now" 54 | ) 55 | 56 | self.make_frame_func = make_frame_func or self._make_frame 57 | 58 | @staticmethod 59 | def _make_frame( 60 | scene, 61 | frame_number, 62 | tot_frames, 63 | resetcam, 64 | azimuth=0, 65 | elevation=0, 66 | roll=0, 67 | ): 68 | """ 69 | Default `make_frame_func`. Rotates the camera in 3 directions 70 | 71 | :param scene: scene to be animated. 72 | :param frame_number: int, not used 73 | :param tot_frames: int, total number of frames 74 | :param resetcam: bool, if True the camera is reset 75 | :param azimuth: integer, specify the rotation in degrees 76 | per frame on the relative axis. (Default value = 0) 77 | :param elevation: integer, specify the rotation in degrees 78 | per frame on the relative axis. (Default value = 0) 79 | :param roll: integer, specify the rotation in degrees 80 | per frame on the relative axis. 
(Default value = 0) 81 | """ 82 | scene.plotter.show(interactive=False, resetcam=resetcam) 83 | scene.plotter.camera.Elevation(elevation) 84 | scene.plotter.camera.Azimuth(azimuth) 85 | scene.plotter.camera.Roll(roll) 86 | 87 | def generate_frames(self, fps, duration, video, resetcam, *args, **kwargs): 88 | """ 89 | Loop to generate frames 90 | 91 | :param fps: int, frame rate 92 | :param duration: float, video duration in seconds 93 | :param video: vedo Video class used to create the video 94 | :param resetcam: bool, if True the camera is reset 95 | """ 96 | nframes = int(fps * duration) 97 | for i in track(range(nframes), description="Generating frames"): 98 | self.make_frame_func( 99 | self.scene, i, nframes, resetcam, *args, **kwargs 100 | ) 101 | video.add_frame() 102 | 103 | def compress(self, temp_name): 104 | """ 105 | Compresses created video with ffmpeg and removes 106 | uncompressed video 107 | """ 108 | 109 | command = f"ffmpeg -hide_banner -loglevel panic -i {temp_name}.mp4 -vcodec libx264 -crf 28 {self.save_name}.mp4 -y" 110 | os.system(command) 111 | Path(temp_name + ".mp4").unlink() 112 | 113 | print( 114 | f"[{amber}]Saved compressed video at: [{orange} bold]{self.save_fld}/{self.save_name}.{self.video_format}" 115 | ) 116 | 117 | @not_on_jupyter 118 | def make_video( 119 | self, 120 | *args, 121 | duration=10, 122 | fps=30, 123 | fix_camera=False, 124 | resetcam=False, 125 | render_kwargs={}, 126 | **kwargs, 127 | ): 128 | """ 129 | Creates a video using user defined parameters 130 | 131 | :param *args: any extra argument to be passed to `make_frame_func` 132 | :param duration: float, duration of the video in seconds 133 | :param fps: int, frame rate 134 | :param fix_camera: bool, if True the focal point of the camera is set based on the first frame 135 | :param resetcam: bool, if True the camera is reset 136 | :param render_kwargs: dict, any extra keyword argument to be passed to `scene.render` 137 | :param **kwargs: any extra keyword argument to 
be passed to `make_frame_func` 138 | """ 139 | logger.debug(f"Saving a video {duration}s long ({fps} fps)") 140 | _off = br.settings.OFFSCREEN 141 | br.settings.OFFSCREEN = True # render offscreen 142 | 143 | self.scene.render(interactive=False, **render_kwargs) 144 | 145 | if fix_camera: 146 | first_frame = self.keyframes.get(0) 147 | if not first_frame: 148 | logger.error("No keyframes found, can't fix camera") 149 | 150 | # Sets the focal point of the first frame to the centre of mass of the 151 | # full root mesh, since this focal point is set subsequent frames will 152 | # have the same focal point unless a new camera is defined 153 | self.keyframes[0]["camera"][ 154 | "focal_point" 155 | ] = self.scene.root._mesh.center_of_mass() 156 | 157 | # cd to folder where the video will be saved 158 | curdir = os.getcwd() 159 | os.chdir(self.save_fld) 160 | print(f"[{amber}]Saving video in [{orange}]{self.save_fld}") 161 | 162 | # Create video 163 | video = Video( 164 | name=self.save_name, 165 | duration=duration, 166 | fps=fps, 167 | fmt=self.video_format, 168 | size=self.size, 169 | ) 170 | 171 | # Make frames 172 | self.generate_frames(fps, duration, video, resetcam, *args, **kwargs) 173 | self.scene.close() 174 | 175 | # Stitch frames into uncompressed video 176 | out, command = video.close() 177 | spath = f"{self.save_fld}/{self.save_name}.{self.video_format}" 178 | if out: 179 | print( 180 | f"[{orange} bold]ffmpeg returned an error while trying to save video with command:\n [{salmon}]{command}" 181 | ) 182 | else: 183 | print(f"[{amber}]Saved video at: [{orange} bold]{spath}") 184 | 185 | # finish up 186 | br.settings.OFFSCREEN = _off 187 | os.chdir(curdir) 188 | return spath 189 | 190 | 191 | def sigma(x): 192 | """ 193 | Sigmoid curve 194 | """ 195 | y = 1.05 / (1 + np.exp(-8 * (x - 0.5))) - 0.025 196 | if y < 0: 197 | y = 0 198 | if y > 1: 199 | y = 1 200 | return y 201 | 202 | 203 | class Animation(VideoMaker): 204 | """ 205 | The animation class 
facilitates the creation of videos 206 | by specifying a series of keyframes at given moments during 207 | the video. At each keyframe various parameters (e.g. camera position) 208 | is specified and the video is created by interpolating 209 | between consecutive key frames. 210 | """ 211 | 212 | _last_frame_params = None 213 | _first_zoom = 0 214 | 215 | def __init__(self, scene, save_fld, name, fmt="mp4", size="1620x1050"): 216 | """ 217 | The animation class facilitates the creation of videos 218 | by specifying a series of keyframes at given moments during 219 | the video. At each keyframe various parameters (e.g. camera position) 220 | is specified and the video is created by interpolating 221 | between consecutive key frames. 222 | 223 | :param scene: the instance of Scene to be animated 224 | :param save_fld: str, Path. Where the video will be savd 225 | :param save_name: str, name of the video 226 | :param fmt: str. Video format (e.g. 'mp4') 227 | """ 228 | VideoMaker.__init__(self, scene, save_fld, name, fmt=fmt, size=size) 229 | logger.debug("Creating animation") 230 | 231 | self.keyframes = {} 232 | self.keyframes[0] = dict( # make sure first frame is a keyframe 233 | zoom=None, camera=None, callback=None 234 | ) 235 | self.keyframes_numbers = 0 236 | self.nframes = 0 237 | self.last_keyframe = 0 238 | self.segment_fact = 0 239 | 240 | def add_keyframe( 241 | self, 242 | time, 243 | duration=0, 244 | camera=None, 245 | zoom=None, 246 | interpol="sigma", 247 | callback=None, 248 | **kwargs, 249 | ): 250 | """ 251 | Add a keyframe to the video. 252 | 253 | :param time: float, time in seconds during the video 254 | at which the keyframe takes place. 255 | :param duration: float, if >0 the key frame is repeated 256 | every 5ms to go from start to start+duration 257 | :param zoom: camera zoom 258 | :param camera: dictionary of camera parameters 259 | :param interpol: str, if `sigma` or `linear` specifies 260 | the interpolation mode between key frames. 
261 | :param callback: function which takes scene, current video 262 | frame and total number of frames in video as arguments. 263 | can be used to make stuff happen during a key frame (e.g. remove 264 | an actor) 265 | """ 266 | if camera is not None: 267 | camera = check_camera_param(camera) 268 | 269 | if time in self.keyframes.keys() and time > 0: 270 | print(f"[b {orange}]Keyframe {time} already exists, overwriting!") 271 | 272 | if zoom is None: 273 | previous_zoom = list(self.keyframes.values())[0]["zoom"] or 0 274 | zoom = previous_zoom 275 | 276 | if not duration: 277 | self.keyframes[time] = dict( 278 | zoom=zoom, 279 | camera=camera, 280 | callback=callback, 281 | interpol=interpol, 282 | kwargs=kwargs, 283 | ) 284 | else: 285 | for time in np.arange(time, time + duration, 0.001): 286 | self.keyframes[time] = dict( 287 | zoom=zoom if time == 0 else None, 288 | camera=camera, 289 | callback=callback, 290 | interpol=interpol, 291 | kwargs=kwargs, 292 | ) 293 | 294 | def get_keyframe_framenumber(self, fps): 295 | """ 296 | Keyframes are defines in units of time (s), so we need 297 | to know to which frame each keyframe corresponds 298 | 299 | :param fps: int, frame rate 300 | """ 301 | self.keyframes = { 302 | int(np.floor(s * fps)): v for s, v in self.keyframes.items() 303 | } 304 | self.keyframes_numbers = sorted(list(self.keyframes.keys())) 305 | 306 | def generate_frames(self, fps, duration, video, resetcam): 307 | """ 308 | Loop to generate frames 309 | 310 | :param fps: int, frame rate 311 | :param duration: float, video duration in seconds 312 | :param video: vedo Video class used to create the video 313 | :param resetcam: bool, if True the camera is reset 314 | """ 315 | logger.debug( 316 | f"Generating animation keyframes. 
Duration: {duration}, fps: {fps}" 317 | ) 318 | self.get_keyframe_framenumber(fps) 319 | 320 | self.nframes = int(fps * duration) 321 | self.last_keyframe = max(self.keyframes_numbers) 322 | 323 | if self.last_keyframe > self.nframes: 324 | print( 325 | f"[b {orange}]The video will be {self.nframes} frames long, but you have defined keyframes after that, try increasing video duration?" 326 | ) 327 | 328 | for framen in track( 329 | range(self.nframes), description="Generating frames..." 330 | ): 331 | self._make_frame(framen, resetcam) 332 | 333 | if framen > 1: 334 | video.add_frame() 335 | 336 | def get_frame_params(self, frame_number): 337 | """ 338 | Get current parameters (e.g. camera position) 339 | based on frame number and defined key frames. 340 | 341 | If frame number is a keyframe or is after a keyframe 342 | then the params are those of that/the last keyframe. 343 | Else the params of two consecutive keyframes are interpolate 344 | using either a linear or sigmoid function. 345 | """ 346 | if frame_number in self.keyframes_numbers: 347 | # Check if current frame is a key frame 348 | params = self.keyframes[frame_number] 349 | 350 | elif frame_number > self.last_keyframe: 351 | # check if current frame is past the last keyframe 352 | params = self.keyframes[self.last_keyframe] 353 | 354 | else: 355 | # interpolate between two key frames 356 | prev = [n for n in self.keyframes_numbers if n < frame_number][-1] 357 | nxt = [n for n in self.keyframes_numbers if n > frame_number][0] 358 | kf1, kf2 = self.keyframes[prev], self.keyframes[nxt] 359 | 360 | self.segment_fact = (nxt - frame_number) / (nxt - prev) 361 | if kf2["interpol"] == "sigma": 362 | self.segment_fact = sigma(self.segment_fact) 363 | 364 | params = dict( 365 | camera=self._interpolate_cameras(kf1["camera"], kf2["camera"]), 366 | zoom=self._interpolate_values(kf1["zoom"], kf2["zoom"]), 367 | callback=None, 368 | ) 369 | 370 | # get current camera (to avoid using scene's default) 371 | if 
params["camera"] is None: 372 | params["camera"] = get_camera_params(self.scene) 373 | return params 374 | 375 | def _make_frame(self, frame_number, resetcam): 376 | """ 377 | Creates a frame with the correct params 378 | and calls the keyframe callback function if defined. 379 | 380 | :param frame_number: int, current frame number 381 | """ 382 | frame_params = self.get_frame_params(frame_number) 383 | logger.debug(f"Frame {frame_number}, params: {frame_params}") 384 | 385 | # callback 386 | if frame_params["callback"] is not None: 387 | callback_camera = frame_params["callback"]( 388 | self.scene, 389 | frame_number, 390 | self.nframes, 391 | **frame_params["kwargs"], 392 | ) 393 | else: 394 | callback_camera = None 395 | 396 | # see if callback returned a camera 397 | camera = callback_camera or frame_params["camera"] 398 | 399 | # render 400 | self.scene.render( 401 | camera=camera.copy(), 402 | zoom=frame_params["zoom"], 403 | interactive=False, 404 | resetcam=resetcam, 405 | ) 406 | 407 | def _interpolate_cameras(self, cam1, cam2): 408 | """ 409 | Interpolate the parameters of two cameras 410 | """ 411 | if cam1 is None: 412 | return cam2 413 | elif cam2 is None: 414 | return cam1 415 | 416 | interpolated = {} 417 | for k, v1 in cam1.items(): 418 | try: 419 | interpolated[k] = self._interpolate_values(v1, cam2[k]) 420 | except KeyError: # pragma: no cover 421 | raise ValueError( 422 | "Cameras to interpolate dont have the same set of parameters" 423 | ) 424 | return interpolated 425 | 426 | def _interpolate_values(self, v1, v2): 427 | """ 428 | Interpolate two values 429 | """ 430 | if v1 is None: 431 | return v2 432 | elif v2 is None: 433 | return v1 434 | 435 | return self.segment_fact * np.array(v1) + ( 436 | 1 - self.segment_fact 437 | ) * np.array(v2) 438 | -------------------------------------------------------------------------------- /examples/__init__.py: -------------------------------------------------------------------------------- 1 | from 
from examples import (
    add_cells,
    add_cylinder,
    add_labels,
    add_mesh_from_file,
    animation,
    brain_regions,
    brainglobe_atlases,
    cell_density,
    custom_camera,
    gene_expression,
    line,
    neurons,
    probe_tracks,
    ruler,
    screenshot,
    settings,
    slice,
    # streamlines,
    user_volumetric_data,
    video,
    volumetric_data,
    web_export,
)

# --------------------------------------------------------------------------- #
# examples/add_cells.py
# --------------------------------------------------------------------------- #

import random
from pathlib import Path

import numpy as np
from myterial import orange
from rich import print

from brainrender import Scene
from brainrender.actors import Points

print(f"[{orange}]Running example: {Path(__file__).name}")


def get_n_random_points_in_region(region, N):
    """
    Gets N random points inside (or on the surface) of a mesh
    """
    # sample candidates uniformly in the region's bounding box, then
    # keep only those falling inside the mesh itself
    region_bounds = region.mesh.bounds()
    X = np.random.randint(region_bounds[0], region_bounds[1], size=10000)
    Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000)
    Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000)
    pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]

    # NOTE(review): `.coordinates` here vs `.vertices` in cell_density.py —
    # these belong to different vedo API generations; confirm against the
    # pinned vedo version
    ipts = region.mesh.inside_points(pts).coordinates
    return np.vstack(random.choices(ipts, k=N))


scene = Scene(title="Labelled cells")

# Get a numpy array with (fake) coordinates of some labelled cells
mos = scene.add_brain_region("MOs", alpha=0.15)
coordinates = get_n_random_points_in_region(mos, 2000)

# Add to scene
scene.add(Points(coordinates, name="CELLS", colors="steelblue"))

# render
scene.content
scene.render()

# --------------------------------------------------------------------------- #
# examples/add_cylinder.py
# --------------------------------------------------------------------------- #

"""
This example shows how to add a cylinder actor to a scene (e.g.
to represent the location of an implanted optic canula)
"""

from brainrender import Scene, settings
from brainrender.actors import Cylinder

settings.SHOW_AXES = False
settings.WHOLE_SCREEN = False


from pathlib import Path

from myterial import orange
from rich import print

print(f"[{orange}]Running example: {Path(__file__).name}")

scene = Scene(inset=False, title="optic canula")

th = scene.add_brain_region(
    "TH",
    alpha=0.4,
)

# create and add a cylinder actor
actor = Cylinder(
    th,  # center the cylinder at the center of mass of th
    scene.root,  # the cylinder actor needs information about the root mesh
)

scene.add(actor)
scene.render(zoom=1.6)

# --------------------------------------------------------------------------- #
# examples/add_labels.py
# --------------------------------------------------------------------------- #

"""
This example shows how to add a label to a rendered actor
"""

from pathlib import Path

from myterial import orange
from rich import print

from brainrender import Scene

print(f"[{orange}]Running example: {Path(__file__).name}")

# create a scene and add brain regions
scene = Scene()
th, mos = scene.add_brain_region("TH", "MOs")
scene.add_label(th, "TH")
scene.add_label(mos, "My region")

# render
scene.render()

# --------------------------------------------------------------------------- #
# examples/add_mesh_from_file.py
# --------------------------------------------------------------------------- #

from pathlib import Path

from myterial import orange
from rich import print

from brainrender import Scene

obj_file = Path(__file__).parent.parent / "resources" / "CC_134_1_ch1inj.obj"

print(f"[{orange}]Running example: {Path(__file__).name}")

# Create a brainrender scene
scene = Scene(title="Injection in SCm")

# Add brain SCm
scene.add_brain_region("SCm", alpha=0.2)

# Add from file
scene.add(obj_file, color="tomato")

# Render!
scene.render()

# --------------------------------------------------------------------------- #
# examples/animation.py
# --------------------------------------------------------------------------- #

"""
This example shows how to create an animated video by specifying
the camera parameters at a number of key frames
"""

from pathlib import Path

from myterial import orange
from rich import print

from brainrender import Animation, Scene

print(f"[{orange}]Running example: {Path(__file__).name}")


# Create a brainrender scene
scene = Scene(title="brain regions", inset=False)

# Add brain regions
scene.add_brain_region("TH")

anim = Animation(scene, Path.cwd(), "brainrender_animation")

# Specify camera position and zoom at some key frames
# each key frame defines the scene's state after n seconds have passed
anim.add_keyframe(0, camera="top", zoom=1)
anim.add_keyframe(1.5, camera="sagittal", zoom=0.95)
anim.add_keyframe(3, camera="frontal", zoom=1)
anim.add_keyframe(4, camera="frontal", zoom=1.2)

# Make videos
anim.make_video(duration=5, fps=15, resetcam=True)

# --------------------------------------------------------------------------- #
# examples/animation_callback.py
# --------------------------------------------------------------------------- #

from brainrender import Animation, Scene, settings
from pathlib import Path

settings.SHOW_AXES = False

scene = Scene(atlas_name="allen_mouse_25um")
| regions = ( 9 | "CTX", 10 | "HPF", 11 | "STR", 12 | "CB", 13 | "MB", 14 | "TH", 15 | "HY", 16 | ) 17 | scene.add_brain_region(*regions, silhouette=True) 18 | 19 | 20 | def slc(scene, framen, totframes): 21 | # Get new slicing plane 22 | fact = framen / totframes 23 | shape_um = scene.atlas.shape_um 24 | # Multiply by fact to move the plane, add buffer to go past the brain 25 | point = [(shape_um[0] + 500) * fact, shape_um[1] // 2, shape_um[2] // 2] 26 | plane = scene.atlas.get_plane(pos=point, norm=(1, 0, 0)) 27 | 28 | scene.slice(plane) 29 | 30 | 31 | anim = Animation( 32 | scene, Path.cwd(), "brainrender_animation_callback", size=None 33 | ) 34 | 35 | # Specify camera pos and zoom at some key frames` 36 | anim.add_keyframe(0, camera="frontal", zoom=1, callback=slc) 37 | 38 | # Make videos 39 | anim.make_video(duration=5, fps=10, fix_camera=True) 40 | -------------------------------------------------------------------------------- /examples/brain_regions.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | 8 | print(f"[{orange}]Running example: {Path(__file__).name}") 9 | 10 | # Create a brainrender scene 11 | scene = Scene(title="brain regions", atlas_name="allen_human_500um") 12 | 13 | # Add brain regions 14 | scene.add_brain_region("FGM") 15 | 16 | # You can specify color, transparency... 17 | 18 | # Render! 
19 | scene.render() 20 | -------------------------------------------------------------------------------- /examples/brainglobe_atlases.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | 8 | print(f"[{orange}]Running example: {Path(__file__).name}") 9 | 10 | # Create a brainrender scene using the zebrafish atlas 11 | scene = Scene(atlas_name="mpin_zfish_1um", title="zebrafish") 12 | # Render! 13 | scene.render() 14 | -------------------------------------------------------------------------------- /examples/brainmapper.py: -------------------------------------------------------------------------------- 1 | """ 2 | Visualise the output from brainmapper. 3 | Cells transformed to atlas space can be found at 4 | brainmapper_output/points/points.npy or exported by the brainmapper 5 | napari widget 6 | 7 | For more details on brainmapper, please see: 8 | - https://brainglobe.info/documentation/brainglobe-workflows/brainmapper/index.html 9 | - https://brainglobe.info/documentation/brainglobe-utils/transform-widget.html 10 | """ 11 | 12 | from pathlib import Path 13 | 14 | from myterial import orange 15 | from rich import print 16 | 17 | from brainrender.scene import Scene 18 | from brainrender.actors import Points 19 | from brainrender import settings 20 | 21 | settings.SHADER_STYLE = "plastic" 22 | 23 | cells_path = Path(__file__).parent.parent / "resources" / "points.npy" 24 | 25 | print(f"[{orange}]Running example: {Path(__file__).name}") 26 | 27 | # Create a brainrender scene 28 | scene = Scene(title="brainmapper cells") 29 | 30 | # Create points actor 31 | cells = Points(cells_path, radius=45, colors="palegoldenrod", alpha=0.8) 32 | 33 | # Visualise injection site 34 | scene.add_brain_region("VISp", color="mediumseagreen", alpha=0.6) 35 | 36 | # Add cells 37 | scene.add(cells) 38 | 39 | scene.render() 40 
| -------------------------------------------------------------------------------- /examples/brainmapper_regions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Visualise the output from brainmapper in some specific brain regions. 3 | Cells transformed to atlas space can be found at 4 | brainmapper_output/points/points.npy or exported by the brainmapper napari 5 | widget 6 | 7 | For more details on brainmapper, please see: 8 | - https://brainglobe.info/documentation/brainglobe-workflows/brainmapper/index.html 9 | - https://brainglobe.info/documentation/brainglobe-utils/transform-widget.html 10 | """ 11 | 12 | from pathlib import Path 13 | 14 | from myterial import orange 15 | from rich import print 16 | 17 | from brainrender.scene import Scene 18 | from brainrender.actors import Points 19 | from brainrender import settings 20 | 21 | import numpy as np 22 | 23 | settings.SHADER_STYLE = "plastic" 24 | settings.SHOW_AXES = False 25 | 26 | cells_path = Path(__file__).parent.parent / "resources" / "points.npy" 27 | 28 | # Define regions of interest (easier to define all at the 29 | # terminal/finest level of the hierarchy) 30 | regions = ["VISp1", "VISp4", "VISp5"] 31 | 32 | print(f"[{orange}]Running example: {Path(__file__).name}") 33 | 34 | 35 | def get_cells_in_regions(scene, cells_path, regions): 36 | cells = np.load(cells_path) 37 | new_cells = [] 38 | 39 | for cell in cells: 40 | if ( 41 | scene.atlas.structure_from_coords( 42 | cell, as_acronym=True, microns=True 43 | ) 44 | in regions 45 | ): 46 | if cell[0] > 0: 47 | new_cells.append(cell) 48 | 49 | new_cells = np.asarray(new_cells) 50 | 51 | return new_cells 52 | 53 | 54 | # Create a brainrender scene 55 | scene = Scene(title=f"brainmapper cells in {regions}", inset=False) 56 | 57 | cells_points = get_cells_in_regions(scene, cells_path, regions) 58 | 59 | # Create points actor 60 | cells = Points(cells_points, radius=45, colors="palegoldenrod", alpha=0.8) 61 | 
62 | # Add specific regions 63 | for region in regions: 64 | scene.add_brain_region(region, color="mediumseagreen", alpha=0.2) 65 | 66 | # Add cells 67 | scene.add(cells) 68 | 69 | scene.render() 70 | -------------------------------------------------------------------------------- /examples/cell_density.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example shows how to use a PointsDensity 3 | actor to show the density of labelled cells 4 | """ 5 | 6 | import random 7 | from pathlib import Path 8 | 9 | import numpy as np 10 | from myterial import orange 11 | from rich import print 12 | 13 | from brainrender import Scene 14 | from brainrender.actors import Points, PointsDensity 15 | 16 | print(f"[{orange}]Running example: {Path(__file__).name}") 17 | 18 | 19 | def get_n_random_points_in_region(region, N): 20 | """ 21 | Gets N random points inside (or on the surface) of a mes 22 | """ 23 | 24 | region_bounds = region.mesh.bounds() 25 | X = np.random.randint(region_bounds[0], region_bounds[1], size=10000) 26 | Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000) 27 | Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000) 28 | pts = [[x, y, z] for x, y, z in zip(X, Y, Z)] 29 | 30 | ipts = region.mesh.inside_points(pts).vertices 31 | return np.vstack(random.choices(ipts, k=N)) 32 | 33 | 34 | scene = Scene(title="Labelled cells") 35 | 36 | # Get a numpy array with (fake) coordinates of some labelled cells 37 | mos = scene.add_brain_region("MOs", alpha=0.0) 38 | coordinates = get_n_random_points_in_region(mos, 2000) 39 | 40 | # Add to scene 41 | scene.add(Points(coordinates, name="CELLS", colors="salmon")) 42 | scene.add(PointsDensity(coordinates)) 43 | 44 | # render 45 | scene.render() 46 | -------------------------------------------------------------------------------- /examples/custom_camera.py: -------------------------------------------------------------------------------- 1 | """ 
2 | This example shows how to create a scene and render it with a custom camera. 3 | This is done by specifying a dictionary of camera parameters, to get the 4 | parameters you need for your camera: 5 | - render an interactive scene with any camera 6 | - move the camera to where you need it to be 7 | - press 'c' 8 | - this will print the current camera parameters to your console. Copy paste the 9 | parameters in your script 10 | """ 11 | 12 | from pathlib import Path 13 | 14 | from myterial import orange 15 | from rich import print 16 | 17 | from brainrender import Scene 18 | 19 | print(f"[{orange}]Running example: {Path(__file__).name}") 20 | 21 | custom_camera = { 22 | "pos": (41381, -16104, 27222), 23 | "viewup": (0, -1, 0), 24 | "clipping_range": (31983, 76783), 25 | } 26 | 27 | 28 | # Create a brainrender scene 29 | scene = Scene(title="Custom camera") 30 | 31 | # Add brain regions 32 | scene.add_brain_region("CB", alpha=0.8) 33 | 34 | # Render! 35 | scene.render(camera=custom_camera, zoom=1.5) 36 | -------------------------------------------------------------------------------- /examples/gene_expression.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene, settings 7 | from brainrender.atlas_specific import GeneExpressionAPI 8 | 9 | print(f"[{orange}]Running example: {Path(__file__).name}") 10 | 11 | settings.SHOW_AXES = False 12 | 13 | scene = Scene(inset=False) 14 | 15 | gene = "Gpr161" 16 | geapi = GeneExpressionAPI() 17 | expids = geapi.get_gene_experiments(gene) 18 | data = geapi.get_gene_data(gene, expids[1]) 19 | 20 | gene_actor = geapi.griddata_to_volume(data, min_quantile=99, cmap="inferno") 21 | act = scene.add(gene_actor) 22 | 23 | ca1 = scene.add_brain_region("CA1", alpha=0.2, color="skyblue") 24 | ca3 = scene.add_brain_region("CA3", alpha=0.5, color="salmon") 25 | 26 | 27 | 
scene.add_silhouette(act) 28 | 29 | scene.render(zoom=1.6) 30 | -------------------------------------------------------------------------------- /examples/line.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import vedo 3 | 4 | vedo.settings.default_backend = "vtk" 5 | 6 | from brainrender import Scene 7 | from brainrender.actors import Points, Line 8 | 9 | # Display the Allen Brain mouse atlas. 10 | scene = Scene(atlas_name="allen_mouse_25um") 11 | 12 | # Highlight the cerebral cortex. 13 | scene.add_brain_region("CTX", alpha=0.2, color="green") 14 | 15 | # Add two points identifying the positions of two cortical neurons. 16 | point_coordinates = np.array([[4575, 5050, 9750], [4275, 2775, 6100]]) 17 | 18 | points = Points(point_coordinates, radius=100, colors="blue") 19 | scene.add(points) 20 | 21 | # Display the shortest path within cortex between the two points. 22 | # The path was pre-calculated with https://github.com/seung-lab/dijkstra3d/. 23 | path_coordinates = np.array( 24 | [ 25 | [4575, 5050, 9750], 26 | [4575, 4800, 9500], 27 | [4575, 4550, 9250], 28 | [4575, 4300, 9000], 29 | [4575, 4050, 8750], 30 | [4350, 3800, 8500], 31 | [4225, 3550, 8250], 32 | [4200, 3300, 8000], 33 | [4200, 3100, 7750], 34 | [4200, 2950, 7500], 35 | [4200, 2800, 7250], 36 | [4200, 2700, 7000], 37 | [4200, 2650, 6750], 38 | [4200, 2650, 6500], 39 | [4200, 2650, 6250], 40 | [4275, 2775, 6100], 41 | ] 42 | ) 43 | 44 | line = Line(path_coordinates, linewidth=3, color="black") 45 | scene.add(line) 46 | 47 | # Render the scene and display the figure. 
48 | scene.render() 49 | -------------------------------------------------------------------------------- /examples/mirror_actors.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | from myterial import orange 5 | from rich import print 6 | 7 | from brainrender import Scene 8 | from brainrender._io import load_mesh_from_file 9 | from brainrender.actor import Actor 10 | from brainrender.actors import Neuron, Points 11 | 12 | neuron_file = Path(__file__).parent.parent / "resources" / "neuron1.swc" 13 | obj_file = Path(__file__).parent.parent / "resources" / "CC_134_1_ch1inj.obj" 14 | probe_striatum = ( 15 | Path(__file__).parent.parent / "resources" / "probe_1_striatum.npy" 16 | ) 17 | 18 | print(f"[{orange}]Running example: {Path(__file__).name}") 19 | 20 | # Create a brainrender scene 21 | scene = Scene(title="mirrored actors") 22 | 23 | # Add the neuron 24 | neuron_original = Neuron(neuron_file) 25 | scene.add(neuron_original) 26 | 27 | # Add a mesh from a file 28 | scene.add(obj_file, color="tomato") 29 | 30 | # Add a probe from a file 31 | scene.add( 32 | Points( 33 | np.load(probe_striatum), 34 | name="probe_1", 35 | colors="darkred", 36 | radius=50, 37 | ), 38 | color="darkred", 39 | radius=50, 40 | ) 41 | 42 | # Add mirrored objects 43 | axis = "frontal" 44 | atlas_center = scene.root.center 45 | 46 | neuron_mirrored = Neuron(neuron_file) 47 | neuron_mirrored.mirror(axis, origin=atlas_center, atlas=scene.atlas) 48 | scene.add(neuron_mirrored) 49 | 50 | mesh_mirrored = Actor( 51 | load_mesh_from_file(obj_file, color="tomato"), 52 | name=obj_file.name, 53 | br_class="from file", 54 | ) 55 | mesh_mirrored.mirror(axis, origin=atlas_center, atlas=scene.atlas) 56 | scene.add(mesh_mirrored) 57 | 58 | mirrored_probe = Points( 59 | np.load(probe_striatum), 60 | name="probe_1", 61 | colors="darkred", 62 | radius=50, 63 | ) 64 | mirrored_probe.mirror(axis, origin=atlas_center, 
atlas=scene.atlas) 65 | scene.add(mirrored_probe) 66 | 67 | # Render! 68 | scene.render() 69 | -------------------------------------------------------------------------------- /examples/neurons.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import requests.exceptions 4 | from morphapi.api.mouselight import MouseLightAPI 5 | from myterial import orange 6 | from rich import print 7 | from urllib3.exceptions import NewConnectionError, MaxRetryError 8 | 9 | from brainrender import Scene 10 | from brainrender.actors import Neuron, make_neurons 11 | 12 | neuron_file = Path(__file__).parent.parent / "resources" / "neuron1.swc" 13 | 14 | print(f"[{orange}]Running example: {Path(__file__).name}") 15 | 16 | # Create a brainrender scene 17 | scene = Scene(title="neurons") 18 | 19 | # Add a neuron from file 20 | scene.add(Neuron(neuron_file)) 21 | 22 | # Download neurons data with morphapi 23 | try: 24 | mlapi = MouseLightAPI() 25 | neurons_metadata = mlapi.fetch_neurons_metadata( 26 | filterby="soma", filter_regions=["MOs"] 27 | ) 28 | 29 | to_add = [neurons_metadata[47], neurons_metadata[51]] 30 | neurons = mlapi.download_neurons(to_add) 31 | neurons = scene.add(*make_neurons(*neurons, neurite_radius=12)) 32 | except ( 33 | NewConnectionError, 34 | MaxRetryError, 35 | requests.exceptions.ConnectionError, 36 | requests.exceptions.ReadTimeout, 37 | ) as e: 38 | print("Failed to download neurons data from neuromorpho.org.") 39 | 40 | # Render! 41 | scene.render() 42 | -------------------------------------------------------------------------------- /examples/notebook_workflow.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Using Notebooks\n", 8 | "\n", 9 | "`brainrender` can be used with Jupyter notebooks in two ways:\n", 10 | "\n", 11 | "1. 
you can **embed** a window with your rendered scene\n", 12 | "2. you can have your scene be rendered in a **pop-up** window.\n", 13 | "\n", 14 | "\n", 15 | "## Rendering your scene in a separate window\n", 16 | "\n", 17 | "If you want your scene to be rendered in a new window, then set this option before you create \n", 18 | "your `Scene`.\n", 19 | "\n", 20 | "```python\n", 21 | "import vedo\n", 22 | "vedo.settings.default_backend= 'vtk'\n", 23 | "```\n", 24 | "After this everything will work exactly the same as usual, and you will have access to all of brainrender's features. \n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "### To visualise primary visual cortex in the Allen Adult Mouse Brain Atlas:" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 1, 37 | "metadata": {}, 38 | "source": [ 39 | "import vedo\n", 40 | "vedo.settings.default_backend= 'vtk'\n", 41 | "\n", 42 | "from brainrender import Scene\n", 43 | "popup_scene = Scene(atlas_name='allen_mouse_50um', title='popup')\n", 44 | "\n", 45 | "popup_scene.add_brain_region('VISp')\n", 46 | "\n", 47 | "popup_scene.render() # press 'Esc' to close" 48 | ], 49 | "outputs": [] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "## Embedding the scene\n", 56 | "\n", 57 | "When embedding renderings in Jupyter Notebook not all of `brainrender`'s functionality will work! \n", 58 | "If you want to support all of `brainrender`'s features you should **not embed** renderings in the notebooks.\n", 59 | "\n", 60 | "Note that this is due to the backend (`k3d`) used to embed the renderings, and not because of `brainrender`.\n", 61 | "\n", 62 | "If you still need to embed your `Scene` then `brainrender` works slightly differently." 
63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "### To visualise the tectum in the larval zebrafish atlas:" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": {}, 76 | "source": [ 77 | "# Set the backend\n", 78 | "import vedo\n", 79 | "vedo.settings.default_backend= 'k3d'\n", 80 | "\n", 81 | "# Create a brainrender scene\n", 82 | "from brainrender import Scene\n", 83 | "scene = Scene(atlas_name='mpin_zfish_1um', title='Embedded') # note the title will not actually display\n", 84 | "scene.add_brain_region('tectum')\n", 85 | "\n", 86 | "# Make sure it gets embedded in the window\n", 87 | "scene.jupyter = True\n", 88 | "\n", 89 | "# scene.render now will prepare the scene for rendering, but it won't render anything yet\n", 90 | "scene.render()\n", 91 | "\n", 92 | "# To display the scene we use `vedo`'s `show` method to show the scene's actors\n", 93 | "from vedo import Plotter # <- this will be used to render an embedded scene \n", 94 | "plt = Plotter()\n", 95 | "plt.show(*scene.renderables) # same as vedo.show(*scene.renderables)" 96 | ], 97 | "outputs": [] 98 | } 99 | ], 100 | "metadata": { 101 | "file_extension": ".py", 102 | "kernelspec": { 103 | "display_name": "Python 3 (ipykernel)", 104 | "language": "python", 105 | "name": "python3" 106 | }, 107 | "language_info": { 108 | "codemirror_mode": { 109 | "name": "ipython", 110 | "version": 3 111 | }, 112 | "file_extension": ".py", 113 | "mimetype": "text/x-python", 114 | "name": "python", 115 | "nbconvert_exporter": "python", 116 | "pygments_lexer": "ipython3", 117 | "version": "3.10.13" 118 | }, 119 | "mimetype": "text/x-python", 120 | "name": "python", 121 | "npconvert_exporter": "python", 122 | "pygments_lexer": "ipython3", 123 | "version": 3 124 | }, 125 | "nbformat": 4, 126 | "nbformat_minor": 2 127 | } 128 | -------------------------------------------------------------------------------- /examples/probe_tracks.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | This example visualizes `.npy` files exported from brainglobe-segmentation 3 | """ 4 | 5 | from pathlib import Path 6 | import numpy as np 7 | 8 | from brainrender import Scene 9 | from brainrender.actors import Points 10 | 11 | resource_path = Path(__file__).parent.parent / "resources" 12 | 13 | scene = Scene(title="Silicon Probe Visualization") 14 | 15 | # Visualise the probe target regions 16 | cp = scene.add_brain_region("CP", alpha=0.15) 17 | rsp = scene.add_brain_region("RSP", alpha=0.15) 18 | 19 | # Add probes to the scene. 20 | # Each .npy file should contain a numpy array with the coordinates of each 21 | # part of the probe. 22 | scene.add( 23 | Points( 24 | np.load(resource_path / "probe_1_striatum.npy"), 25 | name="probe_1", 26 | colors="darkred", 27 | radius=50, 28 | ) 29 | ) 30 | scene.add( 31 | Points( 32 | np.load(resource_path / "probe_2_RSP.npy"), 33 | name="probe_2", 34 | colors="darkred", 35 | radius=50, 36 | ) 37 | ) 38 | 39 | # render 40 | scene.render() 41 | -------------------------------------------------------------------------------- /examples/regions_single_hemisphere.py: -------------------------------------------------------------------------------- 1 | """ 2 | Example to visualise brain regions in a single hemisphere 3 | """ 4 | 5 | from pathlib import Path 6 | 7 | from myterial import orange 8 | from rich import print 9 | 10 | from brainrender import Scene 11 | 12 | print(f"[{orange}]Running example: {Path(__file__).name}") 13 | 14 | # Create a brainrender scene 15 | scene = Scene(title="Left hemisphere", atlas_name="allen_mouse_25um") 16 | 17 | # Add brain regions 18 | scene.add_brain_region("CP", "VISp", hemisphere="left") 19 | 20 | # Render! 
21 | scene.render() 22 | -------------------------------------------------------------------------------- /examples/ruler.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | from brainrender.actors import ruler, ruler_from_surface 8 | 9 | print(f"[{orange}]Running example: {Path(__file__).name}") 10 | 11 | scene = Scene(title="rulers") 12 | 13 | th, mos = scene.add_brain_region("TH", "MOs", alpha=0.3) 14 | 15 | # Get a ruler between the two regions 16 | p1 = th.center_of_mass() 17 | p2 = mos.center_of_mass() 18 | 19 | rul1 = ruler(p1, p2, unit_scale=0.01, units="mm") 20 | 21 | # Get a ruler between thalamus and brian surface 22 | rul2 = ruler_from_surface(p1, scene.root, unit_scale=0.01, units="mm") 23 | 24 | scene.add(rul1, rul2) 25 | scene.render() 26 | -------------------------------------------------------------------------------- /examples/screenshot.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange, salmon 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | 8 | print(f"[{orange}]Running example: {Path(__file__).name}") 9 | 10 | # Explicitly initialise a scene with the screenshot folder set 11 | # If the screenshot folder is not set, by default screenshots 12 | # Will save to the current working directory 13 | screenshot_folder = "." 14 | scene = Scene( 15 | title=f"Screenshots will be saved to {screenshot_folder}", 16 | inset=True, 17 | screenshots_folder=screenshot_folder, 18 | ) 19 | 20 | # Add some actors to the scene 21 | scene.add_brain_region("TH", alpha=0.2, silhouette=True, color=salmon) 22 | scene.add_brain_region("VISp", alpha=0.4, silhouette=False, color=[50, 2, 155]) 23 | 24 | scene.slice("sagittal") 25 | 26 | # Set up a camera. Can use string, such as "sagittal". 
27 | # During render runtime, press "c" to print the current camera parameters. 28 | camera = { 29 | "pos": (8777, 1878, -44032), 30 | "viewup": (0, -1, 0), 31 | "clipping_range": (24852, 54844), 32 | "focal_point": (7718, 4290, -3507), 33 | "distance": 40610, 34 | } 35 | zoom = 2.5 36 | 37 | # If you only want a screenshot and don't want to move the camera 38 | # around the scene, set interactive to False. 39 | scene.render(interactive=False, camera=camera, zoom=zoom) 40 | 41 | # Set the scale, which will be used for screenshot resolution. 42 | # Any value > 1 increases resolution, the default is in brainrender.settings. 43 | # It is easiest integer scales (non-integer can cause crashes). 44 | scale = 2 45 | 46 | # Take a screenshot - passing no name uses current time 47 | # Screenshots can be also created during runtime by pressing "s" 48 | scene.screenshot(name="example_brainrender_shot.pdf", scale=scale) 49 | 50 | scene.close() 51 | -------------------------------------------------------------------------------- /examples/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Brainrender provides several default settings (e.g. for shader style) 3 | which can be changed to personalize your rendering. 
4 | This example shows you how 5 | """ 6 | 7 | from pathlib import Path 8 | 9 | from myterial import orange 10 | from rich import print 11 | 12 | import brainrender 13 | from brainrender import Scene 14 | 15 | print(f"[{orange}]Running example: {Path(__file__).name}") 16 | 17 | brainrender.settings.BACKGROUND_COLOR = [ 18 | 0.22, 19 | 0.22, 20 | 0.22, 21 | ] # change rendering background color 22 | brainrender.settings.WHOLE_SCREEN = ( 23 | False # make the rendering window be smaller 24 | ) 25 | brainrender.settings.SHOW_AXES = False # turn off the axes display 26 | 27 | # make scenes with different shader styles 28 | for shader in ("plastic", "cartoon"): 29 | brainrender.settings.SHADER_STYLE = shader 30 | scene = Scene(title=shader) 31 | scene.render() 32 | 33 | brainrender.settings.BACKGROUND_COLOR = "white" # reset background color 34 | brainrender.settings.SHOW_AXES = True # reset axes display 35 | -------------------------------------------------------------------------------- /examples/slice.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | 8 | print(f"[{orange}]Running example: {Path(__file__).name}") 9 | 10 | 11 | # Create a brainrender scene 12 | scene = Scene(title="slice") 13 | 14 | # Add brain regions 15 | th = scene.add_brain_region("TH") 16 | 17 | # You can specify color, transparency... 18 | mos, ca1 = scene.add_brain_region("MOs", "CA1", alpha=0.2, color="green") 19 | 20 | # Slice actors with frontal plane 21 | scene.slice("frontal", actors=[th]) 22 | 23 | # Slice with a custom plane 24 | plane = scene.atlas.get_plane(pos=mos.center_of_mass(), norm=(1, 1, 0)) 25 | scene.slice(plane, actors=[mos, ca1]) 26 | 27 | # Render! 
28 | scene.render() 29 | -------------------------------------------------------------------------------- /examples/streamlines.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | from brainrender.actors.streamlines import make_streamlines 8 | from brainrender.atlas_specific import get_streamlines_for_region 9 | 10 | print(f"[{orange}]Running example: {Path(__file__).name}") 11 | 12 | # Create a brainrender scene 13 | scene = Scene() 14 | 15 | # Add brain regions 16 | scene.add_brain_region("TH") 17 | 18 | # Get stramlines data and add 19 | streams = get_streamlines_for_region("TH")[:2] 20 | scene.add(*make_streamlines(*streams, color="salmon", alpha=0.5)) 21 | 22 | # Render! 23 | scene.render() 24 | -------------------------------------------------------------------------------- /examples/user_volumetric_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This example shows how to render VOLUMETRIC data in brainrender. 
4 | It uses data downloaded from: https://fishatlas.neuro.mpg.de/lines/ 5 | showing gene expression for this transgenic line (brn3c:GPF): https://zfin.org/ZDB-ALT-050728-2 6 | 7 | These data are a 3D image with orientation different from the axes system used by 8 | brainrender, so it has to be loaded and transposed to the correct orientation 9 | 10 | This examples shows how to: 11 | - load volumetric data from a TIFF file 12 | - transpose the data with BrainGlobe Space to re-orient it 13 | - extract a mesh from the volumetric data using vedo 14 | - render the data 15 | 16 | """ 17 | 18 | from pathlib import Path 19 | import pooch 20 | 21 | from brainglobe_space import AnatomicalSpace 22 | from brainglobe_utils.IO.image.load import load_any 23 | from myterial import blue_grey, orange 24 | from rich import print 25 | from vedo import Volume as VedoVolume 26 | 27 | from brainrender import Scene 28 | 29 | print(f"[{orange}]Running example: {Path(__file__).name}") 30 | 31 | download_path = Path.home() / ".brainglobe" / "brainrender" / "example-data" 32 | filename = "T_AVG_s356tTg.tif" 33 | scene = Scene(atlas_name="mpin_zfish_1um") 34 | 35 | # for some reason the list of returned by pooch does not seem to be 36 | # in the same order every time 37 | _ = pooch.retrieve( 38 | url="https://api.mapzebrain.org/media/Lines/brn3cGFP/average_data/T_AVG_s356tTg.zip", 39 | known_hash="54b59146ba08b4d7eea64456bcd67741db4b5395235290044545263f61453a61", 40 | path=download_path, 41 | progressbar=True, 42 | processor=pooch.Unzip(extract_dir="."), 43 | ) 44 | 45 | datafile = download_path / filename 46 | 47 | 48 | # 1. load the data 49 | print("Loading data") 50 | data = load_any(datafile) 51 | 52 | # 2. 
align the data to the scene's atlas' axes
custom function 33 | scene = Scene("my video2") 34 | scene.add_brain_region("TH") 35 | vm = VideoMaker(scene, ".", "vid2", make_frame_func=make_frame) 36 | vm.make_video(duration=1, fps=15) 37 | -------------------------------------------------------------------------------- /examples/volumetric_data.py: -------------------------------------------------------------------------------- 1 | """ 2 | This example shows how to render volumetric (i.e. organized in voxel) 3 | data in brainrender. The data used are is the localized expression of 4 | 'Gpr161' from the Allen Atlas database, downloaded with brainrender 5 | and saved to a numpy file 6 | """ 7 | 8 | import numpy as np 9 | 10 | from brainrender import Scene, settings 11 | from brainrender.actors import Volume 12 | 13 | from pathlib import Path 14 | 15 | from myterial import orange 16 | from rich import print 17 | 18 | settings.SHOW_AXES = False 19 | volume_file = Path(__file__).parent.parent / "resources" / "volume.npy" 20 | 21 | 22 | print(f"[{orange}]Running example: {Path(__file__).name}") 23 | 24 | scene = Scene(inset=False) 25 | 26 | data = np.load(volume_file) 27 | print(data.shape) 28 | 29 | # make a volume actor and add 30 | actor = Volume( 31 | data, 32 | voxel_size=200, # size of a voxel's edge in microns 33 | as_surface=False, # if true a surface mesh is rendered instead of a volume 34 | c="Reds", # use matplotlib colormaps to color the volume 35 | ) 36 | scene.add(actor) 37 | scene.render(zoom=1.6) 38 | -------------------------------------------------------------------------------- /examples/web_export.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from myterial import orange 4 | from rich import print 5 | 6 | from brainrender import Scene 7 | 8 | print(f"[{orange}]Running example: {Path(__file__).name}") 9 | 10 | # Create a brainrender scene 11 | scene = Scene(title="brainrender web export") 12 | 13 | # Add brain regions 14 | 
scene.add_brain_region( 15 | "MOs", "CA1", alpha=0.2, color="green", hemisphere="right" 16 | ) 17 | 18 | # Render! 19 | scene.render() 20 | 21 | # Export to web 22 | scene.export("brain_regions.html") 23 | -------------------------------------------------------------------------------- /imgs/cellfinder_cells_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/cellfinder_cells_3.png -------------------------------------------------------------------------------- /imgs/gene_expression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/gene_expression.png -------------------------------------------------------------------------------- /imgs/human_regions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/human_regions.png -------------------------------------------------------------------------------- /imgs/injection_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/injection_2.png -------------------------------------------------------------------------------- /imgs/mouse_neurons_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/mouse_neurons_2.png -------------------------------------------------------------------------------- /imgs/probes.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/probes.png -------------------------------------------------------------------------------- /imgs/three_atlases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/three_atlases.png -------------------------------------------------------------------------------- /imgs/zfish_functional_clusters_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/zfish_functional_clusters_2.png -------------------------------------------------------------------------------- /imgs/zfish_gene_expression.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/zfish_gene_expression.png -------------------------------------------------------------------------------- /imgs/zfish_neurons.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/zfish_neurons.png -------------------------------------------------------------------------------- /imgs/zfish_regions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/imgs/zfish_regions.png -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "brainrender" 3 | authors = [ 4 | { name = "Federico Claudi, Adam Tyson, Luigi Petrucco", email = 
"hello@brainglobe.info" }, 5 | ] 6 | description = "Visualisation and exploration of brain atlases and other anatomical data" 7 | readme = "README.md" 8 | requires-python = ">=3.11" 9 | dynamic = ["version"] 10 | 11 | dependencies = [ 12 | "brainglobe-atlasapi>=2.0.1", 13 | "brainglobe-space>=1.0.0", 14 | "brainglobe-utils>=0.5.0", 15 | "h5py", 16 | "k3d", 17 | "loguru", 18 | "morphapi>=0.2.1", 19 | "msgpack", 20 | "myterial", 21 | "numpy", 22 | "pandas", 23 | "pooch", 24 | "pyinspect>=0.0.8", 25 | "pyyaml>=5.3", 26 | "requests", 27 | "tables", 28 | "vedo>=2025.5.3", 29 | "vtk" 30 | ] 31 | 32 | license = { text = "BSD-3-Clause" } 33 | classifiers = [ 34 | "Development Status :: 3 - Alpha", 35 | "Programming Language :: Python", 36 | "Programming Language :: Python :: 3", 37 | "Programming Language :: Python :: 3.11", 38 | "Programming Language :: Python :: 3.12", 39 | "Programming Language :: Python :: 3.13", 40 | "Operating System :: OS Independent", 41 | "License :: OSI Approved :: BSD License", 42 | "Intended Audience :: Developers", 43 | "Intended Audience :: Science/Research", 44 | ] 45 | 46 | [project.urls] 47 | Homepage = "https://brainglobe.info/" 48 | "Source Code" = "https://github.com/brainglobe/brainrender" 49 | "Bug Tracker" = "https://github.com/brainglobe/brainrender/issues" 50 | Documentation = "https://brainglobe.info/documentation/brainrender/index.html" 51 | "User Support" = "https://forum.image.sc/tag/brainglobe" 52 | 53 | [project.optional-dependencies] 54 | dev = [ 55 | "pytest", 56 | "pytest-cov", 57 | "coverage", 58 | "tox", 59 | "black", 60 | "mypy", 61 | "pre-commit", 62 | "ruff", 63 | "setuptools_scm", 64 | ] 65 | nb = ["jupyter", "k3d"] 66 | 67 | 68 | [build-system] 69 | requires = ["setuptools>=45", "wheel", "setuptools_scm[toml]>=6.2"] 70 | build-backend = "setuptools.build_meta" 71 | 72 | [tool.setuptools] 73 | include-package-data = true 74 | 75 | [tool.setuptools.packages.find] 76 | include = ["brainrender*"] 77 | exclude = 
["tests*", "docs*", "examples*", "imgs*"] 78 | 79 | 80 | [tool.pytest.ini_options] 81 | addopts = "--cov=brainrender" 82 | markers = [ 83 | "slow: marks tests as slow (deselect with '-m \"not slow\"')", 84 | "local: marks test as local (not for CI)", 85 | ] 86 | 87 | [tool.black] 88 | target-version = ['py311','py312', 'py313'] 89 | skip-string-normalization = false 90 | line-length = 79 91 | 92 | [tool.setuptools_scm] 93 | 94 | [tool.check-manifest] 95 | ignore = [ 96 | ".yaml", 97 | "tox.ini", 98 | "tests/", 99 | "tests/test_unit/", 100 | "tests/test_integration/", 101 | "docs/", 102 | "docs/source/", 103 | ] 104 | 105 | # should revisit some of these. 106 | [tool.ruff] 107 | line-length = 79 108 | exclude = ["__init__.py", "build", ".eggs", "examples"] 109 | fix = true 110 | ignore = ["E501", "E402"] 111 | 112 | [tool.ruff.lint] 113 | select = ["I", "E", "F"] 114 | 115 | [tool.tox] 116 | legacy_tox_ini = """ 117 | [tox] 118 | envlist = py{311,312,313} 119 | isolated_build = True 120 | 121 | [gh-actions] 122 | python = 123 | 3.11: py311 124 | 3.12: py312 125 | 3.13: py313 126 | 127 | [testenv] 128 | extras = 129 | dev 130 | commands = 131 | pytest -v --color=yes --cov=brainrender --cov-report=xml 132 | passenv = 133 | CI 134 | GITHUB_ACTIONS 135 | DISPLAY 136 | XAUTHORITY 137 | NUMPY_EXPERIMENTAL_ARRAY_FUNCTION 138 | PYVISTA_OFF_SCREEN 139 | """ 140 | -------------------------------------------------------------------------------- /resources/points.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/resources/points.npy -------------------------------------------------------------------------------- /resources/probe_1_striatum.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/resources/probe_1_striatum.npy -------------------------------------------------------------------------------- /resources/probe_2_RSP.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/resources/probe_2_RSP.npy -------------------------------------------------------------------------------- /resources/random_cells.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/resources/random_cells.h5 -------------------------------------------------------------------------------- /resources/random_cells.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/resources/random_cells.npy -------------------------------------------------------------------------------- /resources/volume.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/resources/volume.npy -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | from brainrender import settings 2 | 3 | settings.INTERACTIVE = False 4 | settings.OFFSCREEN = True 5 | settings.DEFAULT_ATLAS = "allen_mouse_100um" 6 | 7 | from vedo import settings as vsettings 8 | 9 | vsettings.use_depth_peeling = False 10 | vsettings.screenshot_transparent_background = False # vedo for transparent bg 11 | vsettings.use_fxaa = True # This needs to be false for transparent bg 12 | 
-------------------------------------------------------------------------------- /tests/data/screenshot.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/tests/data/screenshot.jpg -------------------------------------------------------------------------------- /tests/data/screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brainglobe/brainrender/012eb7a10dd21c23aafc32868f9fe652cfe756b5/tests/data/screenshot.png -------------------------------------------------------------------------------- /tests/test_01_atlas_download.py: -------------------------------------------------------------------------------- 1 | from brainrender.atlas import Atlas 2 | 3 | 4 | def test_atlas_download(): 5 | """ 6 | Just downloads the test atlas to ensure 7 | the data is available in GitHub actions 8 | """ 9 | atlas = Atlas() 10 | assert atlas.atlas_name == "allen_mouse_100um" 11 | -------------------------------------------------------------------------------- /tests/test_aba_gene.py: -------------------------------------------------------------------------------- 1 | # from brainrender import Scene 2 | # from brainrender.atlas_specific import GeneExpressionAPI 3 | # from brainrender.actor import Actor 4 | # import pytest 5 | 6 | # gene = "Cacna2d1" 7 | 8 | 9 | # @pytest.fixture 10 | # def geapi(): 11 | # geapi = GeneExpressionAPI() 12 | 13 | # return geapi 14 | 15 | # @pytest.mark.xfail 16 | # def test_gene_expression_api(geapi): 17 | 18 | # s = Scene(title="BR") 19 | 20 | # geapi.get_gene_id_by_name(gene) 21 | # expids = geapi.get_gene_experiments(gene) 22 | 23 | # data = geapi.get_gene_data(gene, expids[0], use_cache=True) 24 | 25 | # # make actor 26 | # gene_actor = geapi.griddata_to_volume( 27 | # data, min_quantile=90, cmap="inferno" 28 | # ) 29 | # assert 
isinstance(gene_actor, Actor) 30 | # assert gene_actor.name == gene 31 | # assert gene_actor.br_class == "Gene Data" 32 | 33 | # s.add(gene_actor) 34 | 35 | 36 | # @pytest.mark.xfail 37 | # @pytest.mark.slow 38 | # def test_download_no_cache(geapi): 39 | # geapi.get_gene_id_by_name(gene) 40 | # expids = geapi.get_gene_experiments(gene) 41 | # geapi.get_gene_data(gene, expids[0], use_cache=False) 42 | -------------------------------------------------------------------------------- /tests/test_actor.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | from brainglobe_space import AnatomicalSpace 5 | from rich import print as rprint 6 | from vedo import Mesh 7 | 8 | from brainrender import Scene 9 | from brainrender._io import load_mesh_from_file 10 | from brainrender.actor import Actor 11 | 12 | 13 | @pytest.fixture 14 | def mesh_actor(): 15 | resources_dir = Path(__file__).parent.parent / "resources" 16 | data_path = resources_dir / "CC_134_1_ch1inj.obj" 17 | obj_mesh = load_mesh_from_file(data_path, color="tomato") 18 | 19 | return Actor(obj_mesh, name=data_path.name, br_class="from file") 20 | 21 | 22 | def test_actor(): 23 | s = Scene() 24 | 25 | s = s.root 26 | assert isinstance(s, Actor) 27 | print(s) 28 | str(s) 29 | rprint(s) 30 | 31 | assert isinstance(s.mesh, Mesh) 32 | assert s.alpha() == s.mesh.alpha() 33 | assert s.name == "root" 34 | assert s.br_class == "brain region" 35 | 36 | 37 | @pytest.mark.parametrize( 38 | "axis, expected_ind", 39 | [ 40 | ("z", 2), 41 | ("y", 1), 42 | ("x", 0), 43 | ("frontal", 2), 44 | ("vertical", 1), 45 | ("sagittal", 0), 46 | ], 47 | ) 48 | def test_mirror_origin(mesh_actor, axis, expected_ind): 49 | original_center = mesh_actor.center 50 | mesh_actor.mirror(axis) 51 | new_center = mesh_actor.center 52 | 53 | assert new_center[expected_ind] == -original_center[expected_ind] 54 | 55 | 56 | @pytest.mark.parametrize( 57 | "axis, 
expected_ind", 58 | [ 59 | ("z", 2), 60 | ("y", 1), 61 | ("x", 0), 62 | ("frontal", 2), 63 | ("vertical", 1), 64 | ("sagittal", 0), 65 | ], 66 | ) 67 | def test_mirror_around_root(mesh_actor, axis, expected_ind): 68 | scene = Scene() 69 | root_center = scene.root.center 70 | 71 | original_center = mesh_actor.center 72 | mesh_actor.mirror(axis, origin=root_center) 73 | new_center = mesh_actor.center 74 | 75 | # The new center should be the same distance from the root center as the original center 76 | expected_location = ( 77 | -(original_center[expected_ind] - root_center[expected_ind]) 78 | + root_center[expected_ind] 79 | ) 80 | 81 | assert new_center[expected_ind] == pytest.approx( 82 | expected_location, abs=1e-3 83 | ) 84 | 85 | 86 | @pytest.mark.parametrize( 87 | "axis, expected_ind", 88 | [ 89 | ("z", 2), 90 | ("y", 1), 91 | ("x", 0), 92 | ("frontal", 1), 93 | ("vertical", 0), 94 | ("sagittal", 2), 95 | ], 96 | ) 97 | def test_mirror_custom_space(mesh_actor, axis, expected_ind): 98 | scene = Scene() 99 | scene.atlas.space = AnatomicalSpace("sra") 100 | 101 | original_center = mesh_actor.center 102 | mesh_actor.mirror(axis, atlas=scene.atlas) 103 | new_center = mesh_actor.center 104 | 105 | assert new_center[expected_ind] == -original_center[expected_ind] 106 | 107 | 108 | @pytest.mark.parametrize( 109 | "axis, expected_ind", 110 | [ 111 | ("z", 2), 112 | ("y", 1), 113 | ("x", 0), 114 | ("frontal", 1), 115 | ("vertical", 0), 116 | ("sagittal", 2), 117 | ], 118 | ) 119 | def test_mirror_custom_space_around_root(mesh_actor, axis, expected_ind): 120 | scene = Scene() 121 | scene.atlas.space = AnatomicalSpace("sra") 122 | root_center = scene.root.center 123 | 124 | original_center = mesh_actor.center 125 | mesh_actor.mirror(axis, origin=root_center, atlas=scene.atlas) 126 | new_center = mesh_actor.center 127 | 128 | # The new center should be the same distance from the root center as the original center 129 | expected_location = ( 130 | 
-(original_center[expected_ind] - root_center[expected_ind]) 131 | + root_center[expected_ind] 132 | ) 133 | 134 | assert new_center[expected_ind] == pytest.approx( 135 | expected_location, abs=1e-3 136 | ) 137 | -------------------------------------------------------------------------------- /tests/test_atlas.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brainrender import Scene 4 | from brainrender.actor import Actor 5 | 6 | 7 | @pytest.mark.parametrize( 8 | "pos, plane, norm", 9 | [ 10 | (None, "sagittal", None), 11 | (None, "frontal", [1, 1, 1]), 12 | ([1000, 1000, 1000], "horizontal", None), 13 | ([1000, 1000, 1000], None, [0, 1, -1]), 14 | ], 15 | ) 16 | def test_atlas_plane(pos, plane, norm): 17 | s = Scene() 18 | 19 | p1 = s.atlas.get_plane(plane=plane, pos=pos, norm=norm, sx=1, sy=11) 20 | p2 = s.atlas.get_plane(plane=plane, pos=pos, norm=norm) 21 | assert isinstance(p1, Actor) 22 | assert isinstance(p2, Actor) 23 | 24 | # # s.render(interactive=False) 25 | del s 26 | -------------------------------------------------------------------------------- /tests/test_camera.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brainrender import Scene 4 | from brainrender.camera import ( 5 | check_camera_param, 6 | get_camera, 7 | get_camera_params, 8 | set_camera, 9 | set_camera_params, 10 | ) 11 | 12 | cameras = [ 13 | "sagittal", 14 | "sagittal2", 15 | "frontal", 16 | "top", 17 | "top_side", 18 | "three_quarters", 19 | ] 20 | 21 | 22 | def test_get_camera(): 23 | for camera in cameras: 24 | get_camera(camera) 25 | 26 | with pytest.raises(KeyError): 27 | get_camera("nocamera") 28 | 29 | 30 | def test_camera_params(): 31 | for camera in cameras: 32 | check_camera_param(camera) 33 | 34 | 35 | def test_get_camera_params(): 36 | s = Scene() 37 | s.render(interactive=False) 38 | cam = s.plotter.camera 39 | 40 | params = 
get_camera_params(scene=s) 41 | params2 = get_camera_params(camera=cam) 42 | 43 | check_camera_param(params) 44 | check_camera_param(params2) 45 | 46 | 47 | def test_set_camera_params(): 48 | s = Scene() 49 | params = get_camera_params(scene=s) 50 | 51 | set_camera_params(s.plotter.camera, params) 52 | 53 | 54 | def test_set_camera(): 55 | s = Scene() 56 | s.render(interactive=False) 57 | cam = s.plotter.camera 58 | 59 | set_camera(s, cam) 60 | set_camera(s, "sagittal") 61 | -------------------------------------------------------------------------------- /tests/test_colors.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from brainrender._colors import get_random_colors, make_palette, map_color 4 | 5 | 6 | @pytest.mark.parametrize( 7 | "value, name, vmin, vmax", 8 | [(1, "jet", 0, 2), (1, "jet", 0, -2), (1, "Blues", 0, 2)], 9 | ) 10 | def test_cmap(value, name, vmin, vmax): 11 | if vmax < vmin: 12 | with pytest.raises(ValueError): 13 | col = map_color(value, name=name, vmin=vmin, vmax=vmax) 14 | else: 15 | col = map_color(value, name=name, vmin=vmin, vmax=vmax) 16 | assert len(col) == 3 17 | 18 | 19 | @pytest.mark.parametrize( 20 | "N, colors", 21 | [ 22 | (5, ("salmon", "green", "blue", "red", "magenta", "black")), 23 | (1, ("salmon", "red")), 24 | (3, ("salmon", "blue")), 25 | ], 26 | ) 27 | def test_make_palette(N, colors): 28 | if len(colors) > N: 29 | with pytest.raises(ValueError): 30 | cols = make_palette(N, *colors) 31 | else: 32 | cols = make_palette(N, *colors) 33 | if N == 1: 34 | assert len(set(cols)) == 1 35 | assert len(cols) == N 36 | 37 | 38 | @pytest.mark.parametrize("n", [(1), (2), (5)]) 39 | def test_get_random_colors(n): 40 | cols = get_random_colors(n_colors=n) 41 | if n == 1: 42 | assert isinstance(cols, str) 43 | else: 44 | assert len(cols) == n 45 | -------------------------------------------------------------------------------- /tests/test_cylinder.py: 
-------------------------------------------------------------------------------- 1 | from brainrender import Scene 2 | from brainrender.actors import Cylinder 3 | 4 | 5 | def test_cylinder(): 6 | s = Scene(title="BR") 7 | 8 | th = s.add_brain_region("TH") 9 | s.add(Cylinder(th, s.root)) 10 | s.add(Cylinder(th.center_of_mass(), s.root)) 11 | del s 12 | -------------------------------------------------------------------------------- /tests/test_examples.py: -------------------------------------------------------------------------------- 1 | def test_examples(): 2 | """ 3 | Run every script in the examples directory 4 | """ 5 | import examples # noqa 6 | -------------------------------------------------------------------------------- /tests/test_export_html.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import pytest 4 | 5 | from brainrender import Scene 6 | 7 | 8 | @pytest.fixture 9 | def scene(): 10 | """Provide a scene with a brain region""" 11 | s = Scene(title="BR") 12 | th = s.add_brain_region("TH") 13 | s.add_label(th, "TH") 14 | return s 15 | 16 | 17 | def test_export_for_web(scene): 18 | """Check that exporting to html creates the expected file""" 19 | path = scene.export("test.html") 20 | assert path == "test.html" 21 | 22 | path = Path(path) 23 | assert path.exists() 24 | 25 | path.unlink() 26 | 27 | 28 | def test_export_for_web_raises(scene): 29 | """Check that exporting with invalid file extention raises ValueError""" 30 | with pytest.raises(ValueError): 31 | scene.export("test.py") 32 | -------------------------------------------------------------------------------- /tests/test_integration.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | import pooch 5 | import pytest 6 | from brainglobe_space import AnatomicalSpace 7 | from brainglobe_utils.IO.image.load import load_any 8 | from vedo 
import Volume as VedoVolume 9 | 10 | from brainrender import Animation, Scene, VideoMaker 11 | from brainrender.actors import ( 12 | Neuron, 13 | Points, 14 | PointsDensity, 15 | Volume, 16 | ruler, 17 | ruler_from_surface, 18 | ) 19 | from brainrender.atlas_specific import GeneExpressionAPI 20 | 21 | resources_dir = Path(__file__).parent.parent / "resources" 22 | 23 | 24 | def get_n_points_in_region(region, N): 25 | """ 26 | Gets N points inside (or on the surface) of a mes 27 | """ 28 | 29 | region_bounds = region.mesh.bounds() 30 | X = np.linspace(region_bounds[0], region_bounds[1], num=N) 31 | Y = np.linspace(region_bounds[2], region_bounds[3], num=N) 32 | Z = np.linspace(region_bounds[4], region_bounds[5], num=N) 33 | pts = [[x, y, z] for x, y, z in zip(X, Y, Z)] 34 | 35 | ipts = region.mesh.inside_points(pts).points 36 | return np.vstack(ipts) 37 | 38 | 39 | def check_bounds(bounds, parent_bounds): 40 | """ 41 | Checks that the bounds of an actor are within the bounds of the root 42 | """ 43 | for i, bound in enumerate(bounds): 44 | if i % 2 == 0: 45 | assert bound >= parent_bounds[i] 46 | else: 47 | assert bound <= parent_bounds[i] 48 | 49 | 50 | @pytest.fixture 51 | def scene(): 52 | scene = Scene(atlas_name="allen_mouse_100um", inset=False) 53 | yield scene 54 | scene.close() 55 | del scene 56 | 57 | 58 | def test_scene_with_brain_region(scene): 59 | brain_region = scene.add_brain_region( 60 | "grey", 61 | alpha=0.4, 62 | ) 63 | 64 | bounds = brain_region.bounds() 65 | root_bounds = scene.root.bounds() 66 | 67 | assert scene.actors[1] == brain_region 68 | 69 | check_bounds(bounds, root_bounds) 70 | 71 | 72 | def test_add_cells(scene): 73 | mos = scene.add_brain_region("MOs", alpha=0.15) 74 | coordinates = get_n_points_in_region(mos, 1000) 75 | points = Points(coordinates, name="CELLS", colors="steelblue") 76 | 77 | scene.add(points) 78 | 79 | assert scene.actors[0] == scene.root 80 | assert scene.actors[1] == mos 81 | assert scene.actors[2] == points 82 | 
83 | scene.render(interactive=False) 84 | 85 | root_bounds = scene.root.bounds() 86 | region_bounds = mos.bounds() 87 | points_bounds = points.bounds() 88 | 89 | check_bounds(points_bounds, root_bounds) 90 | check_bounds(region_bounds, root_bounds) 91 | 92 | 93 | def test_add_labels(scene): 94 | th, mos = scene.add_brain_region("TH", "MOs") 95 | scene.add_label(th, "TH") 96 | 97 | scene.render(interactive=False) 98 | 99 | assert scene.actors[1] == th 100 | assert scene.actors[2] == mos 101 | assert len(th.labels) == 2 102 | assert len(mos.labels) == 0 103 | 104 | th_label_text_bounds = th.labels[0].bounds() 105 | th_label_bounds = th.labels[1].bounds() 106 | root_bounds = scene.root.bounds() 107 | 108 | check_bounds(th_label_text_bounds, root_bounds) 109 | check_bounds(th_label_bounds, root_bounds) 110 | 111 | 112 | def test_add_mesh_from_file(scene): 113 | data_path = resources_dir / "CC_134_1_ch1inj.obj" 114 | scene.add_brain_region("SCm", alpha=0.2) 115 | file_mesh = scene.add(data_path, color="tomato") 116 | 117 | scene.render(interactive=False) 118 | 119 | file_mesh_bounds = file_mesh.bounds() 120 | root_bounds = scene.root.bounds() 121 | 122 | check_bounds(file_mesh_bounds, root_bounds) 123 | 124 | 125 | def test_animation(scene, pytestconfig): 126 | root_path = pytestconfig.rootpath 127 | vid_directory = root_path / "tests" / "examples" 128 | 129 | scene.add_brain_region("TH") 130 | anim = Animation(scene, vid_directory, "vid3") 131 | 132 | anim.add_keyframe(0, camera="top", zoom=1) 133 | anim.add_keyframe(1.5, camera="sagittal", zoom=0.95) 134 | anim.add_keyframe(3, camera="frontal", zoom=1) 135 | anim.add_keyframe(4, camera="frontal", zoom=1.2) 136 | 137 | anim.make_video(duration=5, fps=15) 138 | 139 | vid_path = Path(root_path / "tests" / "examples" / "vid3.mp4") 140 | 141 | assert vid_path.exists() 142 | vid_path.unlink() 143 | assert not vid_path.exists() 144 | 145 | 146 | def test_adding_multiple_brain_regions(scene): 147 | th = 
scene.add_brain_region("TH") 148 | brain_regions = scene.add_brain_region( 149 | "MOs", "CA1", alpha=0.2, color="green" 150 | ) 151 | 152 | scene.render(interactive=False) 153 | 154 | assert len(scene.actors) == 4 155 | assert scene.actors[1].name == "TH" 156 | assert scene.actors[2].name == "MOs" 157 | assert scene.actors[3].name == "CA1" 158 | 159 | root_bounds = scene.root.bounds() 160 | th_bounds = th.bounds() 161 | mos_bounds = brain_regions[0].bounds() 162 | ca1_bounds = brain_regions[1].bounds() 163 | 164 | check_bounds(th_bounds, root_bounds) 165 | check_bounds(mos_bounds, root_bounds) 166 | check_bounds(ca1_bounds, root_bounds) 167 | 168 | 169 | def test_brainglobe_atlas(): 170 | scene = Scene(atlas_name="example_mouse_100um", title="example_mouse") 171 | 172 | scene.render(interactive=False) 173 | 174 | assert len(scene.actors) == 2 175 | assert scene.actors[0].name == "root" 176 | assert scene.actors[1].name == "title" 177 | assert scene.atlas.atlas_name == "example_mouse_100um" 178 | 179 | 180 | def test_cell_density(scene): 181 | mos = scene.add_brain_region("MOs", alpha=0.0) 182 | coordinates = get_n_points_in_region(mos, 2000) 183 | 184 | points = Points(coordinates, name="CELLS", colors="salmon") 185 | points_density = PointsDensity(coordinates) 186 | scene.add(points) 187 | scene.add(points_density) 188 | 189 | scene.render(interactive=False) 190 | 191 | assert scene.actors[1] == mos 192 | assert scene.actors[2] == points 193 | assert scene.actors[3] == points_density 194 | 195 | root_bounds = scene.root.bounds() 196 | points_bounds = points.bounds() 197 | points_density_bounds = points_density.bounds() 198 | 199 | check_bounds(points_bounds, root_bounds) 200 | check_bounds(points_density_bounds, root_bounds) 201 | 202 | ids = points_density.mesh.isosurface().inside_points( 203 | coordinates, return_ids=True 204 | ) 205 | 206 | # Check that at least 75% of the points are inside the mesh 207 | assert len(ids) >= 0.75 * len(coordinates) 208 | 209 | 
210 | def test_gene_expression(scene): 211 | gene = "Gpr161" 212 | geapi = GeneExpressionAPI() 213 | expids = geapi.get_gene_experiments(gene) 214 | data = geapi.get_gene_data(gene, expids[1]) 215 | 216 | gene_actor = geapi.griddata_to_volume( 217 | data, min_quantile=99, cmap="inferno" 218 | ) 219 | ca1 = scene.add_brain_region("CA1", alpha=0.2, color="skyblue") 220 | act = scene.add(gene_actor) 221 | 222 | scene.render(interactive=False) 223 | 224 | # Expand bounds by 600 px 225 | ca1_bounds = ca1.bounds() 226 | expanded_bounds = [ 227 | bound - 600 if i % 2 == 0 else bound + 600 228 | for i, bound in enumerate(ca1_bounds) 229 | ] 230 | 231 | gene_actor_bounds = act.bounds() 232 | 233 | assert scene.actors[1] == ca1 234 | assert scene.actors[2] == act 235 | 236 | check_bounds(gene_actor_bounds, expanded_bounds) 237 | 238 | 239 | def test_neurons(scene, pytestconfig): 240 | data_path = resources_dir / "neuron1.swc" 241 | 242 | neuron = Neuron(data_path) 243 | scene.add(neuron) 244 | scene.render(interactive=False) 245 | 246 | assert len(scene.actors) == 2 247 | assert scene.actors[1].name == "neuron1.swc" 248 | 249 | neuron_bounds = scene.actors[1].bounds() 250 | # Based on pre-calculated bounds of this specific neuron 251 | expected_bounds = (2177, 7152, 2319, 5056, -9147, -1294) 252 | 253 | check_bounds(neuron_bounds, expected_bounds) 254 | 255 | 256 | def test_ruler(scene): 257 | th, mos = scene.add_brain_region("TH", "MOs", alpha=0.3) 258 | p1 = th.center_of_mass() 259 | p2 = mos.center_of_mass() 260 | 261 | rul1 = ruler(p1, p2, unit_scale=0.01, units="mm") 262 | rul2 = ruler_from_surface(p1, scene.root, unit_scale=0.01, units="mm") 263 | 264 | scene.add(rul1, rul2) 265 | 266 | scene.render(interactive=False) 267 | 268 | assert len(scene.actors) == 5 269 | assert scene.actors[1] == th 270 | assert scene.actors[2] == mos 271 | assert scene.actors[3] == rul1 272 | assert scene.actors[4] == rul2 273 | 274 | root_bounds = scene.root.bounds() 275 | th_bounds = 
th.bounds() 276 | mos_bounds = mos.bounds() 277 | rul1_bounds = rul1.bounds() 278 | rul2_bounds = rul2.bounds() 279 | 280 | check_bounds(th_bounds, root_bounds) 281 | check_bounds(mos_bounds, root_bounds) 282 | check_bounds(rul1_bounds, root_bounds) 283 | check_bounds(rul2_bounds, root_bounds) 284 | 285 | 286 | def test_slice(scene): 287 | th, mos, ca1 = scene.add_brain_region( 288 | "TH", "MOs", "CA1", alpha=0.2, color="green" 289 | ) 290 | th_clone = th._mesh.clone() 291 | mos_clone = mos._mesh.clone() 292 | ca1_clone = ca1._mesh.clone() 293 | 294 | scene.slice("frontal", actors=[mos]) 295 | plane = scene.atlas.get_plane(pos=mos.center_of_mass(), norm=(1, 1, 2)) 296 | scene.slice(plane, actors=[ca1]) 297 | scene.render(interactive=False) 298 | 299 | assert np.all(th_clone.bounds() == th.bounds()) 300 | assert np.all(mos_clone.bounds() != mos.bounds()) 301 | assert np.all(ca1_clone.bounds() != ca1.bounds()) 302 | 303 | 304 | def test_user_volumetric_data(): 305 | download_path = ( 306 | Path.home() / ".brainglobe" / "brainrender" / "example-data" 307 | ) 308 | filename = "T_AVG_s356tTg.tif" 309 | scene = Scene(atlas_name="mpin_zfish_1um") 310 | 311 | # for some reason the list of returned by pooch does not seem to be 312 | # in the same order every time 313 | _ = pooch.retrieve( 314 | url="https://api.mapzebrain.org/media/Lines/brn3cGFP/average_data/T_AVG_s356tTg.zip", 315 | known_hash="54b59146ba08b4d7eea64456bcd67741db4b5395235290044545263f61453a61", 316 | path=download_path, 317 | progressbar=True, 318 | processor=pooch.Unzip(extract_dir="."), 319 | ) 320 | 321 | datafile = download_path / filename 322 | data = load_any(datafile) 323 | source_space = AnatomicalSpace("ira") 324 | target_space = scene.atlas.space 325 | transformed_data = source_space.map_stack_to(target_space, data) 326 | 327 | vol = VedoVolume(transformed_data).smooth_median() 328 | 329 | mesh = vol.isosurface(value=20).decimate().clean() 330 | SHIFT = [30, 15, -20] # fine tune mesh position 331 
| current_position = mesh.pos() 332 | new_position = [SHIFT[i] + current_position[i] for i in range(3)] 333 | mesh.pos(*new_position) 334 | 335 | scene.add(mesh) 336 | scene.render(interactive=False) 337 | 338 | assert len(scene.actors) == 2 339 | 340 | root_bounds = scene.root.bounds() 341 | mesh_bounds = scene.actors[1].bounds() 342 | 343 | # Have to expand root bounds by 20 px 344 | 345 | expanded_bounds = [ 346 | bound - 20 if i % 2 == 0 else bound + 20 347 | for i, bound in enumerate(root_bounds) 348 | ] 349 | 350 | check_bounds(mesh_bounds, expanded_bounds) 351 | 352 | 353 | def test_video(scene, pytestconfig): 354 | root_path = pytestconfig.rootpath 355 | video_directory = root_path / "tests" / "videos" 356 | 357 | scene.add_brain_region("TH") 358 | vm = VideoMaker(scene, video_directory, "vid1") 359 | vm.make_video(elevation=2, duration=2, fps=15) 360 | video_path = video_directory / "vid1.mp4" 361 | 362 | assert video_directory.exists() 363 | assert video_path.exists() 364 | 365 | video_path.unlink() 366 | Path.rmdir(video_directory) 367 | 368 | 369 | def test_volumetric_data(scene): 370 | data_path = resources_dir / "volume.npy" 371 | data = np.load(data_path) 372 | actor = Volume( 373 | data, 374 | voxel_size=200, 375 | as_surface=False, 376 | c="Reds", 377 | ) 378 | scene.add(actor) 379 | scene.render(interactive=False) 380 | 381 | assert len(scene.actors) == 2 382 | assert scene.actors[1] == actor 383 | 384 | root_bounds = scene.root.bounds() 385 | actor_bounds = actor.bounds() 386 | 387 | # Have to expand root bounds by 450 px 388 | # This will fail if the volume is misaligned along z 389 | # or rotated in relation with the root 390 | expanded_bounds = [ 391 | bound - 550 if i % 2 == 0 else bound + 550 392 | for i, bound in enumerate(root_bounds) 393 | ] 394 | 395 | check_bounds(actor_bounds, expanded_bounds) 396 | -------------------------------------------------------------------------------- /tests/test_line.py: 
# --------------------------------------------------------------------------
# tests/test_line.py
# --------------------------------------------------------------------------
import numpy as np

from brainrender import Scene
from brainrender.actor import Actor
from brainrender.actors import Line


def test_line():
    """A Line built from an (N, 3) array is an Actor and can be added to a Scene."""
    s = Scene()

    line = Line(
        np.array(
            [
                [0, 0, 0],
                [1, 1, 1],
                [2, 2, 2],
            ]
        )
    )

    s.add(line)
    assert isinstance(line, Actor)

    del s


# --------------------------------------------------------------------------
# tests/test_neuron.py
# --------------------------------------------------------------------------
from pathlib import Path

import pytest
from vedo import Sphere

from brainrender import Scene
from brainrender.actor import Actor
from brainrender.actors import Neuron, make_neurons

resources_dir = Path(__file__).parent.parent / "resources"


def test_neuron():
    """Neuron accepts an SWC path, an Actor, a vedo mesh, or a Sphere,
    and raises on unsupported inputs (bad type, missing file, .h5 data)."""
    s = Scene(title="BR")
    neuron = s.add(Neuron(resources_dir / "neuron1.swc"))
    s.add(Neuron(Actor(neuron.mesh)))
    s.add(Neuron(neuron.mesh))
    Neuron(Sphere())

    # Not a path/mesh/Actor -> rejected outright
    with pytest.raises(ValueError):
        Neuron(1)

    # Nonexistent file path
    with pytest.raises(FileExistsError):
        Neuron(resources_dir / "neuronsfsfs.swc")
    # Existing file, but an unsupported format
    with pytest.raises(NotImplementedError):
        Neuron(resources_dir / "random_cells.h5")

    del s


def test_make_neurons():
    """make_neurons builds multiple Neuron actors from repeated file paths."""
    data_path = resources_dir / "neuron1.swc"
    make_neurons(data_path, data_path)


# --------------------------------------------------------------------------
# tests/test_points.py
# --------------------------------------------------------------------------
import random
from pathlib import Path

import numpy as np
import pytest

from brainrender import Scene
from brainrender.actor import Actor
from brainrender.actors import Point, Points, PointsDensity

resources_dir = Path(__file__).parent.parent / "resources"


def get_n_random_points_in_region(region, N):
    """
    Gets N random points inside (or on the surface) of a mesh
    """
    # Sample 10000 candidate points uniformly within the region's bounding
    # box, keep only those inside the mesh, then draw N of them (with
    # replacement) so the caller always gets exactly N points.
    region_bounds = region.mesh.bounds()
    X = np.random.randint(region_bounds[0], region_bounds[1], size=10000)
    Y = np.random.randint(region_bounds[2], region_bounds[3], size=10000)
    Z = np.random.randint(region_bounds[4], region_bounds[5], size=10000)
    pts = [[x, y, z] for x, y, z in zip(X, Y, Z)]

    ipts = region.mesh.inside_points(pts).points
    return np.vstack(random.choices(ipts, k=N))


def test_points_working():
    """Points can be built from an array or a .npy path; Point from coordinates."""
    s = Scene(title="BR")
    data_path = resources_dir / "random_cells.npy"
    act = Points(np.load(data_path))
    act2 = Points(data_path, colors="k")
    act3 = Points(data_path, name="test")
    assert act3.name == "test"

    s.add(act)
    s.add(act2)

    point = Point([100, 233, 422])
    s.add(point)
    assert isinstance(point, Actor)
    assert isinstance(act, Actor)
    assert isinstance(act2, Actor)
    assert isinstance(act3, Actor)
    assert point.name == "Point"

    del s


def test_points_density():
    """PointsDensity renders a density volume from coordinates inside a region."""
    s = Scene(title="BR")
    mos = s.add_brain_region("MOs", alpha=0.0)
    coordinates = get_n_random_points_in_region(mos, 2000)
    pd = s.add(PointsDensity(coordinates))

    assert isinstance(pd, Actor)
    del s


def test_points_error():
    """Points raises for a missing file and for an unsupported file format."""
    with pytest.raises(FileExistsError):
        Points(
            resources_dir / "testsfsdfs.npy",
            colors="k",
        )
    with pytest.raises(NotImplementedError):
        Points(
            resources_dir / "random_cells.h5",
            colors="k",
        )


# --------------------------------------------------------------------------
# tests/test_ruler.py
# --------------------------------------------------------------------------
from brainrender import Scene
from brainrender.actors import ruler, ruler_from_surface


def test_ruler():
    """A ruler between two region centers of mass can be added to a Scene."""
    s = Scene(title="BR")
    th = s.add_brain_region("TH", hemisphere="left")
    mos = s.add_brain_region("MOs", hemisphere="right")

    s.add(
        ruler(
            th.center_of_mass(),
            mos.center_of_mass(),
            unit_scale=0.01,
            units="mm",
        )
    )

    # s.render(interactive=False)
    del s


def test_ruler_from_surface():
    """A ruler from a point to the root surface can be added to a Scene."""
    s = Scene(title="BR")
    th = s.add_brain_region("TH", hemisphere="left")

    s.add(
        ruler_from_surface(
            th.center_of_mass(), s.root, unit_scale=0.01, units="mm"
        )
    )

    # s.render(interactive=False)
    del s


# --------------------------------------------------------------------------
# tests/test_scene.py
# --------------------------------------------------------------------------
from brainrender import Scene
from brainrender.actor import Actor


def test_scene_creation():
    """Scenes build with root=False and inset=False; .root access doesn't raise."""
    scene = Scene(root=False)
    scene.root
    noinset = Scene(inset=False, title="TEST")
    noinset.root


def test_scene_render_simple():
    scene = Scene()
    scene.render(interactive=False)


def test_scene_specials():
    """print/str/content on a Scene do not raise."""
    scene = Scene()
    print(scene)
    str(scene)
    scene.content


def test_brain_regions():
    """add_brain_region returns an Actor, a list for several names,
    and None for already-added or unknown regions."""
    scene = Scene()
    th = scene.add_brain_region("TH")
    assert scene.actors[-1] == th
    assert isinstance(th, Actor)

    regs = scene.add_brain_region("MOs", "CA1")
    assert isinstance(regs, list)
    assert len(regs) == 2

    nan = scene.add_brain_region("MOs", "CA1")
    assert nan is None
    noone = scene.add_brain_region("what is this")
    assert noone is None

    scene.add_brain_region("TH", hemisphere="left")
    scene.add_brain_region("CA1", hemisphere="right")
    scene.add_brain_region("STN", hemisphere="right")


def test_labels():
    scene = Scene()
    th = scene.add_brain_region("TH")
    scene.add_label(th, "TH")


def test_scene_render():
    """Rendering works with a zoom factor and with an explicit camera dict."""
    scene = Scene()
    scene.add_brain_region("TH")

    scene.render(interactive=False, zoom=1.4)

    scene.render(
        interactive=False,
        camera=dict(
            pos=(
                10705.845660949382,
                7435.678067378925,
                -36936.3695486442,
            ),
            viewup=(
                -0.0050579179155257475,
                -0.9965615097647067,
                -0.08270172139591858,
            ),
            clipping_range=(30461.81976236306, 58824.38622122339),
        ),
    )


def test_scene_slice():
    """Slicing works with named planes, custom planes, and actor subsets."""
    s = Scene()
    s.add_brain_region("TH")

    s.slice("frontal")

    ret = s.slice(
        "frontal",
    )
    assert ret is None

    s.slice("sagittal", close_actors=True)

    s = Scene()
    th = s.add_brain_region("TH")

    plane = s.atlas.get_plane(pos=[1999, 1312, 3421], norm=[1, -1, 2])
    s.slice(plane, actors=th)
    ret = s.slice(
        plane,
        actors=[th, s.root],
    )
    del s


def test_actor_removal():
    """remove() deletes an actor; removing an unknown item is a no-op."""
    s = Scene()
    th = s.add_brain_region("TH")
    assert len(s.actors) == 2

    s.remove(th)
    assert len(s.actors) == 1

    s.remove("no actor")
    assert len(s.actors) == 1


def test_get_actors():
    """get_actors filters by name and by br_class."""
    s = Scene()
    th = s.add_brain_region("TH")

    found1 = s.get_actors(name="TH")
    assert len(found1) == 1
    assert th in found1

    found2 = s.get_actors(br_class="brain region")
    assert len(found2) == 2
    assert th in found2
    assert s.root in found2


# --------------------------------------------------------------------------
# tests/test_screenshot.py
# --------------------------------------------------------------------------
import platform
from pathlib import Path

import pytest
from skimage.color import rgb2gray
from skimage.io import imread
from skimage.metrics import structural_similarity as ssim

from brainrender import Scene

validate_directory = Path(__file__).parent / "data"


@pytest.mark.parametrize("extension", ["png", "jpg", "svg", "eps", "pdf"])
def test_screenshot(tmp_path, extension, similarity_threshold=0.75):
    """Screenshots save in each format; raster outputs are compared
    against reference images via SSIM (skipped on Linux CI)."""
    filename = "screenshot." + extension
    scene = Scene(
        screenshots_folder=tmp_path,
    )
    scene.add_brain_region("TH")
    scene.render(interactive=False, zoom=2)
    scene.screenshot(name=filename)
    scene.close()

    test_filepath = tmp_path / filename

    # These are saved compressed
    if extension in ["eps", "svg"]:
        test_filepath = test_filepath.parent / (test_filepath.name + ".gz")

    assert test_filepath.exists()

    # These are the only raster formats
    if extension in ["png", "jpg"]:
        test_image = rgb2gray(imread(test_filepath))
        validate_filepath = validate_directory / filename
        validate_image = rgb2gray(imread(validate_filepath))

        assert test_image.shape == validate_image.shape

        if platform.system() != "Linux":
            # The screenshots are not produced correctly on Linux in CI
            data_range = validate_image.max() - validate_image.min()
            similarity_index, _ = ssim(
                test_image, validate_image, data_range=data_range, full=True
            )

            assert similarity_index > similarity_threshold


# --------------------------------------------------------------------------
# tests/test_streamlines.py
# --------------------------------------------------------------------------
import pandas as pd
import pytest

from brainrender import Scene
from brainrender.actors.streamlines import (
    Streamlines,
    make_streamlines,
)
from brainrender.atlas_specific import get_streamlines_for_region


@pytest.mark.xfail(reason="Likely to fail due to neuromorpho")
def test_download():
    streams = get_streamlines_for_region("TH", force_download=False)
    assert len(streams) == 54
    assert isinstance(streams[0], pd.DataFrame)


@pytest.mark.xfail(reason="Likely to fail due to neuromorpho")
def test_download_slow():
    streams = get_streamlines_for_region("TH", force_download=True)
    assert len(streams) == 54
    assert isinstance(streams[0], pd.DataFrame)


@pytest.mark.xfail(reason="Likely to fail due to neuromorpho")
def test_streamlines():
    """Streamlines build from DataFrames and reject non-DataFrame input."""
    s = Scene(title="BR")
    streams = get_streamlines_for_region("TH", force_download=False)
    s.add(Streamlines(streams[0]))
    s.add(*make_streamlines(*streams[1:3]))

    with pytest.raises(TypeError):
        Streamlines([1, 2, 3])

    del s


# --------------------------------------------------------------------------
# tests/test_utils.py
# --------------------------------------------------------------------------
from brainrender._utils import get_subdirs, listdir, listify, return_list_smart


def test_listdir():
    items = listdir("test")
    assert isinstance(items, list)


def test_get_subdirs():
    subs = get_subdirs("brainrender")
    assert isinstance(subs, list)


def test_listify():
    """listify wraps lists, tuples, and scalars into lists."""
    assert isinstance(listify([1, 2, 3]), list)
    assert isinstance(listify((1, 2, 3)), list)
    assert isinstance(listify(1), list)


def test_return_list_smart():
    """return_list_smart passes non-empty lists through and maps [] to None."""
    l1 = [1, 2, 3]
    assert isinstance(return_list_smart(l1), list)
    assert return_list_smart([]) is None


# --------------------------------------------------------------------------
# tests/test_video.py
# --------------------------------------------------------------------------
from pathlib import Path

from brainrender.scene import Scene
from brainrender.video import Animation, VideoMaker


def test_video():
    """VideoMaker writes an .mp4 at the expected path; clean up afterwards."""
    s = Scene(title="BR")

    s.add_brain_region("TH")

    vm = VideoMaker(s, "tests", "test")
    savepath = vm.make_video(duration=1, fps=15, azimuth=3)

    assert savepath == "tests/test.mp4"
    path = Path(savepath)
    assert path.exists()
    path.unlink()


def test_video_custom():
    """VideoMaker accepts a custom per-frame callback via make_frame_func."""
    def custom(scene, *args, **kwargs):
        return

    s = Scene(title="BR")

    s.add_brain_region("TH")

    vm = VideoMaker(s, "tests", "test", make_frame_func=custom)

    savepath = vm.make_video(duration=1, fps=15, azimuth=3)

    assert savepath == "tests/test.mp4"
    path = Path(savepath)
    assert path.exists()
    path.unlink()


def test_animation():
    """Animation interpolates keyframes (duplicates overwrite; out-of-range
    keyframes are tolerated) and writes an .mp4."""
    # Create a brainrender scene
    scene = Scene(title="brain regions", inset=False)

    # Add brain regions
    scene.add_brain_region("TH")

    anim = Animation(scene, "tests", "test")
    anim.add_keyframe(0, camera="top", zoom=1.3)
    anim.add_keyframe(1, camera="sagittal", zoom=2.1)
    anim.add_keyframe(2, camera="frontal", zoom=3)
    anim.add_keyframe(3, camera="frontal", zoom=2)
    anim.add_keyframe(3, camera="frontal", zoom=2)  # overwrite
    anim.add_keyframe(30, camera="frontal", zoom=2)  # too many

    savepath = anim.make_video(duration=3, fps=10)
    assert savepath == "tests/test.mp4"
    path = Path(savepath)
    assert path.exists()
    path.unlink()


# --------------------------------------------------------------------------
# tests/test_volume.py
# --------------------------------------------------------------------------
from pathlib import Path

import numpy as np

from brainrender import Scene
from brainrender.actors import Volume

resources_dir = Path(__file__).parent.parent / "resources"


def test_volume():
    """Volume renders a .npy array both as a volume and as a surface."""
    s = Scene(inset=False, root=True)

    data = np.load(resources_dir / "volume.npy")
    s.add(Volume(data, voxel_size=200, as_surface=False, c="Reds"))
    s.add(Volume(data, voxel_size=200, as_surface=True, c="Reds"))
    del s