├── .github └── workflows │ └── python-publish.yml ├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.md ├── iblviewer ├── __init__.py ├── application.py ├── collection.py ├── launcher.py ├── mouse_brain.py ├── objects.py ├── qt_application.py ├── utils.py └── volume.py ├── iblviewer_assets ├── __init__.py ├── allen_atlas │ └── mouse_ccf_structure.csv ├── doc │ ├── iblviewer_architecture.jpg │ ├── iblviewer_architecture.svg │ ├── iblviewer_v2_demo_brain_wide_map_1.jpg │ ├── iblviewer_v2_demo_brain_wide_map_2.jpg │ └── iblviewer_v2_demo_volume_mapping_qt.jpg ├── fonts │ ├── OFL.txt │ └── SourceSansPro-Regular.ttf └── surfaces │ └── 997.ply ├── iblviewer_examples ├── __init__.py ├── data │ ├── channels.f22c0fd9-6b7d-f8dd-9229-884c6f573046.pqt │ ├── exp2_db4df448-e449-4a6f-a0e7-288711e7a75a_both │ ├── exp3_3dd347df-f14e-40d5-9ff2-9c49f84d2157_both │ ├── exp4_3c851386-e92d-4533-8d55-89a46f0e7384_both │ ├── exp5_158d5d35-a2ab-4a76-87b0-51048c5d5283_both │ ├── ibl_point_neurons.npz │ ├── ncov_100.npz │ ├── ncov_25.npz │ ├── ncov_50.npz │ ├── stimonR_top10_rawpoints.p │ ├── valid_insertions.p │ └── valid_insertions_data.p ├── headless_render.py ├── human_brain.py ├── ibl_brain_coverage.py ├── ibl_brain_wide_map.py ├── ibl_insertion_probes.py ├── ibl_point_neurons.py ├── ibl_volume_mapping.py └── viewer.ipynb └── setup.py /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package on PyPi 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | jobs: 16 | deploy: 17 | 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: '3.x' 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install build 30 | - name: Build package 31 | run: python -m build 32 | - name: Publish package 33 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 34 | with: 35 | user: __token__ 36 | password: ${{ secrets.PYPI_API_TOKEN }} 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | .DS_Store 132 | .vscode -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 International Brain Laboratory 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------

recursive-include iblviewer_assets *
include iblviewer_examples/*
include iblviewer_examples/data/*

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# IBL Viewer
The International Brain Laboratory Viewer is a simple and fast 3D interactive visualization tool based on VTK that uses GPU-accelerated volume and surface rendering. It runs on Python 3.8+ and can be embedded in Jupyter Lab/Notebook and Qt user interfaces.

The viewer ships with an optional Qt user interface that offers more advanced functionality such as dynamic statistics. In the terminal, type `iblviewer` and see what parameters are available. The viewer launches with a Qt UI by default.

Most of the viewer makes VTK usable as an interactive application and you may use it as such. Just use `from iblviewer.application import Viewer`

The small added part related to IBL allows scientists to view their data and models for further analysis. From electrophysiological data to neuronal connectivity, this tool enables simple and effective 3D visualization for many use cases, such as multi-slicing and time series, even on volumes. In that case, you will use `from iblviewer.mouse_brain import MouseBrainViewer`

## Installation
```bash
pip install iblviewer
```

## Installation related to IBL
If you wish to use this viewer with International Brain Laboratory data sets and libraries, you will need ibllib:
```bash
pip install ibllib
```

An example of a mouse brain-wide map of electrophysiological recordings (seen here as point neurons) in the Allen Brain Atlas CCF v3, with both DWI and segmented volumes.
![Viewer multi-volume and points rendering](iblviewer_assets/doc/iblviewer_v2_demo_brain_wide_map_1.jpg?raw=true)

Another example of the Qt UI with additional features like custom interactive statistics, in this case with matplotlib.
![Viewer with stats in Qt](iblviewer_assets/doc/iblviewer_v2_demo_volume_mapping_qt.jpg?raw=true)

## Troubleshooting
If at some point pip complains about failing to uninstall vtk, it is likely that vtk is already installed within your conda environment and that it causes trouble (even if it is the proper version).
Run the following:
```bash
conda uninstall vtk
pip install vtk
```
This uninstalls vtk and reinstalls it (version 9+) with pip.

## Updating
If you have installed IBLViewer and want to update to the latest version, run:
```bash
pip install -U iblviewer
```

In rare cases (observed once on Windows), updating fails and the user is not told about it. In that case, reinstall iblviewer:
```bash
pip uninstall iblviewer
pip install iblviewer
```

## Examples
Type `iblviewer` in the command line to start the viewer; add `--help` for information about the arguments.
You may launch examples/demos from the command line too: type `iblviewer`, hit the `TAB` key twice and a list of names is given, like `iblviewer-volume-mapping-demo`. There is also a demo for headless rendering, for when you only need to execute code and produce an image or video.
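For example, from a terminal (the demo entry points are installed by `setup.py`):
```bash
# List the available arguments
iblviewer --help
# Start in generic (non-neuroscience) mode with random test points
iblviewer -neuro 0 -t 1
# Run one of the bundled demos
iblviewer-volume-mapping-demo
```

And a minimal headless sketch, assuming the `offscreen` flag of `initialize()` covers your use case; see `iblviewer_examples/headless_render.py` for the maintained demo:
```python
from iblviewer.mouse_brain import MouseBrainViewer

viewer = MouseBrainViewer()
# offscreen=True renders without opening an interactive window
viewer.initialize(resolution=50, mapping='Allen', offscreen=True, embed_ui=False)
viewer.show()
```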
If you wish to run your own code, follow the steps below.

Code to run the launcher with arguments from the command line (such as using the Qt UI):
```python
from iblviewer.launcher import IBLViewer
viewer = IBLViewer()
viewer.launch()
```

If you are not interested in the Qt UI, you may directly use either the generic VTK viewer below or the neuroscience one.

Sample code to run the generic VTK viewer:
```python
import numpy as np
from iblviewer.application import Viewer

viewer = Viewer()
viewer.initialize(embed_ui=True)
# Add some random point data
points = np.random.random((500, 3)) * 1000
viewer.add_points(points)
# Select and autofocus the last added object (the points)
viewer.select(-1)
# viewer.select(points) or viewer.select(points.name) yield the same result
viewer.show()
```

Sample code to run the mouse atlas viewer:
```python
from iblviewer.mouse_brain import MouseBrainViewer

viewer = MouseBrainViewer()
# See initialize parameters for more choices
viewer.initialize(resolution=50, mapping='Allen', add_atlas=True,
                  add_dwi=False, embed_ui=True, jupyter=False)
viewer.show()
```

[Volumetric time series](iblviewer_examples/ibl_volume_mapping.py) of values assigned to brain regions.

[Point neurons](iblviewer_examples/ibl_point_neurons.py) and connectivity data.

[Insertion probes](iblviewer_examples/ibl_insertion_probes.py), or how to display lines made of a heterogeneous number of points. This example requires valid credentials for the IBL back-end.

Since this tool is built on top of VTK and [vedo](https://github.com/marcomusy/vedo), a wrapper that makes VTK easy to use, you have endless possibilities for plotting and visualizing your data.

## Architecture
This application relies on the well-known MVC (Model-View-Controller) pattern.
By decoupling elements, it is easy to extend the application and to customize it for your needs.

This application partly relies on vedo, a wrapper for VTK in Python that makes it easier to use. When it comes to volume rendering though, vedo and its challenger pyvista are lacking. When you start working on scientific analysis and modeling using volumetric data (combined with surface meshes if you wish), this viewer comes in handy.

vedo and pyvista, two packages that essentially do the same thing, namely wrapping VTK in Python in an easily accessible way, are great to start with. But if you want to build an application with optimized updating mechanisms (which are already part of VTK), vedo and pyvista are not made for this per se. So we keep the useful parts of vedo here and, for all the rest, we use VTK in Python directly.

IBLViewer adds the following features:
- simple but powerful features
- update-oriented rather than destroy-and-create
- per-context UI and state
- a slicer that can be controlled by the UI or by code
- interactive volumetric data mapping
- mixing volumes and surfaces

![UML schema](iblviewer_assets/doc/iblviewer_architecture.jpg?raw=true)

## Issues and feature requests
Feel free to request features, submit PRs and raise issues.

## Author
Nicolas Antille, International Brain Laboratory, 2021

## Special thanks
Thanks to Marco Musy and Federico Claudi for their support in using [vedo](https://github.com/marcomusy/vedo).
Check out the tool that Federico made, called [brainrender](https://github.com/brainglobe/brainrender), which leverages surface rendering to create great scientific figures.

From International Brain Laboratory:
Thanks to professor Alexandre Pouget, Berk Gerçek, Guido Meijer, Leenoy Meshulam and Alessandro Santos for their constructive feedback and guidance. Thanks to Olivier Winter, Gaelle Chapuis and Shan Shen for their support.

The project was initiated and funded by [the laboratory of professor Alexandre Pouget](https://www.unige.ch/medecine/neuf/en/research/grecherche/alexandre-pouget), University of Geneva, Faculty of Medicine, Basic Neuroscience, which participates in the [International Brain Laboratory](https://www.internationalbrainlab.com).

--------------------------------------------------------------------------------
/iblviewer/__init__.py:
--------------------------------------------------------------------------------

try:
    import iblviewer.mouse_brain
    import iblviewer.qt_application
except ModuleNotFoundError:
    pass
import iblviewer.launcher
import iblviewer.collection
import iblviewer.objects
import iblviewer.utils
import iblviewer.volume
import iblviewer.application

if __name__ == '__main__':
    app = iblviewer.launcher.main()

--------------------------------------------------------------------------------
/iblviewer/collection.py:
--------------------------------------------------------------------------------

from collections import OrderedDict


class Collection(OrderedDict):

    current = None
    current_key_id = 0
    targets = []

    def get_current_key(self):
        """
        Get the current key that maps to the current object
        """
        return self.get_keys()[self.current_key_id]

    def set_current(self, target):
        """
        Set the current data
        :param target: A valid key, an int id or the data object itself
        """
        keys = self.get_keys()
        if target in self:
            key = target
        elif isinstance(target, int):
            key_id = target
            try:
                key = keys[key_id]
            except Exception:
                return
        else:
            for key in self:
                if self[key] == target:
                    break

        key_id = 0
        for other_key in keys:
            if other_key == key:
                break
            key_id += 1

        # This is for when you need to know where the key is in the array
        self.current_key_id = key_id
        self.current = self.get(key)

    def next(self, loop=False):
        """
        Set the next data as the current one
        :param loop: Whether looping is enabled, which means that if the
            next value is out of range, we start back at 0
        """
        keys = self.get_keys()
        # The attribute is current_key_id (there is no current_id attribute)
        new_id = self.current_key_id + 1
        if new_id > len(keys) - 1:
            new_id = 0 if loop else len(keys) - 1
        self.set_current(new_id)
        return new_id

    def previous(self, loop=False):
        """
        Set the previous data as the current one
        :param loop: Whether looping is enabled, which means that if the
            previous value is out of range, we go to -1 in a backward loop
        """
        keys = self.get_keys()
        new_id = self.current_key_id - 1
        if new_id < 0:
            new_id = len(keys) - 1 if loop else 0
        self.set_current(new_id)
        return new_id
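    # A hypothetical usage sketch of this class (names are illustrative):
    #   models = Collection()
    #   models.store(my_model, data_id='volume1', set_current=True)
    #   models.store(other_model)       # auto-named '<ClassName>1'
    #   models.next(loop=True)          # cycle through stored entries
    #   current = models.current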
    def store(self, data, data_id=None, replace_existing=True, set_current=False):
        """
        Store a data object
        :param data: Data instance
        :param data_id: Data id, a unique string if possible.
            If it is not a unique string, a number will be appended
            to it so that it becomes unique.
        :param replace_existing: Whether any existing data with the same id is replaced or not
        :param set_current: Whether the newly stored data is set as the current one
        :return: The new data_id or None
        """
        if data_id is None or (data_id in self and not replace_existing):
            data_id = self.get_new_name(data)
        if data_id in self and not replace_existing:
            return
        self[data_id] = data
        if set_current or len(self) == 1:
            self.set_current(data_id)
        return data_id

    def get_or_create(self, data_id, data_class=None):
        """
        Get a data object from the collection or create it if none is found.
        This method works only with data objects that have a name (str) property.
        :param data_id: Either a data name or its id
        :param data_class: The class of the data
        :return: A data object of type data_class
        """
        name = data_id
        if isinstance(data_id, int):
            name = self.get_keys()[data_id]
        data = self.get(name)
        if data is None and data_class is not None:
            if isinstance(data_id, str):
                data = data_class(name=data_id)
            else:
                data = data_class()
                data_id = self.get_new_name(data)
            self[data_id] = data
        return data

    def get_keys(self):
        """
        Get all data ids
        """
        return list(self.keys())

    def find_keys(self, id_or_subid):
        """
        Get all ids/keys that contain the given param as a substring
        """
        keys = self.get_keys()
        found_ones = []
        for key in keys:
            if id_or_subid in key:  # key could be an array or any iterable too
                found_ones.append(key)
        return found_ones

    def get_new_name(self, obj):
        """
        Get a new name derived from an object's class name
        :return: String
        """
        return f'{obj.__class__.__name__}{len(self)}'

    def get_name(self, *args):
        """
        Get the given arguments separated by underscores as a single string
        :return: String
        """
        return '_'.join(args)

--------------------------------------------------------------------------------
/iblviewer/launcher.py:
--------------------------------------------------------------------------------

import argparse
import numpy as np
from iblviewer.qt_application import ViewerApp
from iblviewer.application import Viewer
# ibllib is optional: without it, only the generic (non-neuroscience) mode is available
got_ibllib = True
try:
    from iblviewer.mouse_brain import MouseBrainViewer
except ModuleNotFoundError:
    got_ibllib = False


"""
Project: IBL Viewer

Description: this is a fast and interactive 3D viewer for exploring
and analysing volumes, surfaces, points and lines.
It's based on Python, VTK and partly on vedo (a VTK wrapper).

In the context of the International Brain Laboratory, this viewer is
used by neuroscientists to perform analysis of data models in the
context of the Allen Mouse CCF v3 atlas.
20 | 21 | Copyright: 2021 Nicolas Antille, International Brain Laboratory 22 | License: MIT 23 | """ 24 | # From stackoverflow 25 | def str2bool(v): 26 | if isinstance(v, bool): 27 | return v 28 | elif v.lower() in ('yes', 'true', 't', 'y', '1'): 29 | return True 30 | elif v.lower() in ('no', 'false', 'f', 'n', '0'): 31 | return False 32 | else: 33 | raise argparse.ArgumentTypeError('Boolean value expected.') 34 | 35 | class IBLViewer(): 36 | 37 | def __init__(self): 38 | self.viewer = None 39 | self.qt_app = None 40 | self.args = None 41 | 42 | def parse_args(self, parser=None): 43 | """ 44 | Standard argument parser for iblviewer. Make sure you do not use the below argument shortcuts: 45 | -s, -t, -ui, -r, -m, -d, -atlas, -dwi, -cm, -nc, -na 46 | :param parser: An existing ArgumentParser that will be updated with standard arguments. 47 | If None, a new one is created (default). 48 | :return: ArgumentParser 49 | """ 50 | if parser is None: 51 | parser = argparse.ArgumentParser(description='International Brain Viewer based on VTK') 52 | 53 | parser.add_argument('-neuro', dest='neuroscience', type=str2bool, default=True, 54 | help='Whether the viewer starts in Neuroscience mode with Mouse Brain volume preset (1) or as generic 3D viewer (0)') 55 | 56 | parser.add_argument('-t', dest='test_data', type=str2bool, default=False, 57 | help='Whether a simple random set of points is added as test data') 58 | 59 | parser.add_argument('-ui', dest='ui', type=int, default=1, 60 | help='User interface. 0 for none, 1 for Qt, 2 for embed') 61 | 62 | # More command-line options are given in the context of neuroscience 63 | parser.add_argument('-r', dest='resolution', type=int, default=50, 64 | help='Volume resolution. Possible values: 100, 50, 25, and 10. Units are in microns.\ 65 | The 10um volume takes a lot of RAM (and some time to load)') 66 | 67 | parser.add_argument('-m', dest='mapping', type=str, default='Allen', 68 | help='Volume mapping name. Either Allen (default value) or Beryl (IBL specific simplified mapping).') 69 | 70 | parser.add_argument('-d', dest='dark_mode', type=str2bool, default=True, 71 | help='Enable (1) or disable (0) dark mode.') 72 | 73 | parser.add_argument('-atlas', dest='add_atlas', type=str2bool, default=True, 74 | help='If the Allen Atlas volume should be added to the viewer') 75 | 76 | parser.add_argument('-dwi', dest='add_dwi', type=str2bool, default=False, 77 | help='If the Allen Atlas raw DWI volume should be added to the viewer') 78 | 79 | parser.add_argument('-cm', dest='color_map', type=str, default='viridis', 80 | help='Color map for custom data mapped on to the Allen atlas volume') 81 | 82 | parser.add_argument('-nc', dest='nan_color', type=float, default=0.65, 83 | help='Gray color (between 0 and 1) for regions that have no assigned value') 84 | 85 | parser.add_argument('-na', dest='nan_alpha', type=float, default=0.5, 86 | help='Alpha (opacity) value for regions that have no assigned value') 87 | 88 | args = parser.parse_args() 89 | self.args = args 90 | return args 91 | 92 | def launch(self, callable=None, stats_callable=None, args=None, 93 | jupyter=False, neuroscience=True, **kwargs): 94 | """ 95 | Start the 3D viewer according to parameters given in the console 96 | :param callable: Function that will be called when the 3D viewer is initialized 97 | :param stats_callable: Function that will be called when statistics are updated, 98 | when a selection or sub selection changes in the 3D viewer. Available when 99 | you use the Qt UI only. 
        :param args: Any existing ArgumentParser. If None, a new IBL standard one is created.
        :param jupyter: Whether you launch the viewer within a jupyter notebook or lab
        :param neuroscience: Whether the viewer in jupyter is started in neuroscience mode or not
        :param kwargs: All further keyword arguments are passed to the viewer.initialize() method (for jupyter mode)
        :return: Either a qt_application.ViewerApp (if Qt) or a viewer instance
            (mouse_brain.MouseBrainViewer or application.Viewer)
        """
        ibllib_msg = 'The viewer is set to start in neuroscience mode but you do not have the ibllib '
        ibllib_msg += 'optional module installed.\n\nPlease run pip install ibllib and run the viewer '
        ibllib_msg += 'again if you want to start in neuroscience mode.\n\n'
        ibllib_msg += 'Alternatively, you may use the viewer in standard mode with random points for testing: iblviewer -neuro 0 -t 1\n'
        if jupyter:
            if neuroscience and not got_ibllib:
                print(ibllib_msg)
                exit()
            if neuroscience:
                # This is a computational neuroscience environment, in this case focused
                # on the Allen Brain Atlas and International Brain Laboratory data models
                self.viewer = MouseBrainViewer()
            else:
                # This is a generic 3D viewer, not related to neuroscience
                self.viewer = Viewer()
            self.viewer.initialize(**kwargs)
            return self.viewer.show()

        if args is None:
            args = self.args
        if args is None:
            args = self.parse_args()
        self.args = args

        if args.neuroscience:
            # This is a computational neuroscience environment, in this case focused
            # on the Allen Brain Atlas and International Brain Laboratory data models
            self.viewer = MouseBrainViewer()
        else:
            # This is a generic 3D viewer, not related to neuroscience
            self.viewer = Viewer()

        qt_mode = args.ui == 1
        if qt_mode:
            self.qt_app = ViewerApp()
            if args.neuroscience:
                # viewer.initialize(...) will be called internally with the expanded args
                self.qt_app.initialize(viewer=self.viewer, callable=callable, stats_callable=stats_callable,
                                       embed_ui=args.ui == 2, offscreen=False, dark_mode=args.dark_mode,
                                       resolution=args.resolution, mapping=args.mapping, add_dwi=args.add_dwi,
                                       add_atlas=args.add_atlas)
            else:
                new_function = callable
                if args.test_data:
                    # Test data
                    points = np.random.random((500, 3)) * 1000
                    def new_function(viewer):
                        if callable is not None:
                            callable(viewer)
                        viewer.add_spheres(points, radius=10)
                # viewer.initialize(...) will be called internally with the expanded args
                self.qt_app.initialize(viewer=self.viewer, callable=new_function, stats_callable=stats_callable,
                                       embed_ui=args.ui == 2, offscreen=False, dark_mode=args.dark_mode)
            return self.qt_app
            # Any code below here is only executed once you quit the Qt application

        else:
            if args.neuroscience:
                self.viewer.initialize(resolution=args.resolution, mapping=args.mapping, add_dwi=args.add_dwi,
                                       add_atlas=args.add_atlas, embed_ui=args.ui == 2, offscreen=False,
                                       jupyter=jupyter, dark_mode=args.dark_mode)
            else:
                self.viewer.initialize(embed_ui=args.ui == 2, offscreen=qt_mode, jupyter=jupyter)
                if args.test_data:
                    # Test data
                    points = np.random.random((500, 3)) * 1000
                    self.viewer.add_points(points, radius=10)

            if callable is not None:
                callable(self.viewer)
            self.viewer.show()
            return self.viewer


def main(auto_close_viewer=True):
    app = IBLViewer()
    app.launch()
    if auto_close_viewer:
        app.viewer.close()
    return app


if __name__ == '__main__':
    app = main()

--------------------------------------------------------------------------------
/iblviewer/mouse_brain.py:
--------------------------------------------------------------------------------

from dataclasses import dataclass
import os
import logging
import numpy as np
import requests
from pathlib import Path
import textwrap

import vedo
import nrrd
import ibllib.atlas
from ibllib.atlas.regions import BrainRegions
from iblutil.numerical import ismember

from iblviewer.application import Viewer
from iblviewer.volume import VolumeController, VolumeModel, LUTModel
import iblviewer.utils as utils

ALLEN_ATLAS_RESOLUTIONS = [10, 25, 50, 100]
ALLEN_ATLASES = {'base_url': 'http://download.alleninstitute.org/informatics-archive',
                 'mouse_ccf_folder': '/current-release/mouse_ccf',
                 'atlas_folder': '/annotation/ccf_2017', 'atlas_prefix': '/annotation_',
                 'dwi_folder': '/average_template', 'dwi_prefix': '/average_template_',
                 'volume_extension': '.nrrd'}

# The default origin is "bregma", an origin defined at the center of the XY axes (not on Z)
# For reference, bregma is np.array([5739.0, 5400.0, 332.0])
# And for some reason, it's not exactly in the middle of the X axis (that would be 5400.0)...
IBL_BREGMA_ORIGIN = ibllib.atlas.ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma']
BASE_PATH = utils.split_path(os.path.realpath(__file__))[0]
REMAPPED_VOLUME_SUFFIX = '_remapped'
_logger = logging.getLogger('ibllib')
LUT_VERSION = 'v01'  # version 01 is the lateralized version


class AllenAtlasExt(ibllib.atlas.AllenAtlas):
    """
    This overrides the constructor of AllenAtlas, which is not designed to be used by
    the public, that is people outside of IBL. Typically, you'd want to display the Allen volume
    data in this viewer and perform additional tasks (such as loading your own extra data)
    with other libraries. Dev note: I'm forced to copy and modify the whole constructor in this case.

    Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution,
    using the IBL bregma and coordinate system.
    """
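    # A hypothetical construction sketch (the file paths are placeholders you must provide):
    #   atlas = AllenAtlasExt(res_um=25,
    #                         image_file_path='average_template_25.nrrd',
    #                         label_file_path='annotation_25.nrrd')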
    @staticmethod
    def _read_volume(file_volume):
        if file_volume.suffix == '.nrrd':
            volume, _ = nrrd.read(file_volume, index_order='C')  # ml, dv, ap
            # we want the coronal slice to be the most contiguous
            volume = np.transpose(volume, (2, 0, 1))  # image[iap, iml, idv]
        elif file_volume.suffix == '.npz':
            volume = np.load(str(file_volume), allow_pickle=True)['arr_0']
        return volume

    def __init__(self, res_um=25, brainmap='Allen', scaling=np.array([1, 1, 1]),
                 image_file_path=None, label_file_path=None):
        """
        :param res_um: 10, 25 or 50 um
        :param brainmap: defaults to 'Allen', see ibllib.atlas.BrainRegion for re-mappings
        :param scaling: scale factor along ml, ap, dv for squeeze and stretch ([1, 1, 1])
        :return: atlas.AllenAtlasGP
        """
        xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
        dims2xyz = np.array([1, 0, 2])
        # we use bregma as the origin
        self.res_um = res_um
        dxyz = self.res_um * 1e-6 * np.array([1, -1, -1]) * scaling

        if label_file_path is None:
            # No point in going further
            return

        regions = BrainRegions()
        ibregma = (ibllib.atlas.ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um)
        #image_path = atlas_path.joinpath(f'average_template_{res_um}.nrrd')
        # It is really unfortunate that users are forced to have two volumes in memory: the atlas and the DWI!
        image = AllenAtlasExt._read_volume(Path(image_file_path))
        label = self.remap_atlas(label_file_path, regions, ibregma)

        # This calls BrainAtlas, the parent class of AllenAtlas, because we want to bypass AllenAtlas' constructor
        super(ibllib.atlas.AllenAtlas, self).__init__(image, label, dxyz, regions,
                                                      ibregma, dims2xyz=dims2xyz, xyz2dims=xyz2dims)

    def remap_atlas(self, local_file_path, regions=None, ibregma=None):
        """
        Remap the atlas label into a usable volume
        """
        file_path = Path(local_file_path)
        parent_folder = file_path.parent.absolute()
        npz_name = f'{file_path.stem}_lut_{LUT_VERSION}.npz'
        remapped_file_path = parent_folder.joinpath(npz_name)
        if not remapped_file_path.exists():
            volume = self._read_volume(file_path)
            # Lateralization should always be applied here, as the saved LUT version is
            # the lateralized one (the previous 'if regions is None' guard wrongly
            # skipped this step whenever regions were given)
            volume = self.lateralize(volume, regions, ibregma)
            _logger.info(f"saving {remapped_file_path} ...")
            np.savez_compressed(str(remapped_file_path), volume)
        else:
            volume = np.load(str(remapped_file_path))['arr_0']
        return volume

    def lateralize(self, label, regions=None, ibregma=None):
        """
        Breaks the symmetry in region labels of the Allen Mouse Atlas, where a region
        in the left hemisphere has the same id as the matching region in the right
        hemisphere. But if we want to map recordings to the brain, we need separate ids.
        :param label: Segmented volume
        :return: Modified volume
        """
        _logger.info("computing brain atlas annotations lookup table")
        if regions is None:
            regions = BrainRegions()
        if ibregma is None:
            ibregma = (ibllib.atlas.ALLEN_CCF_LANDMARKS_MLAPDV_UM['bregma'] / self.res_um)
        xyz2dims = np.array([1, 0, 2])  # this is the c-contiguous ordering
        # lateralize atlas: for this, the regions of the left hemisphere are given primary
        # keys opposite to the normal ones
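        # Toy illustration of the sign trick below along the ML axis
        # (assumption: 6 voxels, bregma at index 3):
        #   lateral = [0, 0, 0, 1, 0, 0]
        #   np.sign(np.cumsum(lateral) - 0.5)  ->  [-1, -1, -1, 1, 1, 1]
        # i.e. labels on one side of bregma get negated ids, breaking left/right symmetry.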
        lateral = np.zeros(label.shape[xyz2dims[0]])
        lateral[int(np.floor(ibregma[0]))] = 1
        lateral = np.sign(np.cumsum(lateral)[np.newaxis, :, np.newaxis] - 0.5)
        label = label * lateral
        _, im = ismember(label, regions.id)
        label = np.reshape(im.astype(np.uint16), label.shape)
        return label


@dataclass
class IBLAtlasModel():
    """
    IBL Atlas is a wrapper for the Allen Atlas with added features.
    The volume is also modified so that it fits functional needs.
    """
    origin: np.ndarray = IBL_BREGMA_ORIGIN
    atlas: ibllib.atlas.AllenAtlas = None
    atlas_lut: LUTModel = None
    atlas_mapping_ids: list = None
    ibl_back_end: bool = True

    IBL_TRANSPOSE = (1, 0, 2)

    atlas_volume: VolumeModel = None
    dwi_volume: VolumeModel = None

    def initialize(self, resolution=None, ibl_back_end=True):
        """
        Get Allen Atlas metadata (the .csv file that is copied into ibllib.atlas) and volume files
        :param resolution: Volume resolution in microns
        """
        if resolution not in ALLEN_ATLAS_RESOLUTIONS:
            resolution = ALLEN_ATLAS_RESOLUTIONS[-1]
        if resolution == ALLEN_ATLAS_RESOLUTIONS[0]:
            print('Warning: the Allen Atlas at 10um resolution will require over 10GB of RAM')
        self.resolution = resolution
        # Store on the dataclass field (the previous self.use_ibl_back_end was never read)
        self.ibl_back_end = ibl_back_end

        try:
            self.atlas = ibllib.atlas.AllenAtlas(resolution)
        except Exception:
            atlas_volume_url = self.get_allen_volume_url(resolution)
            atlas_volume_name = self.get_allen_volume_file_name(resolution)
            atlas_file = requests.get(atlas_volume_url)
            atlas_volume_path = './' + atlas_volume_name
            open(atlas_volume_path, 'wb').write(atlas_file.content)

            dwi_volume_url = self.get_allen_volume_url(resolution, True)
            dwi_volume_name = self.get_allen_volume_file_name(resolution, True)
            dwi_file = requests.get(dwi_volume_url)
            dwi_volume_path = './' + dwi_volume_name
            open(dwi_volume_path, 'wb').write(dwi_file.content)
            self.atlas = AllenAtlasExt(resolution, image_file_path=dwi_volume_path, label_file_path=atlas_volume_path)

        self.atlas_mapping_ids = list(self.atlas.regions.mappings.keys())
        # Beyond initialize(), you will need to call either get_atlas_model() or get_dwi_model()

    def get_atlas_model(self, atlas_mapping=None):
        """
        Get a VolumeModel instance that represents the atlas (segmented) volume
        :param atlas_mapping: IBL Mapping that we want to use on this atlas.
182 | See ibllib.atlas.AllenAtlas.regions.mappings.keys() 183 | :return: VolumeModel 184 | """ 185 | if self.atlas is None: 186 | print('No atlas!') 187 | return 188 | if self.atlas_volume is not None: 189 | return self.atlas_volume 190 | self.atlas_volume = VolumeModel(resolution=self.resolution, base_color_map=self.atlas.regions.rgb) 191 | self.atlas_volume.data = self.get_mapped_volume(self.atlas.label, atlas_mapping) 192 | self.atlas_volume.mapping = atlas_mapping 193 | self.atlas_volume.data_type = VolumeModel.SEGMENTED 194 | self.atlas_volume.name = 'Allen Mouse CCF v3 Atlas volume' 195 | # IBL convention 196 | self.atlas_volume.lateralized = atlas_mapping is not None and '-lr' in atlas_mapping 197 | if self.ibl_back_end: 198 | # The IBL back-end uses a different convention for memory representation 199 | # so we are forced to untranspose the volume... 200 | self.atlas_volume.transpose(IBLAtlasModel.IBL_TRANSPOSE) 201 | self.atlas_volume.build_lut(color_map=self.atlas.regions.rgb, make_active=True) 202 | self.atlas_volume.compute_size() 203 | return self.atlas_volume 204 | 205 | def get_dwi_model(self): 206 | """ 207 | Get a VolumeModel instance of the DWI volume image 208 | :return: VolumeModel 209 | """ 210 | if self.atlas is None: 211 | return 212 | if self.dwi_volume is not None: 213 | return self.dwi_volume 214 | self.dwi_volume = VolumeModel(resolution=self.resolution) 215 | self.dwi_volume.data = self.atlas.image 216 | self.dwi_volume.data_type = VolumeModel.RAW 217 | self.dwi_volume.name = 'Allen Mouse CCF v3 DWI volume' 218 | if self.ibl_back_end: 219 | self.dwi_volume.transpose(IBLAtlasModel.IBL_TRANSPOSE) 220 | self.dwi_volume.compute_size() 221 | return self.dwi_volume 222 | 223 | def get_num_regions(self): 224 | """ 225 | Get how many regions are labelled 226 | """ 227 | return self.atlas.regions.id.size 228 | 229 | def get_value_from_scalar_map(self, scalar): 230 | """ 231 | Reverse look-up in array to find a corresponding value 232 | :param scalar: Scalar value 233 | :return: Raw volume value 234 | """ 235 | scalar_map = self.atlas_volume.luts.current.scalar_map 236 | if scalar_map is None: 237 | return 238 | for value in range(len(scalar_map)): 239 | if scalar_map[value] is None: 240 | continue 241 | #print(scalar - scalar_map[value]) 242 | if scalar_map[value] == scalar: 243 | return value 244 | 245 | def get_mapped_data(self, value): 246 | """ 247 | Given a value from the segmented volume, we retrieve useful info 248 | :param value: Value from the volume 249 | :return: Dictionary of corresponding data 250 | """ 251 | if value is None: 252 | return 253 | value = int(value) 254 | region_id = self.atlas.regions.id[value] 255 | region_data = self.atlas.regions.get(region_id) 256 | 257 | data_lut = self.atlas_volume.luts.current 258 | region_name = region_data.name[0].title() 259 | color = data_lut.color_map[value][1] 260 | alpha = 1.0 261 | if data_lut.alpha_map is not None: 262 | alpha = data_lut.alpha_map[value][1] 263 | scalar = None 264 | scalar_map = data_lut.scalar_map 265 | if scalar_map is not None: 266 | if isinstance(scalar_map, dict): 267 | scalar = scalar_map.get(value) 268 | else: 269 | scalar = scalar_map[value] 270 | return {'scalar':scalar, 'region_id':region_id, 'color':color, 'alpha':alpha, 271 | 'region_name':region_name, 'region_data':region_data} 272 | 273 | def remap(self, ids, source='Allen', dest='Beryl'): 274 | """ 275 | Remap ids/scalar values from source to destination 276 | Function by Olivier Winter 277 | :param ids: List of ids 278 | :param 
source: Source mapping 279 | :param dest: Destination mapping 280 | """ 281 | #from ibllib.atlas import BrainRegions as br 282 | _, inds = ismember(ids, self.atlas.regions.mappings[source]) 283 | return self.atlas.regions.mappings[dest][inds] 284 | 285 | def get_allen_volume_file_name(self, resolution, raw_image=False): 286 | """ 287 | Get the Allen volume file name given its resolution 288 | :param resolution: Resolution of the volume 289 | :param raw_image: Whether we want the raw volume file name or the segmented one 290 | :return: String 291 | """ 292 | file_name = ALLEN_ATLASES['dwi_prefix'] if raw_image else ALLEN_ATLASES['atlas_prefix'] 293 | file_name += str(resolution) + ALLEN_ATLASES['volume_extension'] 294 | return file_name 295 | 296 | def get_allen_volume_url(self, resolution, raw_image=False): 297 | """ 298 | Construct a URL with which we can download data sets 299 | :param resolution: Volume resolution, either 10, 25, 50 or 100 (um) 300 | :param raw_image: Whether the volume is the segmented one (aka the atlas) or the DWI 301 | :return: String 302 | """ 303 | url = ALLEN_ATLASES['base_url'] + ALLEN_ATLASES['mouse_ccf_folder'] 304 | url += ALLEN_ATLASES['dwi_folder'] if raw_image else ALLEN_ATLASES['atlas_folder'] 305 | url += self.get_allen_volume_file_name(resolution, raw_image) 306 | return url 307 | 308 | def load_volume(self, file_path, remap_scalars=False, mapping=None, make_current=True): 309 | """ 310 | Load a volume data file. Supports NRRD and many other formats thanks to vedo/VTK 311 | :param file_path: Volume file path. Could support other file types easily. 312 | :param remap_scalars: Whether scalar values in the volume are replaced by 313 | their row id from a mapping that stores. This is necessary in the case of segmented 314 | volumes with regions that have a discontinuous id. 315 | :param mapping: Pandas Series or a Dictionary 316 | :param make_current: Set the volume data as the current one 317 | :return: 3D array 318 | """ 319 | return super().load_volume(file_path, remap_scalars, self.atlas.regions.id, make_current) 320 | 321 | def get_name(self, *args): 322 | """ 323 | Get full name for a model, separated by underscores 324 | :return: String 325 | """ 326 | return '_'.join(args) 327 | 328 | def get_mapped_volume(self, volume, atlas_mapping=None, ibl_back_end=True): 329 | """ 330 | Set the volume data according to a mapping 331 | :param volume: Given volume to display 332 | :param atlas_mapping: Mapping, either a string for the name of the mapping or an integer. 333 | :param ibl_back_end: If you are not using ibllib and want to load your own volume, set this to False 334 | so that there will be no transposition of the volume (needed for the ones from IBL) 335 | :return: Volume nd array 336 | """ 337 | volume_data = None 338 | if ibl_back_end: 339 | if isinstance(atlas_mapping, int): 340 | if atlas_mapping > len(self.atlas_mapping_ids) - 1: 341 | #logging.error('[AtlasModel.get_mapped_volume()] could not find atlas mapping with id ' + str(atlas_mapping) + '. Returning raw volume...') 342 | return volume 343 | map_id = self.atlas_mapping_ids[atlas_mapping] 344 | elif atlas_mapping is None: 345 | map_id = self.atlas_mapping_ids[0] 346 | else: 347 | map_id = atlas_mapping 348 | 349 | # This mapping actually changes the order of the axes in the volume... 
            volume_data = self.atlas.regions.mappings[map_id][volume]
            #logging.info('Loaded atlas volume with ' + map_id + ' mapping')
        else:
            volume_data = volume
        return volume_data

    def get_region_and_row_id(self, acronym):
        """
        Get the region id and row id for a given acronym
        :param acronym: Acronym of a brain region
        :return: Region id and row id
        """
        ind = np.where(self.atlas.regions.acronym == acronym)[0]
        if ind.size < 1:
            return None, None
        return self.atlas.regions.id[ind], ind

    def get_regions_mask(self, region_ids, alpha_map=None):
        """
        Build an alpha map that reveals only the given region ids
        :param region_ids: List or numpy array of region ids
        :param alpha_map: Optional alpha map that will be modified. If None is provided,
            the method will attempt to use the current active alpha map
        :return: 2D numpy array with region ids and corresponding alpha values
        """
        if alpha_map is None:
            # The current LUT of the atlas volume holds the active alpha map
            # (there is no self.lut attribute on this model)
            alpha_map = self.atlas_volume.luts.current.alpha_map
        if alpha_map is None:
            #logging.error('get_regions_mask() requires that an alpha map was built beforehand')
            return
        new_alpha_map = np.zeros_like(alpha_map).astype(float)
        new_alpha_map[:, 0] = alpha_map[:, 0]
        new_alpha_map[region_ids, 1] = alpha_map[region_ids, 1]
        self.atlas_volume.luts.current.sec_alpha_map = new_alpha_map
        return new_alpha_map
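    # A hypothetical lookup sketch (assumes an alpha map was built beforehand):
    #   ids, rows = ibl_model.get_region_and_row_id('CA1')
    #   mask = ibl_model.get_regions_mask(rows)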
class MouseBrainViewer(Viewer):
    """
    This is your entry point to International Brain Laboratory data visualization
    """

    def __init__(self):
        """
        Constructor
        """
        super().__init__()
        self.bounding_mesh = None
        self.atlas_controller = None
        self.dwi_controller = None
        self.ibl_model = None
        # Shortcut for users
        self.ibl_transpose = IBLAtlasModel.IBL_TRANSPOSE

    def initialize(self, resolution=25, mapping='Beryl', add_atlas=True, add_dwi=False,
                   dwi_color_map='viridis', dwi_alpha_map=None, local_allen_volumes_path=None,
                   offscreen=False, jupyter=False, embed_ui=False, embed_font_size=15,
                   plot=None, plot_window_id=0, num_windows=1, render=False, dark_mode=False):
        """
        Initialize the controller, main entry point to the viewer
        :param resolution: Resolution of the atlas volume.
            Possible values are 10 (requires a lot of RAM), 25, 50, 100. Units are in microns
        :param mapping: Optional mapping value. In the context of IBL, there is 'Allen' for the standard Allen map
            and 'Beryl' (a random name) which aggregates cortical layers as one.
        :param add_atlas: Whether the atlas is included in the viewer
        :param add_dwi: Whether the diffusion weighted imaging volume is loaded in the viewer (same boundaries as the atlas)
        :param embed_ui: Whether the UI is embedded within the VTK window
        :param embed_font_size: Embed font size. Defaults to 15 points. You might need larger values
            in case you have a small screen with high dpi (but VTK methods fail to detect that).
        :param jupyter: Whether we're running from a jupyter notebook or not
        :param plot: A vedo Plotter instance. You can either create it yourself beforehand, in case you want
            to have multiple windows with other stats, or let the controller create a new one
        :param plot_window_id: Sub-window id where the 3D visualization will be displayed
        :param num_windows: Number of sub-windows, in case you want to display your own stuff later
        :param dark_mode: Whether the viewer is in dark mode
        """
        self.model.title = 'IBL Viewer'

        # ibllib works with two volumes at the same time: the segmented volume (called 'label')
        # and the DWI volume (called 'image')
        self.ibl_model = IBLAtlasModel()
        # Note: IBLAtlasModel.initialize() only takes the resolution; passing
        # local_allen_volumes_path as the second positional argument would wrongly
        # overwrite its ibl_back_end flag
        self.ibl_model.initialize(resolution)
        self.model.origin = self.ibl_model.origin

        # Neuroscience specific
        self.model.probe_initial_point1 = self.model.origin
        self.model.probe_initial_point2 = self.model.origin + [0, 0, 8000]

        super().initialize(offscreen, jupyter, embed_ui, embed_font_size,
                           plot, plot_window_id, num_windows, dark_mode)

        # A VolumeController has a unique volume as target, so if we want to visualize both volumes, we create two views
        if add_atlas:
            self.ibl_model.get_atlas_model(mapping)
            self.add_atlas_segmentation()
        if add_dwi:
            self.ibl_model.get_dwi_model()
            if dwi_alpha_map is None:
                #dwi_alpha_map = np.ones(516)
                #dwi_alpha_map[:140] = 0
                dwi_alpha_map = [0.0, 1.0, 1.0]
            self.add_atlas_dwi(dwi_color_map, dwi_alpha_map)

        try:
            self.load_bounding_mesh()
        except Exception:
            pass

        #light = vedo.Light(self.model.IBL_BREGMA_ORIGIN - [0, 0, 1000], c='w', intensity=0.2)
        #self.plot.add(light)

        # By default, the atlas volume is our target
        if self.atlas_controller is not None:
            self.select(self.atlas_controller.actor)
        elif self.dwi_controller is not None:
            self.select(self.dwi_controller.actor)

        if self.model.selection is not None:
            #self.add_origin()
            self.handle_lut_update()
            self.set_left_view()

    def add_atlas_segmentation(self):
        """
        Add the Allen Atlas segmented volume (aka label)
        """
        if isinstance(self.atlas_controller, VolumeController):
            return
        self.atlas_controller = VolumeController(self.plot, self.ibl_model.atlas_volume,
                                                 alpha_unit_upper_offset=0.1, center_on_edges=True)
        self.register_controller(self.atlas_controller, self.atlas_controller.get_related_actors())

    def add_atlas_dwi(self, color_map, alpha_map):
        """
        Add the Allen Atlas diffusion weighted image
        :param color_map: Color map for the volume
        :param alpha_map: Alpha map for the volume
        """
        if isinstance(self.dwi_controller, VolumeController):
            return
        self.dwi_controller = VolumeController(self.plot, self.ibl_model.dwi_volume,
                                               center_on_edges=True)
        self.dwi_controller.set_color_map(color_map, alpha_map)
        self.register_controller(self.dwi_controller, self.dwi_controller.get_related_actors())

    def load_bounding_mesh(self, add_to_scene=False, alpha_on_scene=0.3):
        """
        Load the bounding mesh of the mouse brain that represents its approximate pial limit
        """
        self.bounding_mesh = utils.load_surface_mesh('997')
        if add_to_scene:
            self.bounding_mesh.alpha(alpha_on_scene)
            self.plot.add(self.bounding_mesh)

        # An offset is applied to the volume in build_actor, so we have to apply it here
        self.bounding_mesh.pos(np.array([-100 + self.ibl_model.resolution, 0.0, 0.0]))
        #self.bounding_mesh.mapper().SetClippingPlanes(self.clipping_planes)

    def find_region(self, term):
        """
        Find regions whose name contains the given substring
        :param term: Search term
        :return: Array of row indices of the matching regions
        """
        # ibl_model.atlas.regions is a BrainRegion object that puts a pandas dataframe
        # into separate numpy arrays (like 'name'), so we work on a numpy array here
        mask = np.flatnonzero(np.char.find(self.ibl_model.atlas.regions.name.astype(str), term) != -1)
        return mask

    def get_region_names(self):
        """
        Get the region names
        :return: List
        """
        return self.ibl_model.atlas.regions.name.tolist()

    def _select(self, actor=None, controller=None, event=None,
                camera_position=None, position=None, value=None):
        """
        Define the currently selected object
        :param actor: a vtkActor
        :param controller: Controller of the given actor (optional)
        :param event: a vedo event from which we use picked3d and picked2d (we could directly use vtk)
        :param camera_position: Position of the camera (optional) at selection time
        :param position: The final position computed on the volume, mesh, point or line.
            If not given, this will be automatically calculated
        :param value: The value corresponding to the point on the object. If not given, this will
            be automatically retrieved
        """
        super()._select(actor, controller, event, camera_position, position, value)
        extra_data = self.ibl_model.get_mapped_data(self.model.selection_value)
        if extra_data is None:
            return
        # This is where we retrieve neuroscience-specific data about our selection
        selection = self.model.selection
        if selection == self.atlas_controller.actor or self.is_probe(selection):
            self.model.selection_related_name = extra_data.get('region_name')
            self.model.selection_related_value = extra_data.get('scalar')

    def get_selection_info(self, line_length=40, precision=5):
        """
        Get information about the current selection
        :param line_length: Region name line length after which it's word-wrapped
        :param precision: Floating point precision for the displayed scalar value
        :return: Preformatted multiline text and a dictionary of extra data
        """
        text, data = super().get_selection_info()
        if self.atlas_controller is None:
            return text, data
        region_name = self.model.selection_related_name
        scalar = self.model.selection_related_value
        if region_name is not None:
            if isinstance(line_length, int):
                lines = textwrap.wrap(region_name, line_length, break_long_words=True)
                region_name = '\n'.join(lines)
            text += f'\nRegion: {region_name}'
        if scalar:
            text += f'\n\nScalar value: {scalar:.{precision}f}'
        return text, data

    def add_origin(self):
        """
        Add the origin on scene
        """
        self.atlas_origin = utils.Cross3DExt(self.model.origin, thickness=2, size=500)
        self.atlas_origin.lighting('off')
        self.plot.add(self.atlas_origin)
        #text_pos = self.atlas_origin.pos() + [0, 100, 0]
        #font_size = self.model.ui.font_size * 10
        #self.atlas_origin_label = vedo.Text3D('Bregma origin', pos=text_pos, s=font_size, c='k')
        #self.atlas_origin_label.followCamera()
        #self.plot.add([self.atlas_origin,
self.atlas_origin_label]) 583 | 584 | def add_many_points_test(self, positions, point_radius=2, auto_xy_rotate=True, add_to_scene=True): 585 | """ 586 | Test method that validates that VTK is fast enough for displaying 10 million points interactively (and it is :) 587 | """ 588 | if positions is None: 589 | try: 590 | points_path = utils.get_local_data_file_path('mouse_brain_neurons', extension='npz') 591 | positions = np.load(points_path, allow_pickle=True) 592 | positions = positions['arr_0'] 593 | except Exception: 594 | # We sample a cube if you don't have the pickle file for neurons in the brain 595 | positions = np.random.rand(1000000, 3) * 10000 596 | values = np.random.rand(len(positions)) * 1.0 597 | point_cloud = self.add_points(positions, point_radius, values, use_origin=False, as_spheres=False) 598 | if auto_xy_rotate: 599 | point_cloud.rotateX(90) 600 | point_cloud.rotateZ(90) 601 | self.register_object(point_cloud) 602 | if add_to_scene: 603 | self.plot.add(point_cloud) 604 | return point_cloud 605 | 606 | def add_spheres(self, positions, radius=10, values=None, color_map='Accent', name='Spheres', 607 | use_origin=True, add_to_scene=True, noise_amount=0, trim_outliers=True, 608 | bounding_mesh=None, ibl_flip_yz=True, **kwargs): 609 | """ 610 | Add new spheres 611 | :param positions: 3D array of coordinates 612 | :param radius: List same length as positions of radii. The default size is 5um, or 5 pixels 613 | in case as_spheres is False. 614 | :param values: 1D array of values, one per neuron or a time series of such 1D arrays (numpy format) 615 | :param color_map: A color map, it can be a color map built by IBLViewer or 616 | a color map name (see vedo documentation), or a list of values, etc. 617 | :param name: All point neurons are grouped into one object, you can give it a custom name 618 | :param use_origin: Whether the origin is added as offset to the given positions 619 | :param add_to_scene: Whether the new lines are added to scene/plot and rendered 620 | :param noise_amount: Amount of 3D random noise applied to each point. Defaults to 0 621 | :param trim_outliers: If bounding_mesh param is given, then the spheres will be trimmed, 622 | only the ones inside the bounding mesh will be kept 623 | :param bounding_mesh: A closed manifold surface mesh used for trimming segments. If None, 624 | the current self.bounding_mesh is used (if it exists) 625 | :param ibl_flip_yz: If you have an IBL data set, its 3D coordinates will be multiplied by -1 626 | on Y and Z axes in order to match Allen Brain Atlas volume and how it's stored by IBL. 
627 | :return: objects.Points 628 | """ 629 | axes = [1, 1, 1] 630 | if ibl_flip_yz: 631 | axes = [1, -1, -1] 632 | positions = np.array(positions) * [axes] 633 | if noise_amount is not None: 634 | positions += np.random.rand(len(positions), 3) * noise_amount 635 | link = True if add_to_scene and not trim_outliers else False 636 | spheres = super().add_spheres(positions, radius, values, color_map, 637 | name, use_origin, link, **kwargs) 638 | spheres.axes = axes 639 | if bounding_mesh is None: 640 | bounding_mesh = self.bounding_mesh 641 | if trim_outliers and bounding_mesh is not None: 642 | spheres.cutWithMesh(bounding_mesh) 643 | spheres.mapper().SetScalarVisibility(True) 644 | if add_to_scene: 645 | self.plot.add(spheres) 646 | return spheres 647 | 648 | def add_points(self, positions, radius=10, values=None, color_map='Accent', name='Points', screen_space=False, 649 | use_origin=True, add_to_scene=True, noise_amount=0, trim_outliers=True, bounding_mesh=None, 650 | ibl_flip_yz=True, **kwargs): 651 | """ 652 | Add new points 653 | :param positions: 3D array of coordinates 654 | :param radius: List same length as positions of radii. The default size is 5um, or 5 pixels 655 | in case as_spheres is False. 656 | :param values: 1D array of values, one per neuron or a time series of such 1D arrays (numpy format) 657 | :param color_map: A color map, it can be a color map built by IBLViewer or 658 | a color map name (see vedo documentation), or a list of values, etc. 659 | :param name: All point neurons are grouped into one object, you can give it a custom name 660 | :param screen_space: Type of point, if True then the points are static screen-space points. 661 | If False, then the points are made to scale in 3D, ie you see them larger when you 662 | zoom closer to them, while this is not the case with screen-space points. Defaults to False. 663 | :param use_origin: Whether the origin is added as offset to the given positions 664 | :param add_to_scene: Whether the new lines are added to scene/plot and rendered 665 | :param noise_amount: Amount of 3D random noise applied to each point. Defaults to 0 666 | :param trim_outliers: If bounding_mesh param is given, then the spheres will be trimmed, 667 | only the ones inside the bounding mesh will be kept 668 | :param bounding_mesh: A closed manifold surface mesh used for trimming segments. If None, 669 | the current self.bounding_mesh is used (if it exists) 670 | :param ibl_flip_yz: If you have an IBL data set, its 3D coordinates will be multiplied by -1 671 | on Y and Z axes in order to match Allen Brain Atlas volume and how it's stored by IBL. 
672 |         :return: objects.Points
673 |         """
674 |         axes = [1, 1, 1]
675 |         if ibl_flip_yz:
676 |             axes = [1, -1, -1]
677 |         positions = np.array(positions) * [axes]
678 |         if noise_amount:
679 |             positions += np.random.rand(len(positions), 3) * noise_amount
680 |         link = add_to_scene and not trim_outliers
681 |         points = super().add_points(positions, radius, values, color_map, name,
682 |                                     screen_space, use_origin, link, **kwargs)
683 |         points.axes = axes
684 |         if bounding_mesh is None:
685 |             bounding_mesh = self.bounding_mesh
686 |         if trim_outliers and bounding_mesh is not None:
687 |             points.cutWithMesh(bounding_mesh)
688 |             points.mapper().SetScalarVisibility(True)  # cutWithMesh disables scalar visibility; restore it
689 |         if add_to_scene:
690 |             self.plot.add(points)
691 |         return points
692 | 
693 |     def add_segments(self, points, end_points=None, line_width=2, values=None, color_map='Accent',
694 |                      name='Segments', use_origin=True, add_to_scene=True, relative_end_points=False,
695 |                      spherical_angles=None, radians=True, trim_outliers=True, bounding_mesh=None,
696 |                      ibl_flip_yz=True):
697 |         """
698 |         Add a set of segments
699 |         :param points: 3D numpy array of start points, of length n
700 |         :param end_points: 3D numpy array of end points, of length n
701 |         :param line_width: Line width, defaults to 2px
702 |         :param values: 1D list of length n, for one scalar value per segment
703 |         :param color_map: A color map: either a color map built by IBLViewer,
704 |         a color map name (see vedo documentation), a list of values, etc.
705 |         :param name: Name to give to the object
706 |         :param use_origin: Whether the current origin (not necessarily absolute 0) is used as offset
707 |         :param add_to_scene: Whether the new segments are added to scene/plot and rendered
708 |         :param relative_end_points: Whether the given end point is relative to the start point. False by default,
709 |         except if spherical coordinates are given
710 |         :param spherical_angles: 3D numpy array of spherical angle data of length n.
711 |         In case end_points is None, this replaces end_points by finding the relative
712 |         coordinate to each start point with the given radius/depth, theta and phi
713 |         :param radians: Whether the given spherical angle data is in radians or in degrees
714 |         :param trim_outliers: Whether segments are cropped by the bounding mesh
715 |         :param bounding_mesh: A closed manifold surface mesh used for trimming segments. If None,
716 |         the current self.bounding_mesh is used (if it exists)
717 |         :param ibl_flip_yz: If you have an IBL data set, its 3D coordinates will be multiplied by -1
718 |         on the Y and Z axes in order to match the Allen Brain Atlas volume as it is stored by IBL
719 |         :return: objects.Lines
720 |         """
721 |         axes = [1, 1, 1]
722 |         if ibl_flip_yz:
723 |             axes = [1, -1, -1]
724 |         points = np.array(points) * axes
725 |         if end_points is not None:
726 |             end_points = np.array(end_points) * axes
727 |         pre_add = add_to_scene and not trim_outliers
728 | 
729 |         #lines = super().add_segments(points, end_points, line_width, values, color_map, name, use_origin,
730 |         #pre_add, relative_end_points, spherical_angles, radians)
731 |         '''
732 |         [WARNING] Calling the commented super().add_segments() line above would not behave
733 |         as intended: methods that the parent implementation invokes on self, like add_lines,
734 |         dispatch back to this subclass, so axis flips and origin offsets would be applied twice.
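        (Illustrative aside: if class A defines f(self) which calls self.g(), and a
        subclass B overrides g, then super().f() called from an instance of B still
        runs B.g, because attribute lookup always starts on the instance.)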
735 |         '''
736 |         # Copy-paste from application.Viewer.add_segments, for the reason above
737 |         if end_points is None and spherical_angles is not None:
738 |             relative_end_points = True
739 |             spherical_angles = np.array(spherical_angles)
740 |             if radians:
741 |                 end_points = np.array([vedo.spher2cart(*a) for a in spherical_angles])  # ndarray has no apply()
742 |             else:
743 |                 end_points = np.array([utils.spherical_degree_angles_to_xyz(*a) for a in spherical_angles])
744 |             if relative_end_points:
745 |                 end_points += points
746 |             points = np.c_[points, end_points].reshape(-1, 2, 3)
747 |         elif end_points is not None and len(points) != len(end_points):
748 |             n = min(len(points), len(end_points))
749 |             logging.error(f'[add_segments() error] Mismatch between start and end points length. Only {n} segments shown.')
750 |             points = np.c_[points[:n], end_points[:n]].reshape(-1, 2, 3)  # slice, not index: keep the first n pairs
751 |         lines = super().add_lines(points, line_width, values, color_map, name, use_origin, pre_add)
752 | 
753 |         lines.axes = axes
754 |         if bounding_mesh is None:
755 |             bounding_mesh = self.bounding_mesh
756 |         if trim_outliers and bounding_mesh is not None:
757 |             lines.cutWithMesh(bounding_mesh)
758 |             lines.mapper().SetScalarVisibility(True)  # cutWithMesh disables scalar visibility; restore it
759 |         if add_to_scene:
760 |             self.plot.add(lines)
761 |         return lines
762 | 
763 |     def add_lines(self, points, line_width=2, values=None, color_map='Accent', name='Lines',
764 |                   use_origin=True, add_to_scene=True, trim_outliers=True, bounding_mesh=None, ibl_flip_yz=True):
765 |         """
766 |         Create a set of lines with given point sets
767 |         :param points: List of lists of 3D coordinates
768 |         :param line_width: Line width, defaults to 2px
769 |         :param values: 1D list of length n, for one scalar value per line
770 |         :param color_map: A color map: either a color map built by IBLViewer,
771 |         a color map name (see vedo documentation), a list of values, etc.
772 |         :param name: Name to give to the object
773 |         :param use_origin: Whether the current origin (not necessarily absolute 0) is used as offset
774 |         :param add_to_scene: Whether the new lines are added to scene/plot and rendered
775 |         :param trim_outliers: Whether lines that are out of the brain envelope are trimmed. True by default
776 |         :param bounding_mesh: A closed manifold surface mesh used for trimming lines. If None,
777 |         the current self.bounding_mesh is used (if it exists)
778 |         :param ibl_flip_yz: If you have an IBL data set, its 3D coordinates will be multiplied by -1
779 |         on the Y and Z axes in order to match the Allen Brain Atlas volume as it is stored by IBL
780 |         :return: objects.Lines
781 |         """
782 |         axes = [1, 1, 1]
783 |         if ibl_flip_yz:
784 |             axes = [1, -1, -1]
785 |         #target = list(points.keys()) if isinstance(points, dict) else range(len(points))
786 |         if not isinstance(points, np.ndarray):
787 |             '''
788 |             The block below works around numpy's refusal to build a regular ndarray
789 |             from nested point lists of unequal lengths.
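            (For instance, [[(0, 0, 0), (1, 1, 1)], [(2, 2, 2)]], two polylines with
            two points and one point respectively, is such a ragged input.)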
790 |             Building such an array raises: VisibleDeprecationWarning: Creating an
791 |             ndarray from ragged nested sequences (which is a list-or-tuple of
792 |             lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated
793 |             '''
794 |             all_points = []
795 |             indices = []
796 |             line_id = 0
797 |             # Possible speed improvement: use map or np.apply_along_axis
798 |             for index in range(len(points)):
799 |                 point_set = points[index]
800 |                 point_set = np.array(point_set).astype(float)
801 |                 point_set = point_set * [axes]
802 |                 if use_origin:
803 |                     point_set = point_set + self.model.origin
804 |                 all_points.append(point_set)
805 |                 indices.append(line_id)
806 |                 line_id += 1
807 |             points = all_points
808 |             if values is None:
809 |                 values = indices
810 | 
811 |         pre_add = add_to_scene and not trim_outliers
812 |         lines = super().add_lines(points, line_width, values, color_map, name, False, pre_add)
813 | 
814 |         if bounding_mesh is None:
815 |             bounding_mesh = self.bounding_mesh
816 |         if trim_outliers and bounding_mesh is not None:
817 |             lines.cutWithMesh(bounding_mesh)
818 |             # Caution: vedo's cutWithMesh disables scalar visibility, which silently
819 |             # drops all scalar colors and took a while to track down. Scalar
820 |             # visibility is therefore restored right below.
821 |             lines.mapper().SetScalarVisibility(True)
822 |         if add_to_scene:
823 |             self.plot.add(lines)
824 |         return lines
825 | 
826 |     def add_volume(self, data=None, resolution=None, file_path=None, color_map='viridis',
827 |                    alpha_map=None, select=False, add_to_scene=True, transpose=None):
828 |         """
829 |         Add a volume to the viewer with box clipping and slicing enabled by default
830 |         :param data: Volume image data or a file_path
831 |         :param resolution: Resolution of the volume
832 |         :param file_path: File path of the volume. If you don't provide image volume data,
833 |         the file_path will be used to load the volume data
834 |         :param color_map: Color map for the volume
835 |         :param alpha_map: Alpha map for the volume. If None, it will assume that 0 values
836 |         are transparent and maximum values are opaque
837 |         :param select: Whether the volume is selected
838 |         :param add_to_scene: Whether the volume is added to the scene
839 |         :param transpose: Transposition parameter. If None, nothing happens. If True,
840 |         the default IBL transposition is applied. You can also provide your own:
841 |         a list of 3 elements to reorder the volume as desired
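        Example: an illustrative sketch (the file path and resolution are hypothetical):
            controller = viewer.add_volume(file_path='./my_volume.nrrd',
                                           resolution=25, transpose=True)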
842 |         :return: VolumeController
843 |         """
844 |         if transpose is True:
845 |             transpose = self.ibl_transpose
846 |         return super().add_volume(data, resolution, file_path, color_map,
847 |                                   alpha_map, select, add_to_scene, transpose)
848 | 
849 |     def set_left_view(self):
850 |         """
851 |         Set left sagittal view
852 |         """
853 |         self.update_camera([1.0, 0.0, 0.0], self.model.Z_DOWN)
854 | 
855 |     def set_right_view(self):
856 |         """
857 |         Set right sagittal view
858 |         """
859 |         self.update_camera([-1.0, 0.0, 0.0], self.model.Z_DOWN)
860 | 
861 |     def set_anterior_view(self):
862 |         """
863 |         Set anterior coronal view
864 |         """
865 |         self.update_camera([0.0, 1.0, 0.0], self.model.Z_DOWN)
866 | 
867 |     def set_posterior_view(self):
868 |         """
869 |         Set posterior coronal view
870 |         """
871 |         self.update_camera([0.0, -1.0, 0.0], self.model.Z_DOWN)
872 | 
873 |     def set_dorsal_view(self):
874 |         """
875 |         Set dorsal axial view
876 |         """
877 |         self.update_camera([0.0, 0.0, 1.0], self.model.X_UP)
878 | 
879 |     def set_ventral_view(self):
880 |         """
881 |         Set ventral axial view
882 |         """
883 |         self.update_camera([0.0, 0.0, -1.0], self.model.X_UP)
884 | 
-------------------------------------------------------------------------------- /iblviewer/objects.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | from vtk.util.numpy_support import numpy_to_vtk
3 | import vtk
4 | import vedo
5 | 
6 | """
7 | The classes below are modifications of existing vedo classes that were either
8 | doing things in an unsatisfying manner or lacking features.
9 | """
10 | 
11 | 
12 | def Cross3D(pos=(0,0,0), size=1.0, thickness=0.5, color='black', alpha=1, res=4, lines_mode=True):
13 |     """
14 |     Build a 3D cross shape, mainly useful as a 3D marker.
15 |     :param pos: Position of the cross
16 |     :param size: Size of the cross
17 |     :param thickness: Thickness in pixels (remains constant on screen)
18 |     :param color: Color of the cross
19 |     :param alpha: Alpha/opacity of the cross
20 |     :param res: Resolution of the cylinders, used when lines_mode is False
21 |     :param lines_mode: Whether lines or cylinders are used. The difference is in how they look
22 |     when you zoom in on the cross. If you use lines, their thickness is constant on screen, whether
23 |     close or far.
If you use cylinders, they will get thicker the closer you approach the camera 24 | (in perspective mode of course) 25 | :return: vedo.Mesh 26 | """ 27 | if lines_mode: 28 | x1 = np.array([1.0, 0.0, 0.0]) * size / 2 29 | x2 = np.array([-1.0, 0.0, 0.0]) * size / 2 30 | c1 = vedo.Line(x1, x2, lw=thickness) 31 | 32 | y1 = np.array([0.0, 1.0, 0.0]) * size / 2 33 | y2 = np.array([0.0, -1.0, 0.0]) * size / 2 34 | c2 = vedo.Line(y1, y2, lw=thickness) 35 | 36 | z1 = np.array([0.0, 0.0, 1.0]) * size / 2 37 | z2 = np.array([0.0, 0.0, -1.0]) * size / 2 38 | c3 = vedo.Line(z1, z2, lw=thickness) 39 | else: 40 | c1 = vedo.Cylinder(r=thickness, height=size, res=res) 41 | c2 = vedo.Cylinder(r=thickness, height=size, res=res).rotateX(90) 42 | c3 = vedo.Cylinder(r=thickness, height=size, res=res).rotateY(90) 43 | cross = vedo.merge(c1,c2,c3).color(color).alpha(alpha) 44 | cross.SetPosition(pos) 45 | cross.name = 'Marker' 46 | return cross 47 | 48 | 49 | class Lines(vedo.Lines): 50 | """ 51 | Improved Lines class that supports point sets of varying lengths 52 | """ 53 | def __init__(self, points, end_points=None, c='gray', alpha=1, lw=1, dotted=False): 54 | """ 55 | Constructor 56 | parameters are the same as vedo.Line 57 | """ 58 | self.axes = [1, 1, 1] 59 | #if not isinstance(point_sets, np.ndarray): 60 | #point_set = np.array(point_sets, dtype=object) 61 | if len(points.shape) > 1 and points.shape[1] == 2: 62 | super().__init__(points, end_points, c=c, alpha=alpha, lw=lw, dotted=dotted) 63 | else: 64 | polylns = vtk.vtkAppendPolyData() 65 | for point_set in points: 66 | positions = point_set 67 | if not isinstance(point_set, np.ndarray): 68 | point_set = np.array(point_set) 69 | # numpy_to_vtk is unhappy if dtype is not to its liking 70 | point_set = point_set.astype(float) 71 | positions = numpy_to_vtk(np.ascontiguousarray(point_set), deep=True) 72 | 73 | # This part taken from class Line, which accepts n points 74 | vtk_points = vtk.vtkPoints() 75 | vtk_points.SetData(positions) 76 | 77 | lines = vtk.vtkCellArray() 78 | num_pts = len(point_set) 79 | lines.InsertNextCell(num_pts) 80 | for i in range(num_pts): 81 | lines.InsertCellPoint(i) 82 | 83 | poly = vtk.vtkPolyData() 84 | poly.SetPoints(vtk_points) 85 | poly.SetLines(lines) 86 | polylns.AddInputData(poly) 87 | polylns.Update() 88 | 89 | vedo.Mesh.__init__(self, polylns.GetOutput()) 90 | self.lw(lw).lighting('off') 91 | if dotted: 92 | self.GetProperty().SetLineStipplePattern(0xF0F0) 93 | self.GetProperty().SetLineStippleRepeatFactor(1) 94 | 95 | self.name = 'Lines' 96 | 97 | 98 | class Points(vedo.Points): 99 | """ 100 | Improved Points class that supports time series and screen-space mode 101 | """ 102 | 103 | def __init__(self, positions, radius=1, values=None, color_map='viridis', screen_space=False, 104 | alpha=1, res=6, min_v=None, max_v=None, scalars_prefix=None): 105 | """ 106 | Constructor 107 | :param positions: 3D positions 108 | :param radius: Radius of the points 109 | :param values: Custom scalar values. You may pass a list the same length 110 | as the number of points. If values is a 2D array, then we assume these are time series. 111 | You will then need to call: 112 | actor.polydata().GetPointData().SetActiveScalars(name) 113 | actor.mapper().SelectColorArray(name) 114 | where name is the array name that starts with the given scalars_prefix. 115 | If you have three steps in your time series, you will have by default 116 | Scalars_0, Scalars_1, Scalars_2. 
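        For instance (an illustrative case), values=np.random.rand(len(positions), 3)
        stores three time steps as Scalars_0, Scalars_1 and Scalars_2.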
117 |         :param color_map: Color map, either a list of values and corresponding colors
118 |         or a color map name (see the vedo documentation; color maps follow matplotlib's)
119 |         :param screen_space: Whether the points are rendered as screen-space points or as
120 |         spheres. The main difference is that screen-space mode is very fast and can display
121 |         millions of points, whereas sphere mode lets you zoom in on a point and see it
122 |         bigger up-close with a perspective camera (if you get too close, you will see that
123 |         the sphere is actually made of several scaled screen-space points)
124 |         :param alpha: Alpha/opacity value
125 |         :param res: Resolution of the point if screen-space mode is disabled
126 |         :param min_v: Minimum value for the given values (computed if not given)
127 |         :param max_v: Maximum value for the given values (computed if not given)
128 |         :param scalars_prefix: Scalar array name prefix. The rest of the name is _id,
129 |         where id starts at 0.
130 |         """
131 |         self.scalars_prefix = 'Scalars_' if scalars_prefix is None else scalars_prefix
132 |         self.axes = [1, 1, 1]
133 | 
134 |         # Multi component (ndimensional) arrays in vtk:
135 |         # https://vtk.org/doc/nightly/html/classvtkAbstractArray.html#a528de7a4879a219e7f82a82130186dc8
136 |         polydata = vtk.vtkPolyData()
137 |         points = vtk.vtkPointSource()
138 |         num_points = len(positions)
139 |         points.SetNumberOfPoints(num_points)
140 |         points.Update()
141 |         #for p_id in range(num_points):
142 |             #points.SetPoint(p_id, positions[p_id])
143 |         if not isinstance(positions, np.ndarray):
144 |             positions = np.array(positions)
145 |         positions = positions.astype(float)
146 |         points_data = numpy_to_vtk(np.ascontiguousarray(positions), deep=True)
147 |         #polydata.SetPoints(points)
148 |         polydata = points.GetOutput()
149 |         polydata.GetPoints().SetData(points_data)
150 | 
151 |         # We have to set scalar values after the object is created because VTK automatically
152 |         # creates some array values initially and we want to ignore them later on
153 |         scalars = []
154 |         if values is not None and len(values) > 0:
155 |             if not isinstance(values, np.ndarray):
156 |                 values = np.array(values)
157 |             if len(values.shape) > 1 and values.shape[1] > 1:
158 |                 # Safeguard: reshape returns a new array, so the result must be assigned
159 |                 if num_points == values.shape[0]:
160 |                     values = values.reshape(num_points, -1)
161 |                 all_values = values.ravel()
162 |                 if min_v is None:
163 |                     min_v = min(all_values)
164 |                 if max_v is None:
165 |                     max_v = max(all_values)
166 |                 for loop_id in range(values.shape[1]):
167 |                     scalars.append(self.add_scalar_data(polydata, values[:, loop_id]))
168 |             else:
169 |                 if min_v is None:
170 |                     min_v = min(values)
171 |                 if max_v is None:
172 |                     max_v = max(values)
173 |                 scalars.append(self.add_scalar_data(polydata, values))
174 | 
175 |         # The following sets the "ActiveScalar", i.e. the default scalar used by
176 |         # VTK (and is a must for setting the radius of each sphere by the corresponding
177 |         # value in the radii array). But what tells VTK to use it for scaling?
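        # (Per the vtkGlyph3D documentation: the glyph scales by the active scalars
        # once SetScaleModeToScaleByScalar() is set, as done further below.)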
178 |         ctf = None
179 |         values_range = None
180 |         if len(scalars) > 0:
181 |             polydata.GetPointData().SetActiveScalars(scalars[0].GetName())
182 |         if isinstance(color_map, vtk.vtkColorTransferFunction):
183 |             ctf = color_map
184 |         elif isinstance(color_map, vtk.vtkLookupTable):
185 |             ctf = color_map
186 |         elif isinstance(color_map, list) or isinstance(color_map, np.ndarray):
187 |             ctf = vtk.vtkColorTransferFunction()
188 |             for entry in color_map:
189 |                 ctf.AddRGBPoint(entry[0], *entry[1])
190 |         else:
191 |             ctf = vtk.vtkColorTransferFunction()
192 |             values_range = np.linspace(min_v, max_v, 20)
193 |             for v_id in range(len(values_range)):
194 |                 value = values_range[v_id]
195 |                 ctf.AddRGBPoint(value, *vedo.colorMap(value, color_map, min_v, max_v))
196 | 
197 |         if screen_space:
198 |             glyph = vtk.vtkVertexGlyphFilter()
199 |             glyph.SetInputData(polydata)
200 |         else:
201 |             sphere = vtk.vtkSphereSource()
202 |             sphere.SetRadius(radius)
203 |             sphere.SetThetaResolution(res)
204 |             sphere.SetPhiResolution(res)
205 |             sphere.Update()
206 | 
207 |             glyph = vtk.vtkGlyph3D()
208 |             glyph.SetSourceConnection(sphere.GetOutputPort())
209 |             glyph.SetInputData(polydata)
210 | 
211 |             #glyph.SetVectorModeToUseVector()
212 |             #glyph.OrientOn()
213 |             glyph.ClampingOn()
214 |             if len(scalars) > 0:
215 |                 # If a value == min_v, then the point has radius 0
216 |                 glyph.SetRange(min_v - 1, max_v)
217 | 
218 |             glyph.ScalingOn()
219 |             glyph.SetScaleFactor(1)
220 |             glyph.SetScaleModeToScaleByScalar()
221 | 
222 |             glyph.SetScaleMode(3)  # Note: mode 3 is VTK_DATA_SCALING_OFF, superseding the scale-by-scalar call above
223 | 
224 |         '''
225 |         For further work:
226 | 
227 |         # Tell glyph which attribute arrays to use for what
228 |         glyph.SetInputArrayToProcess(0, 0, 0, 0, 'Elevation') # scalars
229 |         #glyph.SetInputArrayToProcess(1,0,0,0,'RTDataGradient') # vectors
230 |         # glyph.SetInputArrayToProcess(2,0,0,0,'nothing') # normals
231 |         glyph.SetInputArrayToProcess(3, 0, 0, 0, 'RTData') # colors
232 | 
233 |         # Calling update because I'm going to use the scalar range to set the color map range
234 |         glyph.Update()
235 | 
236 |         coloring_by = 'RTData'
237 |         mapper = vtk.vtkPolyDataMapper()
238 |         mapper.SetInputConnection(glyph.GetOutputPort())
239 |         mapper.SetScalarModeToUsePointFieldData()
240 |         mapper.SetColorModeToMapScalars()
241 |         mapper.ScalarVisibilityOn()
242 |         mapper.SetScalarRange(glyph.GetOutputDataObject(0).GetPointData().GetArray(coloring_by).GetRange())
243 |         mapper.SelectColorArray(coloring_by)
244 |         actor = vtk.vtkActor()
245 |         actor.SetMapper(mapper)
246 |         '''
247 | 
248 |         glyph.Update()
249 |         vedo.Mesh.__init__(self, glyph.GetOutput(), alpha=alpha)
250 |         mapper = self._mapper
251 | 
252 |         self._polydata = polydata
253 |         self.source = glyph
254 |         self.name = 'Points'
255 | 
256 |         if screen_space:
257 |             #self.GetProperty().SetColor(*color)
258 |             self.GetProperty().SetPointSize(radius)
259 |             self.GetProperty().SetRenderPointsAsSpheres(True)
260 |         else:
261 |             mapper.SetScalarModeToUsePointFieldData()
262 |             if len(scalars) > 0:
263 |                 mapper.SetScalarRange(min_v, max_v)
264 |             mapper.SetColorModeToMapScalars()
265 | 
266 |         if len(scalars) > 0:
267 |             mapper.SelectColorArray(scalars[0].GetName())
268 |         if ctf is not None:
269 |             mapper.SetLookupTable(ctf)
270 |         mapper.Update()
271 | 
272 |     def get_number_of_arrays(self, ignore=['GlyphScale', 'Normals']):
273 |         """Get the number of scalar arrays on this object, ignoring the given array names"""
274 | 
275 |         num_scalar_arrays = 0
276 |         point_data = self._polydata.GetPointData()
277 |         for i in range(point_data.GetNumberOfArrays()):
278 |             if point_data.GetArrayName(i) in ignore:
279 |                 continue
280 |             num_scalar_arrays += 1
281 |         return num_scalar_arrays
282 | 
283 
|     def add_scalar_data(self, polydata, values, step_id=None):
284 |         """
285 |         Add scalar data to a VTK polydata
286 |         :param polydata: vtkPolyData
287 |         :param values: Numpy 1D array or list
288 |         :param step_id: ID for naming the scalar with Scalars_#ID
289 |         :return: vtkFloatArray
290 |         """
291 |         scalars = numpy_to_vtk(np.ascontiguousarray(values), deep=True)
292 |         num_existing_ones = polydata.GetPointData().GetNumberOfArrays()
293 |         if step_id is None:
294 |             step_id = num_existing_ones
295 |         scalars.SetName(self.scalars_prefix + str(step_id))
296 |         polydata.GetPointData().AddArray(scalars)
297 |         return scalars
298 | 
299 |     def update_data(self, positions, scalars=None):
300 |         """
301 |         Update the positions of points and optionally their scalar values
302 |         :param positions: 3D coordinates the same length as the number
303 |         of points in the object
304 |         :param scalars: 1D list or numpy array the same length as positions
305 |         """
306 |         vtk_positions = numpy_to_vtk(np.ascontiguousarray(positions), deep=True)
307 |         polydata = self.source.GetInput()
308 |         polydata.GetPoints().SetData(vtk_positions)
309 |         if scalars is not None:
310 |             #sn = polydata.GetPointData().GetScalars().GetName()
311 |             for v_id in range(len(scalars)):
312 |                 polydata.GetPointData().GetScalars().SetValue(v_id, scalars[v_id])
313 |         self.source.Update()
314 | 
315 | 
316 | class Spheres(vedo.Mesh):
317 |     """
318 |     Reimplementation of vedo.Spheres, which did not handle time series or
319 |     their color visualisation properly.
320 | 
321 |     This class isn't used at the moment. Points is preferred, as it acts either as
322 |     vedo.Spheres or as screen-space points depending on the screen_space param.
323 | 
324 |     In general, vedo uses the short variable names "c" and "r", which are a hindrance.
325 |     objects.Spheres is deprecated and you should favor objects.Points instead.
326 |     """
327 |     def __init__(self, centers, r=1, c="r", alpha=1, res=8):
328 |         """
329 |         Constructor.
330 | Parameters are the same as vedo.Spheres 331 | """ 332 | self.axes = [1, 1, 1] 333 | if isinstance(centers, vedo.Points): 334 | centers = centers.points() 335 | 336 | cisseq = False 337 | if vedo.utils.isSequence(c): 338 | cisseq = True 339 | 340 | if cisseq: 341 | if len(centers) > len(c): 342 | vedo.printc("\times Mismatch in Spheres() colors", len(centers), len(c), c='r') 343 | raise RuntimeError() 344 | if len(centers) != len(c): 345 | vedo.printc("\lightningWarning: mismatch in Spheres() colors", len(centers), len(c)) 346 | 347 | risseq = False 348 | if vedo.utils.isSequence(r): 349 | risseq = True 350 | 351 | if risseq: 352 | if len(centers) > len(r): 353 | vedo.printc("times Mismatch in Spheres() radius", len(centers), len(r), c='r') 354 | raise RuntimeError() 355 | if len(centers) != len(r): 356 | vedo.printc("\lightning Warning: mismatch in Spheres() radius", len(centers), len(r)) 357 | if cisseq and risseq: 358 | vedo.printc("\noentry Limitation: c and r cannot be both sequences.", c='r') 359 | raise RuntimeError() 360 | 361 | src = vtk.vtkSphereSource() 362 | if not risseq: 363 | src.SetRadius(r) 364 | if vedo.utils.isSequence(res): 365 | res_t, res_phi = res 366 | else: 367 | res_t, res_phi = 2*res, res 368 | 369 | src.SetThetaResolution(res_t) 370 | src.SetPhiResolution(res_phi) 371 | src.Update() 372 | 373 | psrc = vtk.vtkPointSource() 374 | psrc.SetNumberOfPoints(len(centers)) 375 | psrc.Update() 376 | pd = psrc.GetOutput() 377 | vpts = pd.GetPoints() 378 | 379 | glyph = vtk.vtkGlyph3D() 380 | glyph.SetSourceConnection(src.GetOutputPort()) 381 | 382 | if cisseq: 383 | glyph.SetColorModeToColorByScalar() 384 | ucols = vtk.vtkUnsignedCharArray() 385 | ucols.SetNumberOfComponents(3) 386 | ucols.SetName("colors") 387 | #for i, p in enumerate(centers): 388 | for cx, cy, cz in c: 389 | #cx, cy, cz = getColor(acol) 390 | ucols.InsertNextTuple3(cx * 255, cy * 255, cz * 255) 391 | pd.GetPointData().SetScalars(ucols) 392 | glyph.ScalingOff() 393 | elif risseq: 394 | glyph.SetScaleModeToScaleByScalar() 395 | urads = numpy_to_vtk(np.ascontiguousarray(2*r).astype(float), deep=True) 396 | urads.SetName("radii") 397 | pd.GetPointData().SetScalars(urads) 398 | 399 | vpts.SetData(numpy_to_vtk(np.ascontiguousarray(centers), deep=True)) 400 | 401 | glyph.SetInputData(pd) 402 | glyph.Update() 403 | 404 | vedo.Mesh.__init__(self, glyph.GetOutput(), alpha=alpha) 405 | self.phong() 406 | 407 | self._polydata = pd 408 | 409 | if cisseq: 410 | self.mapper().ScalarVisibilityOn() 411 | else: 412 | self.mapper().ScalarVisibilityOff() 413 | self.GetProperty().SetColor(vedo.getColor(c)) 414 | self.name = 'Spheres' 415 | -------------------------------------------------------------------------------- /iblviewer/qt_application.py: -------------------------------------------------------------------------------- 1 | from enum import auto 2 | import sys 3 | from PyQt5 import Qt 4 | from PyQt5 import QtCore 5 | from PyQt5 import QtWidgets 6 | from ipywebrtc.webrtc import VideoStream 7 | 8 | # You may need to uncomment these lines on some systems: 9 | #import vtk.qt 10 | #vtk.qt.QVTKRWIBase = "QGLWidget" 11 | from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor 12 | 13 | from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas 14 | from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar 15 | from matplotlib.figure import Figure 16 | import matplotlib.pyplot as plt 17 | import matplotlib as mpl 18 | 19 | from iblviewer.application 
import Viewer 20 | got_ibllib = True 21 | try: 22 | from iblviewer.mouse_brain import MouseBrainViewer 23 | except ModuleNotFoundError: 24 | got_ibllib = False 25 | from vedo import Plotter 26 | import numpy as np 27 | 28 | from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg 29 | from matplotlib.figure import Figure 30 | from threading import * 31 | 32 | 33 | class MplCanvas(FigureCanvasQTAgg): #or simply FigureCanvas? 34 | """ 35 | Matplotlib statistics 36 | """ 37 | def __init__(self, parent=None, width=5, height=4, dpi=100, background_color='white'): 38 | fig = Figure(figsize=(width, height), dpi=dpi, facecolor=background_color) 39 | self.axes = fig.add_subplot(111) 40 | super(MplCanvas, self).__init__(fig) 41 | 42 | 43 | class ViewerWindow(Qt.QMainWindow): 44 | """ 45 | This is the main window container that holds the UI, the 3D viewer and statistics 46 | """ 47 | 48 | def __init__(self, parent=None): 49 | """ 50 | Constructor 51 | """ 52 | Qt.QMainWindow.__init__(self, parent) 53 | self.frame = Qt.QFrame() 54 | 55 | self.ui_layout = Qt.QVBoxLayout() 56 | self.main_layout = Qt.QHBoxLayout() 57 | 58 | self.frame.setLayout(self.main_layout) 59 | self.setCentralWidget(self.frame) 60 | 61 | self.left = 10 62 | self.top = 10 63 | self.title = 'International Brain Laboratory Viewer' 64 | self.width = 1920 65 | self.height = 1280 66 | 67 | self.viewer = None 68 | self.viewer_function = None 69 | self.statistics_function = None 70 | self.dark_mode = False 71 | self.kwargs = {} 72 | 73 | self.auto_complete_data_changed = False 74 | self.regions_search_names = None 75 | 76 | self.setWindowTitle(self.title) 77 | self.setGeometry(self.left, self.top, self.width, self.height) 78 | 79 | # Overwrite some plot properties to integrate it within Qt 80 | self.vtkWidget = QVTKRenderWindowInteractor(self.frame) 81 | self.plot = Plotter(qtWidget=self.vtkWidget) 82 | self.viewer_initialized = False 83 | self.neuroscience_context = False 84 | self.statistics_visible = True 85 | 86 | def initialize(self, viewer=None, callable=None, stats_callable=None, dark_mode=True, **kwargs): 87 | """ 88 | Initialize the viewer app 89 | :param viewer: Either iblviewer.application.Viewer or 90 | iblviewer.mouse_brain.MouseBrainViewer 91 | :param callable: A function that you pass to this method. You must write 92 | that function with a required parameter (the viewer). This allows 93 | you to perform actions with the viewer before content is shown in QT UI. 94 | It would also be possible to add a python console in QT in order to run 95 | Python code live like in Jupyter notebooks but this isn't implemented yet. 96 | :param stats_callable: A function that will be executed every time the plot is updated 97 | for instance when a new object or sub selecton is made 98 | :param dark_mode: Whether the app is in dark mode or not 99 | """ 100 | self.viewer_function = callable 101 | self.statistics_function = stats_callable 102 | self.dark_mode = dark_mode 103 | if isinstance(kwargs, dict): 104 | self.kwargs = kwargs 105 | if got_ibllib: 106 | if isinstance(viewer, MouseBrainViewer) or isinstance(viewer, Viewer): 107 | self.viewer = viewer 108 | if self.viewer is None: 109 | self.viewer = MouseBrainViewer() 110 | self.neuroscience_context = isinstance(self.viewer, MouseBrainViewer) 111 | else: 112 | self.neuroscience_context = False 113 | 114 | self.initialize_ui() 115 | 116 | # It's important to start the viewer in another thread so that the QT UI 117 | # doesn't freeze when interacting with it. 
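        # (An illustrative variant, not used here: a plain daemon thread such as
        # Thread(target=self._initialize_viewer, daemon=True).start() would avoid
        # blocking application exit.)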
118 | # In case you need to go further with this, look at QThread with a good summary here: 119 | # https://realpython.com/python-pyqt-qthread/#using-qthread-to-prevent-freezing-guis 120 | #thread = Thread(target=self._initialize_viewer) 121 | #thread.start() 122 | self._initialize_viewer() 123 | 124 | def _initialize_viewer(self): 125 | """ 126 | Initialize the viewer 127 | """ 128 | #if viewer.plot is not None: 129 | #viewer.plot.close() 130 | if 'embed_ui' in self.kwargs: 131 | self.kwargs['embed_ui'] = False 132 | #if 'offscreen' in self.kwargs: 133 | #self.kwargs['offscreen'] = True 134 | if self.viewer is None: 135 | if got_ibllib: 136 | self.viewer = MouseBrainViewer() 137 | else: 138 | self.viewer = Viewer() 139 | 140 | self.viewer.initialize(plot=self.plot, dark_mode=self.dark_mode, **self.kwargs) 141 | if self.viewer_function is not None: 142 | try: 143 | # Allows users to add data to be visualized 144 | self.viewer_function(self.viewer) 145 | except Exception as e: 146 | print(e) 147 | 148 | # Assign functions (to mimick event callbacks) 149 | # Have a look at Qt signals if you want to do something more advanced 150 | # Basic example : https://stackoverflow.com/questions/28793440/pyqt5-focusin-out-events 151 | self.viewer.selection_changed = self.onSelectionChanged 152 | self.viewer.sub_selection_changed = self.onSelectionChanged 153 | self.viewer.objects_changed = self.onObjectsChanged 154 | 155 | self.viewer_initialized = True 156 | self.show_viewer() 157 | 158 | def initialize_ui(self): 159 | """ 160 | Initialize the QT UI 161 | """ 162 | self.background_color = 'white' 163 | mpl.rcParams['axes.spines.left'] = True 164 | mpl.rcParams['axes.spines.right'] = False 165 | mpl.rcParams['axes.spines.top'] = False 166 | mpl.rcParams['axes.spines.bottom'] = True 167 | self.set_dark_mode(self.dark_mode) 168 | 169 | self.main_layout.addLayout(self.ui_layout, 1) 170 | 171 | self.menu_tabs = Qt.QTabWidget() 172 | self.camera_menu = Qt.QWidget() 173 | self.tools_menu = Qt.QWidget() 174 | self.object_menu = Qt.QWidget() 175 | #self.data_menu = Qt.QWidget() 176 | self.export_menu = Qt.QWidget() 177 | self.menu_tabs.addTab(self.camera_menu, 'Camera') 178 | self.menu_tabs.addTab(self.tools_menu, 'Tools') 179 | self.menu_tabs.addTab(self.object_menu, 'Object') 180 | #self.menu_tabs.addTab(self.data_menu, 'Data') 181 | self.menu_tabs.addTab(self.export_menu, 'Export') 182 | 183 | #camera_group = QtWidgets.QGroupBox('Camera presets') 184 | self.camera_menu.layout = Qt.QVBoxLayout() 185 | hbox = Qt.QHBoxLayout() 186 | self.add_button('Left', self.onLeftCameraPreset, hbox) 187 | self.add_button('Right', self.onRightCameraPreset, hbox) 188 | self.camera_menu.layout.addLayout(hbox) 189 | hbox = Qt.QHBoxLayout() 190 | self.add_button('Dorsal', self.onDorsalCameraPreset, hbox) 191 | self.add_button('Ventral', self.onVentralCameraPreset, hbox) 192 | self.camera_menu.layout.addLayout(hbox) 193 | hbox = Qt.QHBoxLayout() 194 | self.add_button('Anterior', self.onAnteriorCameraPreset, hbox) 195 | self.add_button('Posterior', self.onPosteriorCameraPreset, hbox) 196 | self.camera_menu.layout.addLayout(hbox) 197 | hbox = Qt.QHBoxLayout() 198 | self.add_checkbox('Autofocus', self.onToggleAutofocus, hbox, set_checked=True) 199 | self.add_checkbox('Orthographic', self.onToggleOrthoCamera, hbox) 200 | hbox.addStretch(1) 201 | self.camera_menu.layout.addLayout(hbox) 202 | hbox = Qt.QHBoxLayout() 203 | self.add_button('View selected', self.onViewSelection, hbox) 204 | self.camera_menu.layout.addLayout(hbox) 205 | 
self.camera_menu.layout.addStretch(1) 206 | self.camera_menu.setLayout(self.camera_menu.layout) 207 | #self.ui_layout.addWidget(camera_group) 208 | 209 | scene_group = QtWidgets.QGroupBox('Scene') 210 | vbox = Qt.QVBoxLayout() 211 | self.selection_combo = self.add_combo('Select an object', [], self.onChangeSelection, vbox) 212 | scene_group.setLayout(vbox) 213 | 214 | view_group = QtWidgets.QGroupBox('View options') 215 | vbox = Qt.QVBoxLayout() 216 | hbox = Qt.QHBoxLayout() 217 | self.add_checkbox('Outline', self.onToggleOutline, hbox, set_checked=True) 218 | self.add_checkbox('Marker', self.onToggleMarker, hbox, set_checked=True) 219 | self.add_checkbox('Color bar', self.onToggleColorBar, hbox, set_checked=True) 220 | vbox.addLayout(hbox) 221 | hbox = Qt.QHBoxLayout() 222 | self.add_checkbox('Information text', self.onToggleInfoText, hbox, set_checked=True) 223 | self.add_checkbox('Dark background', self.onToggleDarkBackground, hbox, set_checked=True) 224 | vbox.addLayout(hbox) 225 | hbox = Qt.QHBoxLayout() 226 | self.add_checkbox('Fast volumes', self.onToggleVolumesLOD, hbox) 227 | vbox.addLayout(hbox) 228 | view_group.setLayout(vbox) 229 | 230 | self.ui_layout.addWidget(scene_group) 231 | self.ui_layout.addWidget(view_group) 232 | 233 | if self.statistics_function is not None: 234 | stats_group = QtWidgets.QGroupBox('Statistics') 235 | hbox = Qt.QHBoxLayout() 236 | self.add_checkbox('Statistics panel', self.onToggleStatistics, hbox, auto_render=False) 237 | stats_group.setLayout(hbox) 238 | self.ui_layout.addWidget(stats_group) 239 | 240 | #object_group = QtWidgets.QGroupBox('Object settings') 241 | self.object_menu.layout = Qt.QVBoxLayout() 242 | self.opacity_slider = self.add_slider('Opacity', 1.0, 0.0, 1.0, 0.05, self.onOpacityChange, self.object_menu.layout) 243 | self.slices_opacity_slider = self.add_slider('Slices opacity', 1.0, 0.0, 1.0, 0.05, 244 | self.onSliceOpacityChange, self.object_menu.layout) 245 | self.hollow_checkbox = self.add_checkbox('Hollow regions', self.onToggleHollow, self.object_menu.layout) 246 | self.time_series_slider = self.add_slider('Time series', 1, 0, 10, 1, self.onTimeSeriesChange, self.object_menu.layout) 247 | self.add_button('Remove object', self.onRemoveObject, self.object_menu.layout) 248 | self.object_menu.layout.addStretch(1) 249 | self.object_menu.setLayout(self.object_menu.layout) 250 | #self.ui_layout.addWidget(object_group) 251 | #self.object_menu.layout.addWidget(object_group) 252 | 253 | self.tools_menu.layout = Qt.QVBoxLayout() 254 | self.new_probe_button = self.add_button('Add new probe', self.onNewProbe, self.tools_menu.layout) 255 | self.edit_probe_button = self.add_button('Edit probe', self.onEditProbe, self.tools_menu.layout) 256 | self.slicer_button = self.add_button('Cutter/Slicer mode', self.onSlicerToggle, self.tools_menu.layout, toggle=True) 257 | 258 | if self.neuroscience_context: 259 | self.search_input = self.add_input('Search an atlas region', None, self.tools_menu.layout, autocomplete=True) 260 | #self.isosurface_checkbox = self.add_checkbox('Show regions surface', None, vbox, set_checked=True) 261 | self.search_button = self.add_button('Get region', self.onSearch, self.tools_menu.layout) 262 | 263 | self.clipping_combo = self.add_combo('Select a clipping object', [], None, self.tools_menu.layout, auto_render=False) 264 | self.clip_to_bounds = self.add_button('Clip to object bounds', self.onClipToBounds, self.tools_menu.layout) 265 | self.tools_menu.layout.addStretch(1) 266 | 
self.tools_menu.setLayout(self.tools_menu.layout) 267 | 268 | #group = QtWidgets.QGroupBox('Data settings') 269 | #self.data_menu.layout = Qt.QVBoxLayout() 270 | #self.selection_label = QtWidgets.QLabel() 271 | #self.data_menu.layout.addWidget(self.selection_label) 272 | #self.data_menu.layout.addStretch(1) 273 | #self.data_menu.setLayout(self.data_menu.layout) 274 | #self.ui_layout.addWidget(data_group) 275 | 276 | #video_group = QtWidgets.QGroupBox('Video export') 277 | self.export_menu.layout = Qt.QVBoxLayout() 278 | video_group = QtWidgets.QGroupBox('Export video presets') 279 | vbox = Qt.QVBoxLayout() 280 | self.duration_slider = self.add_slider('Duration', 8, 0, 60, 1, None, vbox) 281 | self.start_angle_slider = self.add_slider('Start angle', 0, 0, 360, 1, None, vbox) 282 | self.end_angle_slider = self.add_slider('End angle', 360, 0, 360, 1, None, vbox) 283 | self.end_angle_slider.setTracking(True) 284 | self.end_angle_slider.setTickPosition(QtWidgets.QSlider.TicksAbove) 285 | self.export_button = self.add_button('Export video...', self.onExportVideo, vbox) 286 | video_group.setLayout(vbox) 287 | #self.video_menu.layout.addStretch(1) 288 | self.export_menu.layout.addWidget(video_group) 289 | 290 | image_group = QtWidgets.QGroupBox('Export image presets') 291 | vbox = Qt.QVBoxLayout() 292 | self.magnification_scale = self.add_slider('Magnification scale', 2, 1, 10, 1, None, vbox) 293 | self.export_button = self.add_button('Export image...', self.onExportImage, vbox) 294 | image_group.setLayout(vbox) 295 | self.export_menu.layout.addStretch(1) 296 | self.export_menu.layout.addWidget(image_group) 297 | 298 | self.export_menu.setLayout(self.export_menu.layout) 299 | #self.ui_layout.addWidget(video_group) 300 | 301 | # Once we're done with preparing the whole menu with tabs, add that to the UI 302 | self.ui_layout.addWidget(self.menu_tabs) 303 | self.ui_layout.addStretch(1) 304 | 305 | splitter = QtWidgets.QSplitter(QtCore.Qt.Vertical) 306 | splitter.addWidget(self.vtkWidget) 307 | 308 | self.statistics = None 309 | self.windows_qt_mpl_issue = sys.platform == "win32" 310 | if not self.windows_qt_mpl_issue: 311 | self.statistics = MplCanvas(self, 5, 4, 100, self.background_color) 312 | self.statistics.setStyleSheet("background-color:transparent;") 313 | self.update_statistics() 314 | # Create toolbar, passing statistics as first parament, parent (self, the MainWindow) as second. 
315 | #toolbar = NavigationToolbar(self.statistics, self) 316 | #layout = Qt.QVBoxLayout() 317 | #layout.addWidget(toolbar) 318 | #layout.addWidget(self.statistics) 319 | #self.main_vbox.addLayout(layout) 320 | 321 | self.statistics_widget = Qt.QWidget() 322 | self.statistics_layout = Qt.QHBoxLayout(self.statistics_widget) 323 | self.statistics_layout.addWidget(self.statistics) 324 | # Don't show stats initially 325 | self.onToggleStatistics() 326 | 327 | splitter.addWidget(self.statistics_widget) 328 | 329 | self.main_layout.addWidget(splitter, 4) 330 | self.main_splitter = splitter 331 | #splitter.setStretchFactor(3, 1) 332 | #splitter.setSizes([125, 150]) 333 | 334 | self.vtkWidget.update() 335 | self.show() 336 | 337 | def onViewerInitialized(self): 338 | if self.neuroscience_context: 339 | regions_data = self.viewer.get_region_names() 340 | self.search_input.completer_model.setStringList(regions_data) 341 | 342 | def show_viewer(self): 343 | """ 344 | Show the viewer (when it's initialized) 345 | """ 346 | self.viewer.show() 347 | self.vtkWidget.update() 348 | self.onObjectsChanged() 349 | self.onViewerInitialized() 350 | self.update_ui() 351 | 352 | def set_light_mode(self): 353 | """ 354 | Set light mode to viewer and stats 355 | """ 356 | self.set_dark_mode(False) 357 | 358 | def set_dark_mode(self, on=True): 359 | """ 360 | Set dark mode to viewer and stats 361 | :param on: Whether dark mode is on 362 | """ 363 | if on: 364 | self.background_color = '#2d2d2d' 365 | plt.style.use('dark_background') 366 | mpl.rcParams['axes.facecolor'] = self.background_color 367 | else: 368 | self.background_color = '#dddddd' 369 | plt.style.use('default') 370 | mpl.rcParams['axes.facecolor'] = self.background_color 371 | 372 | if self.viewer is not None: 373 | self.viewer.set_dark_mode(on) 374 | 375 | def update_ui(self): 376 | """ 377 | Update the QT UI 378 | """ 379 | got_selection = self.viewer.model.got_selection() 380 | is_volume = self.viewer.is_volume() 381 | is_probe = self.viewer.is_probe() 382 | 383 | self.new_probe_button.setEnabled(got_selection and is_volume) 384 | self.edit_probe_button.setEnabled(got_selection and is_probe) 385 | #self.isosurface_checkbox.setEnabled(got_selection and is_volume) 386 | self.hollow_checkbox.setEnabled(got_selection and is_volume) 387 | 388 | self.clipping_combo.setEnabled(got_selection) 389 | self.clip_to_bounds.setEnabled(got_selection) 390 | self.slicer_button.setEnabled(got_selection) 391 | 392 | self.slicer_button.setChecked(self.viewer.box_widget is not None) 393 | 394 | if got_selection: 395 | ''' 396 | text, data = self.viewer.get_selection_info(line_length=32) 397 | if text is not None: 398 | self.selection_label.setText(text+'\n') 399 | else: 400 | self.selection_label.setText('') 401 | ''' 402 | index = self.selection_combo.findText(self.viewer.model.selection.name) 403 | if index != -1: 404 | self.selection_combo.setCurrentIndex(index) 405 | else: 406 | self.selection_combo.setCurrentIndex(0) 407 | #self.selection_label.setText('') 408 | 409 | opacity = self.viewer.get_selection_opacity() 410 | self.opacity_slider.setEnabled(opacity is not None) 411 | self.opacity_slider.label.setEnabled(opacity is not None) 412 | ''' 413 | if opacity is None: 414 | self.opacity_slider.label.hide() 415 | else: 416 | self.opacity_slider.show() 417 | self.opacity_slider.label.show() 418 | ''' 419 | if opacity is not None: 420 | self.opacity_slider.setValue(int(opacity / self.opacity_slider.step)) 421 | 422 | slices_opacity = 
self.viewer.get_selection_slices_opacity() 423 | self.slices_opacity_slider.setEnabled(slices_opacity is not None) 424 | self.slices_opacity_slider.label.setEnabled(slices_opacity is not None) 425 | if slices_opacity is not None: 426 | self.slices_opacity_slider.setValue(int(slices_opacity / self.slices_opacity_slider.step)) 427 | 428 | current_value, max_value = self.viewer.get_time_series_info() 429 | self.time_series_slider.setEnabled(current_value is not None) 430 | if isinstance(current_value, int): 431 | self.time_series_slider.setValue(current_value) 432 | if isinstance(max_value, int): 433 | self.time_series_slider.setRange(0, max_value) 434 | 435 | def update_statistics(self): 436 | """ 437 | Update statistics 438 | """ 439 | if not self.statistics_visible: 440 | return 441 | if self.statistics_function is not None: 442 | try: 443 | plot = self.statistics_function(self.statistics, self.viewer) 444 | if plot is not None: 445 | self.statistics = plot 446 | except Exception as e: 447 | e_type = type(e).__name__ 448 | msg = 'Your custom statistics function failed with error type ' + e_type 449 | if 'TypeError' in e_type: 450 | msg += '\nMake sure your functions accepts two arguments (the statistics plot and the 3d viewer).' 451 | print(msg) 452 | print(e) 453 | else: 454 | self.statistics.axes.clear() 455 | self.statistics.axes.plot(np.arange(20), np.random.random(20)/2) 456 | #self.statistics.setStyleSheet("background-color:#eeeeee;") 457 | self.statistics.draw() 458 | 459 | def onObjectsChanged(self): 460 | """ 461 | Event triggered when the dictionary of 3D objects has been updated 462 | """ 463 | names = self.viewer.get_object_names() 464 | current_id = None 465 | for n_id in range(len(names)): 466 | if names[n_id] == self.selection_combo.currentText(): 467 | current_id = n_id 468 | names = ['None'] + names 469 | self.selection_combo.clear() 470 | self.selection_combo.addItems(names) 471 | self.clipping_combo.clear() 472 | self.clipping_combo.addItems(names) 473 | if current_id is not None: 474 | self.selection_combo.setCurrentIndex(current_id) 475 | 476 | def onChangeSelection(self, value): 477 | """ 478 | Event triggered by QT to change the viewer's selection 479 | """ 480 | index = self.selection_combo.findText(value) 481 | if index != -1: 482 | self.selection_combo.setCurrentIndex(index) 483 | self.viewer.select(value, allow_none=True) 484 | 485 | def onSelectionChanged(self): 486 | """ 487 | Event triggered by the viewer when a new valid selection is made 488 | """ 489 | self.update_ui() 490 | self.update_statistics() 491 | 492 | def onSearch(self): 493 | search_term = self.search_input.text() 494 | if search_term == '': 495 | return 496 | result = self.viewer.find_region(search_term) 497 | if len(result) < 1: 498 | return 499 | # TODO: improve this and give user the choice (left or right hemisphere) 500 | region_id = result[0] 501 | #if self.isosurface_checkbox.isChecked(): 502 | self.viewer.isosurface(region_id, split_meshes=False) 503 | 504 | def onOpacityChange(self, value): 505 | self.viewer.update_opacity(value=value) 506 | 507 | def onSliceOpacityChange(self, value): 508 | self.viewer.update_slices_opacity(value=value) 509 | 510 | def onTimeSeriesChange(self, value): 511 | self.viewer.set_time_series(value) 512 | 513 | @Qt.pyqtSlot() 514 | def onToggleStatistics(self): 515 | if self.windows_qt_mpl_issue: 516 | return 517 | if self.statistics_visible: 518 | self.statistics_visible = False 519 | self.statistics_widget.hide() 520 | else: 521 | self.statistics_visible = 
True 522 | 523 | # Using show on the maptlotlib widget will segfault with an error 524 | # in mpl backend wrt to QT. So it's better to start with a new plot 525 | self.statistics_layout.removeWidget(self.statistics) 526 | self.statistics = MplCanvas(self, 5, 4, 100, self.background_color) 527 | self.statistics.setStyleSheet("background-color:transparent;") 528 | self.update_statistics() 529 | self.statistics_layout.addWidget(self.statistics) 530 | self.statistics_widget.show() 531 | 532 | @Qt.pyqtSlot() 533 | def onToggleMarker(self): 534 | self.viewer.toggle_marker() 535 | 536 | @Qt.pyqtSlot() 537 | def onToggleOutline(self): 538 | self.viewer.toggle_outline() 539 | 540 | @Qt.pyqtSlot() 541 | def onToggleColorBar(self): 542 | self.viewer.toggle_color_bar() 543 | 544 | @Qt.pyqtSlot() 545 | def onToggleInfoText(self): 546 | self.viewer.toggle_info_text() 547 | 548 | @Qt.pyqtSlot() 549 | def onToggleDarkBackground(self): 550 | self.viewer.toggle_dark_mode() 551 | 552 | @Qt.pyqtSlot() 553 | def onToggleVolumesLOD(self): 554 | self.viewer.toggle_volumetric_lod() 555 | 556 | @Qt.pyqtSlot() 557 | def onToggleMarkerType(self): 558 | self.viewer.toggle_marker_type() 559 | 560 | @Qt.pyqtSlot() 561 | def onToggleHollow(self): 562 | self.viewer.toggle_hollow_mode() 563 | 564 | @Qt.pyqtSlot() 565 | def onRemoveObject(self): 566 | self.viewer.remove_object() 567 | self.onObjectsChanged() 568 | 569 | @Qt.pyqtSlot() 570 | def onExportImage(self): 571 | options = QtWidgets.QFileDialog.Options() 572 | options |= QtWidgets.QFileDialog.DontUseNativeDialog 573 | file_path, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'QFileDialog.getSaveFileName()', 574 | 'iblviewer.jpg', 'All Files (*);;Images (*.png *.jpg);', 575 | options=options) 576 | if file_path: 577 | self.viewer.render(file_path) 578 | Qt.QMessageBox.about(self, 'Image rendering complete', f'File was saved under {file_path}') 579 | 580 | @Qt.pyqtSlot() 581 | def onExportVideo(self): 582 | options = QtWidgets.QFileDialog.Options() 583 | options |= QtWidgets.QFileDialog.DontUseNativeDialog 584 | file_path, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'QFileDialog.getSaveFileName()', 585 | 'iblviewer.mp4', 'All Files (*);;Videos (*.mp4);', 586 | options=options) 587 | if file_path: 588 | start_angle = self.start_angle_slider.value() 589 | end_angle = self.end_angle_slider.value() 590 | duration = self.duration_slider.value() 591 | self.viewer.export_turntable_video(file_path, start_angle, end_angle, duration) 592 | Qt.QMessageBox.about(self, 'Video rendering complete', f'File was saved under {file_path}') 593 | 594 | @Qt.pyqtSlot() 595 | def onClipToBounds(self): 596 | bounds_obj = self.viewer.objects.get(self.clipping_combo.currentText()) 597 | self.viewer.clip_to_bounds(bounds_obj) 598 | 599 | @Qt.pyqtSlot() 600 | def onIsosurface(self): 601 | self.viewer.isosurface(split_meshes=True) 602 | 603 | @Qt.pyqtSlot() 604 | def onSlicerToggle(self): 605 | self.viewer.toggle_box_widget() 606 | 607 | @Qt.pyqtSlot() 608 | def onNewProbe(self): 609 | self.viewer.add_probe() 610 | 611 | @Qt.pyqtSlot() 612 | def onEditProbe(self): 613 | self.viewer.edit_probe() 614 | 615 | @Qt.pyqtSlot() 616 | def onToggleOrthoCamera(self): 617 | self.viewer.toggle_orthographic_view() 618 | 619 | @Qt.pyqtSlot() 620 | def onViewSelection(self): 621 | self.viewer.view_selected() 622 | 623 | @Qt.pyqtSlot() 624 | def onToggleAutofocus(self): 625 | self.viewer.toggle_autofocus() 626 | 627 | @Qt.pyqtSlot() 628 | def onLeftCameraPreset(self): 629 | self.viewer.set_left_view() 630 | 
631 | @Qt.pyqtSlot() 632 | def onRightCameraPreset(self): 633 | self.viewer.set_right_view() 634 | 635 | @Qt.pyqtSlot() 636 | def onDorsalCameraPreset(self): 637 | self.viewer.set_dorsal_view() 638 | 639 | @Qt.pyqtSlot() 640 | def onVentralCameraPreset(self): 641 | self.viewer.set_ventral_view() 642 | 643 | @Qt.pyqtSlot() 644 | def onAnteriorCameraPreset(self): 645 | self.viewer.set_anterior_view() 646 | 647 | @Qt.pyqtSlot() 648 | def onPosteriorCameraPreset(self): 649 | self.viewer.set_posterior_view() 650 | 651 | @Qt.pyqtSlot() 652 | def onClose(self): 653 | #Disable the interactor before closing to prevent it 654 | #from trying to act on already deleted items 655 | self.vtkWidget.close() 656 | 657 | # Below are utility functions 658 | 659 | def add_input(self, label_text, change_function=None, layout=None, autocomplete=False): 660 | """ 661 | Add an input line with a label 662 | """ 663 | label = QtWidgets.QLabel(self) 664 | label.setText(label_text) 665 | input = QtWidgets.QLineEdit(self) 666 | input.label = label 667 | if change_function is not None: 668 | input.editingFinished.connect(change_function) 669 | if autocomplete: 670 | completer = QtWidgets.QCompleter() 671 | model = Qt.QStringListModel() 672 | completer.setModel(model) 673 | completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive) 674 | completer.setCompletionMode(QtWidgets.QCompleter.InlineCompletion) 675 | input.setCompleter(completer) 676 | # Store properties in order to update the model with a list later on 677 | input.completer_model = model 678 | input.completer = completer 679 | # If autocomplete is a list of strings, we use it as the completion list 680 | if isinstance(autocomplete, list): 681 | try: 682 | input.completer_model.setStringList(autocomplete) 683 | except Exception: 684 | pass 685 | if layout is not None: 686 | layout.addWidget(label) 687 | layout.addWidget(input) 688 | return input 689 | 690 | def add_button(self, label, click_function, layout=None, tooltip=None, 691 | auto_render=True, toggle=False, set_checked=False): 692 | """ 693 | Add a new button to a layout 694 | """ 695 | button = QtWidgets.QPushButton(label, self) 696 | if isinstance(tooltip, str): 697 | button.setToolTip(tooltip) 698 | def click_handler(value): 699 | if click_function is not None: 700 | click_function() 701 | if auto_render: 702 | self.viewer.render() 703 | button.setCheckable(toggle) 704 | if toggle and set_checked: 705 | button.setChecked(set_checked) 706 | button.clicked.connect(click_handler) 707 | if layout is not None: 708 | layout.addWidget(button) 709 | return button 710 | 711 | def add_checkbox(self, label, click_function, layout=None, tooltip=None, 712 | auto_render=True, set_checked=False): 713 | """ 714 | Add a new checkbox to a layout 715 | """ 716 | checkbox = QtWidgets.QCheckBox(label, self) 717 | checkbox.move(20, 0) 718 | if isinstance(tooltip, str): 719 | checkbox.setToolTip(tooltip) 720 | def change_handler(value): 721 | if click_function is not None: 722 | click_function() 723 | if auto_render: 724 | self.viewer.render() 725 | checkbox.setChecked(set_checked) 726 | checkbox.stateChanged.connect(change_handler) 727 | if layout is not None: 728 | layout.addWidget(checkbox) 729 | return checkbox 730 | 731 | def add_combo(self, text, values=None, change_function=None, layout=None, auto_render=True): 732 | """ 733 | Add a new combobox with a label to a layout 734 | """ 735 | label = QtWidgets.QLabel(self) 736 | label.setText(text) 737 | 738 | combo = QtWidgets.QComboBox(self) 739 | combo.label = label 740 | 
combo.last_value = None 741 | if values is not None: 742 | combo.addItems(values) 743 | 744 | def update_combo(value): 745 | #label.setText(f'{value}') 746 | if change_function is not None: 747 | change_function(value) 748 | if auto_render: 749 | self.viewer.render() 750 | combo.last_value = value 751 | 752 | if change_function is not None: 753 | combo.activated[str].connect(update_combo) 754 | #combo.currentIndexChanged['QString'].connect(update_combo) 755 | #combo.valueChanged.connect(update_combo) 756 | 757 | if layout is not None: 758 | layout.addWidget(label) 759 | layout.addWidget(combo) 760 | return combo 761 | 762 | def add_slider(self, text, value, min_value=0, max_value=10, step=1, change_function=None, 763 | layout=None, ticks=None, label_precision=2, auto_render=True): 764 | """ 765 | Add a new slider with a label to a layout 766 | """ 767 | label = QtWidgets.QLabel(self) 768 | label.setText(text) 769 | #label.setPixmap(QPixmap('mute.png')) 770 | 771 | slider = QtWidgets.QSlider(QtCore.Qt.Horizontal, self) 772 | #slider.setGeometry(50,50, 200, 50) 773 | #slider.move(0, 30) 774 | # Sliders in Qt can only accept integers (!) 775 | # so we have to work around that 776 | if max_value <= 1.0 and step < 1.0: 777 | min_value /= step 778 | max_value /= step 779 | value /= step 780 | # Custom dynamic properties 781 | slider.label = label 782 | slider.step = step 783 | slider.last_value = None 784 | 785 | slider.setMinimum(int(min_value)) 786 | slider.setMaximum(int(max_value)) 787 | slider.setValue(int(value)) 788 | slider.setMinimumWidth(200) 789 | 790 | real_value = value 791 | if slider.step < 1.0: 792 | real_value = value * step 793 | if isinstance(real_value, float): 794 | label.setText(f'{text}: {real_value:.{label_precision}}') 795 | else: 796 | label.setText(f'{text}: {real_value}') 797 | 798 | if ticks is None: 799 | ticks = {'interval':2, 'position':'below'} 800 | #slider.setFocusPolicy(Qt.StrongFocus) 801 | slider.setTickPosition(QtWidgets.QSlider.TicksBelow) 802 | if ticks.get('interval') is not None: 803 | slider.setTickInterval(ticks.get('interval')) 804 | if step is not None: 805 | slider.setSingleStep(max(1, step)) 806 | 807 | def update_slider_label(value): 808 | real_value = value 809 | if slider.step < 1.0: 810 | real_value = value * step 811 | if isinstance(real_value, float): 812 | label.setText(f'{text}: {real_value:.{label_precision}}') 813 | else: 814 | label.setText(f'{text}: {real_value}') 815 | if change_function is not None: 816 | change_function(real_value) 817 | if auto_render: 818 | self.viewer.render() 819 | slider.last_value = real_value 820 | 821 | slider.valueChanged.connect(update_slider_label) 822 | 823 | if layout is not None: 824 | layout.addWidget(label) 825 | layout.addWidget(slider) 826 | return slider 827 | 828 | 829 | class ViewerApp(Qt.QApplication): 830 | """ 831 | This is the main entry point to start a Qt application. 
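    Example: an illustrative sketch (my_setup is a hypothetical function that
    receives the viewer and adds data to it before the UI is shown):
        app = ViewerApp()
        app.initialize(callable=my_setup, dark_mode=True)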
832 | """ 833 | def __init__(self): 834 | super().__init__(sys.argv) 835 | self.window = ViewerWindow() 836 | self.aboutToQuit.connect(self.window.onClose) 837 | 838 | def initialize(self, viewer=None, callable=None, stats_callable=None, dark_mode=True, **kwargs): 839 | if dark_mode: 840 | # Other dark-style Qt stylesheets exist but few get it right 841 | from darktheme.widget_template import DarkPalette 842 | self.setStyle('Fusion') 843 | self.setPalette(DarkPalette()) 844 | # Handling disabled states with the custom stylesheet below 845 | self.setStyleSheet("QToolTip { color: #ffffff; background-color: grey; border: 1px solid white; }" 846 | "QCheckBox:disabled {color:#999999;}" 847 | "QSlider::sub-page:disabled {background:#999999;}" 848 | "QRadioButton:disabled {color:#999999;}" 849 | "QWidget:disabled {color:#999999;}") 850 | Qt.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor) 851 | self.window.initialize(viewer, callable, stats_callable, dark_mode, **kwargs) 852 | Qt.QApplication.restoreOverrideCursor() 853 | self.exec_() 854 | 855 | def main(): 856 | app = ViewerApp(dark_mode=False) 857 | 858 | if __name__ == "__main__": 859 | main() -------------------------------------------------------------------------------- /iblviewer/utils.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import numpy as np 3 | import os 4 | import glob 5 | from pathlib import Path 6 | 7 | from vtk.util.numpy_support import numpy_to_vtk, vtk_to_numpy, numpy_to_vtkIdTypeArray 8 | import vtk 9 | import vedo 10 | import math 11 | #import trimesh 12 | 13 | 14 | ROOT_FOLDER = Path(__file__).parent.parent 15 | ASSETS_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_assets') 16 | FONTS_FOLDER = ASSETS_FOLDER.joinpath('./fonts') 17 | EXAMPLES_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_examples') 18 | EXAMPLES_DATA_FOLDER = ROOT_FOLDER.joinpath('./iblviewer_examples/data') 19 | 20 | 21 | def get_type(element): 22 | """ 23 | Get the type of object as a string 24 | :return: String 25 | """ 26 | return str(element.__class__.__name__).lower() 27 | 28 | 29 | def get_unique_name(collection, name, spacer='_'): 30 | """ 31 | Create a unique key for a collection by appending numbers when entries exist 32 | :param collection: A list, collection, array, ... 33 | :param name: Name (for instance 'Points') 34 | :param spacer: Spacer char 35 | :return: New name, for instance 'Points_4' 36 | """ 37 | similar_ones = [] 38 | max_value = 0 39 | if name not in collection: 40 | return name 41 | for key in collection: 42 | if name in key: 43 | similar_ones.append(key) 44 | if spacer in key: 45 | value = key.split(spacer)[1] 46 | max_value = max(int(value), max_value) 47 | value = max(len(similar_ones), max_value) 48 | return f'{name}{spacer}{value}' 49 | 50 | 51 | def numpy2vtk(arr, dtype=None, deep=True, name=""): 52 | """ 53 | Convert a numpy array into a vtkDataArray 54 | :param arr: Array 55 | :param dtype: Data type. Allows to set a specific data type to the VTK array 56 | :param deep: Whether a deep copy is made. 
Defaults to True 57 | :param name: Name of the array 58 | """ 59 | if arr is None: 60 | return None 61 | arr = np.ascontiguousarray(arr) 62 | if dtype is not None and dtype!='id': 63 | arr = arr.astype(dtype) 64 | 65 | if dtype and dtype=='id': 66 | varr = numpy_to_vtkIdTypeArray(arr.astype(np.int64), deep=deep) 67 | else: 68 | varr = numpy_to_vtk(arr, deep=deep) 69 | if name: 70 | varr.SetName(name) 71 | return varr 72 | 73 | 74 | def spherical_degree_angles_to_xyz(radius, theta, phi): 75 | """ 76 | Convert spherical degree angles to XYZ coordinates 77 | :param radius: Radius 78 | :param theta: Theta angle value in degrees 79 | :param phi: Phi angle value in degrees 80 | :return: List of 3 coordinates 81 | """ 82 | return vedo.spher2cart(radius, theta / 180 * math.pi, phi / 180 * math.pi) 83 | 84 | 85 | def pick_object(plot, event_name=None, priority=None, cid=None): 86 | """ 87 | Pick an object 88 | """ 89 | x, y = plot.interactor.GetEventPosition() 90 | plot.renderer = plot.interactor.FindPokedRenderer(x, y) 91 | if not plot.picker: 92 | plot.picker = vtk.vtkPropPicker() 93 | plot.picker.PickProp(x, y, plot.renderer) 94 | plot.picked2d = (x,y) 95 | xp, yp = plot.interactor.GetLastEventPosition() 96 | actor = plot.picker.GetProp3D() 97 | delta3d = np.array([0,0,0]) 98 | picked3d = None 99 | picker = plot.picker 100 | if actor is None: 101 | # Ok, this is tricky. I found out that vtkPropPicker, even 102 | # if it optimized, can fail at detecting a simple mesh 103 | # so we use the vtkPicker as fall back plan 104 | picker = vtk.vtkPicker() 105 | picker.Pick(x, y, 0.0, plot.renderer) 106 | actor = picker.GetProp3D() 107 | if actor is not None: 108 | picked3d = np.array(picker.GetPickPosition()) 109 | if isinstance(actor, vedo.Mesh): 110 | # There is a bug with transparent objects or objects that do not have ForceOpaqueOn() 111 | # which prevents picked3d from being valid so we have to use another picking method 112 | cell_picker = vtk.vtkCellPicker() 113 | cell_picker.Pick(x, y, 0.0, plot.renderer) 114 | if cell_picker.GetProp3D() == actor: 115 | picked3d = np.array(cell_picker.GetPickPosition()) 116 | try: 117 | if actor.picked3d is not None: 118 | delta3d = picked3d - actor.picked3d 119 | actor.picked3d = picked3d 120 | except AttributeError: 121 | return 122 | else: 123 | actor = plot.picker.GetActor2D() 124 | 125 | dx, dy = x-xp, y-yp 126 | 127 | event_dict = vedo.utils.dotdict({ 128 | "name": event_name, 129 | "id": cid, 130 | "priority": priority, 131 | "at": plot.renderers.index(plot.renderer), 132 | "actor": actor, 133 | "picked3d": picked3d, 134 | "keyPressed": plot.interactor.GetKeySym(), 135 | "picked2d": (x,y), 136 | "delta2d": (dx, dy), 137 | "angle2d": np.arctan2(dy,dx), 138 | "speed2d": np.sqrt(dx*dx+dy*dy), 139 | "delta3d": delta3d, 140 | "speed3d": np.sqrt(np.dot(delta3d,delta3d)), 141 | "isPoints": isinstance(actor, vedo.Points), 142 | "isMesh": isinstance(actor, vedo.Mesh), 143 | "isAssembly": isinstance(actor, vedo.Assembly), 144 | "isVolume": isinstance(actor, vedo.Volume), 145 | "isPicture": isinstance(actor, vedo.Picture), 146 | "isActor2D": isinstance(actor, vtk.vtkActor2D) 147 | }) 148 | return event_dict 149 | 150 | 151 | def add_callback(plot, event_name, func, priority=0.0): 152 | """ 153 | Modified function from vedo. The issue is that the way vedo (and pyvista for that matter) 154 | is structured is that it helps using vtk but sometimes hinders using it with code that makes 155 | assumptions we don't want. 
156 | 157 | Add a function to be executed while show() is active. 158 | Information about the event can be acquired with method ``getEvent()``. 159 | Return a unique id for the callback. 160 | The callback function (see example below) exposes a dictionary 161 | Frequently used events are: 162 | - KeyPress, KeyRelease: listen to keyboard events 163 | - LeftButtonPress, LeftButtonRelease: listen to mouse clicks 164 | - MiddleButtonPress, MiddleButtonRelease 165 | - RightButtonPress, RightButtonRelease 166 | - MouseMove: listen to mouse pointer changing position 167 | - MouseWheelForward, MouseWheelBackward 168 | - Enter, Leave: listen to mouse entering or leaving the window 169 | - Pick, StartPick, EndPick: listen to object picking 170 | - ResetCamera, ResetCameraClippingRange 171 | - Error, Warning 172 | - Char 173 | - Timer 174 | Check the complete list of events here: 175 | https://vtk.org/doc/nightly/html/classvtkCommand.html 176 | """ 177 | if not plot.interactor: 178 | return None 179 | 180 | # Processing names is removed from vedo function 181 | # Also the whole thing is refactored with improved picking 182 | def wrapper(iren=None, event_name=None): 183 | event_dict = pick_object(plot, event_name, priority, cid) 184 | func(event_dict) 185 | 186 | cid = plot.interactor.AddObserver(event_name, wrapper, priority) 187 | return cid 188 | 189 | 190 | def get_file_name(file_name, extension): 191 | """ 192 | Get full file name 193 | :param file_name: File name without extension 194 | :param extension: File extension 195 | :return: File name with extension 196 | """ 197 | if str(file_name).endswith(extension): 198 | full_file_name = str(file_name) 199 | else: 200 | full_file_name = str(file_name) + '.' + str(extension) 201 | return full_file_name 202 | 203 | 204 | def get_local_data_file_path(file_name, extension, sub_folder=''): 205 | """ 206 | Get data path 207 | :param file_name: File name without extension 208 | :param extension: File extension 209 | :return: File path 210 | """ 211 | return ASSETS_FOLDER.joinpath(sub_folder, get_file_name(file_name, extension)) 212 | 213 | 214 | def get_surface_mesh_path(file_name, meshes_path=None, extension='ply', default_meshes_path=None): 215 | """ 216 | Get a surface mesh file path 217 | :param file_name: File name without extension 218 | :param meshes_path: Folder path. If None given, this method will look into the data folder of iblviewer 219 | :param extension: File extension 220 | :param default_meshes_path: Fallback local or remote path 221 | :return: Full mesh file path 222 | """ 223 | if meshes_path is None: 224 | region_mesh_path = str(get_local_data_file_path(file_name, extension, 'surfaces')) 225 | if not os.path.exists(region_mesh_path): 226 | if default_meshes_path is not None: 227 | region_mesh_path = default_meshes_path 228 | else: 229 | region_mesh_path = 'https://raw.github.com/int-brain-lab/iblviewer/main/iblviewer_assets/surfaces/' 230 | region_mesh_path += get_file_name(file_name, extension) 231 | else: 232 | region_mesh_path = str(os.path.join(meshes_path, get_file_name(file_name, extension))) 233 | return region_mesh_path 234 | 235 | 236 | def load_surface_mesh(file_name, meshes_path=None, extension='ply'): 237 | """ 238 | Load a surface mesh with vedo. 239 | :param file_name: File name without extension 240 | :param meshes_path: Folder path. 
If None given, this method will look into the data folder of iblviewer
241 | :param extension: File extension
242 | :return: Mesh or None if path is invalid
243 | """
244 | file_path = get_surface_mesh_path(file_name, meshes_path, extension)
245 | if file_path.startswith('https') or os.path.exists(file_path):
246 | return vedo.load(file_path)
247 | 
248 | 
249 | def change_file_name(file_path, prefix=None, name=None, suffix=None):
250 | """
251 | Change the file name from the given file path
252 | :param file_path: Input file path
253 | :param prefix: Prefix to the file name
254 | :param name: New name to use instead of the current one.
255 | If None, the current file name is used.
256 | :param suffix: Suffix to the file name
257 | :return: New file path
258 | """
259 | path, file_name, extension = split_path(file_path)
260 | if prefix is None:
261 | prefix = ''
262 | if suffix is None:
263 | suffix = ''
264 | if name is None or name == '' or not isinstance(name, str):
265 | name = file_name
266 | return os.path.join(path, prefix + name + suffix + extension)
267 | 
268 | 
269 | def split_path(path):
270 | """
271 | Split any given file path to folder path, file name and extension
272 | :return: Folder path, file name and extension
273 | """
274 | base_name = os.path.basename(path)
275 | file_name, extension = os.path.splitext(base_name)
276 | return path[:-len(base_name)], file_name, extension
277 | 
278 | 
279 | def time_diff(t):
280 | """
281 | Get a time difference in seconds
282 | :param t: Time
283 | :return: Number of seconds
284 | """
285 | now = datetime.now()
286 | duration = now - t
287 | return duration.total_seconds()
288 | 
289 | 
290 | def recompute_normals(target):
291 | # Recompute point and cell normals of the given vtkPolyData
292 | pdnorm = vtk.vtkPolyDataNormals()
293 | pdnorm.SetInputData(target)
294 | pdnorm.ComputePointNormalsOn()
295 | pdnorm.ComputeCellNormalsOn()
296 | pdnorm.FlipNormalsOff()
297 | pdnorm.ConsistencyOn()
298 | pdnorm.Update()
299 | return pdnorm.GetOutput()
300 | 
301 | def get_actor_center(actor):
302 | """
303 | Get the absolute center position of an actor
304 | :param actor: VTK actor
305 | :return: 3d array
306 | """
307 | try:
308 | if isinstance(actor, vedo.Volume):
309 | return actor.center() + actor.pos()
310 | else:
311 | return actor.centerOfMass() + actor.pos() # TODO: check that this is necessary (adding pos)
312 | except Exception as e:
313 | raise e
314 | 
315 | 
316 | def get_actor_bounds(actor):
317 | """
318 | Get the bounds of an actor as xmin, xmax, ymin, ymax, zmin, zmax
319 | :param actor: VTK actor
320 | :return: Array with 6 values
321 | """
322 | if actor is None:
323 | return
324 | try:
325 | if isinstance(actor, vedo.Volume):
326 | d = actor.dimensions() * actor.spacing() / 2 # half extents from the center
327 | c = get_actor_center(actor)
328 | return c[0] - d[0], c[0] + d[0], c[1] - d[1], c[1] + d[1], c[2] - d[2], c[2] + d[2]
329 | else:
330 | return actor.bounds()
331 | except Exception as e:
332 | raise e
333 | 
334 | 
335 | def get_actor_dimensions(actor):
336 | """
337 | Get the dimensions of an actor
338 | :param actor: VTK actor
339 | :return: 3d array
340 | """
341 | if actor is None:
342 | return
343 | try:
344 | if isinstance(actor, vedo.Volume):
345 | return actor.dimensions() * actor.spacing() # physical size: voxel counts times spacing
346 | else:
347 | xmin, xmax, ymin, ymax, zmin, zmax = actor.bounds()
348 | return np.array([xmax - xmin, ymax - ymin, zmax - zmin])
349 | except Exception as e:
350 | raise e
351 | 
352 | 
353 | def get_bounding_planes(actor):
354 | """
355 | Get bounding planes for
an actor 356 | :param actor: VTK actor 357 | :return: vtkPlanes 358 | """ 359 | planes = vtk.vtkPlanes() 360 | planes.SetBounds(actor.GetBounds()) 361 | return planes 362 | 363 | 364 | def get_planes_bounds(planes): 365 | """ 366 | Get the bounding box coordinates of a series of planes. 367 | [WARNING] Only works for six planes (box mode) at the moment 368 | :param planes: vtkPlaneCollection 369 | :return: 6 values 370 | """ 371 | origins = list() 372 | for p_id in range(planes.GetNumberOfItems()): 373 | plane = planes.GetItem(p_id) 374 | origin = np.array(plane.GetOrigin()) 375 | origins.append(origin) 376 | # We don't want zeros to be accounted for so we select planes of interest 377 | # First x planes, then y planes, then z ones. To be improved/generalized. 378 | origins = np.array(origins) 379 | mi_x = np.min(origins[:2], axis=0).tolist() 380 | ma_x = np.max(origins[:2], axis=0).tolist() 381 | mi_y = np.min(origins[2:4], axis=0).tolist() 382 | ma_y = np.max(origins[2:4], axis=0).tolist() 383 | mi_z = np.min(origins[4:6], axis=0).tolist() 384 | ma_z = np.max(origins[4:6], axis=0).tolist() 385 | return mi_x[0], ma_x[0], mi_y[1], ma_y[1], mi_z[2], ma_z[2] 386 | 387 | 388 | def get_transformation_matrix(origin, normal): 389 | """ 390 | Get transformation matrix for a plane given by its origin and normal 391 | :param origin: Origin 3D vector 392 | :param normal: Normal 3D vector 393 | :return: Matrix and Translation 394 | """ 395 | newaxis = vedo.utils.versor(normal) 396 | initaxis = (0, 0, 1) 397 | crossvec = np.cross(initaxis, newaxis) 398 | angle = np.arccos(np.dot(initaxis, newaxis)) 399 | T = vtk.vtkTransform() 400 | T.PostMultiply() 401 | T.RotateWXYZ(np.rad2deg(angle), crossvec) 402 | T.Translate(np.array(origin)) 403 | M = T.GetMatrix() 404 | return M, T 405 | 406 | 407 | def set_clipping_planes(target, planes, flip_normals=False): 408 | """ 409 | Clip the volume and move the slicing planes according the given planes 410 | :param target: vedo.Mesh (can be iblviewer.objects.Points or similar) 411 | :param planes: vtkPlanes 412 | :param flip_normals: Whether clipping normals are flipped 413 | """ 414 | if not isinstance(planes, vtk.vtkPlanes): 415 | bounds = planes 416 | planes = vtk.vtkPlanes() 417 | planes.SetBounds(bounds) 418 | if flip_normals: 419 | for plane_id in range(planes.GetNumberOfPlanes()): 420 | plane = planes.GetPlane(plane_id) 421 | plane.SetNormal(np.array(plane.GetNormal())*-1) 422 | target.GetMapper().SetClippingPlanes(planes) 423 | 424 | 425 | def probe(plot, target, widget=None, interaction_callback=None, point1=None, point2=None, 426 | place_factor=1, handle_size=0.005, color=None): 427 | """ 428 | Initializes a line widget on the given target 429 | :param plot: vtk plot 430 | :param target: Target object 431 | :param widget: Existing widget. In case a valid vtkLineWidget is given, 432 | it will be used and modified directly 433 | :param interaction_callback: Function that will be called every 434 | time there is an interaction with the widget. 
That's where 435 | you set the clipping planes to the object for instance 436 | :param point1: Initial position of point 1 437 | :param point2: Initial position of point 2 438 | :param place_factor: see vtkBoxWidget.setPlaceFactor() 439 | :param handle_size: set the relative handle size, see vtkBoxWidget.SetHandleSize() 440 | :param color: Color of the line 441 | :return: vtkLineWidget 442 | """ 443 | existing = isinstance(widget, vtk.vtkLineWidget) 444 | if not existing: 445 | widget = vtk.vtkLineWidget() 446 | widget.SetInteractor(plot.interactor) 447 | widget.SetPlaceFactor(place_factor) 448 | widget.SetHandleSize(handle_size) 449 | widget.SetInputData(target.GetMapper().GetInput()) 450 | 451 | if color is None: 452 | color = [0.5, 0.5, 0.5] 453 | widget.GetSelectedLineProperty().SetColor(*color) 454 | #widget.GetSelectedLineProperty().SetOpacity(0.7) 455 | if point1 is not None and point2 is not None: 456 | widget.PlaceWidget() 457 | widget.SetPoint1(*point1) 458 | widget.SetPoint2(*point2) 459 | else: 460 | widget.PlaceWidget(target.GetBounds()) 461 | 462 | if interaction_callback is not None: 463 | widget.RemoveObservers('InteractionEvent') 464 | widget.AddObserver('InteractionEvent', interaction_callback) 465 | 466 | plot.interactor.Render() 467 | widget.On() 468 | plot.widgets.append(widget) 469 | return widget 470 | 471 | 472 | def box_widget(plot, target, interaction_callback=None, place_factor=1, 473 | handle_size=0.005, outline_color=None): 474 | """ 475 | Initializes a box widget on the given target 476 | :param plot: vtk plot 477 | :param target: Target object 478 | :param interaction_callback: Function that will be called every 479 | time there is an interaction with the widget. That's where 480 | you set the clipping planes to the object for instance 481 | :param place_factor: see vtkBoxWidget.setPlaceFactor() 482 | :param handle_size: set the relative handle size, see vtkBoxWidget.SetHandleSize() 483 | :param outline_color: Color of the outline 484 | :return: vtkBoxWidget 485 | """ 486 | widget = vtk.vtkBoxWidget() 487 | widget.SetInteractor(plot.interactor) 488 | widget.SetPlaceFactor(place_factor) 489 | widget.SetHandleSize(handle_size) 490 | # TODO: handle the update event in volumes in order to choose 491 | # the best method, either axis-aligned slicing when normals 492 | # are axis-aligned, or slicing with vtkImageReslice. 493 | # Both functionalities are available in VolumeView already. 494 | widget.RotationEnabledOff() 495 | widget.ScalingEnabledOn() 496 | widget.TranslationEnabledOn() 497 | widget.SetInputData(target.GetMapper().GetInput()) 498 | plot.cutterWidget = widget 499 | 500 | # Only valid for vtkBoxWidget 501 | widget.OutlineCursorWiresOn() 502 | widget.InsideOutOn() 503 | widget.GetSelectedOutlineProperty().SetColor(1, 0, 1) 504 | if outline_color is None: 505 | outline_color = [0.5, 0.5, 0.5] 506 | widget.GetOutlineProperty().SetColor(*outline_color) 507 | #widget.GetOutlineProperty().SetOpacity(0.7) 508 | 509 | #widget.SetRepresentationToOutline() 510 | existing_planes = target.GetMapper().GetClippingPlanes() 511 | if existing_planes is not None: 512 | try: 513 | bounds = get_planes_bounds(existing_planes) 514 | widget.PlaceWidget(bounds) 515 | except Exception: 516 | msg = '[Warning] Object ' + target.name + ' does not have six clipping planes.' 517 | msg += 'Placing widget is not supported atm in this configuration.' 
518 | print(msg) 519 | widget.PlaceWidget(target.GetBounds()) 520 | else: 521 | widget.PlaceWidget(target.GetBounds()) 522 | 523 | def clip_target(widget=None, event=None): 524 | """ 525 | Clip the target with the current box widget 526 | """ 527 | if widget is None: 528 | return 529 | clipping_planes = vtk.vtkPlanes() 530 | widget.GetPlanes(clipping_planes) 531 | target.GetMapper().SetClippingPlanes(clipping_planes) 532 | 533 | if interaction_callback is None: 534 | interaction_callback = clip_target 535 | 536 | widget.AddObserver('InteractionEvent', interaction_callback) 537 | plot.interactor.Render() 538 | widget.On() 539 | plot.widgets.append(widget) 540 | return widget 541 | 542 | 543 | def update_scalar_bar(sb, lut, use_alpha=False, nan_color=None): 544 | """ 545 | Update a scalar bar with a new LUT 546 | :param sb: vtkScalarBarActor 547 | :param lut: vtkLookupTable 548 | :param use_alpha: whether alpha is used in the scalar bar 549 | """ 550 | if sb.GetLookupTable() == lut: 551 | return 552 | sb.SetLookupTable(lut) 553 | sb.SetUseOpacity(use_alpha) 554 | sb.SetDrawFrame(0) 555 | sb.SetDrawBackground(0) 556 | if lut.GetUseBelowRangeColor(): 557 | sb.DrawBelowRangeSwatchOn() 558 | sb.SetBelowRangeAnnotation('') 559 | if lut.GetUseAboveRangeColor(): 560 | sb.DrawAboveRangeSwatchOn() 561 | sb.SetAboveRangeAnnotation('') 562 | if nan_color is not None:#lut.GetNanColor() != (0.5, 0.0, 0.0, 1.0): 563 | lut.SetNanColor(*nan_color) 564 | sb.DrawNanAnnotationOn() 565 | sb.SetNanAnnotation('nan') 566 | 567 | 568 | def add_scalar_bar(lut, pos=(0.8, 0.05), font_color=[0, 0, 0], title="", titleYOffset=15, titleFontSize=12, 569 | size=(None,None), nlabels=None, horizontal=False, use_alpha=False): 570 | """ 571 | Create a new 2D scalar bar. This is a modified method from vedo.addons.addScalarBar 572 | :param lut: Color map LUT 573 | :param list pos: fractional x and y position in the 2D window 574 | :param list size: size of the scalarbar in pixel units (width, heigth) 575 | :param int nlabels: number of numeric labels to be shown 576 | :param bool use_alpha: retain trasparency in scalarbar 577 | :param bool horizontal: show in horizontal layout 578 | """ 579 | if isinstance(font_color, str): 580 | font_color = vedo.getColor(font_color) 581 | sb = vtk.vtkScalarBarActor() 582 | sb.SetLabelFormat('%-#6.4g') 583 | #print(sb.GetLabelFormat()) 584 | sb.SetLookupTable(lut) 585 | sb.SetUseOpacity(use_alpha) 586 | sb.SetDrawFrame(0) 587 | sb.SetDrawBackground(0) 588 | if lut.GetUseBelowRangeColor(): 589 | sb.DrawBelowRangeSwatchOn() 590 | sb.SetBelowRangeAnnotation('') 591 | if lut.GetUseAboveRangeColor(): 592 | sb.DrawAboveRangeSwatchOn() 593 | sb.SetAboveRangeAnnotation('') 594 | if lut.GetNanColor() != (0.5, 0.0, 0.0, 1.0): 595 | sb.DrawNanAnnotationOn() 596 | sb.SetNanAnnotation('nan') 597 | 598 | if title: 599 | if "\\" in repr(title): 600 | for r in vedo.shapes._reps: 601 | title = title.replace(r[0], r[1]) 602 | titprop = sb.GetTitleTextProperty() 603 | titprop.BoldOn() 604 | titprop.ItalicOff() 605 | titprop.ShadowOff() 606 | titprop.SetColor(font_color) 607 | titprop.SetVerticalJustificationToTop() 608 | titprop.SetFontSize(titleFontSize) 609 | titprop.SetFontFamily(vtk.VTK_FONT_FILE) 610 | titprop.SetFontFile(vedo.settings.fonts_path + vedo.settings.defaultFont +'.ttf') 611 | sb.SetTitle(title) 612 | sb.SetVerticalTitleSeparation(titleYOffset) 613 | sb.SetTitleTextProperty(titprop) 614 | 615 | sb.UnconstrainedFontSizeOn() 616 | sb.DrawAnnotationsOn() 617 | sb.DrawTickLabelsOn() 618 | 
sb.SetMaximumNumberOfColors(256) 619 | 620 | if horizontal: 621 | sb.SetOrientationToHorizontal() 622 | sb.SetNumberOfLabels(3) 623 | sb.SetTextPositionToSucceedScalarBar() 624 | sb.SetPosition(pos) 625 | sb.SetMaximumWidthInPixels(1000) 626 | sb.SetMaximumHeightInPixels(50) 627 | else: 628 | sb.SetNumberOfLabels(7) 629 | sb.SetTextPositionToPrecedeScalarBar() 630 | sb.SetPosition(pos[0]+0.09, pos[1]) 631 | sb.SetMaximumWidthInPixels(60) 632 | sb.SetMaximumHeightInPixels(250) 633 | 634 | if size[0] is not None: sb.SetMaximumWidthInPixels(size[0]) 635 | if size[1] is not None: sb.SetMaximumHeightInPixels(size[1]) 636 | 637 | if nlabels is not None: 638 | sb.SetNumberOfLabels(nlabels) 639 | 640 | sctxt = sb.GetLabelTextProperty() 641 | sctxt.SetFontFamily(vtk.VTK_FONT_FILE) 642 | sctxt.SetFontFile(vedo.settings.fonts_path + vedo.settings.defaultFont +'.ttf') 643 | sctxt.SetColor(font_color) 644 | sctxt.ItalicOff() 645 | sctxt.SetShadow(0) 646 | sctxt.SetFontSize(titleFontSize-2) 647 | sb.SetAnnotationTextProperty(sctxt) 648 | sb.PickableOff() 649 | return sb 650 | 651 | def add_caption_symbol(point, size=0.5, color='red', alpha=1.0, overlay_2d=True): 652 | """ 653 | Add a 2D or 3D overlay (aka caption in VTK). 654 | Modified from vedo caption() method 655 | """ 656 | #c = np.array(self.GetProperty().GetColor())/2 657 | color = vedo.colors.getColor(color) 658 | 659 | """ 660 | if point is None: 661 | x0,x1,y0,y1,z0,z1 = self.GetBounds() 662 | pt = [(x0+x1)/2, (y0+y1)/2, z1] 663 | point = self.closestPoint(pt) 664 | """ 665 | 666 | caption = vtk.vtkCaptionActor2D() 667 | caption.SetAttachmentPoint(point) 668 | caption.SetBorder(False) 669 | caption.SetLeader(True) 670 | sph = vtk.vtkSphereSource() 671 | sph.Update() 672 | caption.SetLeaderGlyphData(sph.GetOutput()) 673 | caption.SetLeaderGlyphSize(5) 674 | caption.SetMaximumLeaderGlyphSize(5) 675 | #capt.SetPadding(pad) 676 | #capt.SetCaption(txt) 677 | #capt.SetWidth(width) 678 | #capt.SetHeight(height) 679 | caption.SetThreeDimensionalLeader(not overlay_2d) 680 | 681 | prop = caption.GetProperty() 682 | prop.SetColor(color) 683 | prop.SetOpacity(alpha) 684 | """ 685 | pr = caption.GetCaptionTextProperty() 686 | pr.SetFontFamily(vtk.VTK_FONT_FILE) 687 | if 'LogoType' in font: # special case of big file 688 | fl = vedo.io.download("https://vedo.embl.es/fonts/LogoType.ttf") 689 | else: 690 | fl = settings.fonts_path + font + '.ttf' 691 | if not os.path.isfile(fl): 692 | fl = font 693 | pr.SetFontFile(fl) 694 | pr.ShadowOff() 695 | pr.BoldOff() 696 | pr.FrameOff() 697 | pr.SetColor(c) 698 | pr.SetOpacity(alpha) 699 | pr.SetJustificationToLeft() 700 | if "top" in justify: 701 | pr.SetVerticalJustificationToTop() 702 | if "bottom" in justify: 703 | pr.SetVerticalJustificationToBottom() 704 | if "cent" in justify: 705 | pr.SetVerticalJustificationToCentered() 706 | pr.SetJustificationToCentered() 707 | if "left" in justify: 708 | pr.SetJustificationToLeft() 709 | if "right" in justify: 710 | pr.SetJustificationToRight() 711 | pr.SetLineSpacing(vspacing) 712 | self._caption = capt 713 | """ 714 | return caption 715 | 716 | 717 | def load_gltf_mesh(file_path='./data//brain_regions.glb'): 718 | """ 719 | Read surface data from a binary GLTF scene 720 | """ 721 | loader = vtk.vtkGLTFDocumentLoader() 722 | reader = vtk.vtkGLTFReader() 723 | reader.SetFileName(file_path) 724 | reader.Update() 725 | #reader.Read() 726 | 727 | mb = reader.GetOutput() 728 | iterator = mb.NewIterator() 729 | 730 | vtk_polyobjects = [] 731 | while not 
iterator.IsDoneWithTraversal(): 732 | item = iterator.GetCurrentDataObject() 733 | vtk_polyobjects.append(item) 734 | iterator.GoToNextItem() 735 | 736 | print('Read', len(vtk_polyobjects), 'objects from glb') 737 | """ 738 | output_port = reader.GetOutputPort() 739 | 740 | mapper = vtkPolyDataMapper() 741 | mapper.SetInputConnection(output_port) 742 | 743 | actor = vtkActor() 744 | actor.SetMapper(mapper) 745 | """ 746 | return vtk_polyobjects -------------------------------------------------------------------------------- /iblviewer_assets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/__init__.py -------------------------------------------------------------------------------- /iblviewer_assets/doc/iblviewer_architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/doc/iblviewer_architecture.jpg -------------------------------------------------------------------------------- /iblviewer_assets/doc/iblviewer_v2_demo_brain_wide_map_1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/doc/iblviewer_v2_demo_brain_wide_map_1.jpg -------------------------------------------------------------------------------- /iblviewer_assets/doc/iblviewer_v2_demo_brain_wide_map_2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/doc/iblviewer_v2_demo_brain_wide_map_2.jpg -------------------------------------------------------------------------------- /iblviewer_assets/doc/iblviewer_v2_demo_volume_mapping_qt.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/doc/iblviewer_v2_demo_volume_mapping_qt.jpg -------------------------------------------------------------------------------- /iblviewer_assets/fonts/OFL.txt: -------------------------------------------------------------------------------- 1 | Copyright 2010, 2012, 2014 Adobe Systems Incorporated (http://www.adobe.com/), with Reserved Font Name ‘Source’. 2 | 3 | This Font Software is licensed under the SIL Open Font License, Version 1.1. 4 | This license is copied below, and is also available with a FAQ at: 5 | http://scripts.sil.org/OFL 6 | 7 | 8 | ----------------------------------------------------------- 9 | SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007 10 | ----------------------------------------------------------- 11 | 12 | PREAMBLE 13 | The goals of the Open Font License (OFL) are to stimulate worldwide 14 | development of collaborative font projects, to support the font creation 15 | efforts of academic and linguistic communities, and to provide a free and 16 | open framework in which fonts may be shared and improved in partnership 17 | with others. 18 | 19 | The OFL allows the licensed fonts to be used, studied, modified and 20 | redistributed freely as long as they are not sold by themselves. 
The 21 | fonts, including any derivative works, can be bundled, embedded, 22 | redistributed and/or sold with any software provided that any reserved 23 | names are not used by derivative works. The fonts and derivatives, 24 | however, cannot be released under any other type of license. The 25 | requirement for fonts to remain under this license does not apply 26 | to any document created using the fonts or their derivatives. 27 | 28 | DEFINITIONS 29 | "Font Software" refers to the set of files released by the Copyright 30 | Holder(s) under this license and clearly marked as such. This may 31 | include source files, build scripts and documentation. 32 | 33 | "Reserved Font Name" refers to any names specified as such after the 34 | copyright statement(s). 35 | 36 | "Original Version" refers to the collection of Font Software components as 37 | distributed by the Copyright Holder(s). 38 | 39 | "Modified Version" refers to any derivative made by adding to, deleting, 40 | or substituting -- in part or in whole -- any of the components of the 41 | Original Version, by changing formats or by porting the Font Software to a 42 | new environment. 43 | 44 | "Author" refers to any designer, engineer, programmer, technical 45 | writer or other person who contributed to the Font Software. 46 | 47 | PERMISSION & CONDITIONS 48 | Permission is hereby granted, free of charge, to any person obtaining 49 | a copy of the Font Software, to use, study, copy, merge, embed, modify, 50 | redistribute, and sell modified and unmodified copies of the Font 51 | Software, subject to the following conditions: 52 | 53 | 1) Neither the Font Software nor any of its individual components, 54 | in Original or Modified Versions, may be sold by itself. 55 | 56 | 2) Original or Modified Versions of the Font Software may be bundled, 57 | redistributed and/or sold with any software, provided that each copy 58 | contains the above copyright notice and this license. These can be 59 | included either as stand-alone text files, human-readable headers or 60 | in the appropriate machine-readable metadata fields within text or 61 | binary files as long as those fields can be easily viewed by the user. 62 | 63 | 3) No Modified Version of the Font Software may use the Reserved Font 64 | Name(s) unless explicit written permission is granted by the corresponding 65 | Copyright Holder. This restriction only applies to the primary font name as 66 | presented to the users. 67 | 68 | 4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font 69 | Software shall not be used to promote, endorse or advertise any 70 | Modified Version, except to acknowledge the contribution(s) of the 71 | Copyright Holder(s) and the Author(s) or with their explicit written 72 | permission. 73 | 74 | 5) The Font Software, modified or unmodified, in part or in whole, 75 | must be distributed entirely under this license, and must not be 76 | distributed under any other license. The requirement for fonts to 77 | remain under this license does not apply to any document created 78 | using the Font Software. 79 | 80 | TERMINATION 81 | This license becomes null and void if any of the above conditions are 82 | not met. 83 | 84 | DISCLAIMER 85 | THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 86 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF 87 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT 88 | OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. 
IN NO EVENT SHALL THE 89 | COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 90 | INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL 91 | DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 92 | FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM 93 | OTHER DEALINGS IN THE FONT SOFTWARE. 94 | -------------------------------------------------------------------------------- /iblviewer_assets/fonts/SourceSansPro-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/fonts/SourceSansPro-Regular.ttf -------------------------------------------------------------------------------- /iblviewer_assets/surfaces/997.ply: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_assets/surfaces/997.ply -------------------------------------------------------------------------------- /iblviewer_examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/__init__.py -------------------------------------------------------------------------------- /iblviewer_examples/data/channels.f22c0fd9-6b7d-f8dd-9229-884c6f573046.pqt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/channels.f22c0fd9-6b7d-f8dd-9229-884c6f573046.pqt -------------------------------------------------------------------------------- /iblviewer_examples/data/exp2_db4df448-e449-4a6f-a0e7-288711e7a75a_both: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/exp2_db4df448-e449-4a6f-a0e7-288711e7a75a_both -------------------------------------------------------------------------------- /iblviewer_examples/data/exp3_3dd347df-f14e-40d5-9ff2-9c49f84d2157_both: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/exp3_3dd347df-f14e-40d5-9ff2-9c49f84d2157_both -------------------------------------------------------------------------------- /iblviewer_examples/data/exp4_3c851386-e92d-4533-8d55-89a46f0e7384_both: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/exp4_3c851386-e92d-4533-8d55-89a46f0e7384_both -------------------------------------------------------------------------------- /iblviewer_examples/data/exp5_158d5d35-a2ab-4a76-87b0-51048c5d5283_both: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/exp5_158d5d35-a2ab-4a76-87b0-51048c5d5283_both -------------------------------------------------------------------------------- /iblviewer_examples/data/ibl_point_neurons.npz: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/ibl_point_neurons.npz -------------------------------------------------------------------------------- /iblviewer_examples/data/ncov_100.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/ncov_100.npz -------------------------------------------------------------------------------- /iblviewer_examples/data/ncov_25.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/ncov_25.npz -------------------------------------------------------------------------------- /iblviewer_examples/data/ncov_50.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/ncov_50.npz -------------------------------------------------------------------------------- /iblviewer_examples/data/stimonR_top10_rawpoints.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/stimonR_top10_rawpoints.p -------------------------------------------------------------------------------- /iblviewer_examples/data/valid_insertions.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/valid_insertions.p -------------------------------------------------------------------------------- /iblviewer_examples/data/valid_insertions_data.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/int-brain-lab/iblviewer/6a92a7cbff3649b1dfd776ef9c05b01242bed890/iblviewer_examples/data/valid_insertions_data.p -------------------------------------------------------------------------------- /iblviewer_examples/headless_render.py: -------------------------------------------------------------------------------- 1 | from iblviewer.mouse_brain import MouseBrainViewer 2 | 3 | 4 | def main(): 5 | # This example starts the viewer and renders a view 6 | viewer = MouseBrainViewer() 7 | # Font size is made larger for 4K rendering 8 | viewer.initialize(resolution=50, mapping='Allen', embed_font_size=30, offscreen=True) 9 | # Put objects on the scene 10 | viewer.show() 11 | 12 | # Add more code to add data and control the viewer here 13 | 14 | # Render a 4K image. 
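# render() is given the output file path followed by the image width and height
# in pixels (a UHD frame below); because initialize() was called with
# offscreen=True above, the image is written to disk without opening a window.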
15 | viewer.set_info_visibility(False)
16 | viewer.render('./test.jpg', 3840, 2160)
17 | viewer.close()
18 | 
19 | 
20 | if __name__ == '__main__':
21 | main() -------------------------------------------------------------------------------- /iblviewer_examples/human_brain.py: --------------------------------------------------------------------------------
1 | import argparse
2 | from cloudvolume import CloudVolume
3 | from iblviewer.launcher import IBLViewer
4 | 
5 | 
6 | class HumanBrainData():
7 | 
8 | def __init__(self):
9 | self.volume = None
10 | self.cloud_volume = None
11 | self.atlas = 'classif'
12 | self.lod = 3
13 | self.resolution = None
14 | self.color_map = None
15 | 
16 | def load_volume(self, atlas='classif', lod=3):
17 | url = 'precomputed://https://neuroglancer.humanbrainproject.eu/'
18 | url += 'precomputed/BigBrainRelease.2015/' + atlas
19 | print('About to load precomputed volume', url)
20 | volume = CloudVolume(url, mip=lod)
21 | print('Metadata for lod', lod, ':', volume.scale)
22 | image = volume[:, :, :]
23 | self.volume = image.flatten().reshape(volume.scale['size'])
24 | self.cloud_volume = volume
25 | self.resolution = volume.scale['resolution'][0]
26 | 
27 | def on_viewer_initialized(self, viewer):
28 | viewer.add_volume(self.volume, self.resolution, None, self.color_map, select=True)
29 | viewer.show()
30 | 
31 | 
32 | def main():
33 | parser = argparse.ArgumentParser(description='Human Brain Atlas')
34 | parser.add_argument('-l', dest='lod', type=int, default=3,
35 | help='LOD value')
36 | parser.add_argument('-v', dest='volume', type=str, default='classif',
37 | help='Volume type, either 8bit or classif')
38 | parser.add_argument('-c', dest='color_map', type=str, default='viridis',
39 | help='Color map')
40 | 
41 | iblviewer = IBLViewer()
42 | args = iblviewer.parse_args(parser)
43 | 
44 | hb = HumanBrainData()
45 | hb.color_map = args.color_map
46 | hb.load_volume(args.volume, args.lod)
47 | 
48 | iblviewer.launch(hb.on_viewer_initialized)
49 | 
50 | 
51 | if __name__ == '__main__':
52 | main() -------------------------------------------------------------------------------- /iblviewer_examples/ibl_brain_coverage.py: --------------------------------------------------------------------------------
1 | import time
2 | import os
3 | 
4 | import numpy as np
5 | import pandas as pd
6 | from scipy.ndimage import convolve
7 | 
8 | from ibllib.dsp import fcn_cosine
9 | from ibllib.atlas import AllenAtlas
10 | 
11 | from iblviewer.mouse_brain import MouseBrainViewer
12 | from iblviewer import utils
13 | 
14 | 
15 | DIST_FCN = np.array([100, 150]) / 1e6
16 | 
17 | 
18 | def compute_coverage_volume(ba=None):
19 | file_channels = next(utils.EXAMPLES_DATA_FOLDER.glob('channels.*.pqt'))
20 | 
21 | print('Computing coverage volume...')
22 | # read the channels file and create a volume with a one in each voxel containing a spike
23 | channels = pd.read_parquet(file_channels)
24 | if ba is None:
25 | ba = AllenAtlas(25)
26 | cvol = np.zeros(ba.image.shape, dtype=float)
27 | xyz = np.c_[channels['ml'].to_numpy(), channels['ap'].to_numpy(), channels['dv'].to_numpy()] / 1e6
28 | iii = ba.bc.xyz2i(xyz)
29 | cvol[np.unravel_index(ba._lookup(xyz), cvol.shape)] = 1
30 | 
31 | # create the convolution kernel, a 3D cosine function decreasing from 1 to 0 between the bounds in DIST_FCN
32 | dx = ba.bc.dx
33 | template = np.arange(- np.max(DIST_FCN) - dx, np.max(DIST_FCN) + 2 * dx, dx) ** 2
34 | kernel = sum(np.meshgrid(template, template, template))
35 | kernel = 1 - fcn_cosine(DIST_FCN)(np.sqrt(kernel))
36 | 
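# The meshgrid sum above adds the squared per-axis offsets, so `kernel` holds the
# squared Euclidean distance to the kernel center; after the square root and the
# cosine taper on the line above, voxels within 100 um of a channel keep a weight
# of 1 and the weight falls smoothly to 0 at 150 um (DIST_FCN is in meters).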
# and convolve (NB: 3D FFT would probably go much faster here) 38 | start = time.time() 39 | cvol = convolve(cvol, kernel) 40 | print('Done in', time.time() - start, 'seconds') 41 | return cvol 42 | 43 | ''' 44 | def plot_agg(ccov, axis, ax=None, ba=None, **kwargs): 45 | if not ax: 46 | ax = plt.gca() 47 | ax.axis('equal') 48 | hm = np.sum(ccov, axis=axis) 49 | hm[np.all(ccov == 0, axis=axis)] = np.nan 50 | # hm[np.all(ccov == 0, axis=axis)] = np.nan 51 | if axis == 0: 52 | axextent = 1 # coronal 53 | hm = hm.T 54 | elif axis == 1: 55 | axextent = 0 # sagittal 56 | hm = hm.T 57 | elif axis == 2: 58 | axextent = 2 # horizontal 59 | ax.imshow(hm, extent=ba.extent(axis=axextent), **kwargs) 60 | return hm 61 | ''' 62 | 63 | 64 | def main(): 65 | viewer = MouseBrainViewer() 66 | resolution = 50 67 | #viewer.initialize(resolution=resolution, mapping='Allen-lr', embed_ui=True, dark_mode=False, add_atlas=False, add_dwi=True, dwi_color_map='viridis') 68 | viewer.initialize(resolution=resolution, mapping='Allen-lr', embed_ui=True, dark_mode=False) 69 | 70 | ba = viewer.ibl_model.atlas 71 | file_path = utils.EXAMPLES_DATA_FOLDER.joinpath(f'./ncov_{resolution}.npz') 72 | if os.path.exists(str(file_path)): 73 | ncov = np.load(str(file_path))['arr_0'] 74 | else: 75 | cvol = compute_coverage_volume(ba) 76 | ncov = cvol.copy() 77 | ncov[ncov < 0] = -1 78 | ncov += 1 79 | print('Done computing volume with range', np.min(ncov), np.max(ncov)) 80 | np.savez_compressed(str(file_path), ncov) 81 | 82 | cov_vol = viewer.add_volume(ncov, resolution, color_map='viridis', transpose=True, select=True) #, alpha_map=[0, 0.5, 1] 83 | cov_vol.set_opacity(1.0) 84 | viewer.show().close() 85 | ''' 86 | ncov[ncov == -1] = np.nan 87 | 88 | plt.figure(), plot_agg(ncov, 2, ax=ba.plot_hslice(-.002), ba=ba, alpha=0.5) 89 | plt.figure(), plot_agg(ncov, 1, ax=ba.plot_sslice(0), ba=ba, alpha=0.5) 90 | plt.figure(), plot_agg(ncov, 0, ax=ba.plot_cslice(0), ba=ba, alpha=0.5) 91 | ''' 92 | 93 | if __name__ == '__main__': 94 | main() -------------------------------------------------------------------------------- /iblviewer_examples/ibl_brain_wide_map.py: -------------------------------------------------------------------------------- 1 | # This code connects to IBL back end and resolves 2 | # all point neurons (channels) measured in all 3 | # valid experiments. 
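# The try/except import below keeps this example compatible with both
# generations of the ONE API: the legacy 'oneibl.one' module and its
# replacement 'one.api' (ONE2). A minimal connection sketch (valid
# credentials and server access are assumed) then looks like:
#   one = ONE(base_url='https://alyx.internationalbrainlab.org')
#   insertions = one.alyx.rest('insertions', 'list', django=...)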
4 | try: 5 | from oneibl.one import ONE 6 | except ImportError: 7 | # Then we're on ONE2 8 | from one.api import ONE 9 | import os 10 | import pickle 11 | import numpy as np 12 | 13 | import ibllib.atlas as atlas 14 | from brainbox.io.one import load_channels_from_insertion 15 | import alf.io 16 | 17 | from iblviewer.mouse_brain import MouseBrainViewer 18 | from iblviewer import utils 19 | 20 | 21 | def get_valid_insertions(force_query=False, local_file_path=None): 22 | """ 23 | Get all valid insertions from the data base 24 | """ 25 | insertions = None 26 | if local_file_path is None: 27 | local_file_path = str(utils.EXAMPLES_DATA_FOLDER.joinpath('valid_insertions.p')) 28 | if not force_query and os.path.exists(os.path.abspath(local_file_path)): 29 | insertions = pickle.load(open(local_file_path, 'rb')) 30 | print('Using local file for insertions', local_file_path) 31 | else: 32 | dq = 'session__project__name__icontains,ibl_neuropixel_brainwide_01,session__qc__lt,50,' 33 | dq += '~json__qc,CRITICAL,session__extended_qc__behavior,1,json__extended_qc__tracing_exists,True,' 34 | dq += '~session__extended_qc___task_stimOn_goCue_delays__lt,0.9,' 35 | dq += '~session__extended_qc___task_response_feedback_delays__lt,0.9,' 36 | dq += '~session__extended_qc___task_wheel_move_before_feedback__lt,0.9,' 37 | dq += '~session__extended_qc___task_wheel_freeze_during_quiescence__lt,0.9,' 38 | dq += '~session__extended_qc___task_error_trial_event_sequence__lt,0.9,' 39 | dq += '~session__extended_qc___task_correct_trial_event_sequence__lt,0.9,' 40 | dq += '~session__extended_qc___task_reward_volumes__lt,0.9,' 41 | dq += '~session__extended_qc___task_reward_volume_set__lt,0.9,' 42 | dq += '~session__extended_qc___task_stimulus_move_before_goCue__lt,0.9,' 43 | dq += '~session__extended_qc___task_audio_pre_trial__lt,0.9' 44 | query_result = one.alyx.rest('insertions', 'list', django=dq) 45 | #insertions = one.alyx.rest('insertions', 'list', django=query) 46 | insertions = [] 47 | for insertion in query_result: 48 | insertions.append(insertion) 49 | pickle.dump(insertions, open(local_file_path, 'wb')) 50 | #points_query = one.alyx.rest('') 51 | return insertions 52 | 53 | 54 | def get_insertions_data(one=None, ba=None, insertions=None, start=None, end=None, 55 | exceptions=None, force_query=False, local_file_path=None): 56 | """ 57 | Get all valid insertions and related data from the DB. 
58 | 59 | """ 60 | insertions_data = None 61 | if local_file_path is None: 62 | local_file_path = str(utils.EXAMPLES_DATA_FOLDER.joinpath('valid_insertions_data.p')) 63 | if not force_query and os.path.exists(os.path.abspath(local_file_path)): 64 | insertions_data = pickle.load(open(local_file_path, 'rb')) 65 | print('Using local file for insertions data', local_file_path) 66 | else: 67 | if one is None: 68 | one = ONE() 69 | if ba is None: 70 | ba = atlas.AllenAtlas() 71 | if insertions is None: 72 | insertions = get_valid_insertions() 73 | num_insertions = len(insertions) 74 | 75 | print('') 76 | print('') 77 | print('Got', len(insertions), 'valid insertions (that do not necessarily have data)') 78 | 79 | # Retrieving XYZ point neuron coordinates goes like this: 80 | # spikes.clusters -> clusters.channels -> channels.xyz 81 | # but in many cases, channels.xyz (a file called mlapdv.npy) does not exist 82 | # so you have to compute XYZ coordinates given spikes depths and insertion XYZ 83 | dtypes = [ 84 | 'spikes.depths', 85 | 'spikes.amps', 86 | 'spikes.times', 87 | 'spikes.clusters', 88 | 'clusters.channels', 89 | 'clusters.mlapdv' 90 | ] 91 | 92 | # Filter insertions and retain only valid data because even 93 | # the insertions deemed valid are sometimes not what we want 94 | insertions_data = {} 95 | 96 | if not isinstance(start, int): 97 | start = 0 98 | if not isinstance(end, int): 99 | end = num_insertions 100 | start = max(0, start) 101 | end = min(num_insertions, end) 102 | print('Getting data for insertions', start, 'to', end, 'out of', num_insertions) 103 | for loop_id in range(start, end): 104 | insertion = insertions[loop_id] 105 | probe_id = insertion['id'] 106 | if exceptions is not None and probe_id in exceptions: 107 | print('Probe', loop_id, 'with id', probe_id, 'is in exclusion list (probably because of invalid data)') 108 | continue 109 | data_sets = one.alyx.rest('datasets', 'list', probe_insertion=probe_id, django=f'dataset_type__name__in,{dtypes}') 110 | if data_sets is None or len(data_sets) < 1: 111 | print('Probe', loop_id, ': no dataset found (!) for id', probe_id) 112 | continue 113 | print('Probe', loop_id, 'using', len(data_sets), 'data sets for id', probe_id) 114 | insertions_data[probe_id] = data_sets 115 | 116 | print(len(insertions_data), 'valid insertions found on', num_insertions) 117 | print('') 118 | pickle.dump(insertions_data, open(local_file_path, 'wb')) 119 | return insertions_data 120 | 121 | 122 | def get_point_neurons_data(one=None, ba=None, insertions=None, start=0, end=None, 123 | recompute_positions=True, probe_ids=None, 124 | exceptions=['825ba9b8-ce03-49b7-b1a8-4d85ae2185af'], 125 | local_file_path=None): 126 | """ 127 | Get all point neurons data sets that IBL has measured/recorded. 128 | Warning, this can result in veeery long download time initially. 129 | I had to download 85GB of data (including relevant spike data) in May 2021. 130 | And the best part is that the position of the neurons in the end 131 | is put in a pickle file less than 1MB. 
:) 132 | :param one: ONE connection object 133 | :param ba: BrainAtlas object 134 | :param insertions: 135 | :param start: 136 | :param end: 137 | :param recompute_positions: 138 | :param probe_ids: 139 | :param exceptions: 140 | :param local_file_path: 141 | """ 142 | if local_file_path is None: 143 | local_file_path = str(utils.EXAMPLES_DATA_FOLDER.joinpath('ibl_point_neurons.npz')) 144 | 145 | use_local_data = not recompute_positions and os.path.exists(os.path.abspath(local_file_path)) 146 | if use_local_data: 147 | result = np.load(local_file_path, allow_pickle=True) 148 | print('Point neurons data loaded (using local npz storage)', local_file_path) 149 | insertion_ids = result['insertion_ids'] 150 | xyz_positions = result['xyz_positions'] 151 | xyz_resolved_mask = result['xyz_resolved_mask'] 152 | data_mask = result['data_mask'] 153 | return insertion_ids, xyz_positions, xyz_resolved_mask, data_mask 154 | 155 | if one is None: 156 | one = ONE() 157 | if ba is None: 158 | ba = atlas.AllenAtlas() 159 | if insertions is None: 160 | insertions = get_valid_insertions() 161 | num_insertions = len(insertions) 162 | insertions_data = get_insertions_data(one, ba, insertions, start, end, exceptions) 163 | 164 | insertion_ids = [''] * num_insertions 165 | xyz_positions = [[]] * num_insertions 166 | xyz_resolved_mask = [False] * num_insertions 167 | data_mask = [False] * num_insertions 168 | 169 | loop_id = -1 170 | got_ids_filter = isinstance(probe_ids, dict) or isinstance(probe_ids, list) 171 | 172 | # Lots of try catch ahead! This is to handle all the cases where things might be missing or failing 173 | for probe_id in insertions_data: 174 | loop_id += 1 175 | data_sets = insertions_data[probe_id] 176 | if got_ids_filter and probe_id not in probe_ids: 177 | continue 178 | try: 179 | local_files = one.download_datasets(data_sets) 180 | except Exception as e: 181 | print('Error downloading dataset', e) 182 | continue 183 | 184 | if local_files is None: 185 | print('Local files not found for', probe_id) 186 | continue 187 | ''' 188 | TODO: this is where you start using spike data. 
189 | try: 190 | spikes = alf.io.load_object(local_files[0].parent, 'spikes') 191 | except AttributeError: 192 | continue 193 | ''' 194 | try: 195 | clusters = alf.io.load_object(local_files[0].parent, 'clusters') 196 | except AttributeError: 197 | continue 198 | #channels = clusters['channels'] 199 | 200 | print('Processing insertion', loop_id+1, 'on', len(insertions_data), '- Probe id', probe_id) 201 | # When mlapdv dataset does not exist, it means we have to 202 | # estimate xyz with the given spike depths and insertion xyz 203 | if 'mlapdv' in clusters: 204 | xyz_positions[loop_id] = clusters['mlapdv'] 205 | else: 206 | ''' 207 | Exemple of an insertion that throws a KeyError: 208 | 0393f34c-a2bd-4c01-99c9-f6b4ec6e786d 209 | KeyError: ibl.brainbox.io '2021-04-20T15:49:05_anup.khanal' 210 | So we have an id but something went wrong in the data pipeline and it got invalid 211 | ''' 212 | insertion = one.alyx.rest('insertions', 'read', id=probe_id) 213 | #print('Is insertion equal to itself???', insertion==insertion2, insertion['id'], insertion2['id']) 214 | try: 215 | channel_data = load_channels_from_insertion(insertion, one=one, ba=ba) 216 | except Exception as e: 217 | print(e) 218 | continue 219 | # Units are in meters so we make that consistent with mlapdv data sets that are in microns 220 | xyz_positions[loop_id] = channel_data * 1000000 221 | 222 | xyz_resolved_mask[loop_id] = 'mlapdv' in clusters 223 | data_mask[loop_id] = True 224 | insertion_ids[loop_id] = probe_id 225 | 226 | #if not isinstance(xyz_positions, np.ndarray): 227 | xyz_positions = np.array(xyz_positions) 228 | if local_file_path is not None: 229 | print('Saving processed data under', local_file_path) 230 | np.savez_compressed(local_file_path, insertion_ids=insertion_ids, xyz_positions=xyz_positions, 231 | xyz_resolved_mask=xyz_resolved_mask, data_mask=data_mask) 232 | 233 | print('Point neurons data downloaded') 234 | return insertion_ids, xyz_positions, xyz_resolved_mask, data_mask 235 | 236 | 237 | def main(): 238 | # Test probe ids. The first one has MLAPDV data, the others have their channels computed instead. 
239 | probe_ids = ['00a824c0-e060-495f-9ebc-79c82fef4c67', 240 | '0143d3fe-79c2-4922-8332-62c3e4e0ba85', 241 | '4762e8ed-4d94-4fd7-9522-e927f5ffca74', 242 | '4755877d-fd14-42b3-bc15-e1996d153015'] 243 | 244 | # If you want to run the test with only the selection from above, then comment the below line 245 | probe_ids = None 246 | 247 | ''' 248 | There is an error for this data: 249 | Spike data not found: 56f2a378-78d2-4132-b3c8-8c1ba82be598 250 | ''' 251 | data = get_point_neurons_data(probe_ids=probe_ids, recompute_positions=False) 252 | insertion_ids, xyz_positions, xyz_resolved_mask, data_mask = data 253 | positions = [] 254 | values = [] 255 | loop_id = 0 256 | # xyz_positions is a sparse array so we take only valid data from it 257 | insertions = xyz_positions[data_mask] 258 | 259 | for channels in insertions: 260 | channels_positions = channels if isinstance(channels, list) else channels.tolist() 261 | #inside_brain_positions = get_points_inside_brain(viewer, channels_positions) 262 | positions += channels_positions 263 | mock_ids = np.ones(len(channels_positions))*loop_id 264 | values += mock_ids.tolist() 265 | loop_id += 1 266 | positions = np.array(positions) 267 | values = np.array(values) 268 | 269 | av = np.copy(values) * np.random.random(len(values)) 270 | bv = np.copy(values) * np.random.random(len(values)) 271 | cv = np.copy(values) * np.random.random(len(values)) 272 | values = np.c_[values, av, bv, cv] 273 | 274 | viewer = MouseBrainViewer() 275 | print('Data report: using', len(insertions), 'insertions and', len(positions), 'channels (point neurons)') 276 | print('-> Bounds:', np.amin(positions, axis=0), np.amax(positions, axis=0)) 277 | # This starts the viewer with two volumes: the segmented atlas and the DWI of the mouse brain 278 | viewer.initialize(resolution=50, add_dwi=True, dwi_color_map='viridis', 279 | add_atlas=True, mapping='Allen', dark_mode=True, embed_ui=True) 280 | 281 | # Below are a few optimizations you could use if your computer 282 | # is too slow to visualize the result: 283 | 284 | # 1. We add all the points together as one object. 285 | # The fewer objects on your 3D scene, the faster it will render. 286 | 287 | # 2. Activate screen space mode so that the points are in 2D. 288 | # This can literally change the game if you have a low-end computer 289 | 290 | # 3. Do not trim outliers, because the computation to know if each 291 | # and every point is within the brain is quite demanding. 
292 | points = viewer.add_points(positions, radius=8, values=values, screen_space=True, 293 | noise_amount=100, trim_outliers=False, add_to_scene=True) 294 | 295 | # You will need some CPU/GPU power to handle volume 296 | # transparency and all those semi-transparent points 297 | points.alpha(0.5) 298 | 299 | # Finally, before showing everything, we select the latest added object (the points) 300 | viewer.select(-1) 301 | viewer.show() 302 | 303 | 304 | if __name__ == '__main__': 305 | main() -------------------------------------------------------------------------------- /iblviewer_examples/ibl_insertion_probes.py: -------------------------------------------------------------------------------- 1 | # This example connects to IBL database and fetches 2 | # all insertion probes 3 | try: 4 | from oneibl.one import ONE 5 | except ImportError: 6 | from one.api import ONE 7 | import numpy as np 8 | 9 | import argparse 10 | from iblviewer.launcher import IBLViewer 11 | 12 | 13 | class ProbeData: 14 | 15 | def __init__(self, as_segments=False, line_width=2, trim_outliers=True): 16 | """ 17 | Constructor 18 | :param viewer: The IBLViewer controller 19 | :param one_connection: The "one" connection to IBL server 20 | :param as_segments: Whether insertion probes should be reduced to straight lines (segments) 21 | :param trim_outliers: Whether you want the lines to be cut when they're out of the brain 22 | :param with_labels: Whether labels should be added to the lines 23 | """ 24 | print('You need valid credentials and access to IBL server in order to run this example.') 25 | self.one_connection = ONE(base_url="https://alyx.internationalbrainlab.org") 26 | self.as_segments = as_segments 27 | self.line_width = line_width 28 | self.trim_outliers = trim_outliers 29 | 30 | def get_bwm_ins_alyx(self, one): 31 | """ 32 | Return insertions that match criteria : 33 | - project code 34 | - session QC not critical (TODO may need to add probe insertion QC) 35 | - at least 1 alignment 36 | - behavior pass 37 | :param one: "one" connection handler 38 | :return: 39 | ins: dict containing the full details on insertion as per the alyx rest query 40 | ins_id: list of insertions eids 41 | sess_id: list of (unique) sessions eids 42 | """ 43 | ins = one.alyx.rest('insertions', 'list', 44 | provenance='Ephys aligned histology track', 45 | django='session__project__name__icontains,ibl_neuropixel_brainwide_01,' 46 | 'session__qc__lt,50,' 47 | 'json__extended_qc__alignment_count__gt,0,' 48 | 'session__extended_qc__behavior,1') 49 | 50 | ins_ids = [item['id'] for item in ins] 51 | sess_id = [item['session_info']['id'] for item in ins] 52 | # Here's what's in 'json': 53 | # dict_keys(['qc', 'n_units', 'xyz_picks', 'extended_qc', 'drift_rms_um', 'firing_rate_max', 'n_units_qc_pass', 54 | # 'amplitude_max_uV', 'firing_rate_median', 'amplitude_median_uV', 'whitening_matrix_conditioning']) 55 | positions = [] 56 | for item in ins: 57 | picks = item['json'].get('xyz_picks', []) 58 | positions.append(picks) 59 | sess_id = np.unique(sess_id) 60 | return positions, ins_ids 61 | 62 | 63 | def get_picks_mean_vectors(self, xyz_picks, extent=3): 64 | """ 65 | Get a mean vector from picks coordinates 66 | :param xyz_picks: Dictionary xyz picks, the key being the identifier for that data set 67 | :param extent: Number of points to take from start and end for mean computation of end points 68 | :return: List of varying lists of 3D points and a list of line ids 69 | """ 70 | vectors = [] 71 | ids = [] 72 | # Mean between first and last three 
63 | def get_picks_mean_vectors(self, xyz_picks, extent=3):
64 | """
65 | Get a mean vector from picks coordinates
66 | :param xyz_picks: List of xyz picks, one set of 3D points per insertion
67 | :param extent: Number of points to take from start and end for mean computation of end points
68 | :return: List of [start, end] 3D point pairs and a list of line ids
69 | """
70 | vectors = []
71 | ids = []
72 | # Mean of the first and last 'extent' picks
73 | for ins_id in range(len(xyz_picks)):
74 | raw_picks = xyz_picks[ins_id]
75 | end_pt = np.mean(raw_picks[-extent:], axis=0)
76 | start_pt = np.mean(raw_picks[:extent], axis=0)
77 | vectors.append([start_pt, end_pt])
78 | ids.append(ins_id)
79 | return vectors, ids
80 | 
81 | 
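# A small standalone sketch of the reduction done by get_picks_mean_vectors,
# using made-up pick coordinates (only numpy is assumed):
#
#   import numpy as np
#   raw_picks = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.],
#                         [8., 0., 0.], [9., 0., 0.], [10., 0., 0.]])
#   start_pt = np.mean(raw_picks[:3], axis=0)  # -> [1., 0., 0.]
#   end_pt = np.mean(raw_picks[-3:], axis=0)   # -> [9., 0., 0.]
#   segment = [start_pt, end_pt]  # straight line approximating the probe track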
82 | def on_viewer_initialized(self, viewer):
83 | """
84 | Add insertion probe vectors
85 | :param viewer: Viewer instance
86 | """
87 | lines_data, line_ids = self.get_bwm_ins_alyx(self.one_connection)
88 | if self.as_segments:
89 | segments_data, segment_ids = self.get_picks_mean_vectors(lines_data)
90 | line_ids = np.array(line_ids)
91 | segment_ids = line_ids[segment_ids]
92 | lines = viewer.add_segments(segments_data, line_width=self.line_width,
93 | add_to_scene=True, trim_outliers=self.trim_outliers)
94 | else:
95 | lines = viewer.add_lines(lines_data, line_width=self.line_width,
96 | add_to_scene=True, trim_outliers=self.trim_outliers)
97 | return lines
98 | 
99 | 
100 | def str2bool(v):
101 | if isinstance(v, bool):
102 | return v
103 | elif v.lower() in ('yes', 'true', 't', 'y', '1'):
104 | return True
105 | elif v.lower() in ('no', 'false', 'f', 'n', '0'):
106 | return False
107 | else:
108 | raise argparse.ArgumentTypeError('Boolean value expected.')
109 | 
110 | 
111 | def main():
112 | # More parsing options are added by IBLViewer's parse_args() method used below.
113 | # -> Please check that you don't override any existing argument name!
114 | parser = argparse.ArgumentParser(description='International Brain Viewer based on VTK')
115 | parser.add_argument('-seg', dest='segments', type=str2bool, default=False,
116 | help='Whether line probes are simplified to segments. Defaults to 0 (False)')
117 | 
118 | iblviewer = IBLViewer()
119 | # First retrieve command-line arguments (default ones + custom ones above)
120 | args = iblviewer.parse_args(parser)
121 | 
122 | pb = ProbeData(args.segments)
123 | # Now start the viewer and add the probes when it's initialized
124 | iblviewer.launch(pb.on_viewer_initialized, None, args)
125 | '''
126 | # If you'd like, you can also directly import the relevant viewer and use it as follows:
127 | from iblviewer.mouse_brain import MouseBrainViewer
128 | viewer = MouseBrainViewer()
129 | viewer.initialize(resolution=25, embed_ui=True)
130 | ProbeData(as_segments=False, line_width=5).on_viewer_initialized(viewer)
131 | viewer.show()
132 | '''
133 | 
134 | 
135 | if __name__ == '__main__':
136 | main()
137 | 
--------------------------------------------------------------------------------
/iblviewer_examples/ibl_point_neurons.py:
--------------------------------------------------------------------------------
1 | # Add point neurons from connectivity data
2 | from pathlib import Path
3 | 
4 | import os
5 | import numpy as np
6 | import pickle
7 | 
8 | from iblviewer.launcher import IBLViewer
9 | from iblviewer import utils
10 | 
11 | '''
12 | # In a notebook, you could pull in your own analysis code with the lines
13 | # below, but you should wrap it in properly encapsulated code.
14 | %%bash
15 | git clone https://github.com/int-brain-lab/friendly_neurons.git
16 | %load friendly_neurons/data_analysis.py
17 | '''
18 | 
19 | # After that, we try loading experimental data, here point neurons.
20 | # Once this cell is run, click on the viewer to see the update
21 | 
22 | def process_point_neurons(data):
23 | """
24 | Pre-process point neurons data and find the common min and max values
25 | :param data: At least 5D array of x, y, z, region, custom data.
26 | All the columns after 'region' are stored as a time series
27 | """
28 | if isinstance(data, str):
29 | pickles = []
30 | data_path = data
31 | with (open(os.path.abspath(data_path), 'rb')) as openfile:
32 | while True:
33 | try:
34 | pickles.append(pickle.load(openfile))
35 | except EOFError:
36 | break
37 | data = pickles[0]
38 | 
39 | # Structure of 'data': x | y | z | region | pre-time allegiance | during-time allegiance | post-time allegiance
40 | positions = []
41 | regions = []
42 | timings = []
43 | # Clean up malformed entries and extract what we need. Once the upstream data format is fixed, this cleaning can be removed.
44 | for weird_data in data:
45 | try:
46 | positions.append([weird_data[0][0], weird_data[1][0], weird_data[2][0]])
47 | except TypeError:
48 | continue
49 | # Rows without valid 3D coordinates are skipped above
50 | regions.append(weird_data[3])
51 | timings.append(weird_data[4:])
52 | positions = np.array(positions).astype(float)
53 | 
54 | timings = np.array(timings)
55 | regions = np.array(regions)
56 | min_v = np.min(timings)
57 | max_v = np.max(timings)
58 | return positions, timings, min_v, max_v
59 | 
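# A hypothetical sketch of the input and output of process_point_neurons,
# with made-up values (each row: x, y, z wrapped in lists, region, then timings):
#
#   fake_data = [[[100.], [200.], [300.], 'CA1', 0.1, 0.5, 0.2],
#                [[110.], [210.], [310.], 'CA3', 0.3, 0.4, 0.6]]
#   positions, timings, min_v, max_v = process_point_neurons(fake_data)
#   # positions.shape == (2, 3), timings.shape == (2, 3)
#   # min_v == 0.1, max_v == 0.6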
60 | def on_viewer_initialized(viewer):
61 | 
62 | #viewer = MouseBrainViewer()
63 | #viewer.initialize(resolution=50, mapping='Allen', add_atlas=True, add_dwi=False, dwi_color_map='Greys_r', embed_ui=True)
64 | 
65 | # Now add point neurons
66 | data = ['./exp2_db4df448-e449-4a6f-a0e7-288711e7a75a_both',
67 | './exp3_3dd347df-f14e-40d5-9ff2-9c49f84d2157_both',
68 | './exp4_3c851386-e92d-4533-8d55-89a46f0e7384_both',
69 | './exp5_158d5d35-a2ab-4a76-87b0-51048c5d5283_both']
70 | data = [str(utils.EXAMPLES_DATA_FOLDER.joinpath(d)) for d in data]
71 | 
72 | min_value = None
73 | max_value = None
74 | all_positions = []
75 | all_timings = []
76 | # A first loop to find out min and max values among timings
77 | for data_set in data:
78 | positions, timings, min_v, max_v = process_point_neurons(data_set)
79 | all_positions.append(positions)
80 | all_timings.append(timings)
81 | min_value = min(min_v, min_value) if min_value is not None else min_v
82 | max_value = max(max_v, max_value) if max_value is not None else max_v
83 | 
84 | point_actors = []
85 | 
86 | # Now visualize the points. In a notebook, remove previously added ones (if any) first.
87 | viewer.plot.remove(point_actors, render=False)
88 | # We keep newly added objects in memory so that if you rerun this cell and change parameters,
89 | # the previous points get replaced by the updated ones (in a plain script, the call above is a no-op).
90 | point_actors = []
91 | for d_id in range(len(data)):
92 | # 16um is a good compromise for visibility from afar. So we make somata roughly 2-3 times larger than they are in reality
93 | points = viewer.add_points(all_positions[d_id], radius=16, values=all_timings[d_id], screen_space=False,
94 | noise_amount=100, min_v=min_value, max_v=max_value)
95 | point_actors.append(points)
96 | viewer.plot.add(point_actors)
97 | 
98 | 
99 | def main():
100 | iblviewer = IBLViewer()
101 | # First retrieve command-line arguments (default ones + custom ones above)
102 | args = iblviewer.parse_args()
103 | # Now start the viewer and add points when it's initialized
104 | iblviewer.launch(on_viewer_initialized, None, args)
105 | 
106 | 
107 | if __name__ == '__main__':
108 | main()
--------------------------------------------------------------------------------
/iblviewer_examples/ibl_volume_mapping.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pandas as pd
3 | import pickle
4 | import argparse
5 | 
6 | from iblviewer.launcher import IBLViewer
7 | from iblviewer import utils
8 | 
9 | class DataViewer():
10 | 
11 | def __init__(self, file_path=None, aggregator='median', grouper='acronym', color_map='viridis'):
12 | """
13 | Constructor. Stores the aggregation settings and loads the data immediately.
14 | """
15 | self.file_path = file_path
16 | self.aggregator = aggregator
17 | self.color_map = color_map
18 | self.grouper = grouper
19 | self.df = None
20 | self.aggregated_df = None
21 | self.min_value = None
22 | self.max_value = None
23 | self.load_data(file_path)
24 | 
25 | def load_data(self, file_path=None, silent=True):
26 | if file_path is None:
27 | test_data = './stimonR_top10_rawpoints.p'
28 | file_path = str(utils.EXAMPLES_DATA_FOLDER.joinpath(test_data))
29 | self.file_path = file_path
30 | 
31 | df = pickle.load(open(file_path, 'rb'))
32 | 
33 | # For testing data given by Guido
34 | # df['r_over_chance'] = df['r_prior'] - df['r_prior_null']
35 | # filtered_df = df.groupby('region').median()['r_over_chance']
36 | 
37 | raw_df = df['rawpoints']
38 | copy_df = raw_df.copy()
39 | agg_df = copy_df.groupby(self.grouper).agg({'value': self.aggregator})
40 | agg_df.dropna(inplace=True)
41 | self.min_value = float(np.amin(agg_df, axis=0).to_numpy()[0])
42 | self.max_value = float(np.amax(agg_df, axis=0).to_numpy()[0])
43 | 
44 | if not silent:
45 | print('Min prior value ' + str(self.min_value))
46 | print('Max prior value ' + str(self.max_value))
47 | 
48 | self.df = raw_df
49 | self.df.dropna(inplace=True)
50 | self.df = self.df.sort_values(by='acronym', key=lambda col: col.str.lower())
51 | self.aggregated_df = agg_df
52 | 
53 | # Example data set name: pLeft_iti_scores_n_gt_50
54 | # Data given by Berk Gerçek, International Brain Laboratory
55 | def get_scalars_map(self, viewer):
56 | """
57 | Process priors data and build the map of scalar values, one per brain region
58 | """
59 | scalars_map = [None]*viewer.ibl_model.get_num_regions()
60 | 
61 | # This code is to be modified if you have split data for left and right hemispheres
62 | # The concept is pretty simple: scalars_map is a 1D list that maps to brain regions.
63 | # With the lateralized brain mapping, the standard region id in Allen CCF is negated
64 | # on the right hemisphere.
65 | # Currently this code uses standard acronym lookup, which yields a region on both
66 | # hemispheres. The value you assign to an acronym will thus be mirrored.
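# For illustration only, a hypothetical lateralized variant (assuming, as noted
# above, that right-hemisphere region ids are the negated left-hemisphere ids
# and that you computed separate values per hemisphere) would assign distinct
# values instead of mirroring one value:
#
#   region_ids, row_ids = viewer.ibl_model.get_region_and_row_id(acronym)
#   # hypothetical: left_value and right_value come from your own split data
#   # scalars_map[int(row_ids[0])] = left_value
#   # scalars_map[int(row_ids[1])] = right_value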
67 | 
68 | # Alternatively, iterate with: for i in range(0, len(df)), which preserves data types
69 | for acronym, row in self.aggregated_df.iterrows():
70 | value = row.iloc[0]
71 | if value is None:
72 | continue
73 | region_ids, row_ids = viewer.ibl_model.get_region_and_row_id(acronym)
74 | if region_ids is None:
75 | print('Acronym', acronym, 'was not found in Atlas')
76 | continue
77 | for r_id in range(len(region_ids)):
78 | region_id = region_ids[r_id]
79 | row_id = row_ids[r_id]
80 | if region_id is None:
81 | print('Error, could not find acronym', acronym, '...ignoring it')
82 | continue
83 | if row_id == 0: #or value.isnull().values.any():
84 | # We ignore void acronym (which is equal to row_id 0) on Allen Mouse CCF v3
85 | continue
86 | #print('Setting row', int(row_id), 'with value', value)
87 | scalars_map[int(row_id)] = value
88 | return scalars_map
89 | 
90 | def on_viewer_initialized(self, viewer):
91 | """
92 | Method called when the viewer is initialized and ready to accept further instructions from the user.
93 | In this case we map prior data to the Allen Mouse brain atlas.
94 | :param viewer: MouseBrainViewer instance (mandatory)
95 | """
96 | scalars_map = self.get_scalars_map(viewer)
97 | # Ensure we have the right selection; -1 selects the most recently added object (here the atlas volume)
98 | viewer.select(-1)
99 | viewer.assign_scalars(scalars_map, [self.min_value, self.max_value], self.color_map)
100 | 
101 | def on_statistics_update(self, statistics, viewer):
102 | """
103 | Method called when statistics are updated. Here we use a scatter plot.
104 | :param statistics: MplCanvas instance (mandatory)
105 | :param viewer: MouseBrainViewer instance (mandatory)
106 | """
107 | # Clear previous plot (nothing visible appears until you call draw())
108 | statistics.axes.clear()
109 | 
110 | # Prepare new one
111 | agg_df = self.aggregated_df
112 | statistics.axes.set_xlabel('Brain regions (none selected)')
113 | # First scatter all values
114 | statistics.axes.scatter(self.df.acronym, self.df.value, alpha=0.2, s=8)
115 | statistics.axes.set_xticks([''])
116 | # There are multiple ways to retrieve the acronym, here's one
117 | acronyms = agg_df.index[agg_df.value == viewer.model.selection_related_value].tolist()
118 | if acronyms is None or len(acronyms) < 1:
119 | # Do not forget to draw before we return!
120 | statistics.draw()
121 | return
122 | 
123 | # Then scatter the values from the selected region on top of the previous scatter
124 | for acronym in acronyms:
125 | #selected_index = self.df.index[self.df.acronym == acronym]
126 | selected_data = self.df.value[self.df.acronym == acronym]
127 | #selected_index.dropna(inplace=True)
128 | selected_data.dropna(inplace=True)
129 | statistics.axes.scatter([acronym]*selected_data.size, selected_data, color='yellow', s=32)
130 | 
131 | statistics.axes.set_xlabel(f'Selected region: {acronyms}')
132 | # At last, draw the result
133 | statistics.draw()
134 | 
135 | 
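# A minimal standalone sketch of the aggregation performed in load_data, with
# a toy DataFrame (column names mirror the real data: 'acronym' and 'value'):
#
#   import pandas as pd
#   df = pd.DataFrame({'acronym': ['CA1', 'CA1', 'VISp'], 'value': [0.2, 0.4, 0.9]})
#   agg_df = df.groupby('acronym').agg({'value': 'median'})
#   # agg_df.loc['CA1', 'value'] == 0.3, agg_df.loc['VISp', 'value'] == 0.9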
Defaults to "acronym" column.') 144 | parser.add_argument('-a', dest='aggregator', type=str, default='median', 145 | help='Aggregator type. Defaults to median.') 146 | 147 | iblviewer = IBLViewer() 148 | # First retrieve command-line arguments (default ones + custom ones above) 149 | args = iblviewer.parse_args(parser) 150 | 151 | # Then update our custom class with those arguments 152 | data = DataViewer(args.file_path, args.aggregator, args.grouper, args.color_map) 153 | 154 | # Finally, launch the UI and 3D viewer 155 | iblviewer.launch(data.on_viewer_initialized, data.on_statistics_update, args) 156 | 157 | 158 | if __name__ == '__main__': 159 | main() -------------------------------------------------------------------------------- /iblviewer_examples/viewer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 5, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "IBL Viewer...\n" 13 | ] 14 | }, 15 | { 16 | "data": { 17 | "text/plain": [ 18 | "" 19 | ] 20 | }, 21 | "execution_count": 5, 22 | "metadata": {}, 23 | "output_type": "execute_result" 24 | } 25 | ], 26 | "source": [ 27 | "import os, sys\n", 28 | "path = os.path.abspath('../../')\n", 29 | "sys.path.append(path)\n", 30 | "\n", 31 | "# Lines above only for local testing by developers\n", 32 | "from iblviewer.launcher import IBLViewer\n", 33 | "app = IBLViewer()\n", 34 | "app.launch(jupyter=True, embed_ui=True, dark_mode=True)" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "metadata": {}, 41 | "outputs": [], 42 | "source": [] 43 | } 44 | ], 45 | "metadata": { 46 | "kernelspec": { 47 | "display_name": "Python [conda env:iblenv] *", 48 | "language": "python", 49 | "name": "conda-env-iblenv-py" 50 | }, 51 | "language_info": { 52 | "codemirror_mode": { 53 | "name": "ipython", 54 | "version": 3 55 | }, 56 | "file_extension": ".py", 57 | "mimetype": "text/x-python", 58 | "name": "python", 59 | "nbconvert_exporter": "python", 60 | "pygments_lexer": "ipython3", 61 | "version": "3.8.5" 62 | } 63 | }, 64 | "nbformat": 4, 65 | "nbformat_minor": 4 66 | } -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | #import iblviewer.launcher 3 | 4 | setup( 5 | name='iblviewer', 6 | version='2.4.7', 7 | description='An interactive GPU-accelerated 3D viewer based on VTK', 8 | url='https://github.com/int-brain-lab/iblviewer', 9 | author='Nicolas Antille', 10 | author_email='nicolas.antille@gmail.com', 11 | license='MIT', 12 | install_requires=['numpy', 13 | 'matplotlib', 14 | 'requests', 15 | 'pandas', 16 | 'pynrrd', 17 | 'trimesh', 18 | 'k3d', 19 | 'vtk>=9.0', 20 | 'ipywebrtc', 21 | 'ibllib', 22 | 'iblutil', 23 | 'vedo>=2022.0.1', 24 | 'ipyvtklink', 25 | 'PyQt5', 26 | 'pyqt-darktheme' 27 | ], 28 | packages=find_packages(include=['iblviewer','iblviewer.*', 'iblviewer_examples', 'iblviewer_examples.*']), 29 | package_data={'iblviewer_assets':['iblviewer_assets/*']}, 30 | include_package_data=True, 31 | entry_points={ 32 | "console_scripts": [ 33 | "iblviewer = iblviewer.launcher:main", 34 | "iblviewer-points-demo = iblviewer_examples.ibl_point_neurons:main", 35 | "iblviewer-probes-demo = iblviewer_examples.ibl_insertion_probes:main", 36 | "iblviewer-coverage-demo = 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 | #import iblviewer.launcher
3 | 
4 | setup(
5 | name='iblviewer',
6 | version='2.4.7',
7 | description='An interactive GPU-accelerated 3D viewer based on VTK',
8 | url='https://github.com/int-brain-lab/iblviewer',
9 | author='Nicolas Antille',
10 | author_email='nicolas.antille@gmail.com',
11 | license='MIT',
12 | install_requires=['numpy',
13 | 'matplotlib',
14 | 'requests',
15 | 'pandas',
16 | 'pynrrd',
17 | 'trimesh',
18 | 'k3d',
19 | 'vtk>=9.0',
20 | 'ipywebrtc',
21 | 'ibllib',
22 | 'iblutil',
23 | 'vedo>=2022.0.1',
24 | 'ipyvtklink',
25 | 'PyQt5',
26 | 'pyqt-darktheme'
27 | ],
28 | packages=find_packages(include=['iblviewer', 'iblviewer.*', 'iblviewer_examples', 'iblviewer_examples.*']),
29 | package_data={'iblviewer_assets': ['iblviewer_assets/*']},
30 | include_package_data=True,
31 | entry_points={
32 | "console_scripts": [
33 | "iblviewer = iblviewer.launcher:main",
34 | "iblviewer-points-demo = iblviewer_examples.ibl_point_neurons:main",
35 | "iblviewer-probes-demo = iblviewer_examples.ibl_insertion_probes:main",
36 | "iblviewer-coverage-demo = iblviewer_examples.ibl_brain_coverage:main",
37 | "iblviewer-volume-mapping-demo = iblviewer_examples.ibl_volume_mapping:main",
38 | "iblviewer-brain-wide-map = iblviewer_examples.ibl_brain_wide_map:main",
39 | "iblviewer-human-brain-demo = iblviewer_examples.human_brain:main",
40 | "iblviewer-headless-render-demo = iblviewer_examples.headless_render:main"
41 | ]
42 | },
43 | classifiers=[
44 | 'Development Status :: 5 - Production/Stable',
45 | 'Intended Audience :: Science/Research',
46 | 'License :: OSI Approved :: MIT License',
47 | 'Operating System :: POSIX :: Linux',
48 | 'Operating System :: MacOS',
49 | 'Programming Language :: Python :: 3.8',
50 | ]
51 | )
52 | 
--------------------------------------------------------------------------------