├── .gitignore ├── LICENSE ├── README.md ├── SPHARM ├── __init__.py ├── classes │ ├── __init__.py │ ├── ellipsoid.py │ ├── image_stack.py │ ├── moving_surface.py │ ├── node.py │ ├── orbital.py │ ├── profile.py │ ├── spectrum.py │ ├── stratified_group_shuffle_split.py │ ├── surface.py │ └── time_spectrum.py ├── lib │ ├── __init__.py │ ├── classification.py │ ├── confusion_matrix_pretty_print.py │ ├── parallel.py │ ├── plotting.py │ ├── segmentation.py │ ├── spharm.py │ ├── transformation.py │ └── vrml_parse.py ├── scripts │ ├── __init__.py │ ├── analyse_1_convert.py │ ├── analyse_2_spharm.py │ ├── analyse_3_plotting_individual.py │ ├── analyse_4_plotting_groups.py │ ├── analyse_5_adjust_parameters.py │ ├── analyse_6_prediction.py │ ├── analyse_T_cells_in_LN.py │ ├── analyse_ellipsoids.py │ ├── analyse_gridsize.py │ ├── analyse_orbitals.py │ ├── analyse_surfaces.py │ ├── analyse_tracks.py │ ├── convert_surface_to_tiff.py │ ├── plot_maxprojections.py │ ├── plot_surfaces.py │ └── plot_surfaces_from_stacks.py └── tests │ ├── __init__.py │ ├── data │ ├── LN_deconv_set4_Detailed.xlsx │ ├── LN_deconv_set4_small.csv │ ├── surfaces │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00001.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00002.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00003.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00004.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00005.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00006.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00007.csv │ │ ├── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00008.csv │ │ └── Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00009.csv │ ├── synthetic_cell.txt │ ├── synthetic_cells │ │ ├── ContourCell0_0.216056749 │ │ ├── ContourCell0_100.106903 │ │ ├── ContourCell0_20.0002422 │ │ ├── ContourCell0_40.3893547 │ │ ├── ContourCell0_60.0284958 │ │ └── ContourCell0_80.047142 │ ├── test_vrml.vrml │ ├── track_files │ │ └── LN │ │ │ └── LN_deconv_set4_Detailed.xlsx │ ├── vrml │ │ ├── test_vrml 2.vrml │ │ └── test_vrml1.vrml │ └── wrl │ │ └── LN │ │ └── LN_deconv_set4.csv │ ├── test_ellipsoid.py │ ├── test_image_stack.py │ ├── test_moving_surface.py │ ├── test_orbital.py │ ├── test_spectrum.py │ ├── test_spharm.py │ ├── test_stratified_group_shuffle_split.py │ ├── test_surface.py │ ├── test_time_spectrum.py │ ├── test_transformation.py │ └── test_vrml.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | MANIFEST 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest
31 | *.spec
32 | 
33 | # Installer logs
34 | pip-log.txt
35 | pip-delete-this-directory.txt
36 | 
37 | # Unit test / coverage reports
38 | htmlcov/
39 | .tox/
40 | .coverage
41 | .coverage.*
42 | .cache
43 | nosetests.xml
44 | coverage.xml
45 | *.cover
46 | .hypothesis/
47 | .pytest_cache/
48 | 
49 | # Translations
50 | *.mo
51 | *.pot
52 | 
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 | db.sqlite3
57 | 
58 | # Flask stuff:
59 | instance/
60 | .webassets-cache
61 | 
62 | # Scrapy stuff:
63 | .scrapy
64 | 
65 | # Sphinx documentation
66 | docs/_build/
67 | 
68 | # PyBuilder
69 | target/
70 | 
71 | # Jupyter Notebook
72 | .ipynb_checkpoints
73 | 
74 | # pyenv
75 | .python-version
76 | 
77 | # celery beat schedule file
78 | celerybeat-schedule
79 | 
80 | # SageMath parsed files
81 | *.sage.py
82 | 
83 | # Environments
84 | .env
85 | .venv
86 | env/
87 | venv/
88 | ENV/
89 | env.bak/
90 | venv.bak/
91 | 
92 | # Spyder project settings
93 | .spyderproject
94 | .spyproject
95 | 
96 | # Rope project settings
97 | .ropeproject
98 | 
99 | # mkdocs documentation
100 | /site
101 | 
102 | # mypy
103 | .mypy_cache/
104 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 | 
3 | Copyright (c) 2018-2019, Dr. Anna Medyukhina
4 | Research Group Applied Systems Biology - Head: Prof. Dr. Marc Thilo Figge
5 | https://www.leibniz-hki.de/en/applied-systems-biology.html
6 | HKI-Center for Systems Biology of Infection
7 | Leibniz Institute for Natural Product Research and Infection Biology -
8 | Hans Knöll Institute (HKI)
9 | Adolf-Reichwein-Straße 23, 07745 Jena, Germany
10 | 
11 | All rights reserved.
12 | 
13 | Redistribution and use in source and binary forms, with or without
14 | modification, are permitted provided that the following conditions are met:
15 | 
16 | * Redistributions of source code must retain the above copyright notice, this
17 |   list of conditions and the following disclaimer.
18 | 
19 | * Redistributions in binary form must reproduce the above copyright notice,
20 |   this list of conditions and the following disclaimer in the documentation
21 |   and/or other materials provided with the distribution.
22 | 
23 | * Neither the name of the copyright holder nor the names of its
24 |   contributors may be used to endorse or promote products derived from
25 |   this software without specific prior written permission.
26 | 
27 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
31 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
33 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
34 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
35 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
36 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Dynamic SPHARM: software for dynamic spherical harmonic analysis
2 | 
3 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3387142.svg)](https://doi.org/10.5281/zenodo.3387142)
4 | 
5 | Author: *Anna Medyukhina*
6 | 
7 | Affiliation: *Research Group Applied Systems Biology - Head: Prof. Dr. Marc Thilo Figge
8 | https://www.leibniz-hki.de/en/applied-systems-biology.html
9 | HKI-Center for Systems Biology of Infection
10 | Leibniz Institute for Natural Product Research and Infection Biology -
11 | Hans Knöll Institute (HKI)
12 | Adolf-Reichwein-Straße 23, 07745 Jena, Germany*
13 | 
14 | ## License
15 | 
16 | The source code of this framework is released under the 3-clause BSD license.
17 | 
18 | ## Citation
19 | 
20 | A. Medyukhina, M. Blickensdorf, Z. Cseresnyés, N. Ruef, J.V. Stein, M.T. Figge. Dynamic spherical harmonics approach for shape classification of migrating cells. Sci Rep 10, 6072 (2020) [https://doi.org/10.1038/s41598-020-62997-7](https://doi.org/10.1038/s41598-020-62997-7)
21 | 
22 | 
--------------------------------------------------------------------------------
/SPHARM/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/__init__.py
--------------------------------------------------------------------------------
/SPHARM/classes/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/classes/__init__.py
--------------------------------------------------------------------------------
/SPHARM/classes/ellipsoid.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | 
3 | import numpy as np
4 | 
5 | from SPHARM.classes.surface import Surface
6 | from SPHARM.classes.profile import Profile
7 | import SPHARM.lib.transformation as tr
8 | 
9 | 
10 | class Ellipsoid(Surface):
11 |     """
12 |     Class for a surface of an ellipsoidal object.
13 |     """
14 |     def __init__(self, grid_shape, size, rotation=None):
15 |         """
16 |         Initialize an ellipsoid surface with the given size and rotation.
17 | 
18 |         Parameters
19 |         ----------
20 |         grid_shape : tuple of size 2
21 |             Shape of the 2D grid of the ellipsoid surface (phi-theta grid).
22 |         size : tuple of size 3
23 |             Relative size of ellipsoid axes.
24 |         rotation : tuple of size 2
25 |             Polar and azimuthal angles of ellipsoid rotation in radians.
26 |         """
27 |         super(Ellipsoid, self).__init__()
28 |         theta = np.linspace(0, np.pi, grid_shape[0], endpoint=False) # polar angle
29 |         phi = np.linspace(0, 2 * np.pi, grid_shape[1], endpoint=False) # azimuthal angle
30 |         self.Phi, self.Theta = np.meshgrid(phi, theta)
31 |         self.Rgrid = None
32 |         self.name = None
33 | 
34 |         self.generate(size, rotation=rotation)
35 | 
36 |         self.phi = self.Phi.flatten()
37 |         self.theta = self.Theta.flatten()
38 |         self.R = self.Rgrid.flatten()
39 |         self.x, self.y, self.z = tr.spherical_to_cart(self.R, self.theta, self.phi)
40 | 
41 |     def generate(self, size, rotation=None):
42 |         """
43 |         Generate ellipsoid surface with given size and rotation.
44 | 
45 |         Parameters
46 |         ----------
47 |         size : tuple of size 3
48 |             Relative size of ellipsoid axes.
49 |         rotation : tuple of size 2
50 |             Polar and azimuthal angles of ellipsoid rotation in radians.
51 |         """
52 | 
53 |         if rotation is not None:
54 |             th, ph = rotation
55 |         else:
56 |             th, ph = (0, 0)
57 | 
58 |         x = np.sin(self.Theta) * np.cos(self.Phi) * np.cos(th) + np.cos(self.Theta) * np.sin(th)
59 |         y = np.sin(self.Theta) * np.sin(self.Phi)
60 |         z = np.cos(self.Theta) * np.cos(th) - np.sin(self.Theta) * np.cos(self.Phi) * np.sin(th)
61 |         self.Rgrid = np.sqrt(1 / ((x / size[0]) ** 2 + (y / size[1]) ** 2 + (z / size[2]) ** 2))
62 | 
63 |         if ph > 0:
64 |             phi = self.Phi[1]
65 |             i = np.argmin(abs(phi - ph))
66 |             R = np.zeros_like(self.Rgrid)
67 |             R[:, :i] = self.Rgrid[:, -i:]
68 |             R[:, i:] = self.Rgrid[:, :-i]
69 | 
70 |             self.Rgrid = R
71 | 
72 |         self.name = 'Ellipsoid; half-axes=' + str(size[0]) + '-' + str(size[1]) + '-' + str(size[2]) \
73 |                     + '; grid size=' + str(self.Rgrid.shape[0]) + 'x' + str(self.Rgrid.shape[1]) \
74 |                     + '; rotation $\\theta$=' + str(round(th, 2)) + ', $\\phi$=' + str(round(ph, 2))
75 | 
76 |     def profile_xy(self, r=None):
77 |         """
78 |         Plot the surface profile along the xy plane cut through the middle of the ellipsoid.
79 | 
80 |         Parameters
81 |         ----------
82 |         r : numpy.ndarray, optional
83 |             The ellipsoid surface to project.
84 |             If None, self.Rgrid will be used.
85 |             Default is None.
86 | 
87 |         Returns
88 |         -------
89 |         Profile : the generated projection.
90 |         """
91 |         if r is None:
92 |             r = self.Rgrid
93 |         r = r[int(r.shape[0] / 2)]
94 |         theta = self.Phi[int(self.Theta.shape[0] / 2)]
95 |         profile = Profile(r, theta)
96 |         return profile
97 | 
98 |     def profile_xz(self, r=None):
99 |         """
100 |         Plot the surface profile along the xz plane cut through the middle of the ellipsoid.
101 | 
102 |         Parameters
103 |         ----------
104 |         r : numpy.ndarray, optional
105 |             The ellipsoid surface to project.
106 |             If None, self.Rgrid will be used.
107 |             Default is None.
108 | 
109 |         Returns
110 |         -------
111 |         Profile : the generated projection.
112 |         """
113 |         if r is None:
114 |             r = self.Rgrid
115 |         r = np.concatenate((r[:, int(r.shape[1] / 2)][::-1], r[:, 0]))
116 |         theta = np.concatenate((2 * np.pi - self.Theta[:, int(self.Theta.shape[1] / 2)][::-1], self.Theta[:, 0]))
117 |         profile = Profile(r, theta)
118 |         return profile
119 | 
120 |     def profile_yz(self, r=None):
121 |         """
122 |         Plot the surface profile along the yz plane cut through the middle of the ellipsoid.
123 | 
124 |         Parameters
125 |         ----------
126 |         r : numpy.ndarray, optional
127 |             The ellipsoid surface to project.
128 |             If None, self.Rgrid will be used.
129 |             Default is None.
130 | 
131 |         Returns
132 |         -------
133 |         Profile : the generated projection.
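
        Examples
        --------
        A minimal, hypothetical sketch; the grid shape, half-axes and output
        path below are illustrative only, not values used anywhere in this package:

        >>> ell = Ellipsoid(grid_shape=(20, 20), size=(1.0, 0.8, 0.5))
        >>> ell.profile_yz().save('ellipsoid_yz_profile.png')  # polar plot of the yz cut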
134 | """ 135 | if r is None: 136 | r = self.Rgrid 137 | r = np.concatenate((r[:, int(round(r.shape[1] * 3 / 4))][::-1], r[:, int(round(r.shape[1] / 4))])) 138 | theta = np.concatenate((2 * np.pi - self.Theta[:, int(round(self.Theta.shape[1] * 3 / 4))][::-1], 139 | self.Theta[:, int(round(self.Theta.shape[1] / 4))])) 140 | profile = Profile(r, theta) 141 | return profile 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | -------------------------------------------------------------------------------- /SPHARM/classes/image_stack.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import re 5 | import numpy as np 6 | import pandas as pd 7 | from skimage import measure 8 | from skimage.segmentation import find_boundaries 9 | from skimage import io 10 | from skimage.exposure import rescale_intensity 11 | from mayavi import mlab 12 | import warnings 13 | 14 | from helper_lib import filelib 15 | 16 | 17 | class ImageStack(object): 18 | """ 19 | Class for handling 3D stacks 20 | """ 21 | 22 | def __init__(self, filename, load=True): 23 | """ 24 | Initialize an instance of the "ImageStack" class from a file name. 25 | 26 | Parameters 27 | ---------- 28 | filename : str 29 | Path to the file with image data, or a string to extract metadata. 30 | load : bool, optional 31 | If True, the image data will be loaded from the file specified by `filename`. 32 | Default is True. 33 | """ 34 | self.filename = filename 35 | self.data = None 36 | self.channel = None 37 | self.timepoint = None 38 | self.channel = 0 39 | self.parse_filename() 40 | if load: 41 | self.load_data() 42 | 43 | def parse_filename(self): 44 | """ 45 | Extract information about the time point and the channel from the filename given by `self.filename` 46 | """ 47 | filename = self.filename 48 | 49 | p = re.compile('\d+') 50 | parts = filename.split('Time') 51 | if len(parts) > 1: 52 | num = p.findall(parts[-1]) 53 | if len(num) > 0: 54 | self.timepoint = int(num[-1]) 55 | 56 | p1 = re.compile('C.*Time') 57 | parts = p1.findall(filename) 58 | if len(parts) > 0: 59 | num = p.findall(parts[-1]) 60 | if len(num) > 0: 61 | self.channel = int(num[0]) 62 | 63 | def load_data(self): 64 | """ 65 | Load the image data from the file specified by `self.filename` 66 | """ 67 | self.data = io.imread(self.filename) 68 | 69 | def save(self, filename=None): 70 | """ 71 | Save the image given in `self.data` into a given file. 72 | 73 | Parameters 74 | ---------- 75 | filename : str, optional 76 | Path to a file to save the image. 77 | If None, the image will be saved into a file specified by `self.filename`. 78 | Default is None. 79 | """ 80 | if filename is None: 81 | filename = self.filename 82 | 83 | filelib.make_folders([os.path.dirname(filename)]) 84 | 85 | if self.data.max() > 255: 86 | self.data = self.data.astype(np.uint16) 87 | else: 88 | self.data = self.data.astype(np.uint8) 89 | 90 | with warnings.catch_warnings(): 91 | warnings.simplefilter("ignore") 92 | io.imsave(filename, self.data) 93 | 94 | def save_max_projection(self, filename, axis=0): 95 | """ 96 | Save maximum projection of the image on a given axis. 97 | 98 | Parameters 99 | ---------- 100 | filename : str 101 | Path to save the maximum projection. 102 | axis : int, optional 103 | Axis for the maximum projection. 104 | Default is 0. 
105 | """ 106 | maxproj = np.max(self.data, axis=axis) 107 | filelib.make_folders([os.path.dirname(filename)]) 108 | if maxproj.max() > 255: 109 | maxproj = rescale_intensity(maxproj, out_range=(0, 255)) 110 | with warnings.catch_warnings(): 111 | warnings.simplefilter("ignore") 112 | io.imsave(filename, maxproj.astype(np.uint8)) 113 | 114 | def extract_surfaces(self, outputfolder, voxel_size=1, reconstruct=True, min_coord=None): 115 | """ 116 | Extract surface coordinates of each connected region. 117 | 118 | Parameters 119 | ---------- 120 | outputfolder : str 121 | Path to a directory to save the surfaces. 122 | voxel_size : scalar or sequence of scalars, optional 123 | Voxel size of the image. 124 | Specified either by individual value for each axis, or by one value for all axes. 125 | Default is 1. 126 | reconstruct : bool, optional 127 | If True, surfaces will be reconstructed by the marching cube algorithm, 128 | and coordiantes of the vertices will be extracted. 129 | If False, coordinates of the voxels connected to the background will be extracted. 130 | Default is True. 131 | min_coord : sequence of scalars, optional 132 | Starting coordinates of the surface. 133 | Three values: for z, y, and x are expected. 134 | In not None, these values will be added to all surface coordinates. 135 | """ 136 | 137 | voxel_size = np.array([voxel_size]).flatten() 138 | if len(voxel_size) < 3: 139 | voxel_size = [voxel_size[0]]*3 140 | filelib.make_folders([outputfolder + os.path.dirname(self.filename)]) 141 | 142 | llist = np.unique(self.data)[1:] 143 | 144 | if not reconstruct: 145 | border = find_boundaries(self.data)*self.data 146 | 147 | for i, l in enumerate(llist): 148 | if reconstruct: 149 | mask = np.where(self.data == l, 1, 0) 150 | verts = np.array(measure.marching_cubes_lewiner(mask, 0, spacing=tuple(voxel_size))[0]) 151 | verts = verts.transpose() 152 | else: 153 | verts = np.array(np.where(border == l)) 154 | for iv in range(3): 155 | verts[iv] = verts[iv]*voxel_size[iv] 156 | if min_coord is not None: 157 | for i in range(3): 158 | verts[i] += min_coord[i] 159 | stat = pd.DataFrame({'Z': verts[0], 'Y': verts[1], 'X': verts[2]}) 160 | stat['Cell_ID'] = l 161 | stat['Image_name'] = self.filename 162 | if len(stat) > 0: 163 | stat.to_csv(outputfolder + self.filename[:-4] + '_Cell%05d.csv' % l, sep='\t') 164 | 165 | def save_3Dview(self, filename, voxel_size=1): 166 | """ 167 | Save a 3D view of the image stack. 168 | 169 | Parameters 170 | ---------- 171 | filename : str 172 | Path to save the 3D view. 173 | voxel_size : scalar or sequence of scalars, optional 174 | Voxel size of the image. 175 | Specified either by individual value for each axis, or by one value for all axes. 176 | Default is 1. 
177 | """ 178 | filelib.make_folders([os.path.dirname(filename)]) 179 | voxel_size = np.array([voxel_size]).flatten() 180 | if len(voxel_size) < 3: 181 | voxel_size = [voxel_size[0]]*3 182 | mlab.clf() 183 | s = mlab.pipeline.scalar_field(self.data) 184 | s.spacing = voxel_size/np.min(voxel_size)*5 185 | mesh = mlab.pipeline.volume(s, color=(1, 0, 0)).scene 186 | mesh.background = (1, 1, 1) 187 | mesh.magnification = 3 188 | mesh.save(filename, size=(200, 200)) 189 | 190 | 191 | 192 | 193 | 194 | -------------------------------------------------------------------------------- /SPHARM/classes/moving_surface.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import numpy as np 4 | from skimage import io 5 | import warnings 6 | 7 | from helper_lib import filelib 8 | 9 | from SPHARM.classes.time_spectrum import TimeSpectrum 10 | 11 | 12 | class MovingSurface(object): 13 | """ 14 | Class for storing the dynamics of an object surface. 15 | """ 16 | def __init__(self, name=None): 17 | """ 18 | Initiate the time series for the surface. 19 | 20 | Parameters 21 | ---------- 22 | name : str, optional 23 | Name of the surface to add to the output file when saving results. 24 | Default is None. 25 | """ 26 | if name is None: 27 | self.name = '' 28 | else: 29 | self.name = name 30 | self.surfaces = [] 31 | self.times = [] 32 | self.timespectrum = TimeSpectrum(name=name) 33 | self.minmax = np.array([[1000., -1000.], [1000., -1000.], [1000., -1000.]]) 34 | self.centers = [] 35 | 36 | def add_surface(self, surface, timepoint=None): 37 | """ 38 | Add new surface to the time series. 39 | 40 | Parameters 41 | ---------- 42 | surface : Surface 43 | Object surface at one time point. 44 | timepoint : scalar, optional 45 | Time point to assign to the surface. 46 | If None, the number of previously added surfaces will be used to label the time point 47 | (e.g. 0 if none were added). 48 | Default is None. 49 | """ 50 | self.surfaces.append(surface) 51 | if timepoint is None: 52 | self.times.append(len(self.surfaces)) 53 | else: 54 | self.times.append(timepoint) 55 | self.minmax[0, 0] = min(self.minmax[0, 0], np.min(surface.z)) 56 | self.minmax[1, 0] = min(self.minmax[1, 0], np.min(surface.y)) 57 | self.minmax[2, 0] = min(self.minmax[2, 0], np.min(surface.x)) 58 | self.minmax[0, 1] = max(self.minmax[0, 1], np.max(surface.z)) 59 | self.minmax[1, 1] = max(self.minmax[1, 1], np.max(surface.y)) 60 | self.minmax[2, 1] = max(self.minmax[2, 1], np.max(surface.x)) 61 | self.centers.append(surface.center) 62 | 63 | def compute_timespectrum(self, gridsize): 64 | """ 65 | Compute a SPHARM spectrum for each surface and generate a TimeSpectrum object. 66 | 67 | Parameters 68 | ---------- 69 | gridsize : int, optional 70 | Dimension of the square grid to interpolate the surface points. 71 | Will be used to interpolate the surface coordinates if self.Rgrid is None 72 | (in this case it is a mandatory parameter). 73 | Default is None. 74 | """ 75 | for i, surface in enumerate(self.surfaces): 76 | surface.compute_spharm(grid_size=gridsize) 77 | self.timespectrum.add_spectrum(surface.spharm, timepoint=self.times[i]) 78 | 79 | def plot_surfaces(self, outputfolder, points=False): 80 | """ 81 | Plot 3D views of all surfaces with mayavi and save to png files. 82 | 83 | Parameters 84 | ---------- 85 | outputfolder : str 86 | Directory to save the plots. 87 | points : bool, optional 88 | If True, surface points will be displayed. 89 | Default is False. 
90 | """ 91 | filelib.make_folders([outputfolder]) 92 | extent = np.array([self.minmax[2], self.minmax[1], self.minmax[0]]).flatten() 93 | for i, surface in enumerate(self.surfaces): 94 | mesh = surface.plot_surface(points=points, extent=extent) 95 | mesh.magnification = 3 96 | mesh.save(outputfolder + self.name + '_%03d.png' % self.times[i], size=(200, 200)) 97 | 98 | def plot_max_projections(self, outputfolder, voxel_size): 99 | """ 100 | Plot maxium projections of all surfaces and save to png files. 101 | 102 | Parameters 103 | ---------- 104 | outputfolder : str 105 | Directory to save the plots. 106 | voxel_size : scalar or sequence of scalars 107 | Voxel size of the image. 108 | Specified either by individual value for each axis, or by one value for all axes. 109 | """ 110 | filelib.make_folders([outputfolder]) 111 | voxel_size = np.array([voxel_size]).flatten() 112 | if len(voxel_size) == 1: 113 | voxel_size = np.ones(3)*voxel_size 114 | for i, surface in enumerate(self.surfaces): 115 | stack = surface.as_stack(voxel_size=voxel_size, minmax=self.minmax) 116 | 117 | with warnings.catch_warnings(): 118 | warnings.simplefilter("ignore") 119 | io.imsave(outputfolder + 'xy_' + self.name + '_%03d.png' % self.times[i], 120 | stack.max(0).astype(np.uint8)) 121 | io.imsave(outputfolder + 'xz_' + self.name + '_%03d.png' % self.times[i], 122 | stack.max(1).astype(np.uint8)) 123 | io.imsave(outputfolder + 'yz_' + self.name + '_%03d.png' % self.times[i], 124 | stack.max(2).astype(np.uint8)) 125 | 126 | 127 | 128 | 129 | -------------------------------------------------------------------------------- /SPHARM/classes/node.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import re 4 | import numpy as np 5 | import pandas as pd 6 | 7 | 8 | class Node(object): 9 | """ 10 | Class for extracting the structure of vrml files 11 | """ 12 | def __init__(self, dict): 13 | """ 14 | Initialize an instance of the "Node" class from a file name. 15 | 16 | Parameters 17 | ---------- 18 | dict : tuple of type (str, char) 19 | A tuple containing the name of the node and 20 | the type of closing bracket that determines the end of the node content (e.g. '}') 21 | """ 22 | self.name = dict[0] 23 | self.bracket = dict[1] 24 | self.children = [] 25 | self.text = '' 26 | 27 | def add_child(self, node): 28 | """ 29 | Add a child node to the list of childs. 30 | 31 | Parameters 32 | ---------- 33 | node : Node 34 | The child node. 35 | """ 36 | self.children.append(node) 37 | 38 | def add_text(self, text): 39 | """ 40 | Append given text to the current text variable. 41 | 42 | Parameters 43 | ---------- 44 | text : str 45 | The text to append. 46 | """ 47 | self.text = self.text + text 48 | 49 | def print_children(self, offset=None, outputfile=None): 50 | """ 51 | Print the name of the current node and all child nodes. 52 | 53 | Parameters 54 | ---------- 55 | offset : str, optional 56 | Horizontal offset to print the child nodes (e.g. ' '). 57 | If None, the offset is set to an empty string ('', no offset). 58 | Default is None. 59 | outputfile : file, optional 60 | File to save the output. 61 | If None, the output will be printed out in the console. 62 | Default is None. 
63 | """ 64 | if offset is None: 65 | offset = '' 66 | if outputfile is None: 67 | print(offset + self.name) 68 | else: 69 | outputfile.write(offset + self.name + '\n') 70 | for i in range(len(self.children)): 71 | self.children[i].print_children(offset=offset + ' ', outputfile=outputfile) 72 | 73 | def extract_key_nodes(self, key, nodes=None): 74 | """ 75 | Extract a list of (child) nodes with a given name. 76 | 77 | Parameters 78 | ---------- 79 | key : str 80 | The key name to extract. 81 | nodes : list, optional 82 | The list of already extracted nodes. 83 | If None, the value is set to an empty list. 84 | Default is None. 85 | """ 86 | if nodes is None: 87 | nodes = [] 88 | if self.name == key: 89 | nodes.append(self) 90 | for i in range(len(self.children)): 91 | self.children[i].extract_key_nodes(key, nodes=nodes) 92 | 93 | def extract_coordinates(self, curcoords): 94 | """ 95 | Extract cell coordinates. 96 | 97 | Parameters 98 | ---------- 99 | curcoords : list 100 | Previously extracted coordinates. 101 | """ 102 | stat = pd.DataFrame() 103 | p = re.compile('[-+]?\d+\.*\d*e?[-+]?\d*') 104 | for child in self.children: 105 | if child.name == 'Coordinate': 106 | for ch in child.children: 107 | if ch.name == 'point': 108 | num = np.float_(p.findall(ch.text)) 109 | curcoords = np.reshape(num, [int(len(num)/3), 3]) 110 | 111 | if child.name == 'coordIndex': 112 | ind = np.unique(np.int_(p.findall(child.text))) 113 | ind = ind[np.where(ind >= 0)] 114 | coord = curcoords[ind] 115 | stat['X'] = coord[:, 0] 116 | stat['Y'] = coord[:, 1] 117 | stat['Z'] = coord[:, 2] 118 | return stat, curcoords -------------------------------------------------------------------------------- /SPHARM/classes/orbital.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import numpy as np 4 | from scipy.special import sph_harm 5 | import SPHARM.lib.transformation as tr 6 | 7 | from SPHARM.classes.surface import Surface 8 | 9 | 10 | class Orbital(Surface): 11 | """ 12 | Class for a surface of generated from spherical harmonics. 13 | """ 14 | def __init__(self, grid_shape, m, n, amplitude): 15 | """ 16 | Initialize a surface from given spherical harmonics. 17 | 18 | Parameters 19 | ---------- 20 | grid_shape : tuple of size 2 21 | Shape of the 2D grid (phi-theta grid). 22 | m : scalar or sequence of scalars 23 | Degree(s) of spherical harmonics to generate. 24 | Must be (a) non-negative number(s). 25 | n : scalar or sequence of scalars 26 | Order(s) of spherical harmonics to generate. 27 | Must be of the same length as m. 28 | Must be in the range [-m; m] of the corresponding degree. 29 | amplitude : scalar or sequence of scalars 30 | Relative amplitude(s) of corresponding harmonics. 31 | Must be of the same length as m. 
32 | """ 33 | super(Orbital, self).__init__() 34 | theta = np.linspace(0, np.pi, grid_shape[0], endpoint=False) # polar angle 35 | phi = np.linspace(0, 2 * np.pi, grid_shape[1], endpoint=False) # azimuthal angle 36 | self.Phi, self.Theta = np.meshgrid(phi, theta) 37 | self.Rgrid = np.zeros_like(self.Theta) 38 | 39 | self.generate(np.array([m]).flatten(), np.array([n]).flatten(), np.array([amplitude]).flatten()) 40 | self.phi = self.Phi.flatten() 41 | self.theta = self.Theta.flatten() 42 | self.R = self.Rgrid.flatten().real 43 | self.x, self.y, self.z = tr.spherical_to_cart(self.R, self.theta, self.phi) 44 | 45 | def generate(self, m, n, amplitude): 46 | """ 47 | Generate a surface from given spherical harmonic degree(s), order(s) and amplitude(s). 48 | 49 | Parameters 50 | ---------- 51 | m : scalar or sequence of scalars 52 | Degree(s) of spherical harmonics to generate. 53 | Must be (a) non-negative number(s). 54 | n : scalar or sequence of scalars 55 | Order(s) of spherical harmonics to generate. 56 | Must be of the same length as m. 57 | Must be in the range [-m; m] of the corresponding degree. 58 | amplitude : scalar or sequence of scalars 59 | Relative amplitude(s) of corresponding harmonics. 60 | Must be of the same length as m. 61 | """ 62 | for i in range(len(m)): 63 | self.Rgrid = self.Rgrid + sph_harm(n[i], m[i], self.Phi, self.Theta)*amplitude[i] 64 | 65 | 66 | -------------------------------------------------------------------------------- /SPHARM/classes/profile.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import pylab as plt 5 | from helper_lib.filelib import make_folders 6 | 7 | 8 | class Profile(object): 9 | """ 10 | Class to handle the surface profile of an ellipsoid obtained after a cut by a xy, yz or xz plane. 11 | """ 12 | def __init__(self, r, theta): 13 | """ 14 | Generate a profile from give polar angle and radius. 15 | 16 | Parameters 17 | ---------- 18 | r : sequence of scalars 19 | Radius 20 | theta : sequence of scalars 21 | Polar angle 22 | """ 23 | self.theta = theta 24 | self.R = r 25 | 26 | def plot(self): 27 | """ 28 | Plot and display the profile. 29 | """ 30 | plt.clf() 31 | ax = plt.subplot(111, projection='polar') 32 | ax.plot(self.theta, self.R, color='r', linewidth=3) 33 | plt.show() 34 | 35 | def save(self, outputfile): 36 | """ 37 | Plot the profile and save to a given file. 38 | 39 | Parameters 40 | ---------- 41 | outputfile : str 42 | Output file name to save the plotted profile. 43 | """ 44 | make_folders([os.path.dirname(outputfile)]) 45 | plt.clf() 46 | ax = plt.subplot(111, projection='polar') 47 | ax.plot(self.theta, self.R, color='r', linewidth=3) 48 | plt.savefig(outputfile) 49 | 50 | -------------------------------------------------------------------------------- /SPHARM/classes/spectrum.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import numpy as np 5 | import pandas as pd 6 | import pylab as plt 7 | import pyshtools.expand as shtools 8 | import seaborn as sns 9 | 10 | from helper_lib import filelib 11 | 12 | 13 | class Spectrum(object): 14 | """ 15 | Class for storing and handling the SPHARM spectrum of a surface. 16 | """ 17 | 18 | def __init__(self, surface=None, filename=None, name=None): 19 | """ 20 | Initialize spectrum from given surface or file. 
21 | 
22 |         Parameters
23 |         ----------
24 |         surface : numpy.ndarray, dimension (n, n) or (n, 2*n), n is even, optional
25 |             A 2D equally sampled (default) or equally spaced complex grid
26 |             that conforms to the sampling theorem of Driscoll and Healy (1994).
27 |             The first latitudinal band corresponds to 90 N, the latitudinal band for 90 S is not included,
28 |             and the latitudinal sampling interval is 180/n degrees.
29 |             The first longitudinal band is 0 E, the longitude band for 360 E is not included,
30 |             and the longitudinal sampling interval is 360/n for an equally sampled
31 |             and 180/n for an equally spaced grid, respectively.
32 |             If None, an empty spectrum will be initialized.
33 |             Default is None.
34 |         filename : str, optional
35 |             Path to a surface file to read the surface data.
36 |             If None, an empty spectrum will be initialized.
37 |             Default is None.
38 |         name : str, optional
39 |             Name of the spectrum to display during plotting.
40 |             If None, the file name will be displayed.
41 |             Default is None.
42 |         """
43 |         self.harmonics_csv = None
44 |         self.harmonics_shtools = None
45 |         self.frequency_spectrum = None
46 |         self.name = name
47 |         if surface is not None:
48 |             self.from_surface(surface)
49 |         elif filename is not None:
50 |             self.from_file(filename)
51 |         if name is None:
52 |             self.name = filename
53 |         self.metadata = pd.Series()
54 | 
55 |     def from_surface(self, surface, normalize=False, normalization_method='zero-component'):
56 |         """
57 |         Initialize the spectrum from a given surface.
58 | 
59 |         Parameters
60 |         ----------
61 |         surface : numpy.ndarray, dimension (n, n) or (n, 2*n), n is even
62 |             A 2D equally sampled (default) or equally spaced complex grid
63 |             that conforms to the sampling theorem of Driscoll and Healy (1994).
64 |             The first latitudinal band corresponds to 90 N, the latitudinal band for 90 S is not included,
65 |             and the latitudinal sampling interval is 180/n degrees.
66 |             The first longitudinal band is 0 E, the longitude band for 360 E is not included,
67 |             and the longitudinal sampling interval is 360/n for an equally sampled
68 |             and 180/n for an equally spaced grid, respectively.
69 |         normalize : bool, optional
70 |             If True, the values of the spectrum will be normalized according to the `normalization_method`.
71 |             Default is False.
72 |         normalization_method : str, optional
73 |             If 'mean-radius', the grid values will be divided by the mean grid value prior to the SPHARM transform.
74 |             If 'zero-component', all spectral components will be divided by the value of the first component (m=0, n=0).
75 |             Default is 'zero-component'.
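
        Examples
        --------
        A minimal sketch; the 16 x 16 complex grid below is random and purely
        illustrative:

        >>> import numpy as np
        >>> sp = Spectrum()
        >>> harm = sp.from_surface(np.random.rand(16, 16).astype(complex), normalize=True)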
76 | """ 77 | if surface.shape[1] % 2 or surface.shape[0] % 2: 78 | raise ValueError("The number of samples in latitude and longitude, n, must be even") 79 | if surface.shape[1] == surface.shape[0]: 80 | s = 1 81 | elif surface.shape[1] == 2*surface.shape[0]: 82 | s = 2 83 | else: 84 | raise ValueError("GRIDDH must be dimensioned as (N, 2*N) or (N, N)") 85 | if normalization_method not in ['zero-component', 'mean-radius']: 86 | raise ValueError("Invalid value for `method`: must be \'zero-component\' or \'mean-radius\'") 87 | if normalize is True and normalization_method == 'mean-radius': 88 | surface = surface/np.mean(np.abs(surface)) 89 | self.harmonics_shtools = shtools.SHExpandDHC(surface, sampling=s) 90 | if normalize is True and normalization_method == 'zero-component': 91 | self.harmonics_shtools = self.harmonics_shtools / self.harmonics_shtools[0][0, 0] 92 | self.convert_to_csv() 93 | return self.harmonics_shtools 94 | 95 | def spharm_to_surface(self, lmax=None): 96 | """ 97 | Inverse transform the SPHARM spectrum to surface using the given number of components. 98 | 99 | Parameters 100 | ---------- 101 | lmax : int, optional 102 | The maximum spherical harmonic degree to be used in the inverse transform. 103 | If None, all degrees will be used. 104 | Default is None. 105 | 106 | Returns 107 | ------- 108 | ndarray : reconstructed surface grid. 109 | """ 110 | grid = shtools.MakeGridDHC(self.harmonics_shtools, lmax_calc=lmax).real 111 | return grid 112 | 113 | def convert_to_csv(self): 114 | """ 115 | Convert the spectrum from the pyshtools format to a table form. 116 | """ 117 | harm = self.harmonics_shtools 118 | harmdata = pd.DataFrame() 119 | for degree in range(len(harm[0])): 120 | for order in range(degree + 1): 121 | harmdata = harmdata.append(pd.Series({'degree': int(degree), 122 | 'order': int(order), 123 | 'value': harm[0][degree, order]}), ignore_index=True) 124 | 125 | for order in range(1, degree + 1): 126 | harmdata = harmdata.append(pd.Series({'degree': int(degree), 127 | 'order': -int(order), 128 | 'value': harm[1][degree, order]}), ignore_index=True) 129 | 130 | harmdata['amplitude'] = np.abs(harmdata['value']) 131 | harmdata['power'] = harmdata['amplitude']**2 132 | harmdata['real'] = np.real(harmdata['value']) 133 | harmdata['imag'] = np.imag(harmdata['value']) 134 | harmdata['degree'] = np.int_(np.real(harmdata['degree'])) 135 | harmdata['order'] = np.int_(np.real(harmdata['order'])) 136 | harmdata['harmonic'] = '' 137 | for i in range(len(harmdata)): 138 | harmdata.at[i, 'harmonic'] = 'm=' + str(harmdata.iloc[i]['degree']) \ 139 | + ' n=' + str(harmdata.iloc[i]['order']) 140 | 141 | self.harmonics_csv = harmdata 142 | return harmdata 143 | 144 | def convert_to_shtools_array(self): 145 | """ 146 | Convert the spectrum from the table form to pyshtools format. 
147 | """ 148 | harmdata = self.harmonics_csv 149 | size = len(harmdata['degree'].unique()) 150 | harm = np.zeros([2, size, size], dtype=complex) 151 | for degree in range(len(harm[0])): 152 | for order in range(degree + 1): 153 | line = harmdata[(harmdata['degree'] == degree) & (harmdata['order'] == order)].iloc[0] 154 | harm[0][degree, order] = line['real'] + 1j * line['imag'] 155 | 156 | for order in range(1, degree + 1): 157 | line = harmdata[(harmdata['degree'] == degree) & (harmdata['order'] == -order)].iloc[0] 158 | harm[1][degree, order] = line['real'] + 1j * line['imag'] 159 | 160 | self.harmonics_shtools = harm 161 | return harm 162 | 163 | def from_file(self, filename): 164 | """ 165 | Read the spectrum in the table form from a given file. 166 | 167 | Parameters 168 | ---------- 169 | filename : str 170 | Path to the spectrum file. 171 | """ 172 | if os.path.exists(filename): 173 | self.harmonics_csv = pd.read_csv(filename, sep='\t', index_col=0) 174 | self.convert_to_shtools_array() 175 | else: 176 | raise ValueError('Input file does not exist!') 177 | 178 | def save_to_csv(self, filename): 179 | """ 180 | Save the table form of the spectrum to a csv file. 181 | 182 | Parameters 183 | ---------- 184 | filename : str 185 | Path to the output file. 186 | """ 187 | if filename is not None: 188 | filelib.make_folders([os.path.dirname(filename)]) 189 | for col in self.metadata.index: 190 | self.harmonics_csv[col] = self.metadata[col] 191 | self.harmonics_csv.to_csv(filename, sep='\t') 192 | 193 | def compute_frequency_spectrum(self, norm=False): 194 | """ 195 | Compute the frequency spectrum by summarizing all orders of a given degree. 196 | 197 | Parameters 198 | ---------- 199 | norm : bool, optional 200 | If True, each component of the frequency spectrum will be divided by the value of the zero frequency. 201 | Default is False. 202 | """ 203 | stat = self.harmonics_csv.groupby(['degree']).sum().reset_index() 204 | if norm: 205 | maxline = stat[stat['degree'] == 0].iloc[0] 206 | for col in stat.columns: 207 | if col != 'degree': 208 | stat.loc[:, col] = stat[col] / maxline[col] 209 | stat['amplitude'] = np.sqrt(stat['power']) 210 | stat['harmonic'] = stat['degree'] 211 | self.frequency_spectrum = stat 212 | 213 | def save_frequency_spectrum_to_csv(self, filename, name=None): 214 | """ 215 | Save the frequency spectrum to a csv file. 216 | 217 | Parameters 218 | ---------- 219 | filename : str 220 | Path to the output file. 221 | name : str, optional 222 | Text to label the spectrum. 223 | If None, the value of `self.name` will be used. 224 | """ 225 | if filename is not None: 226 | filelib.make_folders([os.path.dirname(filename)]) 227 | if name is not None: 228 | self.frequency_spectrum['Name'] = name 229 | elif self.name is not None: 230 | self.frequency_spectrum['Name'] = self.name 231 | else: 232 | self.frequency_spectrum['Name'] = filename 233 | self.frequency_spectrum.to_csv(filename, sep='\t') 234 | 235 | def heatmap(self, value='amplitude', title=None, cutoff=None, logscale=False, **kwargs): 236 | """ 237 | Plot the SPHARM spectrum as a heatmap. 238 | 239 | Parameters 240 | ---------- 241 | value : str, optional 242 | Part of the complex spectrum to plot. 243 | Valid values: 'amplitude', 'power', 'real', 'imag'. 244 | Default is 'amplitude'. 245 | title : str, optional 246 | Text to display in the plot title. 247 | Default is None. 248 | cutoff : int, optional 249 | The number of degrees to display. 250 | If None, all degrees will be displayed. 
251 | logscale : bool, optional 252 | If True, the logarithm of the value will be plotted. 253 | Default is False. 254 | kwargs : key, value pairings 255 | Arbitrary keyword arguments to pass to the seaborn.heatmap function. 256 | 257 | Returns 258 | ------- 259 | seaborn.heatmap().figure 260 | The heatmap with the SPHARM degree displayed vertically and SPHARM order horizontally. 261 | 262 | """ 263 | norm = kwargs.pop('norm', False) 264 | stat = self.harmonics_csv 265 | if norm: 266 | stat.loc[:, value] = np.array(stat[value]) / stat[value].iloc[0] 267 | if cutoff is not None: 268 | stat = stat[stat.degree < cutoff] 269 | if logscale: 270 | stat.loc[:, value] = np.log(stat[value]) 271 | hm = stat.pivot('degree', 'order', value) 272 | plt.clf() 273 | plt.figure(figsize=(6, 5)) 274 | pl = sns.heatmap(hm, **kwargs) 275 | if title is None: 276 | if self.name is not None: 277 | title = self.name + '; value = ' + value 278 | else: 279 | title = 'value = ' + value 280 | plt.title(title) 281 | return pl.figure 282 | 283 | def frequency_plot(self, value='amplitude', title=None, cutoff=None, **kwargs): 284 | """ 285 | Plot the frequency spectrum as a bar plot. 286 | 287 | Parameters 288 | ---------- 289 | value : str, optional 290 | Part of the complex spectrum to plot. 291 | Valid values: 'amplitude', 'power', 'real', 'imag'. 292 | Default is 'amplitude'. 293 | title : str, optional 294 | Text to display in the plot title. 295 | Default is None. 296 | cutoff : int, optional 297 | The number of frequency components to display. 298 | If None, all frequencies will be displayed. 299 | kwargs : key, value pairings 300 | Arbitrary keyword arguments to pass to the seaborn.barplot function. 301 | 302 | Returns 303 | ------- 304 | seaborn.barplot().figure 305 | The bar plot with the values of spectral frequencies. 306 | """ 307 | norm = kwargs.pop('norm', False) 308 | if self.frequency_spectrum is None: 309 | self.compute_frequency_spectrum(norm=norm) 310 | stat = self.frequency_spectrum 311 | if cutoff is not None: 312 | stat = stat[stat.degree < cutoff] 313 | plt.clf() 314 | pl = sns.barplot(data=stat, x='degree', y=value, **kwargs) 315 | plt.xticks(stat['degree'].unique(), rotation='vertical') 316 | if title is None: 317 | if self.name is not None: 318 | title = self.name + '; value = ' + value 319 | else: 320 | title = 'value = ' + value 321 | plt.title(title) 322 | return pl.figure 323 | 324 | def return_feature_vector(self, cutoff=None, static_features='amplitude', rotation_invariant=True): 325 | """ 326 | Return the amplitudes of all harmonic components below a given degree. 327 | 328 | Parameters 329 | ---------- 330 | cutoff : int, optional 331 | The number of frequency components to display. 332 | If None, all frequencies will be displayed. 333 | static_features : str, optional 334 | Name of the feature to represent the harmonic coefficients. 335 | Valid values: 'amplitude', 'real_imag'. 336 | Default is 'amplitude' 337 | rotation_invariant : bool, optional 338 | If True, rotation-invariant descriptors (frequencies) will be computed. 339 | If False, the whole spectrum will be used as a feature vector. 340 | Default is True. 
341 | 342 | Returns 343 | ------- 344 | numpy.array 345 | The returned feature vector 346 | """ 347 | stat = self.harmonics_csv 348 | if cutoff is None: 349 | cutoff = np.max(stat.degree) 350 | if static_features == 'real_imag': 351 | static_features = ['real', 'imag'] 352 | else: 353 | static_features = [static_features] 354 | if rotation_invariant: 355 | self.compute_frequency_spectrum(norm=False) 356 | stat = self.frequency_spectrum 357 | 358 | features = [] 359 | for value in static_features: 360 | features = features + list(stat[value][stat.degree < cutoff + 1]) 361 | return np.array(features) 362 | 363 | 364 | 365 | 366 | 367 | 368 | 369 | 370 | 371 | 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | 380 | 381 | -------------------------------------------------------------------------------- /SPHARM/classes/stratified_group_shuffle_split.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | from __future__ import division 3 | 4 | import numpy as np 5 | 6 | from sklearn.utils.validation import check_array 7 | from sklearn.model_selection import StratifiedShuffleSplit 8 | 9 | 10 | class GroupShuffleSplitStratified(StratifiedShuffleSplit): 11 | 12 | def __init__(self, n_splits=5, test_size=2, train_size=None, random_state=None): 13 | 14 | super(GroupShuffleSplitStratified, self).__init__( 15 | n_splits=n_splits, 16 | test_size=test_size, 17 | train_size=train_size, 18 | random_state=random_state) 19 | 20 | def _iter_indices(self, X, y, groups): 21 | if groups is None: 22 | raise ValueError("The 'groups' parameter should not be None.") 23 | groups = check_array(groups, ensure_2d=False, dtype=None) 24 | groups_unique, group_indices = np.unique(groups, return_inverse=True) 25 | classes = [] 26 | for gr in groups_unique: 27 | classes.append(y[np.where(groups==gr)[0][0]]) 28 | 29 | for group_train, group_test in super( 30 | GroupShuffleSplitStratified, self)._iter_indices(X=groups_unique, y=classes): 31 | # these are the indices of classes in the partition 32 | # invert them into data indices 33 | 34 | train = np.flatnonzero(np.in1d(group_indices, group_train)) 35 | test = np.flatnonzero(np.in1d(group_indices, group_test)) 36 | 37 | yield train, test 38 | 39 | def split(self, X, y=None, groups=None): 40 | return super(GroupShuffleSplitStratified, self).split(X, y, groups) 41 | 42 | 43 | -------------------------------------------------------------------------------- /SPHARM/lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/lib/__init__.py -------------------------------------------------------------------------------- /SPHARM/lib/classification.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import pandas as pd 3 | import numpy as np 4 | from sklearn import svm 5 | from sklearn.model_selection import cross_val_score 6 | from sklearn.model_selection import cross_val_predict 7 | from sklearn.model_selection import StratifiedShuffleSplit 8 | from sklearn.model_selection import StratifiedKFold 9 | from sklearn.model_selection import LeaveOneGroupOut 10 | 11 | from SPHARM.classes.spectrum import Spectrum 12 | from SPHARM.classes.time_spectrum import TimeSpectrum 13 | from SPHARM.classes.stratified_group_shuffle_split import GroupShuffleSplitStratified 14 | 15 | 16 | def 
extract_features(input_stat, cell_id='Name', group='Group', static=True, dynamic_features=None,
17 |                      timelength=10, static_features='amplitude', one_time_point=True, rotation_invariant=True):
18 |     """
19 |     Extract spectral features for classification.
20 | 
21 |     Parameters
22 |     ----------
23 |     input_stat : pandas.DataFrame
24 |         Input data to extract features
25 |     cell_id : str
26 |         Column in the input data sheet to group connected time points.
27 |         Default is 'Name'.
28 |     group : str, optional
29 |         Column in the input data sheet to use for grouping.
30 |         Default is 'Group'.
31 |     static : bool, optional
32 |         If True, static features will be extracted.
33 |         If False, dynamic features will be extracted.
34 |         Default is True.
35 |     dynamic_features : str, optional
36 |         Name of the feature to use for computing shape dynamics.
37 |         Valid values: 'time', 'derivative', 'frequency'.
38 |         Default is None.
39 |     static_features : str, optional
40 |         Name of the feature to represent the harmonic coefficients.
41 |         Valid values: 'amplitude', 'real_imag'.
42 |         Default is 'amplitude'
43 |     timelength : int, optional
44 |         Number of time points to include into dynamic features.
45 |         Default is 10.
46 |     one_time_point : bool, optional
47 |         If True, only the first time point of each cell will be used.
48 |         If False, all time points will be used as independent samples.
49 |         Default is True.
50 |     rotation_invariant : bool, optional
51 |         If True, rotation-invariant descriptors (frequencies) will be computed.
52 |         If False, the whole spectrum will be used as a feature vector.
53 |         Default is True.
54 | 
55 |     Returns
56 |     -------
57 |     features : N x K numpy.array
58 |         The returned feature vector of N samples and K features
59 |     classes : array of length N
60 |         Labels of the true classes.
    names : array of length N
        Cell names (values of the `cell_id` column) of each sample.
    group_names : array of length N
        Group labels (values of the `group` column) of each sample.
    samples : array of length N
        Sample labels (values of the 'Sample' column, if present) of each sample.
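
    Examples
    --------
    A hedged sketch; `harm_table` is assumed to be a pandas.DataFrame of SPHARM
    coefficients in table form (e.g. concatenated outputs of `Spectrum.save_to_csv`)
    with 'Time', 'Group' and 'Name' columns:

    >>> features, classes, names, group_names, samples = extract_features(
    ...     harm_table, cell_id='Name', group='Group',
    ...     static=True, one_time_point=False)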
61 | """ 62 | 63 | features = [] 64 | classes = [] 65 | names = [] 66 | group_names = [] 67 | samples = [] 68 | 69 | groups = input_stat[group].unique() 70 | for i in range(len(groups)): 71 | stat = input_stat[input_stat[group] == groups[i]] 72 | if len(stat) > 0: 73 | for name in stat[cell_id].unique(): 74 | subsubstat = stat[stat[cell_id] == name].reset_index() 75 | subsubstat = subsubstat.sort_values('Time') 76 | times = subsubstat['Time'].unique() 77 | if static: 78 | spectrum = Spectrum() 79 | if one_time_point: 80 | spectrum.harmonics_csv = subsubstat[subsubstat['Time'] == times[0]] 81 | features.append(spectrum.return_feature_vector(static_features=static_features, 82 | rotation_invariant=rotation_invariant)) 83 | classes.append(i) 84 | names.append(spectrum.harmonics_csv.iloc[0]['Name']) 85 | group_names.append(groups[i]) 86 | if 'Sample' in subsubstat.columns: 87 | samples.append(subsubstat['Sample'].iloc[0]) 88 | else: 89 | for t in times: 90 | spectrum.harmonics_csv = subsubstat[subsubstat['Time'] == t] 91 | features.append(spectrum.return_feature_vector(static_features=static_features, 92 | rotation_invariant=rotation_invariant)) 93 | classes.append(i) 94 | names.append(spectrum.harmonics_csv.iloc[0]['Name']) 95 | group_names.append(groups[i]) 96 | if 'Sample' in subsubstat.columns: 97 | samples.append(subsubstat['Sample'].iloc[0]) 98 | else: 99 | if len(times) >= timelength: 100 | spectrum = TimeSpectrum() 101 | for t in times[:timelength]: 102 | sp = Spectrum() 103 | sp.harmonics_csv = subsubstat[subsubstat['Time'] == t] 104 | spectrum.add_spectrum(sp, timepoint=t) 105 | features.append(spectrum.return_feature_vector(dynamic_features=dynamic_features, 106 | static_features=static_features, 107 | rotation_invariant=rotation_invariant)) 108 | classes.append(i) 109 | group_names.append(groups[i]) 110 | names.append(spectrum.data.iloc[0]['Name']) 111 | if 'Sample' in subsubstat.columns: 112 | samples.append(subsubstat['Sample'].iloc[0]) 113 | classes = np.array(classes) 114 | features = np.array(features) 115 | names = np.array(names) 116 | group_names = np.array(group_names) 117 | samples = np.array(samples) 118 | 119 | return features, classes, names, group_names, samples 120 | 121 | 122 | def predict_classes_loo(features, classes, C=1, groups=None): 123 | """ 124 | Computes accuracy and predicts classes by cross-validation 125 | Parameters 126 | ---------- 127 | features : array-like 128 | The data to fit. 129 | classes : array-like 130 | The target classes to try to predict. 131 | C : float, optional 132 | Penalty parameter C of the error term. 133 | Default is 1. 134 | groups : array-like, optional 135 | Group labels for the samples used while splitting the dataset into train/test set. 136 | Default is None. 137 | 138 | Returns 139 | ------- 140 | accuracy: pandas DataFrame 141 | Array of scores of the estimator for each run of the cross validation. 
142 | predicted: pandas DataFrame 143 | Predicted classes 144 | """ 145 | clf = svm.SVC(kernel='linear', C=C, cache_size=1000, decision_function_shape='ovo', random_state=0) 146 | cv = LeaveOneGroupOut() 147 | accuracy = pd.DataFrame({'Accuracy': cross_val_score(clf, X=features, y=classes, groups=groups, cv=cv)}) 148 | predicted = pd.DataFrame({'Actual class': classes, 149 | 'Predicted class': cross_val_predict(clf, X=features, 150 | y=classes, groups=groups, cv=cv)}) 151 | 152 | return accuracy, predicted 153 | 154 | 155 | def predict_classes_st_kfold(features, classes, nsplits=7, random_state=0, C=1): 156 | """ 157 | Computes accuracy and predicts classes by cross-validation 158 | Parameters 159 | ---------- 160 | features : array-like 161 | The data to fit. 162 | classes : array-like 163 | The target classes to try to predict. 164 | nsplits : int, optional 165 | Number of folds. 166 | Default is 7. 167 | random_state : int, RandomState instance or None, optional, default=None 168 | If int, random_state is the seed used by the random number generator; 169 | If RandomState instance, random_state is the random number generator; 170 | If None, the random number generator is the RandomState instance used 171 | by `np.random`. Used when ``shuffle`` == True. 172 | C : float, optional 173 | Penalty parameter C of the error term. 174 | Default is 1. 175 | 176 | Returns 177 | ------- 178 | accuracy: pandas DataFrame 179 | Array of scores of the estimator for each run of the cross validation. 180 | predicted: pandas DataFrame 181 | Predicted classes 182 | """ 183 | 184 | clf = svm.SVC(kernel='linear', C=C, cache_size=1000, decision_function_shape='ovo', random_state=0) 185 | 186 | cv = StratifiedKFold(n_splits=nsplits, random_state=random_state) 187 | accuracy = pd.DataFrame({'Accuracy': cross_val_score(clf, X=features, y=classes,cv=cv)}) 188 | predicted = pd.DataFrame({'Actual class': classes, 189 | 'Predicted class': cross_val_predict(clf, X=features, y=classes, cv=cv)}) 190 | 191 | return accuracy, predicted 192 | 193 | 194 | def predict_shuffle_split(features, classes, C=1, nsplits=100, test_size=2./5, random_state=0): 195 | """ 196 | Computes accuracy and predicts classes by cross-validation 197 | Parameters 198 | ---------- 199 | features : array-like 200 | The data to fit. 201 | classes : array-like 202 | The target classes to try to predict. 203 | C : float, optional 204 | Penalty parameter C of the error term. 205 | Default is 1. 206 | nsplits : int, optional 207 | Number of folds. 208 | Default is 100. 209 | test_size : float, int, None, optional 210 | If float, should be between 0.0 and 1.0 and represent the proportion 211 | of the dataset to include in the test split. If int, represents the 212 | absolute number of test samples. 213 | Default is 2./5 214 | random_state : int, RandomState instance or None, optional, default=None 215 | If int, random_state is the seed used by the random number generator; 216 | If RandomState instance, random_state is the random number generator; 217 | If None, the random number generator is the RandomState instance used 218 | by `np.random`. Used when ``shuffle`` == True. 219 | 220 | Returns 221 | ------- 222 | accuracy: pandas DataFrame 223 | Array of scores of the estimator for each run of the cross validation. 
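
    Examples
    --------
    An illustrative sketch; `features` and `classes` are assumed to come from
    `extract_features`:

    >>> accuracy = predict_shuffle_split(features, classes, C=1, nsplits=50, test_size=0.4)
    >>> accuracy['Accuracy'].mean()  # mean cross-validated accuracy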
224 | """ 225 | 226 | clf = svm.SVC(kernel='linear', C=C, cache_size=1000, decision_function_shape='ovo', random_state=0) 227 | cv = StratifiedShuffleSplit(n_splits=nsplits, test_size=test_size, random_state=random_state) 228 | accuracy = pd.DataFrame({'Accuracy': cross_val_score(clf, features, classes, cv=cv)}) 229 | return accuracy 230 | 231 | 232 | def predict_group_shuffle_split(features, classes, C=1, nsplits=100, test_size=1, random_state=0, groups=None): 233 | """ 234 | Computes accuracy and predicts classes by cross-validation 235 | Parameters 236 | ---------- 237 | features : array-like 238 | The data to fit. 239 | classes : array-like 240 | The target classes to try to predict. 241 | C : float, optional 242 | Penalty parameter C of the error term. 243 | Default is 1. 244 | nsplits : int, optional 245 | Number of folds. 246 | Default is 100. 247 | test_size : float, int, None, optional 248 | If float, should be between 0.0 and 1.0 and represent the proportion 249 | of the dataset to include in the test split. If int, represents the 250 | absolute number of test samples. 251 | Default is 1 252 | random_state : int, RandomState instance or None, optional, default=None 253 | If int, random_state is the seed used by the random number generator; 254 | If RandomState instance, random_state is the random number generator; 255 | If None, the random number generator is the RandomState instance used 256 | by `np.random`. Used when ``shuffle`` == True. 257 | groups : array-like, optional 258 | Group labels for the samples used while splitting the dataset into train/test set. 259 | Default is None. 260 | 261 | Returns 262 | ------- 263 | accuracy: pandas DataFrame 264 | Array of scores of the estimator for each run of the cross validation. 265 | """ 266 | 267 | clf = svm.SVC(kernel='linear', C=C, cache_size=1000, decision_function_shape='ovo', random_state=0) 268 | cv = GroupShuffleSplitStratified(n_splits=nsplits, test_size=test_size, random_state=random_state) 269 | accuracy = pd.DataFrame({'Accuracy': cross_val_score(clf, X=features, y=classes, groups=groups, cv=cv)}) 270 | return accuracy 271 | 272 | 273 | 274 | 275 | 276 | 277 | 278 | 279 | 280 | 281 | 282 | 283 | 284 | 285 | 286 | -------------------------------------------------------------------------------- /SPHARM/lib/confusion_matrix_pretty_print.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | plot a pretty confusion matrix with seaborn 4 | Created on Mon Jun 25 14:17:37 2018 5 | @author: Wagner Cipriano - wagnerbhbr - gmail - CEFETMG / MMC 6 | REFerences: 7 | https://www.mathworks.com/help/nnet/ref/plotconfusion.html 8 | https://stackoverflow.com/questions/28200786/how-to-plot-scikit-learn-classification-report 9 | https://stackoverflow.com/questions/5821125/how-to-plot-confusion-matrix-with-string-axis-rather-than-integer-in-python 10 | https://www.programcreek.com/python/example/96197/seaborn.heatmap 11 | https://stackoverflow.com/questions/19233771/sklearn-plot-confusion-matrix-with-labels/31720054 12 | http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py 13 | """ 14 | 15 | #imports 16 | from pandas import DataFrame 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | import matplotlib.font_manager as fm 20 | from matplotlib.collections import QuadMesh 21 | import seaborn as sn 22 | 23 | 24 | def get_new_fig(fn, figsize=[9,9]): 25 | """ Init 
graphics """ 26 | fig1 = plt.figure(fn, figsize) 27 | ax1 = fig1.gca() #Get Current Axis 28 | ax1.cla() # clear existing plot 29 | return fig1, ax1 30 | # 31 | 32 | def configcell_text_and_colors(array_df, lin, col, oText, facecolors, posi, fz, fmt, show_null_values=0): 33 | """ 34 | config cell text and colors 35 | and return text elements to add and to dell 36 | @TODO: use fmt 37 | """ 38 | text_add = []; text_del = []; 39 | cell_val = array_df[lin][col] 40 | tot_all = array_df[-1][-1] 41 | per = (float(cell_val) / tot_all) * 100 42 | curr_column = array_df[:,col] 43 | ccl = len(curr_column) 44 | 45 | #last line and/or last column 46 | if(col == (ccl - 1)) or (lin == (ccl - 1)): 47 | #tots and percents 48 | if(cell_val != 0): 49 | if(col == ccl - 1) and (lin == ccl - 1): 50 | tot_rig = 0 51 | for i in range(array_df.shape[0] - 1): 52 | tot_rig += array_df[i][i] 53 | per_ok = (float(tot_rig) / cell_val) * 100 54 | elif(col == ccl - 1): 55 | tot_rig = array_df[lin][lin] 56 | per_ok = (float(tot_rig) / cell_val) * 100 57 | elif(lin == ccl - 1): 58 | tot_rig = array_df[col][col] 59 | per_ok = (float(tot_rig) / cell_val) * 100 60 | per_err = 100 - per_ok 61 | else: 62 | per_ok = per_err = 0 63 | 64 | per_ok_s = ['%.2f%%'%(per_ok), '100%'] [per_ok == 100] 65 | 66 | #text to DEL 67 | text_del.append(oText) 68 | 69 | #text to ADD 70 | font_prop = fm.FontProperties(weight='bold', size=fz) 71 | text_kwargs = dict(color='w', ha="center", va="center", gid='sum', fontproperties=font_prop) 72 | lis_txt = ['%d'%(cell_val), per_ok_s, '%.2f%%'%(per_err)] 73 | lis_kwa = [text_kwargs] 74 | dic = text_kwargs.copy(); dic['color'] = 'g'; lis_kwa.append(dic); 75 | dic = text_kwargs.copy(); dic['color'] = 'r'; lis_kwa.append(dic); 76 | lis_pos = [(oText._x, oText._y-0.3), (oText._x, oText._y), (oText._x, oText._y+0.3)] 77 | for i in range(len(lis_txt)): 78 | newText = dict(x=lis_pos[i][0], y=lis_pos[i][1], text=lis_txt[i], kw=lis_kwa[i]) 79 | #print 'lin: %s, col: %s, newText: %s' %(lin, col, newText) 80 | text_add.append(newText) 81 | #print '\n' 82 | 83 | #set background color for sum cells (last line and last column) 84 | carr = [0.27, 0.30, 0.27, 1.0] 85 | if(col == ccl - 1) and (lin == ccl - 1): 86 | carr = [0.17, 0.20, 0.17, 1.0] 87 | facecolors[posi] = carr 88 | 89 | else: 90 | if(per > 0): 91 | txt = '%s\n%.2f%%' %(cell_val, per) 92 | else: 93 | if(show_null_values == 0): 94 | txt = '' 95 | elif(show_null_values == 1): 96 | txt = '0' 97 | else: 98 | txt = '0\n0.0%' 99 | oText.set_text(txt) 100 | 101 | #main diagonal 102 | if(col == lin): 103 | #set color of the textin the diagonal to white 104 | oText.set_color('w') 105 | # set background color in the diagonal to blue 106 | facecolors[posi] = [0.35, 0.8, 0.55, 1.0] 107 | else: 108 | oText.set_color('r') 109 | 110 | return text_add, text_del 111 | # 112 | 113 | def insert_totals(df_cm): 114 | """ insert total column and line (the last ones) """ 115 | sum_col = [] 116 | for c in df_cm.columns: 117 | sum_col.append( df_cm[c].sum() ) 118 | sum_lin = [] 119 | for item_line in df_cm.iterrows(): 120 | sum_lin.append( item_line[1].sum() ) 121 | df_cm['sum_lin'] = sum_lin 122 | sum_col.append(np.sum(sum_lin)) 123 | df_cm.loc['sum_col'] = sum_col 124 | #print ('\ndf_cm:\n', df_cm, '\n\b\n') 125 | # 126 | 127 | def pretty_plot_confusion_matrix(df_cm, annot=True, cmap="Oranges", fmt='.2f', fz=11, 128 | lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='y', outputfile=None): 129 | """ 130 | print conf matrix with default layout (like matlab) 131 | 
131 |       params:
132 |         df_cm          dataframe (pandas) without totals
133 |         annot          print text in each cell
134 |         cmap           colormap name: Oranges, Oranges_r, YlGnBu, Blues, RdBu, ... (see the matplotlib colormap reference)
135 |         fz             fontsize
136 |         lw             linewidth
137 |         pred_val_axis  where to show the prediction values (x or y axis)
138 |                         'col' or 'x': show predicted values in columns (x axis) instead of rows
139 |                         'lin' or 'y': show predicted values in rows (y axis)
140 |     """
141 |     if(pred_val_axis in ('col', 'x')):
142 |         xlbl = 'Predicted'
143 |         ylbl = 'Actual'
144 |     else:
145 |         xlbl = 'Actual'
146 |         ylbl = 'Predicted'
147 |         df_cm = df_cm.T
148 | 
149 |     # create "Total" column
150 |     insert_totals(df_cm)
151 | 
152 |     #always plot in the same window
153 |     fig, ax1 = get_new_fig('Conf matrix default', figsize)
154 | 
155 |     #draw the heatmap with seaborn
156 |     ax = sn.heatmap(df_cm, annot=annot, annot_kws={"size": fz}, linewidths=lw, ax=ax1,
157 |                     cbar=cbar, cmap=cmap, linecolor='w', fmt=fmt)
158 | 
159 |     #set ticklabels rotation
160 |     ax.set_xticklabels(ax.get_xticklabels(), rotation = 45, fontsize = 10)
161 |     ax.set_yticklabels(ax.get_yticklabels(), rotation = 25, fontsize = 10)
162 | 
163 |     # Turn off all the ticks (the old tick1On/tick2On attributes were removed from matplotlib)
164 |     for t in ax.xaxis.get_major_ticks():
165 |         t.tick1line.set_visible(False)
166 |         t.tick2line.set_visible(False)
167 |     for t in ax.yaxis.get_major_ticks():
168 |         t.tick1line.set_visible(False)
169 |         t.tick2line.set_visible(False)
170 | 
171 |     #face colors list
172 |     quadmesh = ax.findobj(QuadMesh)[0]
173 |     facecolors = quadmesh.get_facecolors()
174 | 
175 |     #iter in text elements
176 |     array_df = np.array( df_cm.to_records(index=False).tolist() )
177 |     text_add = []; text_del = [];
178 |     posi = -1 #from left to right, bottom to top.
179 |     for t in ax.collections[0].axes.texts: #ax.texts:
180 |         pos = np.array( t.get_position()) - [0.5,0.5]
181 |         lin = int(pos[1]); col = int(pos[0]);
182 |         posi += 1
183 |         #print ('>>> pos: %s, posi: %s, val: %s, txt: %s' %(pos, posi, array_df[lin][col], t.get_text()))
184 | 
185 |         #set text
186 |         txt_res = configcell_text_and_colors(array_df, lin, col, t, facecolors, posi, fz, fmt, show_null_values)
187 | 
188 |         text_add.extend(txt_res[0])
189 |         text_del.extend(txt_res[1])
190 | 
191 |     #remove the old ones
192 |     for item in text_del:
193 |         item.remove()
194 |     #append the new ones
195 |     for item in text_add:
196 |         ax.text(item['x'], item['y'], item['text'], **item['kw'])
197 | 
198 |     #titles and legends
199 |     ax.set_title('Confusion matrix')
200 |     ax.set_xlabel(xlbl)
201 |     ax.set_ylabel(ylbl)
202 |     plt.tight_layout()  #set layout slim
203 |     if outputfile is None:
204 |         plt.show()
205 |     else:
206 |         plt.savefig(outputfile, dpi=300)
207 | #
208 | 
209 | def plot_confusion_matrix_from_data(y_test, predictions, columns=None, annot=True, cmap="Oranges",
210 |       fmt='.2f', fz=11, lw=0.5, cbar=False, figsize=[8,8], show_null_values=0, pred_val_axis='lin', outputfile=None):
211 |     """
212 |         plot confusion matrix function with y_test (actual values) and predictions (predic),
213 |         without a precomputed confusion matrix
214 |     """
215 |     from sklearn.metrics import confusion_matrix
216 |     from pandas import DataFrame
217 | 
218 |     #data
219 |     if columns is None:
220 |         #labels axis integer:
221 |         ##columns = range(1, len(np.unique(y_test))+1)
222 |         #labels axis string:
223 |         from string import ascii_uppercase
224 |         columns = ['class %s' %(i) for i in list(ascii_uppercase)[0:len(np.unique(y_test))]]
225 | 
226 |     confm = confusion_matrix(y_test, predictions)
227 |     # NB: fz, figsize and show_null_values used to be overridden here with
228 |     # fixed values, silently ignoring the caller's arguments; the overrides
229 |     # are removed so that the values passed to this function take effect.
230 | 
231 |     df_cm = DataFrame(confm, index=columns, columns=columns)
232 |     pretty_plot_confusion_matrix(df_cm, fz=fz, cmap=cmap, figsize=figsize, show_null_values=show_null_values,
233 |                                  pred_val_axis=pred_val_axis, outputfile=outputfile)
234 | #
235 | 
236 | 
237 | 
238 | #
239 | #TEST functions
240 | #
241 | def _test_cm():
242 |     #test function with confusion matrix done
243 |     array = np.array( [[13,  0,  1,  0,  2,  0],
244 |                        [ 0, 50,  2,  0, 10,  0],
245 |                        [ 0, 13, 16,  0,  0,  3],
246 |                        [ 0,  0,  0, 13,  1,  0],
247 |                        [ 0, 40,  0,  1, 15,  0],
248 |                        [ 0,  0,  0,  0,  0, 20]])
249 |     #get pandas dataframe
250 |     df_cm = DataFrame(array, index=range(1,7), columns=range(1,7))
251 |     #colormap: choose the one you like
252 |     cmap = 'PuRd'
253 |     pretty_plot_confusion_matrix(df_cm, cmap=cmap)
254 | #
255 | 
256 | def _test_data_class():
257 |     """ test function with y_test (actual values) and predictions (predic) """
258 |     #data
259 |     y_test = np.array([1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5])
260 |     predic = np.array([1,2,4,3,5, 1,2,4,3,5, 1,2,3,4,4, 1,4,3,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,3,3,5, 1,2,3,3,5, 1,2,3,4,4, 1,2,3,4,1, 1,2,3,4,1, 1,2,3,4,1, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,4,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5])
261 |     """
262 |     Examples to validate output (confusion matrix plot)
263 |         actual: 5 and prediction 1   >> 3
264 |         actual: 2 and prediction 4   >> 1
265 |         actual: 3 and prediction 4   >> 10
266 |     """
267 |     columns = None  # was [], which bypassed the default label generation and broke the DataFrame creation
268 |     annot = True;
269 |     cmap = 'Oranges';
270 |     fmt = '.2f'
271 |     lw = 0.5
272 |     cbar = False
273 |     show_null_values = 2
274 |     pred_val_axis = 'y'
275 |     #size:
276 |     fz = 12;
277 |     figsize = [9,9];
278 |     if(len(y_test) > 10):
279 |         fz=9; figsize=[14,14];
280 |     plot_confusion_matrix_from_data(y_test, predic, columns,
281 |       annot, cmap, fmt, fz, lw, cbar, figsize, show_null_values, pred_val_axis)
282 | #
283 | 
284 | 
285 | #
286 | #MAIN function
287 | #
288 | if(__name__ == '__main__'):
289 |     print('__main__')
290 |     print('_test_cm: test function with confusion matrix done\nand pause')
291 |     _test_cm()
292 |     plt.pause(5)
293 |     print('_test_data_class: test function with y_test (actual values) and predictions (predic)')
294 |     _test_data_class()
295 | 
296 | 
--------------------------------------------------------------------------------
/SPHARM/lib/parallel.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | 
3 | import os
4 | import numpy as np
5 | 
6 | from helper_lib import filelib
7 | import helper_lib.parallel as prl
8 | 
9 | 
10 | def run_parallel(**kwargs):
11 |     """
12 |     Run a given function in a parallel manner.
13 | 
14 |     Parameters
15 |     ----------
16 |     kwargs : key, value pairings
17 |         Arbitrary keyword arguments
18 | 
19 |     Keyword arguments
20 |     -----------------
21 |     *items* : list
22 |         List of items. For each item, the `process` will be called.
23 |         The value of the `item` parameter of `process` will be set to the value of the current item from the list.
24 |         Remaining keyword arguments will be passed to the `process`
25 |     *max_threads* : int, optional
26 |         The maximal number of processes to run in parallel
27 |         Default is 8
28 |     *process* : callable
29 |         The function that will be applied to each item of `kwargs.items`.
30 |         The function should accept the argument `item`, which corresponds to one item from `kwargs.items`.
31 | An `item` is usually a name of the file that has to be processed or 32 | a list of files that have to be combined / convolved /analyzed together. 33 | The function should not return any output, but the output should be saved in a specified directory. 34 | *inputfolder* : str 35 | Input directory with files to process. 36 | *outputfolder* : str 37 | Output directory to save the results. 38 | """ 39 | 40 | files = filelib.list_subfolders(kwargs.get('inputfolder'), extensions=kwargs.get('extensions')) 41 | channelcodes = kwargs.get('channels', None) 42 | exclude = kwargs.get('exclude', None) 43 | if channelcodes is not None: 44 | files = list_of_files_to_combine(files, channelcodes) 45 | 46 | if exclude is not None: 47 | nfiles = [] 48 | for fn in files: 49 | cellfile = True 50 | for excl in exclude: 51 | if fn[-len(excl):] == excl: 52 | cellfile = False 53 | if cellfile: 54 | nfiles.append(fn) 55 | files = nfiles 56 | 57 | if kwargs.get('debug'): 58 | kwargs['item'] = files[0] 59 | kwargs.get('process')(**kwargs) 60 | else: 61 | kwargs['items'] = files 62 | prl.run_parallel(**kwargs) 63 | 64 | if kwargs.get('combine', True) and os.path.exists(kwargs.get('outputfolder', 'no_folder')): 65 | filelib.combine_statistics(kwargs.get('outputfolder')) 66 | 67 | 68 | def list_of_files_to_combine(files, channelcodes): 69 | """ 70 | Extract the channel information from file names and group file names of corresponding channels. 71 | 72 | Parameters 73 | ---------- 74 | files : list 75 | List of file names 76 | channelcodes : list of str 77 | List of channel codes as they appear in the file names. 78 | """ 79 | samples = [] 80 | channels = [] 81 | nfiles = [] 82 | 83 | for fn in files: 84 | for i, cc in enumerate(channelcodes): 85 | if len(cc) > 0: 86 | parts = fn.split(cc) 87 | if len(parts) > 1: 88 | nfiles.append(fn) 89 | samples.append(parts[0] + parts[-1].split('Time')[-1]) 90 | channels.append(i) 91 | break 92 | 93 | samples = np.array(samples) 94 | channels = np.array(channels) 95 | nfiles = np.array(nfiles) 96 | 97 | usamples = np.unique(samples) 98 | 99 | files = [] 100 | for sample in usamples: 101 | curfiles = [] 102 | for i in range(len(channelcodes)): 103 | fn = nfiles[(samples == sample) & (channels == i)] 104 | if len(fn) > 0: 105 | curfiles.append(fn[0]) 106 | else: 107 | curfiles.append('') 108 | 109 | files.append(curfiles) 110 | 111 | return files 112 | -------------------------------------------------------------------------------- /SPHARM/lib/segmentation.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import os 3 | import re 4 | import pandas as pd 5 | 6 | from helper_lib import filelib 7 | from SPHARM.classes.image_stack import ImageStack 8 | 9 | 10 | def make_metadata_files(**kwargs): 11 | """ 12 | Generate metadata file with the given voxel size. 13 | 14 | Keyword arguments 15 | ----------------- 16 | *inputfolder* : str 17 | Directory to save the metadata file. 18 | *item* : str 19 | File name for the metadata file. 20 | *voxel_size* : list of scalars 21 | Voxel sizes for z and xy dimensions. 
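
    Example
    -------
    A minimal sketch of a call; the folder and file names are hypothetical
    placeholders, not files shipped with the package::

        make_metadata_files(inputfolder='data/stacks/',
                            item='cell001.tif',
                            voxel_size=[0.8, 0.2])

    This writes 'data/stacks/cell001.txt' with the rows 'voxel_size_z' (0.8)
    and 'voxel_size_xy' (0.2).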
22 | """ 23 | inputfolder = kwargs.get('inputfolder') 24 | filename = kwargs.get('item') 25 | voxel_size = kwargs.get('voxel_size') 26 | metadata = pd.Series({'voxel_size_z': voxel_size[0], 27 | 'voxel_size_xy': voxel_size[1]}) 28 | metadata.to_csv(inputfolder + filename[:-4] + '.txt', sep='\t') 29 | 30 | 31 | def extract_surfaces(**kwargs): 32 | """ 33 | Extract surface coordinates of each connected region in a given image. 34 | 35 | Keyword arguments 36 | ----------------- 37 | *inputfolder* : str 38 | Directory with the input image. 39 | *item* : str 40 | File name of the input image. 41 | *outputfolder* : str 42 | Directory to save the segmented image. 43 | *channelcodes* : list of str 44 | List of channel codes as they appear in the file names. 45 | *reconstruct* : bool, optional 46 | If True, surfaces will be reconstructed by the marching cube algorithm, 47 | and coordiantes of the vertices will be extracted. 48 | If False, coordinates of the voxels connected to the background will be extracted. 49 | Default is True. 50 | """ 51 | inputfolder = kwargs.get('inputfolder') 52 | outputfolder = kwargs.get('outputfolder', inputfolder + '../surfaces/') 53 | filename = kwargs.get('item') 54 | reconstruct = kwargs.get('reconstruct', True) 55 | channelcodes = kwargs.get('channelcodes') 56 | 57 | channel = None 58 | if channelcodes is not None: 59 | for i, cc in enumerate(channelcodes): 60 | if len(filename.split(cc)) > 1: 61 | channel = i 62 | if channelcodes is None or channel is not None: 63 | stack = ImageStack(inputfolder + filename) 64 | metadata = pd.read_csv(inputfolder + filename[:-4] + '.txt', 65 | sep='\t', index_col=0, header=None).transpose().iloc[0].T.squeeze() 66 | min_coord = None 67 | if 'min_x' in metadata.index and 'min_y' in metadata.index and 'min_z' in metadata.index: 68 | min_coord = [metadata['min_z'], metadata['min_y'], metadata['min_x']] 69 | stack.filename = filename 70 | stack.extract_surfaces(outputfolder, 71 | voxel_size=[metadata['voxel_size_z'], 72 | metadata['voxel_size_xy'], 73 | metadata['voxel_size_xy']], 74 | reconstruct=reconstruct, min_coord=min_coord) 75 | 76 | 77 | def combine_surfaces(inputfolder, outputfolder): 78 | """ 79 | Combine surface files located in the same subfolder of a given input folder. 80 | 81 | Parameters 82 | ---------- 83 | inputfolder : str 84 | Input directory with files to combine. 85 | outputfolder : str 86 | Output directory to save the combined files. 87 | """ 88 | filelib.make_folders([outputfolder]) 89 | folders = os.listdir(inputfolder) 90 | p = re.compile('\d*\.*\d+') 91 | for folder in folders: 92 | files = filelib.list_subfolders(inputfolder + folder + '/', extensions=['csv']) 93 | stat = pd.DataFrame() 94 | for fn in files: 95 | curstat = pd.read_csv(inputfolder + folder + '/' + fn, sep='\t') 96 | curstat['Time'] = p.findall(fn.split('/')[-1])[-2] 97 | stat = pd.concat([stat, curstat], ignore_index=True) 98 | stat.to_csv(outputfolder + folder + '.csv', sep='\t') 99 | 100 | 101 | def split_to_surfaces(inputfile, outputfolder, combine_tracks=False, 102 | adjust_frame_rate=False, metadata_file=None): 103 | """ 104 | Split one surface file into separate files for surfaces of individual cells. 105 | 106 | Parameters 107 | ---------- 108 | inputfile : str 109 | Input surface file. 110 | outputfolder : str 111 | Directory to save the output surface files. 112 | combine_tracks : bool, optional 113 | If True, connected time points will be combined into one file. 114 | Default is False. 
115 | """ 116 | filelib.make_folders([outputfolder]) 117 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 118 | framerate = None 119 | if adjust_frame_rate: 120 | parts = inputfile.split('/')[-1].split('_') 121 | stem = parts[0] + '_' + parts[1] 122 | metadata = pd.read_csv(metadata_file, sep='\t') 123 | ind = metadata[metadata['Sample'] == stem].index[0] 124 | framerate = metadata.loc[ind, 'time'] 125 | 126 | for track_id in stat['TrackID'].unique(): 127 | combined_stat = pd.DataFrame() 128 | ntime = 1 129 | for t in stat['Time'].unique(): 130 | curstat = stat[(stat['TrackID'] == track_id) & (stat['Time'] == t)].reset_index() 131 | if adjust_frame_rate and framerate == 20: 132 | if (t-1) % 3: 133 | curstat = pd.DataFrame() 134 | else: 135 | curstat['Time'] = ntime 136 | ntime += 1 137 | if combine_tracks: 138 | combined_stat = pd.concat([combined_stat, curstat], ignore_index=True) 139 | else: 140 | if len(curstat) > 0: 141 | curstat.to_csv(outputfolder + 'Track_' + str(int(track_id)) + '_Time_%03d.csv' % t, sep='\t') 142 | if combine_tracks and len(combined_stat) > 0: 143 | combined_stat.to_csv(outputfolder + 'Track_' + str(int(track_id)) + '.csv', sep='\t') 144 | 145 | 146 | def split_to_surfaces_batch(inputfolder, outputfolder, combine_tracks=False, 147 | adjust_frame_rate=False, metadata_file=None): 148 | """ 149 | Split one surface files located in a given folder into separate files for surfaces of individual cells. 150 | 151 | Parameters 152 | ---------- 153 | inputfolder : str 154 | Input directory 155 | outputfolder : str 156 | Output directory 157 | combine_tracks : bool, optional 158 | If True, connected time points will be combined into one file. 159 | Default is False. 160 | """ 161 | files = filelib.list_subfolders(inputfolder, extensions=['csv']) 162 | for fn in files: 163 | print(fn) 164 | ext = fn.split('.')[-1] 165 | if ext in ['csv']: 166 | split_to_surfaces(inputfolder + fn, outputfolder + fn[:-4] + '/', combine_tracks=combine_tracks, 167 | adjust_frame_rate=adjust_frame_rate, metadata_file=metadata_file) 168 | 169 | 170 | 171 | 172 | -------------------------------------------------------------------------------- /SPHARM/lib/spharm.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import os 3 | import pandas as pd 4 | import numpy as np 5 | 6 | from helper_lib import filelib 7 | from SPHARM.classes.surface import Surface 8 | from SPHARM.classes.moving_surface import MovingSurface 9 | from SPHARM.lib import transformation as tr 10 | 11 | 12 | def compute_spharm(**kwargs): 13 | """ 14 | Compute spherical harmonics spectra for a given surface. 15 | 16 | Keyword arguments 17 | ----------------- 18 | *inputfolder* : str 19 | Directory with the input surface. 20 | *item* : str 21 | File name of the input surface. 22 | *outputfolder* : str 23 | Directory to save the computed spectra. 24 | *grid_size* : int 25 | Dimension of the square grid to interpolate the surface points. 26 | *normalize* : bool 27 | If True, the values of the spectrum will be normalized according to the `normalization_method`. 28 | *normalization_method* : str, optional 29 | If 'mean-radius', the grid values will be divided by the mean grid value prior to the SPHARM transform. 30 | If 'zero-component', all spectral components will be divided by the value of the first component (m=0, n=0). 31 | Default is 'zero-component'. 
32 | """ 33 | inputfolder = kwargs.get('inputfolder') 34 | outputfolder = kwargs.get('outputfolder', inputfolder + '../spharm/') 35 | filename = kwargs.get('item') 36 | combined_tracks = kwargs.get('combined_tracks', False) 37 | rotate = kwargs.get('rotate', False) 38 | 39 | filelib.make_folders([os.path.dirname(outputfolder[:-1] + '_kwargs.csv')]) 40 | pd.Series(kwargs).to_csv(outputfolder[:-1] + '_kwargs.csv', sep='\t', header=False) 41 | 42 | if not (os.path.exists(outputfolder+filename) or 43 | os.path.exists(outputfolder + filename[:-4] + '_Time_%03d.csv' % 1)): 44 | 45 | if combined_tracks: 46 | stat = pd.read_csv(inputfolder + filename, sep='\t', index_col=0) 47 | stat.at[:, 'Time'] = np.array(stat['Time']).astype(float).astype(int) 48 | stat = stat.sort_values('Time').reset_index() 49 | t_surface = MovingSurface() 50 | if not filename.endswith('.csv'): 51 | filename += '.csv' 52 | times = stat['Time'].unique() 53 | if len(times) > 2: 54 | for t in times: 55 | curstat = stat[stat['Time'] == t] 56 | if len(curstat) > 4: 57 | surface = Surface(data=curstat) 58 | surface.metadata['Name'] = filename[:-4] + '_Time_%03d.csv' % t 59 | if 'TrackID' in curstat.columns: 60 | surface.metadata['TrackID'] = curstat.iloc[0]['TrackID'] 61 | surface.centrate() 62 | if len(t_surface.surfaces) > 0: 63 | x, y, z = surface.center - t_surface.surfaces[-1].center # direction of the previous interval 64 | surface.migration_angles = tr.cart_to_spherical(x, y, z)[1:] 65 | t_surface.add_surface(surface) 66 | else: 67 | print(filename, times, t, len(curstat)) 68 | t_surface.surfaces[0].migration_angles = t_surface.surfaces[1].migration_angles 69 | 70 | for surface in t_surface.surfaces: 71 | if rotate: 72 | surface.rotate(surface.migration_angles[0], surface.migration_angles[1]) 73 | surface.to_spherical() 74 | surface.compute_spharm(grid_size=kwargs.get('grid_size'), normalize=kwargs.get('normalize'), 75 | normalization_method=kwargs.get('normalization_method', 'zero-component')) 76 | surface.spharm.save_to_csv(outputfolder + surface.metadata['Name']) 77 | 78 | else: 79 | surface = Surface(filename=inputfolder + filename) 80 | surface.centrate() 81 | surface.to_spherical() 82 | surface.compute_spharm(grid_size=kwargs.get('grid_size'), normalize=kwargs.get('normalize'), 83 | normalization_method=kwargs.get('normalization_method', 'zero-component')) 84 | if not filename.endswith('.csv'): 85 | filename += '.csv' 86 | surface.spharm.save_to_csv(outputfolder + filename) 87 | 88 | 89 | def compute_frequency_spectra(**kwargs): 90 | """ 91 | Compute frequency spectra for a given surface. 92 | 93 | Keyword arguments 94 | ----------------- 95 | *inputfolder* : str 96 | Directory with the input surface. 97 | *item* : str 98 | File name of the input surface. 99 | *outputfolder* : str 100 | Directory to save the computed spectra. 101 | *grid_size* : int 102 | Dimension of the square grid to interpolate the surface points. 103 | *normalize* : bool 104 | If True, the values of the spectrum will be normalized according to the `normalization_method`. 105 | *normalization_method* : str, optional 106 | If 'mean-radius', the grid values will be divided by the mean grid value prior to the SPHARM transform. 107 | If 'zero-component', all spectral components will be divided by the value of the first component (m=0, n=0). 108 | Default is 'zero-component'. 
109 | """ 110 | inputfolder = kwargs.get('inputfolder') 111 | outputfolder = kwargs.get('outputfolder', inputfolder + '../spharm/') 112 | filename = kwargs.get('item') 113 | 114 | if not os.path.exists(outputfolder+filename): 115 | 116 | surface = Surface(filename=inputfolder + filename) 117 | surface.centrate() 118 | surface.to_spherical() 119 | surface.compute_spharm(grid_size=kwargs.get('grid_size'), normalize=kwargs.get('normalize'), 120 | normalization_method=kwargs.get('normalization_method')) 121 | surface.spharm.save_to_csv(outputfolder + filename) 122 | 123 | 124 | def convert_surfaces(**kwargs): 125 | """ 126 | Convert surface file from txt to a csv format. 127 | 128 | Keyword arguments 129 | ----------------- 130 | *inputfolder* : str 131 | Directory with the input surface. 132 | *item* : str 133 | File name of the input surface. 134 | *outputfolder* : str 135 | Directory to save the converted surface. 136 | 137 | """ 138 | inputfolder = kwargs.get('inputfolder') 139 | outputfolder = kwargs.get('outputfolder', inputfolder + '../surfaces/') 140 | filename = kwargs.get('item') 141 | 142 | surface = Surface(filename=inputfolder+filename, **kwargs) 143 | surface.save(outputfolder+filename+'.csv') 144 | 145 | 146 | def convert_to_tiff(**kwargs): 147 | """ 148 | Save the surface as a 3D image stack. 149 | 150 | Keyword arguments 151 | ----------------- 152 | *inputfolder* : str 153 | Directory with the input surface. 154 | *item* : str 155 | File name of the input surface. 156 | *outputfolder* : str 157 | Directory to save the output 3D stack. 158 | """ 159 | inputfolder = kwargs.get('inputfolder') 160 | outputfolder = kwargs.get('outputfolder', inputfolder + '../stacks/') 161 | filename = kwargs.get('item') 162 | 163 | surface = Surface(filename=inputfolder+filename, **kwargs) 164 | surface.save_as_stack(outputfolder+filename+'.tif', voxel_size=kwargs.get('voxel_size')) 165 | 166 | 167 | 168 | -------------------------------------------------------------------------------- /SPHARM/lib/transformation.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import numpy as np 4 | 5 | 6 | def cart_to_spherical(x, y, z): 7 | """ 8 | Convert coordinates from Cartesian to spherical. 9 | 10 | Parameters 11 | ---------- 12 | x : float 13 | x coordinate. 14 | y : float 15 | y coordinate. 16 | z : float 17 | z coordiante. 18 | 19 | Returns 20 | ------- 21 | r : float 22 | Radius. 23 | theta : float 24 | Polar angle. 25 | phi : float 26 | Azimuthal angle. 27 | 28 | """ 29 | 30 | x = np.array(x) 31 | y = np.array(y) 32 | z = np.array(z) 33 | 34 | r = np.sqrt(x**2 + y**2 + z**2) 35 | r = np.where(r == 0, 0.0001, r) 36 | theta = np.arccos(z*1./r) 37 | phi = np.arctan2(y, x) + np.pi 38 | return r, theta, phi 39 | 40 | 41 | def spherical_to_cart(r, theta, phi): 42 | """ 43 | Convert coordinates from spherical to Cartesian. 44 | 45 | Parameters 46 | ---------- 47 | r : float 48 | Radius. 49 | theta : float 50 | Polar angle. 51 | phi : float 52 | Azimuthal angle. 53 | 54 | Returns 55 | ------- 56 | x : float 57 | x coordinate. 58 | y : float 59 | y coordinate. 60 | z : float 61 | z coordiante. 62 | """ 63 | x = r*np.sin(theta)*np.cos(phi - np.pi) 64 | y = r*np.sin(theta)*np.sin(phi - np.pi) 65 | z = r*np.cos(theta) 66 | 67 | return x, y, z 68 | 69 | 70 | def cart_to_polar(x, y): 71 | """ 72 | Convert coordiantes from Cartesian to polar. 73 | 74 | Parameters 75 | ---------- 76 | x : float 77 | x coordinate. 
78 |     y : float
79 |         y coordinate.
80 | 
81 |     Returns
82 |     -------
83 |     r : float
84 |         Radius.
85 |     phi : float
86 |         Azimuthal angle.
87 |     """
88 |     x = x - x.mean()
89 |     y = y - y.mean()
90 | 
91 |     r = np.sqrt(x**2 + y**2)
92 |     phi = np.arctan2(y, x) + np.pi
93 |     return r, phi
94 | 
95 | 
96 | def rotate_spherical(x, y, z, theta, phi):
97 |     """
98 |     Rotate the given coordinates by the given azimuthal and polar angles.
99 | 
100 |     Parameters
101 |     ----------
102 |     x : scalar or array
103 |         x coordinate(s).
104 |     y : scalar or array
105 |         y coordinate(s).
106 |     z : scalar or array
107 |         z coordinate(s).
108 |     theta : float
109 |         Polar angle for rotation.
110 |     phi : float
111 |         Azimuthal angle for rotation.
112 | 
113 |     Returns
114 |     -------
115 |     x : scalar or array
116 |         rotated x coordinate(s).
117 |     y : scalar or array
118 |         rotated y coordinate(s).
119 |     z : scalar or array
120 |         rotated z coordinate(s).
121 |     """
122 |     x = np.array(x)
123 |     y = np.array(y)
124 |     z = np.array(z)
125 | 
126 |     x1 = np.cos(phi) * x - np.sin(phi) * y
127 |     y1 = np.sin(phi) * x + np.cos(phi) * y
128 |     z1 = z
129 | 
130 |     x2 = np.cos(theta) * x1 + np.sin(theta) * z1
131 |     y2 = y1
132 |     z2 = - np.sin(theta) * x1 + np.cos(theta) * z1
133 | 
134 |     return x2, y2, z2
135 | 
136 | 
137 | def as_stack(x, y, z, minmax=None):
138 |     """
139 |     Generate a binary image with given foreground coordinates.
140 | 
141 |     Parameters
142 |     ----------
143 |     x : list or ndarray
144 |         x coordinates.
145 |     y : list or ndarray
146 |         y coordinates.
147 |     z : list or ndarray
148 |         z coordinates.
149 |     minmax : ndarray, optional
150 |         Boundaries to crop the image stack of the form [[z_min, z_max], [y_min, y_max], [x_min, x_max]].
151 |         If None, set to the minimal and maximal given coordinates.
152 |         Default is None.
153 | 
154 |     Returns
155 |     -------
156 |     img : ndarray, 3D binary image with the surface points as foreground.
157 |     """
158 |     if minmax is None:
159 |         minmax = np.int_([[z.min(), z.max()],
160 |                           [y.min(), y.max()],
161 |                           [x.min(), x.max()]])
162 |     else:
163 |         minmax = np.int_(np.round_(minmax))
164 | 
165 |     x = np.int_(x) - minmax[2, 0] + 1
166 |     y = np.int_(y) - minmax[1, 0] + 1
167 |     z = np.int_(z) - minmax[0, 0] + 1
168 | 
169 |     img = np.zeros([minmax[0, 1] - minmax[0, 0] + 3, minmax[1, 1] - minmax[1, 0] + 3, minmax[2, 1] - minmax[2, 0] + 3])
170 |     img[z, y, x] = 255
171 | 
172 |     return img
173 | 
174 | 
175 | 
--------------------------------------------------------------------------------
/SPHARM/lib/vrml_parse.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | 
3 | import os
4 | import numpy as np
5 | import pandas as pd
6 | 
7 | from helper_lib import filelib
8 | from SPHARM.classes.node import Node
9 | 
10 | 
11 | def extract_node_names(inputfile, outputfile=None):
12 |     """
13 |     Extract the names of the nodes from a vrml file.
14 | 
15 |     Parameters
16 |     ----------
17 |     inputfile : str
18 |         Path to the vrml or wrl file.
19 |     outputfile : str, optional
20 |         Path to the output file to save the node names.
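        If None, the node names are saved next to the input file, with the
        last four characters of the name replaced by '_nodes.txt'.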
21 | """ 22 | f = open(inputfile) 23 | st = f.readlines() 24 | 25 | st = ''.join(st).replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') 26 | pairs = {'{': '}', '[': ']', '(': ')'} 27 | root = Node(('root', '')) 28 | stack = [] 29 | k = 0 30 | for j, s in enumerate(st): 31 | if s in pairs: 32 | parts = st[k:j].split(' ') 33 | name = '' 34 | for i in range(1, len(parts)): 35 | if len(parts[-i]) > 0: 36 | name = parts[-i] 37 | break 38 | node = Node((name, pairs[s])) 39 | if len(stack) > 0: 40 | stack[-1].add_text(st[k + 1:j]) 41 | stack.append(node) 42 | k = j 43 | 44 | elif len(stack) > 0 and s == stack[-1].bracket: 45 | node = stack.pop() 46 | node.add_text(st[k + 1:j]) 47 | if len(stack) > 0: 48 | stack[-1].add_child(node) 49 | else: 50 | root.add_child(node) 51 | k = j 52 | if outputfile is None: 53 | outputfile = inputfile[:-4] + '_nodes.txt' 54 | filelib.make_folders([os.path.dirname(outputfile)]) 55 | f = open(outputfile, 'w') 56 | root.print_children(outputfile=f) 57 | 58 | f.close() 59 | 60 | 61 | def extract_node_names_batch(inputfolder, outputfolder): 62 | """ 63 | Extract the names of the nodes from vrml file in a parallel mode. 64 | 65 | Parameters 66 | ---------- 67 | inputfolder : str 68 | Path to a directory with vrml / wrl files to extract the node names. 69 | outputfolder : str 70 | Path to a directory to save the output. 71 | """ 72 | files = os.listdir(inputfolder) 73 | for fn in files: 74 | ext = fn.split('.')[-1] 75 | if ext in ['wrl', 'vrml']: 76 | extract_node_names(inputfolder + fn, outputfile=outputfolder + fn) 77 | 78 | 79 | def extract_key_nodes(inputfile, key): 80 | """ 81 | Extract a list of nodes with a given name from a given vrml file. 82 | 83 | Parameters 84 | ---------- 85 | inputfile : str 86 | Path to a vrml or wrl file 87 | key : str 88 | Target node name to extract. 89 | """ 90 | f = open(inputfile) 91 | st = f.readlines() 92 | f.close() 93 | 94 | st = ''.join(st).replace('\t', ' ').replace('\n', ' ').replace('\r', ' ') 95 | pairs = {'{': '}', '[': ']', '(': ')'} 96 | root = Node(('root', '')) 97 | stack = [] 98 | k = 0 99 | for j, s in enumerate(st): 100 | if s in pairs: 101 | parts = st[k:j].split(' ') 102 | name = '' 103 | for i in range(1, len(parts)): 104 | if len(parts[-i]) > 0: 105 | name = parts[-i] 106 | break 107 | node = Node((name, pairs[s])) 108 | if len(stack) > 0: 109 | stack[-1].add_text(st[k + 1:j]) 110 | stack.append(node) 111 | k = j 112 | 113 | elif len(stack) > 0 and s == stack[-1].bracket: 114 | node = stack.pop() 115 | node.add_text(st[k + 1:j]) 116 | if len(stack) > 0: 117 | stack[-1].add_child(node) 118 | else: 119 | root.add_child(node) 120 | k = j 121 | 122 | nodes = [] 123 | root.extract_key_nodes(key=key, nodes=nodes) 124 | return nodes 125 | 126 | 127 | def extract_coordinates(inputfile, outputfile): 128 | """ 129 | Extract cell coordinates from a given vrml file. 130 | 131 | Parameters 132 | ---------- 133 | inputfile : str 134 | Path to a vrml or wrl file with cell coordinates. 135 | outputfile : str 136 | Path to save the extracted coordinates in a table form. 
137 | """ 138 | 139 | 140 | stat = pd.DataFrame() 141 | curcoords = [] 142 | timepoint = 0 143 | node_id = 0 144 | 145 | nodes = extract_key_nodes(inputfile, key='children') 146 | for node in nodes: 147 | if node.children[0].name == 'Shape': 148 | timepoint += 1 149 | for subnode1 in node.children: 150 | for subnode in subnode1.children: 151 | if subnode.name == 'IndexedFaceSet': 152 | curstat, curcoords = subnode.extract_coordinates(curcoords) 153 | curstat['ID'] = node_id 154 | curstat['Time'] = timepoint 155 | stat = pd.concat([stat, curstat], ignore_index=True) 156 | node_id += 1 157 | 158 | filelib.make_folders([os.path.dirname(outputfile)]) 159 | stat.to_csv(outputfile, sep='\t') 160 | 161 | 162 | def extract_coordinates_batch(inputfolder, outputfolder): 163 | """ 164 | Extract cell coordinates from vrml files located in a given directory in a parallel mode. 165 | 166 | Parameters 167 | ---------- 168 | inputfolder : str 169 | Path to the input directory. 170 | outputfolder : str 171 | Path to the output directory. 172 | """ 173 | files = filelib.list_subfolders(inputfolder, extensions=['wrl', 'vrml']) 174 | for fn in files: 175 | ext = fn.split('.')[-1] 176 | if ext in ['wrl', 'vrml']: 177 | extract_coordinates(inputfolder + fn, outputfolder + fn[:-4] + '.csv') 178 | 179 | 180 | def combine_with_track_data(inputfile, trackfile, outputfile=None): 181 | """ 182 | Add track IDs to the extracted coordinates. 183 | 184 | Parameters 185 | ---------- 186 | inputfile : str 187 | Path to a file with extracted cell coordinates. 188 | trackfile : str 189 | Path to a file with track IDs. 190 | outputfile : str 191 | Path to the output file. 192 | """ 193 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 194 | if 'ID' not in stat.columns: 195 | stat['ID'] = stat['Cell_ID'] 196 | summary = stat.groupby(['ID', 'Time']).mean().reset_index() 197 | trackstat = pd.read_excel(trackfile, sheet_name='Position', header=1) 198 | if 'Time' not in trackstat.columns: 199 | trackstat['Time'] = trackstat['Death [s]'] 200 | 201 | for t in trackstat['Time'].unique(): 202 | curstat = summary[summary['Time'] == t].reset_index() 203 | curtrackstat = trackstat[trackstat['Time'] == t].reset_index() 204 | for i in range(len(curtrackstat)): 205 | dist = np.sqrt((curstat['X'] - np.array(curtrackstat.iloc[i]['Position X']))**2 + 206 | (curstat['Y'] - np.array(curtrackstat.iloc[i]['Position Y']))**2 + 207 | (curstat['Z'] - np.array(curtrackstat.iloc[i]['Position Z']))**2) 208 | track_id = curtrackstat.iloc[i]['TrackID'] 209 | if len(dist) > 0: 210 | ind = np.argmin(dist) 211 | stat.at[stat[(stat['ID'] == curstat.iloc[ind]['ID']) & (stat['Time'] == t)].index, 'TrackID'] = track_id 212 | else: 213 | print(trackfile, t, track_id) 214 | 215 | if outputfile is None: 216 | outputfile = inputfile[:-4] + '_tracked.csv' 217 | filelib.make_folders([os.path.dirname(outputfile)]) 218 | stat.to_csv(outputfile, sep='\t') 219 | 220 | 221 | def combine_with_track_data_batch(inputfolder, trackfolder, outputfolder): 222 | """ 223 | Add track IDs to the extracted coordinates in a parallel mode. 224 | 225 | Parameters 226 | ---------- 227 | inputfolder : str 228 | Path to a directory with coordinate files. 229 | trackfolder : str 230 | Path to a directory with track files. 231 | outputfolder : str 232 | Path to the output directory. 
233 | """ 234 | files = filelib.list_subfolders(inputfolder, extensions=['csv']) 235 | trackfiles = filelib.list_subfolders(trackfolder, extensions=['xls', 'xlsx']) 236 | for fn in files: 237 | parts = fn.split('/')[-1].split('_') 238 | stem = parts[0] + '_' + parts[1] 239 | for trf in trackfiles: 240 | if trf.split('/')[0] == fn.split('/')[0] and len(trf.split(stem)) > 1: 241 | combine_with_track_data(inputfile=inputfolder + fn, 242 | trackfile=trackfolder + trf, 243 | outputfile=outputfolder + fn) 244 | 245 | 246 | 247 | 248 | 249 | 250 | -------------------------------------------------------------------------------- /SPHARM/scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/scripts/__init__.py -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_1_convert.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import mkl 4 | mkl.set_num_threads(1) 5 | 6 | import sys 7 | import numpy as np 8 | 9 | from SPHARM.lib import spharm 10 | import SPHARM.lib.parallel as prl 11 | from SPHARM.lib import plotting as plt 12 | import SPHARM.lib.segmentation as sgm 13 | import SPHARM.lib.vrml_parse as vr 14 | 15 | 16 | ################################# 17 | args = sys.argv[1:] 18 | if len(args) > 0: 19 | path = args[0] 20 | if path != 'test': 21 | if not path.endswith('/'): 22 | path += '/' 23 | if len(path.split('T_cells')) > 1: 24 | metadata_file = path + 'voxel_size&frame_rate.csv' 25 | vr.extract_coordinates_batch(path + 'wrl/', path + 'output/coordinates/') 26 | vr.combine_with_track_data_batch(inputfolder=path + 'output/coordinates/', 27 | trackfolder=path + 'wrl/', 28 | outputfolder=path + 'output/coordinates_tracked/') 29 | path = path + 'output/' 30 | sgm.split_to_surfaces_batch(inputfolder=path + 'coordinates_tracked/', 31 | outputfolder=path + 'surfaces/', combine_tracks=True, adjust_frame_rate=True, 32 | metadata_file=metadata_file) 33 | elif len(path.split('Synthetic')) > 1: 34 | kwargs = {'max_threads': 5, 'voxel_size': 1} 35 | 36 | prl.run_parallel(process=spharm.convert_to_tiff, inputfolder=path + 'coordinates/', 37 | outputfolder=path + 'output/stacks/', extensions=['*'], debug=False, combine=False, 38 | exclude=['config.xml', 'log.csv', 'runanalysis.csv', 39 | 'runstatistics.csv', 'shapeanalysis.csv', 'tissue.csv'], 40 | **kwargs) 41 | 42 | path = path + 'output/' 43 | 44 | # prl.run_parallel(process=plt.plot_maxprojections, inputfolder=path + 'stacks/', 45 | # outputfolder=path + 'stacks_maxproj_xy/', axis=0, combine=False, **kwargs) 46 | # 47 | # prl.run_parallel(process=plt.plot_maxprojections, inputfolder=path + 'stacks/', 48 | # outputfolder=path + 'stacks_maxproj_xz/', axis=1, combine=False, **kwargs) 49 | # 50 | # prl.run_parallel(process=plt.plot_maxprojections, inputfolder=path + 'stacks/', 51 | # outputfolder=path + 'stacks_maxproj_yz/', axis=2, combine=False, **kwargs) 52 | 53 | if len(args) > 1: 54 | 55 | start_iter = int(float(args[1])) 56 | exclude = ['t666.csv.tif'] 57 | for t in np.arange(0, start_iter, 500): 58 | exclude.append('t' + str(t) + '.csv.tif') 59 | 60 | prl.run_parallel(process=sgm.extract_surfaces, 61 | inputfolder=path + 'stacks/', 62 | outputfolder=path + 'surfaces_separate/', 63 | combine=False, exclude=exclude) 64 | 65 | sgm.combine_surfaces(inputfolder=path + 
'surfaces_separate/', outputfolder=path + 'surfaces/') 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_2_spharm.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import re 4 | import sys 5 | import numpy as np 6 | import pandas as pd 7 | from SPHARM.lib import spharm 8 | import SPHARM.lib.parallel as prl 9 | from helper_lib import filelib 10 | 11 | 12 | def extract_metadata_Tcells(inputfile, spectrum_file): 13 | 14 | stat = pd.read_csv(inputfile, sep='\t') 15 | 16 | # extract group info 17 | filenames = np.array(stat['Name']) 18 | groups = [] 19 | Time = [] 20 | Sample = [] 21 | p = re.compile('[-+]?\d*\.*\d+') 22 | 23 | for i, fn in enumerate(filenames): 24 | parts = fn.split('/') 25 | groups.append(parts[0].split('_')[0]) 26 | parts_sample = parts[1].split('_') 27 | Sample.append(parts_sample[0] + '_' + parts_sample[1]) 28 | parts = parts[-1].split('Time') 29 | Time.append(p.findall(parts[-1])[0]) 30 | 31 | stat['Group'] = groups 32 | stat['Time'] = Time 33 | stat['Sample'] = Sample 34 | stat.to_csv(inputfile, sep='\t') 35 | 36 | # compute frequency spectrum 37 | stat['value'] = np.array(stat['real']) + np.array(stat['imag'])*1j 38 | stat = stat.groupby(['Group', 'Name', 'Time', 'degree', 'Sample']).sum().reset_index() 39 | stat['frequency'] = np.sqrt(stat['power']) 40 | 41 | stat.to_csv(spectrum_file, sep='\t') 42 | 43 | 44 | def extract_metadata_synthetic(inputfile, spectrum_file): 45 | 46 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 47 | 48 | # extract group info 49 | filenames = np.array(stat['Name']) 50 | groups = [] 51 | cellID = [] 52 | Time = [] 53 | nw = [] 54 | pw = [] 55 | fb = [] 56 | dw = [] 57 | p = re.compile('\d*\.*\d+') 58 | 59 | for i, fn in enumerate(filenames): 60 | parts = fn.split('Time') 61 | nums = p.findall(parts[-2]) 62 | nw.append(nums[-4]) 63 | pw.append(nums[-5]) 64 | fb.append(nums[-1]) 65 | dw.append(nums[-3]) 66 | groups.append('NW=' + nums[-4] + '_PW=' + nums[-5] + '_DW=' + nums[-3] + '_FB=' + nums[-1]) 67 | cellID.append(parts[-2][:-1]) 68 | Time.append(p.findall(parts[-1])[-1]) 69 | 70 | cellID = np.array(cellID) 71 | cellID_unique = np.unique((cellID)) 72 | cellID_num = np.zeros(len(cellID)) 73 | for i in range(len(cellID_unique)): 74 | cellID_num[np.where(cellID == cellID_unique[i])] = i + 1 75 | stat['Group'] = groups 76 | stat['CellID'] = cellID_num 77 | stat['Time'] = Time 78 | stat['NWeight'] = nw 79 | stat['PosWeight'] = pw 80 | stat['DistWeight'] = dw 81 | stat['FrontBack'] = fb 82 | stat.to_csv(inputfile, sep='\t') 83 | 84 | # compute frequency spectrum 85 | stat['value'] = np.array(stat['real']) + np.array(stat['imag'])*1j 86 | stat = stat.groupby(['Group', 'Name', 'CellID', 'Time', 'degree', 87 | 'NWeight', 'PosWeight', 'FrontBack', 'DistWeight']).sum().reset_index() 88 | stat['amplitude'] = np.sqrt(stat['power']) 89 | 90 | stat.to_csv(spectrum_file, sep='\t') 91 | 92 | 93 | def split_parameters(filename, outputfolder): 94 | filelib.make_folders([outputfolder]) 95 | stat = pd.read_csv(filename, sep='\t', index_col=0) 96 | for pw in stat['PosWeight'].unique(): 97 | for nw in stat['NWeight'].unique(): 98 | curstat = stat[(stat['PosWeight'] == pw) & (stat['NWeight'] == nw)].reset_index() 99 | curstat.to_csv(outputfolder + 'PosWeight=' + str(pw) + '_NWeight=' + str(nw) + '.csv', sep='\t') 100 | 101 | for fb in 
stat['FrontBack'].unique(): 102 | curstat = stat[stat['FrontBack'] == fb].reset_index() 103 | curstat.to_csv(outputfolder + 'FrontBack=' + str(fb) + '.csv', sep='\t') 104 | 105 | 106 | kwargs = {'max_threads': 6, 'combined_tracks': True, 'rotate': False} 107 | gridsize = 120 108 | 109 | 110 | ################################# 111 | args = sys.argv[1:] 112 | if len(args) > 0: 113 | path = args[0] 114 | if path != 'test': 115 | if not path.endswith('/'): 116 | path += '/' 117 | path += 'output/' 118 | if len(args) > 1: 119 | kwargs['max_threads'] = int(float(args[1])) 120 | extract_metadata = None 121 | 122 | if len(path.split('T_cells')) > 1: 123 | extract_metadata = extract_metadata_Tcells 124 | 125 | elif len(path.split('Synthetic')) > 1: 126 | extract_metadata = extract_metadata_synthetic 127 | 128 | if extract_metadata is not None: 129 | prl.run_parallel(process=spharm.compute_spharm, inputfolder=path + 'surfaces/', 130 | outputfolder=path + 'spharm/gridsize=' + str(gridsize) + '/', extensions=['csv'], 131 | grid_size=gridsize, normalize=True, **kwargs) 132 | filename = path + 'spharm/gridsize=' + str(gridsize) + '.csv' 133 | extract_metadata(inputfile=filename, spectrum_file=filename[:-4] + '_frequency_spectrum.csv') 134 | 135 | if len(path.split('parameters')) > 1: 136 | 137 | split_parameters(filename=path + 'spharm/gridsize=' + str(gridsize) + '.csv', 138 | outputfolder=path + 'spharm/gridsize=' + str(gridsize) + '_parameters/') 139 | 140 | split_parameters(filename=path + 'spharm/gridsize=' + str(gridsize) + '_frequency_spectrum.csv', 141 | outputfolder=path + 'spharm/gridsize=' + str(gridsize) + '_parameters_frequency_spectrum/') 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_3_plotting_individual.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import mkl 4 | mkl.set_num_threads(1) 5 | 6 | import sys 7 | 8 | from SPHARM.lib import plotting as plt 9 | 10 | 11 | gridsize = 120 12 | 13 | 14 | ################################# 15 | args = sys.argv[1:] 16 | if len(args) > 0: 17 | path = args[0] 18 | if path != 'test': 19 | if not path.endswith('/'): 20 | path += '/' 21 | path += 'output/' 22 | 23 | if len(path.split('Synthetic')) > 1: 24 | id_col = 'CellID' 25 | else: 26 | id_col = 'TrackID' 27 | 28 | plt.plot_individual_heatmaps(inputfolder=path + 'spharm/gridsize=' + str(gridsize) + '/', 29 | outputfolder=path + 'plots/individual_heatmaps/gridsize=' + str(gridsize) + '/', 30 | cutoff=5, logscale=True) 31 | plt.plot_spectra(inputfolder=path + 'spharm/gridsize=' + str(gridsize) + '/', 32 | outputfolder=path + 'plots/individual_spectra/gridsize=' + str(gridsize) + '/') 33 | 34 | plt.plot_individual_time_heatmaps(inputfile=path + 'spharm/gridsize=' + str(gridsize) + '.csv', 35 | outputfolder=path + 'plots/individual_time_heatmaps/gridsize=' 36 | + str(gridsize) + '/', logscale=True, cutoff=10, id_col=id_col) 37 | plt.plot_individual_frequency_heatmaps(inputfile=path + 'spharm/gridsize=' + str(gridsize) + '.csv', 38 | outputfolder=path + 'plots/individual_frequency_heatmaps/gridsize=' 39 | + str(gridsize) + '/', logscale=True, cutoff=10, 40 | id_col=id_col) 41 | plt.plot_individual_derivative_heatmaps(inputfile=path + 'spharm/gridsize=' + str(gridsize) + '.csv', 42 | outputfolder=path + 'plots/individual_derivative_heatmaps/gridsize=' 43 | + str(gridsize) + 
'/', cutoff=10, id_col=id_col) 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_4_plotting_groups.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import mkl 4 | mkl.set_num_threads(1) 5 | 6 | import sys 7 | import os 8 | 9 | from SPHARM.lib import plotting as plt 10 | 11 | 12 | def compare_parameters(inputfolder, outputfolder): 13 | files = os.listdir(inputfolder) 14 | print(files) 15 | for fn in files: 16 | plt.plot_heatmap_difference(inputfile=inputfolder + fn, cutoff=5, 17 | outputfolder=outputfolder +fn[:-4] + '/heatmap_difference/') 18 | 19 | 20 | def compare_parameters_spectra(inputfolder, outputfolder): 21 | files = os.listdir(inputfolder) 22 | print(files) 23 | for fn in files: 24 | plt.plot_effect_size(inputfile=inputfolder+fn, 25 | outputfolder=outputfolder + fn[:-4] + '/effect_size/', value='amplitude', cutoff=8) 26 | 27 | plt.plot_pairplots(inputfile=inputfolder+fn, 28 | outputfolder=outputfolder + fn[:-4] + '/pairplots/', cutoff=5) 29 | 30 | 31 | gridsize = 120 32 | 33 | 34 | ################################# 35 | args = sys.argv[1:] 36 | if len(args) > 0: 37 | path = args[0] 38 | if path != 'test': 39 | if not path.endswith('/'): 40 | path += '/' 41 | path += 'output/' 42 | 43 | if len(path.split('Synthetic')) > 1: 44 | id_col = 'CellID' 45 | else: 46 | id_col = 'TrackID' 47 | 48 | filename = path + 'spharm/gridsize=' + str(gridsize) + '.csv' 49 | plt.plot_average_heatmaps(inputfile=filename, cutoff=5, logscale=True, 50 | outputfolder=path + 'plots/average_heatmaps/gridsize=' + str(gridsize) + '_') 51 | plt.plot_average_spectra(inputfile=filename[:-4] + '_frequency_spectrum.csv', cutoff=8, 52 | outputfolder=path + 'plots/average_spectra/gridsize=' + str(gridsize) + '_') 53 | plt.plot_average_frequency_heatmaps(inputfile=filename, cutoff=10, logscale=True, 54 | outputfolder=path + 'plots/average_frequency_heatmaps/gridsize=' + str( 55 | gridsize) + '_', id_col=id_col) 56 | plt.plot_mean_abs_derivative(inputfile=filename, cutoff=10, id_col=id_col, 57 | outputfolder=path + 'plots/average_abs_derivative/gridsize=' + str(gridsize) + '_') 58 | plt.plot_inverse_shapes(inputfile=filename, 59 | outputfolder=path + 'plots/inverse_shapes/gridsize=' + str(gridsize) + '_') 60 | 61 | if len(path.split('parameters')) > 1: 62 | 63 | compare_parameters_spectra(inputfolder=path + 'spharm/gridsize=' + str(gridsize) 64 | + '_parameters_frequency_spectrum/', 65 | outputfolder=path+'plots/parameters_comparison/gridsize='+str(gridsize)+'/') 66 | 67 | compare_parameters(inputfolder=path + 'spharm/gridsize=' + str(gridsize) + '_parameters/', 68 | outputfolder=path+'plots/parameters_comparison/gridsize='+str(gridsize)+'/') 69 | 70 | else: 71 | plt.plot_heatmap_difference(inputfile=filename, 72 | outputfolder=path + 'plots/heatmap_difference/gridsize=' + str(gridsize)) 73 | plt.plot_effect_size(inputfile=filename[:-4] + '_frequency_spectrum.csv', 74 | outputfolder=path + 'plots/effect_size/gridsize=' + str(gridsize), value='frequency') 75 | plt.plot_pairplots(inputfile=filename[:-4] + '_frequency_spectrum.csv', 76 | outputfolder=path + 'plots/pairplots/gridsize=' + str(gridsize)) 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_5_adjust_parameters.py: -------------------------------------------------------------------------------- 1 | from 
__future__ import division 2 | 3 | import os 4 | import sys 5 | import numpy as np 6 | import pandas as pd 7 | import seaborn as sns 8 | import pylab as plt 9 | 10 | import mkl 11 | from SPHARM.lib import classification 12 | from helper_lib import filelib 13 | import helper_lib.parallel as prl 14 | 15 | import warnings 16 | warnings.simplefilter(action='ignore', category=Warning) 17 | mkl.set_num_threads(1) 18 | 19 | 20 | def compare_parameters_parallel(cutoffs, timelengths, rotation_invariant, **kwargs): 21 | items = [] 22 | if rotation_invariant: 23 | static_features = ['amplitude'] 24 | else: 25 | static_features = ['amplitude', 'real_imag'] 26 | 27 | for cutoff in cutoffs: 28 | 29 | static = True 30 | dynamic_features = None 31 | timelength = None 32 | 33 | for static_feature in static_features: 34 | items.append([cutoff, static, dynamic_features, timelength, 35 | static_feature, rotation_invariant]) 36 | 37 | static = False 38 | for timelength in timelengths: 39 | for dynamic_features in ['time', 'frequency']: 40 | for static_feature in static_features: 41 | items.append([cutoff, static, dynamic_features, timelength, 42 | static_feature, rotation_invariant]) 43 | 44 | kwargs['inputstat'] = pd.read_csv(kwargs.pop('inputfile'), sep='\t', index_col=0) 45 | 46 | if kwargs.pop('debug', False) is True: 47 | kwargs['item'] = items[0] 48 | kwargs.pop('max_threads') 49 | print(kwargs['item']) 50 | compare_parameters(**kwargs) 51 | else: 52 | kwargs['items'] = items 53 | prl.run_parallel(process=compare_parameters, **kwargs) 54 | 55 | filelib.combine_statistics(kwargs.get('folder_accuracy')) 56 | 57 | 58 | def compare_parameters(item, inputstat, folder_accuracy, group='Group', 59 | id_col='TrackID', grouped=False): 60 | filelib.make_folders([folder_accuracy]) 61 | 62 | cutoff, static, dynamic_features, timelength, static_features, rotation_invariant = item 63 | params = dict({'Cutoff': cutoff, 64 | 'Static': static, 65 | 'Dynamic_features': dynamic_features, 66 | 'Time length': timelength, 67 | 'Static_features': static_features, 68 | 'Rotation_invariant': rotation_invariant}) 69 | 70 | outputfile = folder_accuracy 71 | for key in params.keys(): 72 | outputfile += key + '=' + str(params[key]) + '_' 73 | if not os.path.exists(outputfile[:-1] + '.csv'): 74 | if cutoff is not None: 75 | stat = inputstat[inputstat['degree'] <= cutoff] 76 | else: 77 | stat = inputstat 78 | features, classes, \ 79 | names, groups, samples = classification.extract_features(stat, 80 | cell_id=id_col, 81 | group=group, 82 | static=static, 83 | dynamic_features=dynamic_features, 84 | timelength=timelength, 85 | static_features=static_features, 86 | rotation_invariant=rotation_invariant)[:] 87 | if len(classes) > 0 and len(np.unique(classes)) > 1: 88 | accuracy = pd.DataFrame() 89 | for C in [0.1, 1., 10., 100., 1000.]: 90 | if grouped: 91 | curaccuracy = classification.predict_group_shuffle_split(features, classes, C=C, 92 | nsplits=100, 93 | test_size=len(np.unique(classes)), 94 | groups=samples, 95 | random_state=0) 96 | else: 97 | curaccuracy = classification.predict_shuffle_split(features, classes, C=C, 98 | nsplits=100, test_size=2./7, random_state=0) 99 | curaccuracy['C'] = C 100 | accuracy = pd.concat([accuracy, curaccuracy], ignore_index=True) 101 | 102 | for key in params.keys(): 103 | accuracy[key] = params[key] 104 | 105 | accuracy.to_csv(outputfile[:-1] + '.csv', sep='\t') 106 | 107 | 108 | def plot_accuracy(inputfile, outputfolder): 109 | filelib.make_folders([outputfolder]) 110 | stat = 
pd.read_csv(inputfile, sep='\t', index_col=0) 111 | stat.loc[:, 'Cutoff'] = np.array(stat['Cutoff']).astype(str) 112 | stat.loc[stat[stat['Cutoff'] == 'nan'].index, 'Cutoff'] = str(60) 113 | stat.loc[:, 'Cutoff'] = np.array(stat['Cutoff']).astype(float).astype(int) 114 | stat = stat.sort_values(['Cutoff', 'Time length']).reset_index() 115 | stat.loc[:, 'Dynamic_features'] = np.array(stat['Dynamic_features']).astype(str) 116 | stat.loc[stat[stat['Dynamic_features'] == 'nan'].index, 'Dynamic_features'] = 'static' 117 | if 'Rotation_invariant' in stat.columns: 118 | stat = stat.assign(features=stat['Dynamic_features'] + '_' + stat['Static_features'] 119 | + '_rot_invar=' + stat['Rotation_invariant'].astype(str)) 120 | else: 121 | stat = stat.assign(features=stat['Dynamic_features'] + '_' + stat['Static_features']) 122 | 123 | for c in stat['C'].unique(): 124 | for features in stat['features'].unique(): 125 | curstat = stat[(stat['features'] == features)&(stat['C'] == c)] 126 | if str(curstat['Static'].iloc[0]) == 'True': 127 | if 'One_time_point' in stat.columns: 128 | hue = 'One_time_point' 129 | else: 130 | hue = None 131 | else: 132 | hue = 'Time length' 133 | 134 | sns.boxplot(x='Cutoff', y='Accuracy', hue=hue, data=curstat) 135 | plt.savefig(outputfolder + features + '_C=' + str(c) + '.png') 136 | plt.close() 137 | 138 | stat = stat.sort_values(['C', 'features']).reset_index() 139 | for cutoff in stat['Cutoff'].unique(): 140 | stat_static = stat[(stat['Static'] == True) & (stat['Cutoff'] == cutoff)] 141 | stat_dynamic = stat[(stat['Static'] == False) & (stat['Cutoff'] == cutoff)] 142 | 143 | if 'One_time_point' in stat.columns: 144 | 145 | for otp in stat_static['One_time_point'].unique(): 146 | curstat = stat_static[stat_static['One_time_point'] == otp] 147 | 148 | sns.boxplot(x='C', y='Accuracy', hue='features', data=curstat) 149 | plt.savefig(outputfolder + 'Static_one_time_point=' + str(otp) + '_Cutoff=' + str(cutoff) + '.png') 150 | plt.close() 151 | else: 152 | sns.boxplot(x='C', y='Accuracy', hue='features', data=stat_static) 153 | plt.savefig(outputfolder + 'Static_Cutoff=' + str(cutoff) + '.png') 154 | plt.close() 155 | 156 | for tl in stat_dynamic['Time length'].unique(): 157 | curstat = stat_dynamic[stat_dynamic['Time length'] == tl] 158 | sns.boxplot(x='C', y='Accuracy', hue='features', data=curstat) 159 | plt.savefig(outputfolder + 'Dynamic_Time_length=' + str(tl) + '_Cutoff=' + str(cutoff) + '.png') 160 | plt.close() 161 | 162 | 163 | def plot_accuracy_selected(inputfile, outputfolder): 164 | filelib.make_folders([outputfolder]) 165 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 166 | stat.loc[:, 'Cutoff'] = np.array(stat['Cutoff']).astype(str) 167 | stat.loc[stat[stat['Cutoff'] == 'nan'].index, 'Cutoff'] = str(60) 168 | stat.loc[:, 'Cutoff'] = np.array(stat['Cutoff']).astype(float).astype(int) 169 | stat = stat.sort_values(['Cutoff', 'Time length']).reset_index() 170 | stat.loc[:, 'Dynamic features'] = np.array(stat['Dynamic_features']).astype(str) 171 | stat.loc[stat[stat['Dynamic features'] == 'nan'].index, 'Dynamic features'] = 'static' 172 | stat = stat[stat['Rotation_invariant'] == True] 173 | 174 | curstat = stat[stat['Static'] == True] 175 | 176 | palette = 'Set1' 177 | 178 | plt.figure(figsize=(4, 4)) 179 | sns.boxplot(x='Cutoff', y='Accuracy', hue='C', data=curstat, palette=palette) 180 | sns.despine() 181 | plt.xlabel('$l_{max}$') 182 | plt.title('Static') 183 | margins = {'left': 0.15, 'right': 0.95, 'top': 0.9, 'bottom': 0.13} 184 | 
plt.subplots_adjust(**margins) 185 | plt.savefig(outputfolder + 'Static.png') 186 | plt.savefig(outputfolder + 'Static.svg') 187 | plt.close() 188 | 189 | curstat = stat[stat['Static'] == False] 190 | curstat['Time length'] = curstat['Time length'].astype(int) 191 | 192 | for dyn in curstat['Dynamic_features'].unique(): 193 | plt.figure(figsize=(4, 4)) 194 | sns.boxplot(x='Cutoff', y='Accuracy', hue='C', 195 | data=curstat[curstat['Dynamic features'] == dyn], palette=palette) 196 | sns.despine() 197 | plt.xlabel('$l_{max}$') 198 | margins = {'left': 0.15, 'right': 0.95, 'top': 0.9, 'bottom': 0.13} 199 | plt.subplots_adjust(**margins) 200 | plt.title('Dynamic features = ' + dyn) 201 | plt.ylim(0.55, 1.02) 202 | plt.savefig(outputfolder + 'Dynamic_' + dyn + '.png') 203 | plt.savefig(outputfolder + 'Dynamic_' + dyn + '.svg') 204 | plt.close() 205 | 206 | plt.figure(figsize=(3, 4)) 207 | sns.boxplot(x='Time length', y='Accuracy', hue='Dynamic_features', data=curstat, palette=palette) 208 | sns.despine() 209 | plt.xlabel('Time length (frames)') 210 | margins = {'left': 0.22, 'right': 0.95, 'top': 0.9, 'bottom': 0.13} 211 | plt.subplots_adjust(**margins) 212 | plt.savefig(outputfolder + 'Dynamic_Timelength.png') 213 | plt.savefig(outputfolder + 'Dynamic_Timelength.svg') 214 | plt.close() 215 | 216 | 217 | 218 | gridsize = 120 219 | 220 | ################################# 221 | args = sys.argv[1:] 222 | if len(args) > 0: 223 | path = args[0] 224 | if path != 'test': 225 | if not path.endswith('/'): 226 | path += '/' 227 | path += 'output/' 228 | 229 | if len(path.split('Synthetic')) > 1: 230 | id_col = 'CellID' 231 | grouped = False 232 | else: 233 | id_col = 'TrackID' 234 | grouped = True 235 | 236 | inputfile = path + 'spharm/gridsize=' + str(gridsize) + '.csv' 237 | rotation_invariant = True 238 | 239 | compare_parameters_parallel(inputfile=inputfile, 240 | folder_accuracy=path + 'cross_validation_accuracy/', 241 | cutoffs=[1, 2, 3, 5, 10, None], 242 | timelengths=[5, 10, 20, 30, 50, 80], 243 | max_threads=20, 244 | id_col=id_col, 245 | rotation_invariant=rotation_invariant, 246 | debug=False, grouped=grouped) 247 | 248 | plot_accuracy_selected(path + 'cross_validation_accuracy.csv', path + 'cross_validation_accuracy_plots_selected/') 249 | 250 | 251 | 252 | 253 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_T_cells_in_LN.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import re 4 | import os 5 | import numpy as np 6 | import pandas as pd 7 | from scipy import ndimage 8 | from SPHARM.lib import spharm 9 | import SPHARM.lib.parallel as prl 10 | import SPHARM.lib.segmentation as sgm 11 | from SPHARM.lib.vrml_parse import combine_with_track_data 12 | from SPHARM.classes.image_stack import ImageStack 13 | from SPHARM.lib import plotting as plt 14 | 15 | 16 | def metadata_from_filename(filename, group_keys, group_names, sample_keys): 17 | p = re.compile(r'[-+]?\d*\.*\d+') 18 | group = None 19 | for i, g in enumerate(group_keys): 20 | if len(filename.split(g)) > 1: 21 | group = group_names[i] 22 | 23 | sample = None 24 | for g in sample_keys: 25 | if len(filename.split(g)) > 1: 26 | sample = g 27 | 28 | parts = filename.split('Time') 29 | time = int(p.findall(parts[-1])[0]) + 1 30 | return group, sample, time 31 | 32 | 33 | def extract_metadata0(inputfile): 34 | 35 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 36 | group_keys = ['PMT3', 'PMT2'] 37 | 
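# detector channel keys (PMT3/PMT2) map index-wise to the dye names defined below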
group_names = ['CMTMR', 'CFSE'] 38 | sample_keys = ['Doc17', 'Doc18'] 39 | 40 | # extract group info 41 | filenames = np.array(stat['Image_name']) 42 | Time = [] 43 | 44 | for i, fn in enumerate(filenames): 45 | group, sample, time = metadata_from_filename(fn, group_keys, group_names, sample_keys) 46 | Time.append(time) 47 | 48 | stat['Time'] = Time 49 | stat.to_csv(inputfile, sep='\t') 50 | 51 | 52 | def extract_metadata(inputfile, spectrum_file): 53 | 54 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 55 | group_keys = ['PMT3', 'PMT2'] 56 | group_names = ['CMTMR', 'CFSE'] 57 | sample_keys = ['Doc17', 'Doc18'] 58 | 59 | # extract group info 60 | filenames = np.array(stat['Name']) 61 | groups = [] 62 | mutants = [] 63 | samples = [] 64 | Time = [] 65 | 66 | for i, fn in enumerate(filenames): 67 | group, sample, time = metadata_from_filename(fn, group_keys, group_names, sample_keys) 68 | mutants.append(group) 69 | print(group, sample) 70 | groups.append(group + '_' + sample) 71 | samples.append(sample) 72 | Time.append(time) 73 | 74 | stat['Group'] = groups 75 | stat['Time'] = Time 76 | stat['Sample'] = samples 77 | stat['Mutant'] = mutants 78 | stat.to_csv(inputfile, sep='\t') 79 | 80 | # compute frequency spectrum 81 | if spectrum_file is not None: 82 | stat['value'] = np.array(stat['real']) + np.array(stat['imag'])*1j 83 | stat = stat.groupby(['Group', 'Sample', 'Mutant', 'Name', 'Time', 'degree']).sum().reset_index() 84 | stat['frequency'] = np.sqrt(stat['power']) 85 | 86 | stat.to_csv(spectrum_file, sep='\t') 87 | 88 | 89 | def combine_with_track_data_batch(inputfolder, trackfolder, outputfolder): 90 | files = os.listdir(inputfolder) 91 | trackfiles = os.listdir(trackfolder) 92 | group_keys = np.array(['PMT3', 'PMT2']) 93 | group_names = np.array(['CMTMR', 'CFSE']) 94 | sample_keys = np.array(['Doc17', 'Doc18']) 95 | 96 | for fn in files: 97 | print(fn) 98 | group, sample, time = metadata_from_filename(fn, group_keys, group_names, sample_keys) 99 | for trf in trackfiles: 100 | if len(trf.split(group)) > 1 and len(trf.split(sample)) > 1: 101 | extract_metadata0(inputfolder + fn) 102 | outputfile = outputfolder + sample + '_' + group_keys[np.where(group_names == group)[0][0]] + '/' + fn 103 | combine_with_track_data(inputfile=inputfolder + fn, 104 | trackfile=trackfolder + trf, 105 | outputfile=outputfile) 106 | stat = pd.read_csv(outputfile, sep='\t', index_col=0) 107 | if stat['TrackID'].iloc[0] == -1: 108 | os.remove(outputfile) 109 | 110 | for sample in sample_keys: 111 | for group in group_keys: 112 | stat_combined = pd.DataFrame() 113 | files = os.listdir(outputfolder + sample + '_' + group_keys[np.where(group_keys == group)[0][0]] + '/') 114 | for fn in files: 115 | stat = pd.read_csv(outputfolder + sample + '_' + 116 | group_keys[np.where(group_keys == group)[0][0]] + '/' + fn, sep='\t', index_col=0) 117 | stat_combined = pd.concat([stat_combined, stat], ignore_index=True) 118 | 119 | stat_combined.to_csv(outputfolder + sample + '_' + 120 | group_keys[np.where(group_keys == group)[0][0]] + '.csv', sep='\t') 121 | 122 | 123 | def overlay_tracks(**kwargs): 124 | inputfolder = kwargs.get('inputfolder') 125 | outputfolder = kwargs.get('outputfolder', inputfolder + '../output/RGB/') 126 | trackfolder = kwargs.get('track_folder') 127 | filenames = kwargs.get('item') 128 | voxel_size = np.array(kwargs.get('voxel_size')) 129 | 130 | group_keys = kwargs.get('group_keys') 131 | group_names = kwargs.get('group_names') 132 | sample_keys = kwargs.get('sample_keys') 133 | 134 | stacks = [] 
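# load one image stack per channel file; the stacks are merged into a single RGB image below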
135 | for i, fn in enumerate(filenames): 136 | if len(fn) > 0: 137 | stacks.append(ImageStack(inputfolder + fn)) 138 | 139 | data = np.zeros(stacks[0].data.shape + (3,)) 140 | for i in range(len(stacks)): 141 | data[:, :, :, i] = stacks[i].data 142 | 143 | stacks[0].data = data 144 | 145 | trackfiles = os.listdir(trackfolder) 146 | centers = np.zeros(np.array(stacks[0].data.shape)[:-1]) 147 | for fn in filenames: 148 | if fn != '': 149 | group, sample, time = metadata_from_filename(fn, group_keys, group_names, sample_keys) 150 | for trf in trackfiles: 151 | if len(trf.split(group)) > 1 and len(trf.split(sample)) > 1: 152 | trackstat = pd.read_csv(trackfolder + trf, sep='\t') 153 | trackstat = trackstat[trackstat['Time'] == time] 154 | x = np.int_(np.round_(np.array(trackstat['Position X'])/voxel_size[2])) 155 | y = np.int_(np.round_(np.array(trackstat['Position Y'])/voxel_size[1])) 156 | z = np.int_(np.round_(np.array(trackstat['Position Z'])/voxel_size[0])) 157 | centroids = np.array([z,y,x]).transpose().reshape((len(trackstat), 3)) 158 | centers[tuple(centroids.transpose())] = 255. 159 | 160 | sigma = 0.25 / voxel_size 161 | centers = ndimage.gaussian_filter(centers, sigma) 162 | print(centers.max(), centers.min()) 163 | 164 | stacks[0].data[np.where(centers > 0)] = (255, 255, 255) 165 | stacks[0].save_max_projection(outputfolder + filenames[0]) 166 | 167 | 168 | path = '../../../Data/T_cells_in_LN/' 169 | inputfolder = path + 'input/' 170 | 171 | if os.path.exists(path): 172 | kwargs = {'max_threads': 3, 'mincellrad': 3, 'voxel_size': [4, 0.478, 0.478], 173 | 'channelcodes': ['PMT1', 'PMT2', 'PMT3', 'PMT4'], 174 | 'sigmas': [0.5] * 4, 'percentiles': [100, 99.99, 99.99, 100], 'thresholds': [0, 50, 100, 0]} 175 | 176 | prl.run_parallel(process=sgm.make_metadata_files, inputfolder=inputfolder, **kwargs) 177 | 178 | 179 | prl.run_parallel(process=sgm.preprocess, inputfolder=inputfolder, 180 | outputfolder=path + 'output/preprocessed/', **kwargs) 181 | 182 | prl.run_parallel(process=plt.plot_maxprojections, 183 | inputfolder=path + 'output/preprocessed/', 184 | outputfolder=path + 'output/preprocessed_maxproj/') 185 | 186 | prl.run_parallel(process=sgm.mergeRGB, inputfolder=path + 'output/preprocessed/', 187 | outputfolder=path + 'output/preprocessed_RGB/', channels=['PMT3', 'PMT2', 'PMT1']) 188 | 189 | prl.run_parallel(process=plt.plot_maxprojections, 190 | inputfolder=path + 'output/preprocessed_RGB/', 191 | outputfolder=path + 'output/preprocessed_RGB_maxproj/') 192 | 193 | 194 | prl.run_parallel(process=sgm.unmix, channels=['PMT3', 'PMT2'], 195 | inputfolder=path + 'output/preprocessed/', 196 | outputfolder=path + 'output/unmixed/') 197 | 198 | prl.run_parallel(process=plt.plot_maxprojections, 199 | inputfolder=path + 'output/unmixed/', 200 | outputfolder=path + 'output/unmixed_maxproj/') 201 | 202 | prl.run_parallel(process=sgm.mergeRGB, inputfolder=path + 'output/unmixed/', 203 | outputfolder=path + 'output/unmixed_RGB/', channels=['PMT3', 'PMT2', 'PMT1']) 204 | 205 | prl.run_parallel(process=plt.plot_maxprojections, 206 | inputfolder=path + 'output/unmixed_RGB/', 207 | outputfolder=path + 'output/unmixed_RGB_maxproj/') 208 | 209 | prl.run_parallel(process=sgm.segment, 210 | inputfolder=path + 'output/unmixed/', 211 | outputfolder=path + 'output/segmented/', 212 | channelcodes=['PMT1', 'PMT2', 'PMT3', 'PMT4'], 213 | thresholds=[None, 50, 100, None], to_label=False, 214 | track_folder=path + 'tracks/') 215 | 216 | prl.run_parallel(process=overlay_tracks, inputfolder=path + 
'output/unmixed/', 217 | outputfolder=path + 'output/unmixed_RGB_tracks/', channels=['PMT3', 'PMT2', 'PMT1'], 218 | track_folder=path + 'tracks/', group_keys=['PMT3', 'PMT2'], group_names=['CMTMR', 'CFSE'], 219 | sample_keys=['Doc17', 'Doc18'], **kwargs) 220 | 221 | prl.run_parallel(process=sgm.mergeRGB, inputfolder=path + 'output/segmented/', 222 | outputfolder=path + 'output/segmented_RGB/', channels=['PMT3', 'PMT2', 'PMT1']) 223 | 224 | prl.run_parallel(process=plt.plot_maxprojections, 225 | inputfolder=path + 'output/segmented_RGB/', 226 | outputfolder=path + 'output/segmented_RGB_maxproj/') 227 | 228 | prl.run_parallel(process=sgm.segment, 229 | inputfolder=path + 'output/unmixed/', 230 | outputfolder=path + 'output/segmented_labeled/', 231 | channelcodes=['PMT1', 'PMT2', 'PMT3', 'PMT4'], 232 | thresholds=[None, 50, 100, None], to_label=True, 233 | track_folder=path + 'tracks/') 234 | 235 | prl.run_parallel(process=sgm.extract_surfaces, 236 | inputfolder=path + 'output/segmented_labeled/', 237 | outputfolder=path + 'output/surfaces_all/', 238 | channelcodes=['PMT2', 'PMT3'], combine=False) 239 | 240 | combine_with_track_data_batch(inputfolder=path + 'output/surfaces_all/', trackfolder=path + 'tracks/', 241 | outputfolder=path + 'output/surfaces_selected/') 242 | 243 | sgm.split_to_surfaces_batch(path + 'output/surfaces_selected/', path + 'output/surfaces/') 244 | 245 | gridsize = 60 246 | prl.run_parallel(process=spharm.compute_spharm, inputfolder=path + 'output/surfaces/', 247 | outputfolder=path + 'output/spharm/', extensions=['csv'], 248 | grid_size=gridsize) 249 | 250 | filename = path + 'output/spharm.csv' 251 | 252 | extract_metadata(inputfile=filename, 253 | spectrum_file=filename[:-4] + '_frequency_spectrum.csv') 254 | 255 | for group in ['Group', 'Sample', 'Mutant']: 256 | 257 | plt.plot_average_heatmaps(inputfile=filename, outputfolder=path+'output/plots_' + group + '/heatmaps/', group=group) 258 | plt.plot_average_spectra(inputfile=filename[:-4]+'_frequency_spectrum.csv', 259 | outputfolder=path+'output/plots_' + group + '/frequencies/', value='frequency', group=group) 260 | 261 | plt.plot_effect_size(inputfile=filename[:-4]+'_frequency_spectrum.csv', 262 | outputfolder=path+'output/plots_' + group + '/effect_size/', 263 | value='frequency', group=group) 264 | 265 | plt.plot_pairplots(inputfile=filename[:-4]+'_frequency_spectrum.csv', 266 | outputfolder=path+'output/plots_' + group + '/pairplots/', group=group) 267 | 268 | 269 | 270 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_ellipsoids.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import pylab as plt 6 | 7 | from helper_lib import filelib 8 | from SPHARM.classes.ellipsoid import Ellipsoid 9 | 10 | 11 | def plot_heatmaps(size, rotation, gridsize, path, value='power', cutoff=None, normalize=False, ri=False): 12 | 13 | filelib.make_folders([path + 'heatmaps/', path + 'frequencies/']) 14 | ell = Ellipsoid(grid_shape=(gridsize, gridsize), size=size, rotation=rotation) 15 | sp = ell.compute_spharm(grid_size=gridsize, normalize=normalize, normalization_method='zero-component', ri=ri) 16 | sp.save_to_csv(path + 'heatmaps/' + ell.name + '.csv') 17 | sp.heatmap(value=value, cutoff=cutoff).savefig(path + 'heatmaps/' + ell.name + '.png') 18 | plt.clf() 19 | sp.frequency_plot(value=value).savefig(path + 'frequencies/' + ell.name + '.png') 20 | 
plt.clf() 21 | 22 | 23 | def test_grid_size_invariance(path): 24 | 25 | value = 'power' 26 | size = (3, 1, 1) 27 | rotation = (0, 0) 28 | 29 | for n in range(2, 20): 30 | gridsize = 2*n 31 | plot_heatmaps(size, rotation, gridsize, path + 'not_normalized/', value=value, normalize=False) 32 | plot_heatmaps(size, rotation, gridsize, path + 'normalized/', value=value, normalize=True) 33 | 34 | 35 | def test_size_invariance(path): 36 | 37 | value = 'power' 38 | size = (2, 1, 1) 39 | rotation = (0, 0) 40 | gridsize = 20 41 | for k in range(1, 10): 42 | Size = tuple(np.array(size)*k) 43 | plot_heatmaps(Size, rotation, gridsize, path + 'not_normalized/', value=value, normalize=False) 44 | plot_heatmaps(Size, rotation, gridsize, path + 'normalized/', value=value, normalize=True) 45 | 46 | 47 | def test_rotation_invariance(path, ri=False): 48 | 49 | value = 'amplitude2' 50 | size = (10, 1, 1) 51 | gridsize = 50 52 | n_theta = 4 53 | n_phi = 4 54 | for t in range(n_theta): 55 | for p in range(n_phi): 56 | rotation = (np.pi/n_theta*t, 2*np.pi/n_phi*p) 57 | plot_heatmaps(size, rotation, gridsize, path + 'not_normalized/', value=value, 58 | normalize=False, cutoff=5, ri=ri) 59 | plot_heatmaps(size, rotation, gridsize, path + 'normalized/', value=value, 60 | normalize=True, cutoff=5, ri=ri) 61 | 62 | 63 | def test_eccentricity_dependence(path): 64 | 65 | value = 'power' 66 | rotation = (0, 0) 67 | gridsize = 16 68 | for k in range(1, 21): 69 | size = (1, 1, k) 70 | plot_heatmaps(size, rotation, gridsize, path + 'prolate/', value=value, normalize=True) 71 | 72 | for k in range(1, 21): 73 | size = (1, k, k) 74 | plot_heatmaps(size, rotation, gridsize, path + 'oblate/', value=value, normalize=True) 75 | 76 | 77 | def prolateness(r): 78 | r = np.array(r) 79 | n = len(r) 80 | ravg = np.mean(r**2) 81 | pr = 4./9 * (np.prod(r**2 - ravg)/ravg**n + 0.25) 82 | return pr 83 | 84 | 85 | def plot_eccentricity_test(inputfile): 86 | stat = pd.read_csv(inputfile, sep='\t', index_col=0) 87 | stat = stat.sort_values(['degree', 'Prolateness']) 88 | 89 | for d in stat['degree'].unique(): 90 | curstat = stat[stat['degree'] == d] 91 | plt.scatter(curstat['Eccentricity'], curstat['power'], c=curstat['Prolateness'], cmap='viridis') 92 | cbar = plt.colorbar() 93 | cbar.set_label('Prolateness') 94 | plt.xlabel('Eccentricity') 95 | plt.ylabel('Power') 96 | plt.savefig(inputfile[:-4] + '_eccentricity_deg=%03d.png'%d) 97 | plt.close() 98 | 99 | 100 | def test_eccentricity(path, gridsize): 101 | 102 | filelib.make_folders([path]) 103 | sizes = [] 104 | for i in range(1, 21): 105 | for j in range(1, 21): 106 | sizes.append((1, i, j)) 107 | 108 | stat = pd.DataFrame() 109 | for size in sizes: 110 | ell = Ellipsoid(grid_shape=(gridsize, gridsize), size=size, rotation=(0,0)) 111 | sp = ell.compute_spharm(grid_size=gridsize, normalize=True) 112 | 113 | sp.compute_frequency_spectrum() 114 | curstat = sp.frequency_spectrum 115 | curstat['Eccentricity'] = np.max(size)/np.min(size) 116 | curstat['Prolateness'] = prolateness(size) 117 | stat = pd.concat([stat, curstat], ignore_index=True) 118 | 119 | stat.to_csv(path + 'stat.csv', sep='\t') 120 | plot_eccentricity_test(path + 'stat.csv') 121 | 122 | 123 | path = '../../../Data/Ellipsoids/' 124 | 125 | import os 126 | if os.path.exists(path): 127 | # test_grid_size_invariance(path + 'Grid_size_invariance/') 128 | # test_size_invariance(path + 'Size_invariance/') 129 | test_rotation_invariance(path + 'Rotation_invariance_RI/', ri=True) 130 | # test_eccentricity_dependence(path + 
'Eccentricity_test/') 131 | # test_eccentricity(path + 'Eccentricity_test_quantitative/', gridsize=60) 132 | 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_gridsize.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import mkl 4 | mkl.set_num_threads(1) 5 | from SPHARM.lib import plotting as plt 6 | import os 7 | 8 | 9 | path = '../../../Data/Synthetic_cells/output/' 10 | if os.path.exists(path): 11 | kwargs = {'max_threads': 100, 'voxel_size': 1} 12 | 13 | for gridsize in [20, 40, 60, 80, 100, 120, 140, 160, 180, 200]: 14 | plt.plot_3D_surfaces(inputfolder=path + 'surfaces/', 15 | outputfolder=path + 'surfaces_3D_views/gridsize=' + str(gridsize) + '/', 16 | points=True, gridsize=gridsize) 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_orbitals.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import numpy as np 4 | import pylab as plt 5 | import os 6 | 7 | from helper_lib import filelib 8 | from SPHARM.classes.orbital import Orbital 9 | 10 | 11 | def test_single_orbitals(gridsize, path): 12 | value = 'amplitude' 13 | filelib.make_folders([path + 'surfaces/', path + 'heatmaps/', path + 'frequencies/']) 14 | for m in range(5): 15 | for n in range(-m, m+1): 16 | 17 | orbital = Orbital(grid_shape=(gridsize, gridsize), m=m, n=n, amplitude=1) 18 | orbital.name = 'm=' + str(m) + '_n=' + str(n) 19 | 20 | sp = orbital.compute_spharm(grid_size=gridsize) 21 | sp.heatmap(value=value, cutoff=5).savefig(path + 'heatmaps/' + orbital.name + '.png') 22 | plt.clf() 23 | sp.frequency_plot(value=value, cutoff=5).savefig(path + 'frequencies/' + orbital.name + '.png') 24 | plt.clf() 25 | 26 | orbital.Rgrid = orbital.Rgrid.real 27 | mesh = orbital.plot_surface(points=False) 28 | mesh.save(path + 'surfaces/' + orbital.name + '.png', size=(200, 200)) 29 | 30 | 31 | def test_combined_orbitals(gridsize, path): 32 | value = 'amplitude' 33 | filelib.make_folders([path + 'surfaces/', path + 'heatmaps/', path + 'frequencies/']) 34 | combined = [[(0, 0, 1), (2, -1, 0.1)], 35 | [(0, 0, 1), (2, -1, 0.5), (4, 3, 1)], 36 | [(0, 0, 1), (4, 3, 0.5)], 37 | [(0, 0, 1), (1, 0, 1)]] 38 | for set in combined: 39 | set = np.array(set) 40 | m = set[:, 0] 41 | n = set[:, 1] 42 | amplitude = set[:, 2] 43 | 44 | orbital = Orbital(grid_shape=(gridsize, gridsize), m=m, n=n, amplitude=amplitude) 45 | orbital.name = 'm=' + str(m) + '_n=' + str(n) + '_amplitude=' + str(amplitude) 46 | 47 | sp = orbital.compute_spharm(grid_size=gridsize) 48 | sp.heatmap(value=value, cutoff=5).savefig(path + 'heatmaps/' + orbital.name + '.png') 49 | plt.clf() 50 | sp.frequency_plot(value=value).savefig(path + 'frequencies/' + orbital.name + '.png') 51 | plt.clf() 52 | 53 | orbital.Rgrid = orbital.Rgrid.real 54 | mesh = orbital.plot_surface(points=False) 55 | mesh.save(path + 'surfaces/' + orbital.name + '.png', size=(200, 200)) 56 | 57 | path = '../../../Data/Orbitals/' 58 | if os.path.exists(path): 59 | test_single_orbitals(50, path + 'Single_orbitals/') 60 | test_combined_orbitals(50, path + 'Combined_orbitals/') 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_surfaces.py: 
-------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import sys 5 | import pandas as pd 6 | 7 | 8 | ################################# 9 | args = sys.argv[1:] 10 | if len(args) > 0: 11 | path = args[0] 12 | if path != 'test': 13 | if not path.endswith('/'): 14 | path += '/' 15 | path += 'output/' 16 | 17 | surface_stat = pd.DataFrame() 18 | groups = os.listdir(path + 'surfaces/') 19 | for gr in groups: 20 | print(gr) 21 | samples = os.listdir(path + 'surfaces/' + gr + '/') 22 | for sample in samples: 23 | print(sample) 24 | files = os.listdir(path + 'surfaces/' + gr + '/' + sample + '/') 25 | for fn in files: 26 | print(fn) 27 | stat = pd.read_csv(path + 'surfaces/' + gr + '/' + sample + '/' + fn, sep='\t', index_col=0) 28 | times = stat['Time'].unique() 29 | 30 | for t in times: 31 | curstat = stat[stat['Time'] == t] 32 | cur_surface_stat = pd.DataFrame({'Time': [t], 33 | 'Number of surface points': [len(curstat)], 34 | 'Group': gr, 35 | 'Sample': sample, 36 | 'File': fn, 37 | 'Number of unique IDs': len(curstat['ID'].unique())}) 38 | surface_stat = pd.concat([surface_stat, cur_surface_stat], ignore_index=True) 39 | surface_stat.to_csv(path + 'surface_time_summary.csv', sep='\t') 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /SPHARM/scripts/analyse_tracks.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import sys 5 | import pandas as pd 6 | 7 | 8 | ################################# 9 | args = sys.argv[1:] 10 | if len(args) > 0: 11 | path = args[0] 12 | if path != 'test': 13 | if not path.endswith('/'): 14 | path += '/' 15 | path += 'output/' 16 | 17 | track_stat = pd.DataFrame() 18 | summary_stat = pd.DataFrame() 19 | groups = os.listdir(path + 'surfaces/') 20 | for gr in groups: 21 | print(gr) 22 | samples = os.listdir(path + 'surfaces/' + gr + '/') 23 | for sample in samples: 24 | print(sample) 25 | files = os.listdir(path + 'surfaces/' + gr + '/' + sample + '/') 26 | cur_summary_stat = pd.DataFrame({'Number of tracks': [len(files)], 27 | 'Sample': sample, 28 | 'Group': gr}) 29 | cur_track_stat = pd.DataFrame() 30 | for fn in files: 31 | print(fn) 32 | stat = pd.read_csv(path + 'surfaces/' + gr + '/' + sample + '/' + fn, sep='\t', index_col=0) 33 | cur_track_stat = pd.concat([cur_track_stat, pd.DataFrame({'TrackID': [stat.iloc[0]['TrackID']], 34 | 'Track length': len(stat['Time'].unique()), 35 | 'Start time': stat['Time'].min(), 36 | 'End time': stat['Time'].max(), 37 | 'File': fn, 38 | 'Sample': sample, 39 | 'Group': gr})], 40 | ignore_index=True) 41 | 42 | track_stat = pd.concat([track_stat, cur_track_stat], ignore_index=True) 43 | cur_summary_stat['Track length min'] = cur_track_stat['Track length'].min() 44 | cur_summary_stat['Track length max'] = cur_track_stat['Track length'].max() 45 | cur_summary_stat['Tracks >= 5 points'] = len(cur_track_stat[cur_track_stat['Track length'] >= 5]) 46 | cur_summary_stat['Tracks >= 10 points'] = len(cur_track_stat[cur_track_stat['Track length'] >= 10]) 47 | cur_summary_stat['Tracks >= 20 points'] = len(cur_track_stat[cur_track_stat['Track length'] >= 20]) 48 | summary_stat = pd.concat([summary_stat, cur_summary_stat], ignore_index=True) 49 | summary_stat.to_csv(path + 'track_summary.csv', sep='\t') 50 | 
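# per-track details (track length, start and end time) complement the per-sample summary saved above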
track_stat.to_csv(path + 'track_details.csv', sep='\t') 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | -------------------------------------------------------------------------------- /SPHARM/scripts/convert_surface_to_tiff.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import numpy as np 5 | import pandas as pd 6 | from skimage import io 7 | import warnings 8 | 9 | from multiprocessing import Process 10 | import time 11 | import sys 12 | 13 | 14 | def _print_progress(procdone, totproc, start): 15 | donepercent = procdone*100/totproc 16 | elapse = time.time() - start 17 | tottime = totproc*1.*elapse/procdone 18 | left = tottime - elapse 19 | units = 'sec' 20 | if left > 60: 21 | left = left/60. 22 | units = 'min' 23 | if left > 60: 24 | left = left/60. 25 | units = 'hours' 26 | 27 | print('done', procdone, 'of', totproc, '(', donepercent, '% ), approx. time left: ', left, units) 28 | 29 | 30 | def run_parallel(process, process_name=None, print_progress=True, **kwargs): 31 | 32 | items = kwargs.pop('items', []) 33 | max_threads = int(round(kwargs.pop('max_threads', 8))) 34 | if process_name is None: 35 | process_name = process.__name__ 36 | 37 | if print_progress: 38 | print('Run', process_name) 39 | 40 | procs = [] 41 | 42 | totproc = len(items) 43 | procdone = 0 44 | start = time.time() 45 | 46 | if print_progress: 47 | print('Started at ', time.ctime()) 48 | 49 | for i, cur_item in enumerate(items): 50 | 51 | while len(procs) >= max_threads: 52 | time.sleep(0.05) 53 | for p in procs: 54 | if not p.is_alive(): 55 | procs.remove(p) 56 | procdone += 1 57 | if print_progress: 58 | _print_progress(procdone, totproc, start) 59 | 60 | cur_args = kwargs.copy() 61 | cur_args['item'] = cur_item 62 | p = Process(target=process, kwargs=cur_args) 63 | p.start() 64 | procs.append(p) 65 | 66 | while len(procs) > 0: 67 | time.sleep(0.05) 68 | for p in procs: 69 | if not p.is_alive(): 70 | procs.remove(p) 71 | procdone += 1 72 | if print_progress: 73 | _print_progress(procdone, totproc, start) 74 | 75 | if print_progress: 76 | print(process_name, 'done') 77 | 78 | 79 | def as_stack(x, y, z, minmax=None): 80 | if minmax is None: 81 | minmax = np.int_([[z.min(), z.max()], 82 | [y.min(), y.max()], 83 | [x.min(), x.max()]]) 84 | else: 85 | minmax = np.int_(np.round_(minmax)) 86 | 87 | x = np.int_(x) - minmax[2, 0] + 1 88 | y = np.int_(y) - minmax[1, 0] + 1 89 | z = np.int_(z) - minmax[0, 0] + 1 90 | 91 | img = np.zeros([minmax[0, 1] - minmax[0, 0] + 3, minmax[1, 1] - minmax[1, 0] + 3, minmax[2, 1] - minmax[2, 0] + 3]) 92 | img[z, y, x] = 255 93 | 94 | return img 95 | 96 | 97 | def make_folders(folders): 98 | for folder in folders: 99 | if not os.path.exists(folder): 100 | try: 101 | os.makedirs(folder) 102 | except OSError: 103 | pass 104 | 105 | 106 | _IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg', 'bmp', 'PNG', 'JPG', 'JPEG', 'BMP', 'tif', 'TIFF', 'tiff', 'TIF'] 107 | 108 | 109 | def _is_in_extensions(filename, extensions): 110 | 111 | parts = filename.split('.') 112 | if len(parts) > 1: 113 | ext = parts[-1] 114 | else: 115 | ext = '' 116 | 117 | if ext in extensions or '*' in extensions: 118 | return True 119 | else: 120 | return False 121 | 122 | 123 | def list_subfolders(inputfolder, cur_subfolder=None, subfolders=None, extensions=None): 124 | if extensions is None: 125 | # global IMAGE_EXTENSIONS 126 | extensions = _IMAGE_EXTENSIONS 127 | 128 | if 
cur_subfolder is None: 129 | cur_subfolder = '' 130 | 131 | if subfolders is None: 132 | subfolders = [] 133 | 134 | files = os.listdir(inputfolder + cur_subfolder) 135 | 136 | for path in files: 137 | if os.path.isdir(inputfolder + cur_subfolder + path): 138 | if path[-1] != '/': 139 | path = path + '/' 140 | subfolders = list_subfolders(inputfolder, cur_subfolder=cur_subfolder + path, subfolders=subfolders, 141 | extensions=extensions) 142 | else: 143 | if _is_in_extensions(path, extensions): 144 | subfolders.append(cur_subfolder + path) 145 | 146 | return subfolders 147 | 148 | 149 | ################################################### 150 | 151 | 152 | class Surface(object): 153 | 154 | def __init__(self, filename=None, **kwargs): 155 | 156 | self.x = None # list of points 157 | self.y = None # list of points 158 | self.z = None # list of points 159 | 160 | self.filename = filename 161 | 162 | if filename is not None: 163 | self.read_from_file(filename, **kwargs) 164 | 165 | def read_from_file(self, filename, voxel_size=1): 166 | if os.path.exists(filename): 167 | stat = pd.read_csv(filename, sep='\t', index_col=0) 168 | 169 | if 'X' in stat.columns and 'Y' in stat.columns and 'Z' in stat.columns: 170 | self.x = np.array(stat.X) 171 | self.y = np.array(stat.Y) 172 | self.z = np.array(stat.Z) 173 | 174 | else: 175 | stat = pd.read_csv(filename, sep=',', header=None) 176 | px = voxel_size 177 | if 0 in stat.columns and 1 in stat.columns and 2 in stat.columns: 178 | self.x = np.array(stat[0]) * px 179 | self.y = np.array(stat[1]) * px 180 | self.z = np.array(stat[2]) * px 181 | 182 | def save_as_stack(self, filename, voxel_size): 183 | voxel_size = np.array([voxel_size]).flatten() 184 | if len(voxel_size) == 1: 185 | voxel_size = np.ones(3) * voxel_size 186 | make_folders([os.path.dirname(filename)]) 187 | img = self.as_stack(voxel_size) 188 | with warnings.catch_warnings(): 189 | warnings.simplefilter("ignore") 190 | io.imsave(filename, img.astype(np.uint8)) 191 | metadata = pd.Series({'voxel_size_xy': voxel_size[2], 'voxel_size_z': voxel_size[0]}) 192 | metadata.to_csv(filename[:-4] + '.txt', sep='\t') 193 | 194 | def as_stack(self, voxel_size, minmax=None): 195 | if minmax is not None: 196 | minmax = [minmax[0] / voxel_size[0], 197 | minmax[1] / voxel_size[1], 198 | minmax[2] / voxel_size[2]] 199 | img = as_stack(np.array(self.x) / voxel_size[2], 200 | np.array(self.y) / voxel_size[1], 201 | np.array(self.z) / voxel_size[0], minmax=minmax) 202 | return img 203 | 204 | 205 | ################################################### 206 | 207 | def convert_to_tiff(**kwargs): 208 | inputfolder = kwargs.get('inputfolder') 209 | outputfolder = kwargs.get('outputfolder', inputfolder + '../stacks/') 210 | filename = kwargs.get('item') 211 | 212 | surface = Surface(filename=inputfolder + filename) 213 | surface.save_as_stack(outputfolder + filename + '.tif', voxel_size=kwargs.get('voxel_size')) 214 | 215 | 216 | def convert_parallel(**kwargs): 217 | files = list_subfolders(kwargs.get('inputfolder'), extensions=['*']) 218 | 219 | if kwargs.get('debug'): 220 | kwargs['item'] = files[0] 221 | kwargs.get('process')(**kwargs) 222 | else: 223 | kwargs['items'] = files 224 | run_parallel(**kwargs) 225 | 226 | if __name__ == '__main__': 227 | 228 | args = sys.argv[1:] 229 | if args[0] != 'test': 230 | if len(args) == 3: 231 | convert_parallel(process=convert_to_tiff, inputfolder=args[0], 232 | outputfolder=args[1], max_threads=6, voxel_size=float(args[2])) 233 | 234 | 
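# Example invocation (a sketch; the folder names are placeholders):
#
#     python convert_surface_to_tiff.py surfaces/ stacks/ 0.5
#
# This converts every file found under surfaces/ into a TIFF stack in stacks/,
# using an isotropic voxel size of 0.5 (in the same units as the surface coordinates).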
-------------------------------------------------------------------------------- /SPHARM/scripts/plot_maxprojections.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import mkl 4 | mkl.set_num_threads(1) 5 | 6 | import sys 7 | import os 8 | import numpy as np 9 | 10 | from skimage import io 11 | from helper_lib import filelib 12 | from skimage.exposure import rescale_intensity 13 | 14 | 15 | ################################# 16 | args = sys.argv[1:] 17 | if len(args) > 0: 18 | path = args[0] 19 | if path != 'test': 20 | if not path.endswith('/'): 21 | path += '/' 22 | 23 | kwargs = {'max_threads': 50, 'voxel_size': 1} 24 | groups = os.listdir(path + 'raw/') 25 | for gr in groups: 26 | samples = os.listdir(path + 'raw/' + gr) 27 | for sample in samples: 28 | files = os.listdir(path + 'raw/' + gr + '/' + sample) 29 | print(path + 'raw/' + gr + '/' + sample + '/' + files[0]) 30 | img = io.imread(path + 'raw/' + gr + '/' + sample + '/' + files[0]) 31 | ind = -1 32 | for j in range(len(img[0])): 33 | if np.max(img[0, j]) > 0: 34 | ind = j 35 | img = img[:, ind] 36 | per = np.percentile(img, 99.95) 37 | for i in range(len(img)): 38 | img[i][np.where(img[i] > per)] = per 39 | maxproj = np.max(img[i], axis=0) 40 | filename = path + 'raw_maxprojection_xy/' + gr + '/' + sample + '/' + files[0][:-4] + '%03d.tif' % i 41 | filelib.make_folders([os.path.dirname(filename)]) 42 | if maxproj.max() > 255: 43 | maxproj = rescale_intensity(maxproj, out_range=(0, 255)) 44 | io.imsave(filename, maxproj.astype(np.uint8)) 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /SPHARM/scripts/plot_surfaces.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import sys 5 | import pandas as pd 6 | from SPHARM.classes.surface import Surface 7 | from helper_lib import filelib 8 | from mayavi import mlab 9 | 10 | 11 | ################################# 12 | args = sys.argv[1:] 13 | if len(args) > 0: 14 | path = args[0] 15 | if path != 'test': 16 | if not path.endswith('/'): 17 | path += '/' 18 | path += 'output/' 19 | 20 | track_stat = pd.DataFrame() 21 | summary_stat = pd.DataFrame() 22 | groups = os.listdir(path + 'surfaces/') 23 | for gr in groups: 24 | print(gr) 25 | files = os.listdir(path + 'surfaces/' + gr + '/') 26 | for fn in files: 27 | stat = pd.read_csv(path + 'surfaces/' + gr + '/' + fn, sep='\t', index_col=0) 28 | t1 = stat['Time'].unique()[0] 29 | stat = stat[stat['Time'] == t1] 30 | print(fn, len(stat)) 31 | surface = Surface(data=stat) 32 | mesh = surface.plot_points(scale_factor=0.2) 33 | filelib.make_folders([os.path.dirname(path + 'surface_plots/' + gr + '_' + fn[:-4])]) 34 | mesh.save(path + 'surface_plots/' + gr + '_' + fn[:-4] + '_init.png', size=(100, 100)) 35 | mlab.clf() 36 | 37 | surface.centrate() 38 | surface.to_spherical() 39 | surface.compute_spharm(grid_size=120, normalize=True) 40 | mesh = surface.plot_surface(points=False) 41 | filelib.make_folders([os.path.dirname(path + 'surface_plots/' + gr + '_' + fn[:-4])]) 42 | mesh.save(path + 'surface_plots/' + 
gr + '_' + fn[:-4] + '_inverse_lmax=10.png', size=(100, 100)) 49 | mlab.clf() 50 | 51 | surface.inverse_spharm(lmax=None) 52 | mesh = surface.plot_surface(points=False) 53 | filelib.make_folders([os.path.dirname(path + 'surface_plots/' + gr + '_' + fn[:-4])]) 54 | mesh.save(path + 'surface_plots/' + gr + '_' + fn[:-4] + '_inverse.png', size=(100, 100)) 55 | mlab.clf() 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /SPHARM/scripts/plot_surfaces_from_stacks.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | 3 | import os 4 | import sys 5 | import pandas as pd 6 | import numpy as np 7 | from SPHARM.classes.surface import Surface 8 | from SPHARM.classes.image_stack import ImageStack 9 | from helper_lib import filelib 10 | from mayavi import mlab 11 | 12 | 13 | ################################# 14 | args = sys.argv[1:] 15 | if len(args) > 0: 16 | path = args[0] 17 | if path != 'test': 18 | if not path.endswith('/'): 19 | path += '/' 20 | path += 'output/' 21 | 22 | track_stat = pd.DataFrame() 23 | summary_stat = pd.DataFrame() 24 | groups = os.listdir(path + 'stacks/') 25 | for gr in groups: 26 | print(gr) 27 | files = os.listdir(path + 'stacks/' + gr + '/') 28 | for fn in files: 29 | print(fn) 30 | stack = ImageStack(path + 'stacks/' + gr + '/' + fn) 31 | stack.filename = fn 32 | stack.extract_surfaces(path + 'surfaces/' + gr + '/', voxel_size=0.3) 33 | 34 | groups = os.listdir(path + 'surfaces/') 35 | for gr in groups: 36 | print(gr) 37 | files = os.listdir(path + 'surfaces/' + gr + '/') 38 | files.sort() 39 | for fn in files: 40 | print(fn) 41 | surface = Surface(filename=path + 'surfaces/' + gr + '/' + fn, voxel_size=0.3) 42 | mesh = mlab.points3d(surface.x, surface.y, surface.z, surface.x, scale_mode='none', 43 | scale_factor=0.5, mode='sphere', colormap='jet').scene 44 | mesh.background = (1, 1, 1) 45 | mesh.magnification = 10 46 | filelib.make_folders([os.path.dirname(path + 'surface_plots/' + gr + '_' + fn[:-4])]) 47 | mesh.save(path + 'surface_plots/' + gr + '_' + fn[:-4] + '_init.png', size=(100, 100)) 48 | mesh = mlab.points3d(surface.x, surface.y, surface.z, surface.x, scale_mode='none', 49 | scale_factor=0.5, mode='sphere', colormap='gray').scene 50 | mesh.background = (1, 1, 1) 51 | mesh.magnification = 10 52 | 53 | mlab.clf() 54 | groups = os.listdir(path + 'surfaces/') 55 | for gr in groups: 56 | print(gr) 57 | files = os.listdir(path + 'surfaces/' + gr + '/') 58 | files.sort() 59 | for fn in files: 60 | print(fn) 61 | surface = Surface(filename=path + 'surfaces/' + gr + '/' + fn, voxel_size=0.3) 62 | mesh = mlab.points3d(surface.x, surface.y, surface.z, surface.z, scale_mode='none', 63 | scale_factor=0.5, mode='sphere', colormap='jet', 64 | extent=[-100, 100, -100, 100, -100, 100]).scene 65 | mesh.background = (1, 1, 1) 66 | mesh.magnification = 10 67 | filelib.make_folders([os.path.dirname(path + 'surface_plots/' + gr + '_' + fn[:-4])]) 68 | mesh.save(path + 'surface_plots/' + gr + '_' + fn[:-4] + '_init.png', size=(100, 100)) 69 | mlab.clf() 70 | 71 | 72 | -------------------------------------------------------------------------------- /SPHARM/tests/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/tests/__init__.py -------------------------------------------------------------------------------- /SPHARM/tests/data/LN_deconv_set4_Detailed.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/tests/data/LN_deconv_set4_Detailed.xlsx -------------------------------------------------------------------------------- /SPHARM/tests/data/surfaces/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00004.csv: -------------------------------------------------------------------------------- 1 | X Y Z Cell_ID Image_name 2 | 0 142 37 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 3 | 1 143 37 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 4 | 2 141 38 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 5 | 3 144 38 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 6 | 4 141 39 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 7 | 5 144 39 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 8 | 6 141 40 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 9 | 7 145 40 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 10 | 8 142 41 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 11 | 9 144 41 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 12 | 10 142 42 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 13 | 11 143 42 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 14 | 12 140 43 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 15 | 13 141 43 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 16 | 14 143 43 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 17 | 15 139 44 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 18 | 16 144 44 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 19 | 17 139 45 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 20 | 18 144 45 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 21 | 19 139 46 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 22 | 20 144 46 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 23 | 21 137 47 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 24 | 22 138 47 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 25 | 23 143 47 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 26 | 24 136 48 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 27 | 25 142 48 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 28 | 26 135 49 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 29 | 27 142 49 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 30 | 28 136 50 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 31 | 29 143 50 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] 
_C2_Time Time0.tif 32 | 30 137 51 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 33 | 31 143 51 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 34 | 32 137 52 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 35 | 33 143 52 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 36 | 34 137 53 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 37 | 35 142 53 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 38 | 36 138 54 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 39 | 37 142 54 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 40 | 38 138 55 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 41 | 39 142 55 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 42 | 40 139 56 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 43 | 41 140 56 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 44 | 42 141 56 0 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 45 | 43 142 38 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 46 | 44 143 38 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 47 | 45 142 39 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 48 | 46 143 39 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 49 | 47 144 39 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 50 | 48 142 40 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 51 | 49 143 40 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 52 | 50 144 40 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 53 | 51 142 41 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 54 | 52 143 41 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 55 | 53 144 41 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 56 | 54 142 43 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 57 | 55 140 44 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 58 | 56 141 44 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 59 | 57 142 44 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 60 | 58 143 44 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 61 | 59 140 45 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 62 | 60 141 45 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 63 | 61 142 45 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 64 | 62 143 45 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 65 | 63 144 45 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 66 | 64 140 46 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 67 | 65 141 46 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 68 | 66 142 46 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 69 | 67 143 46 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 
70 | 68 138 47 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 71 | 69 139 47 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 72 | 70 140 47 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 73 | 71 141 47 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 74 | 72 142 47 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 75 | 73 136 48 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 76 | 74 137 48 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 77 | 75 138 48 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 78 | 76 139 48 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 79 | 77 140 48 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 80 | 78 141 48 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 81 | 79 136 49 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 82 | 80 137 49 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 83 | 81 139 49 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 84 | 82 140 49 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 85 | 83 141 49 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 86 | 84 136 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 87 | 85 137 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 88 | 86 138 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 89 | 87 139 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 90 | 88 140 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 91 | 89 141 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 92 | 90 142 50 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 93 | 91 138 51 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 94 | 92 139 51 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 95 | 93 141 51 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 96 | 94 142 51 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 97 | 95 138 52 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 98 | 96 141 52 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 99 | 97 142 52 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 100 | 98 138 53 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 101 | 99 141 53 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 102 | 100 142 53 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 103 | 101 138 54 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 104 | 102 139 54 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 105 | 103 141 54 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 106 | 104 139 55 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 107 | 105 140 55 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 108 | 
106 141 55 1 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 109 | 107 138 49 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 110 | 108 140 51 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 111 | 109 139 52 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 112 | 110 140 52 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 113 | 111 139 53 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 114 | 112 140 53 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 115 | 113 140 54 2 4 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 116 | -------------------------------------------------------------------------------- /SPHARM/tests/data/surfaces/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0_Cell00007.csv: -------------------------------------------------------------------------------- 1 | X Y Z Cell_ID Image_name 2 | 0 480 44 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 3 | 1 478 45 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 4 | 2 479 45 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 5 | 3 481 45 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 6 | 4 478 46 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 7 | 5 481 46 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 8 | 6 478 47 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 9 | 7 481 47 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 10 | 8 478 48 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 11 | 9 479 48 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 12 | 10 480 48 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 13 | 11 474 49 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 14 | 12 475 49 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 15 | 13 476 49 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 16 | 14 477 49 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 17 | 15 478 49 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 18 | 16 467 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 19 | 17 468 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 20 | 18 469 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 21 | 19 470 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 22 | 20 471 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 23 | 21 472 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 24 | 22 473 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 25 | 23 474 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 26 | 24 478 50 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 27 | 25 466 51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 28 | 26 472 51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 29 | 27 473 
51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 30 | 28 474 51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 31 | 29 475 51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 32 | 30 476 51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 33 | 31 477 51 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 34 | 32 466 52 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 35 | 33 472 52 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 36 | 34 465 53 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 37 | 35 466 53 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 38 | 36 470 53 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 39 | 37 471 53 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 40 | 38 465 54 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 41 | 39 466 54 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 42 | 40 467 54 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 43 | 41 468 54 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 44 | 42 469 54 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 45 | 43 471 54 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 46 | 44 465 55 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 47 | 45 466 55 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 48 | 46 468 55 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 49 | 47 465 56 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 50 | 48 468 56 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 51 | 49 465 57 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 52 | 50 466 57 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 53 | 51 467 57 0 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 54 | 52 479 45 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 55 | 53 480 45 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 56 | 54 479 46 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 57 | 55 480 46 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 58 | 56 481 46 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 59 | 57 479 47 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 60 | 58 480 47 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 61 | 59 481 47 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 62 | 60 479 48 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 63 | 61 480 48 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 64 | 62 474 49 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 65 | 63 475 49 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 66 | 64 476 49 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 67 | 65 477 49 1 7 
data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 68 | 66 475 50 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 69 | 67 476 50 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 70 | 68 477 50 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 71 | 69 467 51 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 72 | 70 468 51 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 73 | 71 469 51 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 74 | 72 470 51 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 75 | 73 471 51 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 76 | 74 467 52 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 77 | 75 468 52 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 78 | 76 469 52 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 79 | 77 470 52 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 80 | 78 471 52 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 81 | 79 467 53 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 82 | 80 468 53 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 83 | 81 469 53 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 84 | 82 470 53 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 85 | 83 471 53 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 86 | 84 472 53 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 87 | 85 471 54 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 88 | 86 472 54 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 89 | 87 467 55 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 90 | 88 466 56 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 91 | 89 467 56 1 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 92 | 90 471 54 2 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 93 | 91 471 55 2 7 data/input_data/Doc6_12-59-18_PMT - PMT [PMT3_ 593-40] _C2_Time Time0.tif 94 | -------------------------------------------------------------------------------- /SPHARM/tests/data/track_files/LN/LN_deconv_set4_Detailed.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/applied-systems-biology/Dynamic_SPHARM/2588d3632e9b3d99486a75bfb50d21fc7a7c8070/SPHARM/tests/data/track_files/LN/LN_deconv_set4_Detailed.xlsx -------------------------------------------------------------------------------- /SPHARM/tests/test_ellipsoid.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import os 4 | import numpy as np 5 | import shutil 6 | from ddt import ddt, data 7 | 8 | from SPHARM.classes.ellipsoid import Ellipsoid 9 | 10 | 11 | @ddt 12 | class TestEllipsoidClass(unittest.TestCase): 13 | 14 | @data( 15 | ([10, 10], [1, 3, 2], (0, 0)), 16 | ([10, 30], [3, 3, 2], (np.pi/4, 0)), 17 | ([100, 100], [1, 3, 2], (np.pi/6, np.pi/4)) 18 | ) 19 | def test_generate(self, case): 20 | grid_shape, size, rotation = case 
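# each ddt case provides the grid shape, the three semi-axis sizes, and two rotation angles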
21 |         ellipsoid = Ellipsoid(grid_shape=grid_shape, size=size, rotation=rotation)
22 |         ellipsoid.profile_xy().save(outputfile='data/test_data/ellipsoid_test/' + ellipsoid.name + '_xy.png')
23 |         ellipsoid.profile_xz().save(outputfile='data/test_data/ellipsoid_test/' + ellipsoid.name + '_xz.png')
24 |         ellipsoid.profile_yz().save(outputfile='data/test_data/ellipsoid_test/' + ellipsoid.name + '_yz.png')
25 |         self.assertEqual(os.path.exists('data/test_data/ellipsoid_test/' + ellipsoid.name + '_xy.png'), True)
26 |         self.assertEqual(os.path.exists('data/test_data/ellipsoid_test/' + ellipsoid.name + '_xz.png'), True)
27 |         self.assertEqual(os.path.exists('data/test_data/ellipsoid_test/' + ellipsoid.name + '_yz.png'), True)
28 |         shutil.rmtree('data/test_data/')
29 | 
30 |     def test_save_as_stack(self):
31 |         ellipsoid = Ellipsoid(grid_shape=[100, 100], size=[3, 3, 2], rotation=(np.pi/6, np.pi/4))
32 |         ellipsoid.save_as_stack(filename='data/test_data/ellipsoid_test/' + ellipsoid.name + '.tif', voxel_size=0.5)
33 |         self.assertEqual(os.path.exists('data/test_data/ellipsoid_test/' + ellipsoid.name + '.tif'), True)
34 |         self.assertEqual(os.path.exists('data/test_data/ellipsoid_test/' + ellipsoid.name + '.txt'), True)
35 |         shutil.rmtree('data/test_data/')
36 | 
37 | 
38 | if __name__ == '__main__':
39 |     unittest.main()
40 | 
41 | 
42 | 
43 | 
44 | 
45 | 
46 | 
47 | 
48 | 
49 | 
50 | 
51 | 
52 | 
53 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_image_stack.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import os
4 | import numpy as np
5 | import shutil
6 | from ddt import ddt, data
7 | 
8 | from SPHARM.classes.image_stack import ImageStack
9 | 
10 | 
11 | @ddt
12 | class TestImageStackClass(unittest.TestCase):
13 | 
14 |     @data(
15 |         ('Doc17_19-42-58_PMT - PMT [PMT2_ 525-50] _C1_Time Time0.tif', 0, 1),
16 |         ('data/Doc17_19-42-58_PMT - PMT [PMT3_ 593-40] _C2_Time Time18.tif', 18, 2)
17 |     )
18 |     def test_parse_filename(self, case):
19 |         fn, tp, ch = case
20 |         path = 'data/test_data/SPHARM/tests/'
21 |         img = ImageStack(filename=path + fn, load=False)
22 |         self.assertEqual(img.timepoint, tp)
23 |         self.assertEqual(img.channel, ch)
24 | 
25 |     def test_load_and_save(self):
26 |         fn = 'data/test_data/test_stack.tif'
27 |         img = ImageStack(filename=fn, load=False)
28 |         img.data = np.ones([10, 100, 100])
29 |         img.save(filename=fn)
30 |         self.assertEqual(os.path.exists(fn), True)
31 | 
32 |         img = ImageStack(filename=fn)
33 |         self.assertIsInstance(img.data, np.ndarray)
34 |         shutil.rmtree('data/test_data/')
35 | 
36 |     def test_save_maxproj(self):
37 |         fn = 'data/test_data/test_stack.tif'
38 |         img = ImageStack(filename=fn, load=False)
39 |         img.data = np.zeros([10, 100, 100])
40 |         img.data[2:7, 10:-10, 10:-10] = 50
41 |         img.save_max_projection(filename=fn)
42 |         self.assertEqual(os.path.exists(fn), True)
43 |         shutil.rmtree('data/test_data/')
44 | 
45 |     def test_extract_surfaces(self):
46 |         fn = 'data/test_data/test_stack.tif'
47 |         img = ImageStack(filename=fn, load=False)
48 |         img.data = np.zeros([10, 100, 100])
49 |         img.data[2:7, 10:-10, 10:-10] = 50
50 |         voxel_size = [4, 0.3824, 0.3824]
51 |         img.extract_surfaces('data/test_data/surfaces/',
52 |                              voxel_size=voxel_size, reconstruct=False)
53 |         img.extract_surfaces('data/test_data/surfaces_reconstructed/',
54 |                              voxel_size=voxel_size, reconstruct=True)
55 |         files = os.listdir('data/test_data/surfaces_reconstructed/')
56 |         self.assertEqual(len(files), 1)
57 |         files = os.listdir('data/test_data/surfaces/')
58 |         self.assertEqual(len(files), 1)
59 |         shutil.rmtree('data/test_data/')
60 | 
61 | 
62 | if __name__ == '__main__':
63 |     unittest.main()
64 | 
65 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_moving_surface.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import os
4 | import shutil
5 | import numpy as np
6 | from ddt import ddt
7 | 
8 | from SPHARM.classes.surface import Surface
9 | from SPHARM.classes.moving_surface import MovingSurface
10 | 
11 | 
12 | @ddt
13 | class TestMovingSurfaceClass(unittest.TestCase):
14 | 
15 |     def test_add_spectrum_and_plotting(self):
16 |         ms = MovingSurface()
17 | 
18 |         surf = Surface(grid=np.ones([10, 10]))
19 |         for i in range(3):
20 |             ms.add_surface(surf, timepoint=i*20)
21 | 
22 |         surf = Surface(grid=np.ones([10, 10]))
23 |         surf.Rgrid[3:4, 4:8] = 10
24 |         for i in range(2):
25 |             ms.add_surface(surf, timepoint=i*20+60)
26 | 
27 |         surf = Surface(grid=np.ones([10, 10]))
28 |         for i in range(3):
29 |             ms.add_surface(surf, timepoint=i*20+100)
30 | 
31 |         self.assertEqual(len(ms.timespectrum.spectra), 0)
32 |         ms.compute_timespectrum(gridsize=10)
33 |         self.assertEqual(len(ms.timespectrum.spectra), 8)
34 | 
35 |         ms.plot_surfaces('data/test_data/surfaces/')
36 |         files = os.listdir('data/test_data/surfaces/')
37 |         self.assertEqual(len(files), 8)
38 | 
39 |         ms.plot_max_projections(outputfolder='data/test_data/maxprojections/', voxel_size=0.1)
40 |         files = os.listdir('data/test_data/maxprojections/')
41 |         self.assertEqual(len(files), 24)
42 | 
43 |         shutil.rmtree('data/test_data/')
44 | 
45 | 
46 | if __name__ == '__main__':
47 |     unittest.main()
48 | 
49 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_orbital.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | from ddt import ddt, data
5 | 
6 | from SPHARM.classes.orbital import Orbital
7 | 
8 | 
9 | @ddt
10 | class TestOrbitalClass(unittest.TestCase):
11 | 
12 |     @data(
13 |         ([50, 50], [0], [0], [5]),
14 |         ([50, 50], [2], [0], [5]),
15 |         ([50, 50], [2], [-1], [5]),
16 |         ([50, 50], [2], [2], [5]),
17 |     )
18 |     def test_generate(self, case):
19 |         grid_shape, m, n, amplitude = case
20 |         orbital = Orbital(grid_shape=grid_shape, m=m, n=n, amplitude=amplitude)
21 |         orbital.compute_spharm()
22 |         data = orbital.spharm.harmonics_csv
23 |         data['amplitude'] = np.array(data['amplitude'])/data['amplitude'].max()
24 |         for i in range(len(data)):
25 |             cur_m = data.iloc[i]['degree']
26 |             cur_n = data.iloc[i]['order']
27 |             if cur_m == m[0] and cur_n == n[0]:
28 |                 self.assertAlmostEqual((data.iloc[i]['amplitude']), 1, 10)
29 |             else:
30 |                 self.assertAlmostEqual((data.iloc[i]['amplitude']), 0, 10)
31 | 
32 | 
33 | if __name__ == '__main__':
34 |     unittest.main()
35 | 
36 | 
37 | 
38 | 
39 | 
40 | 
41 | 
42 | 
43 | 
44 | 
45 | 
46 | 
47 | 
48 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_spectrum.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import os
4 | import numpy as np
5 | import shutil
6 | from ddt import ddt, data
7 | 
8 | from SPHARM.classes.spectrum import Spectrum
9 | 
10 | 
11 | @ddt
12 | class TestSpectrumClass(unittest.TestCase):
13 | 
14 |     @data(
15 |         np.ones([10, 10]),
16 |         np.ones([10, 20])
17 |     )
18 |     def test_from_surface(self, case):
19 |         sp = Spectrum()
20 |         harm = sp.from_surface(surface=case)
21 |         self.assertEqual(tuple(harm.shape), (2, case.shape[0] // 2, case.shape[0] // 2))
22 | 
23 |     @data(
24 |         np.ones([11, 11]),
25 |         np.ones([11, 13]),
26 |         np.ones([10, 12])
27 |     )
28 |     def test_from_surface_errors(self, case):
29 |         sp = Spectrum()
30 |         self.assertRaises(ValueError, sp.from_surface, surface=case)
31 | 
32 |     @data(
33 |         np.ones([10, 10]),
34 |         np.ones([10, 20])
35 |     )
36 |     def test_from_surface_norm(self, case):
37 |         sp = Spectrum()
38 |         harm = sp.from_surface(surface=case, normalize=True, normalization_method='zero-component')
39 |         self.assertEqual(np.max(harm), 1)
40 | 
41 |     @data(
42 |         np.ones([10, 10]),
43 |         np.ones([10, 20])
44 |     )
45 |     def test_from_surface_norm2(self, case):
46 |         sp = Spectrum()
47 |         harm = sp.from_surface(surface=case, normalize=True, normalization_method='mean-radius')
48 |         self.assertAlmostEqual(abs(np.max(harm)), 1, 8)
49 | 
50 |     def test_conversion(self):
51 |         sp = Spectrum()
52 |         harm_shtools = sp.from_surface(surface=np.ones([10, 10]))
53 |         sp.convert_to_csv()
54 |         harm_shtools2 = sp.convert_to_shtools_array()
55 |         self.assertEqual(np.sum(abs(harm_shtools - harm_shtools2)), 0)
56 | 
57 |     def test_saving(self):
58 |         sp = Spectrum()
59 |         sp.from_surface(surface=np.ones([10, 10]))
60 |         sp.convert_to_csv()
61 |         sp.save_to_csv(filename='data/test_data/spectrum.csv')
62 |         sp2 = Spectrum(filename='data/test_data/spectrum.csv')
63 |         self.assertAlmostEqual(np.sum(abs(sp.harmonics_shtools - sp2.harmonics_shtools)), 0, 15)
64 |         shutil.rmtree('data/test_data/')
65 | 
66 |     def test_frequency_spectrum(self):
67 |         sp = Spectrum()
68 |         sp.from_surface(surface=np.ones([10, 10]))
69 |         sp.compute_frequency_spectrum()
70 |         self.assertEqual(len(sp.frequency_spectrum), 5)
71 | 
72 |     def test_heatmap(self):
73 |         sp = Spectrum()
74 |         sp.from_surface(surface=np.ones([10, 10]))
75 |         pl = sp.heatmap()
76 |         os.makedirs('data/test_data')
77 |         pl.savefig('data/test_data/heatmap.png')
78 |         self.assertEqual(os.path.exists('data/test_data/heatmap.png'), True)
79 |         shutil.rmtree('data/test_data/')
80 | 
81 |     def test_frequency_plot(self):
82 |         sp = Spectrum(name='Example 1')
83 |         sp.from_surface(surface=np.ones([10, 10]))
84 |         pl = sp.frequency_plot()
85 |         os.makedirs('data/test_data')
86 |         pl.savefig('data/test_data/frequency_plot.png')
87 |         self.assertEqual(os.path.exists('data/test_data/frequency_plot.png'), True)
88 |         shutil.rmtree('data/test_data/')
89 | 
90 |     def test_inverse_transform(self):
91 |         sp = Spectrum()
92 |         surf = np.ones([10, 10])
93 |         sp.from_surface(surface=surf)
94 |         grid = sp.spharm_to_surface()
95 |         self.assertAlmostEqual(np.sum(np.abs(surf - grid)), 0, 7)
96 | 
97 |     def test_feature_vector(self):
98 |         sp = Spectrum()
99 |         surf = np.ones([10, 10])
100 |         sp.from_surface(surface=surf)
101 |         self.assertEqual(len(sp.return_feature_vector(cutoff=3)), 4)
102 | 
103 | 
104 | if __name__ == '__main__':
105 |     unittest.main()
106 | 
107 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_spharm.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import os
4 | import pandas as pd
5 | from ddt import ddt
6 | import shutil
7 | import site
8 | 
9 | from SPHARM.lib import spharm
10 | import SPHARM.lib.parallel as prl
11 | 
12 | 
13 | @ddt
14 | class TestSPHARM(unittest.TestCase):
15 | 
16 |     def test_spharm_batch(self):
17 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
18 |         prl.run_parallel(process=spharm.compute_spharm, inputfolder=path + 'data/surfaces/',
19 |                          outputfolder='data/test_data/spharm_test/spharm/',
20 |                          grid_size=10, extensions=['csv'], print_progress=False)
21 |         files = os.listdir('data/test_data/spharm_test/spharm/')
22 |         self.assertEqual(len(files), 9)
23 |         data = pd.read_csv('data/test_data/spharm_test/spharm.csv', sep='\t')
24 |         self.assertEqual('Time' in data.columns, True)
25 |         self.assertEqual('TrackID' in data.columns, True)
26 |         shutil.rmtree('data/test_data/')
27 | 
28 |     def test_spharm_batch2(self):
29 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
30 |         prl.run_parallel(process=spharm.compute_spharm, inputfolder=path + 'data/synthetic_cells/',
31 |                          outputfolder='data/test_data/spharm_test/spharm/', extensions=['*'],
32 |                          grid_size=10, print_progress=False)
33 |         files = os.listdir('data/test_data/spharm_test/spharm/')
34 |         self.assertEqual(len(files), 6)
35 |         data = pd.read_csv('data/test_data/spharm_test/spharm.csv', sep='\t')
36 |         self.assertEqual('Time' in data.columns, True)
37 |         self.assertEqual('TrackID' in data.columns, True)
38 |         shutil.rmtree('data/test_data/')
39 | 
40 |     def test_convert_surfaces_batch(self):
41 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
42 |         prl.run_parallel(process=spharm.convert_surfaces, inputfolder=path + 'data/synthetic_cells/',
43 |                          outputfolder='data/test_data/synthetic_cells_surfaces/',
44 |                          extensions=['*'], print_progress=False)
45 |         files = os.listdir('data/test_data/synthetic_cells_surfaces/')
46 |         self.assertEqual(len(files), 6)
47 |         shutil.rmtree('data/test_data/')
48 | 
49 |     def test_convert_to_tiff_batch(self):
50 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
51 |         prl.run_parallel(process=spharm.convert_to_tiff, inputfolder=path + 'data/synthetic_cells/',
52 |                          outputfolder='data/test_data/synthetic_cells_tiff/',
53 |                          extensions=['*'], voxel_size=0.4, print_progress=False, combine=False)
54 |         files = os.listdir('data/test_data/synthetic_cells_tiff/')
55 |         self.assertEqual(len(files), 12)
56 |         shutil.rmtree('data/test_data/')
57 | 
58 | 
59 | if __name__ == '__main__':
60 |     unittest.main()
61 | 
62 | 
63 | 
64 | 
65 | 
66 | 
67 | 
68 | 
69 | 
70 | 
71 | 
72 | 
73 | 
74 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_stratified_group_shuffle_split.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | from ddt import ddt
5 | from sklearn import svm
6 | from sklearn.model_selection import cross_val_score
7 | 
8 | from SPHARM.classes.stratified_group_shuffle_split import GroupShuffleSplitStratified
9 | 
10 | 
11 | @ddt
12 | class TestCrossval(unittest.TestCase):
13 | 
14 |     def test_crossval(self):
15 | 
16 |         groups = np.array([1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,6,6,6,7,7,7,8,8,8, 9,9,9,9])
17 |         classes = np.array([0]*9 + [1]*9 + [2]*10)
18 |         features = np.random.rand(28,3)
19 |         clf = svm.SVC(kernel='linear', C=1, cache_size=1000, decision_function_shape='ovo', random_state=0)
20 |         cv = GroupShuffleSplitStratified(n_splits=5, test_size=3)
21 |         score = cross_val_score(clf, X=features, y=classes, groups=groups, cv=cv)
22 |         self.assertEqual(len(score), 5)
23 | 
24 | 
25 | if __name__ == '__main__':
26 |     unittest.main()
27 | 
28 | 
29 | 
30 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_surface.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | import shutil
5 | import os
6 | from ddt import ddt, data
7 | import warnings
8 | import site
9 | 
10 | from SPHARM.classes.surface import Surface
11 | from SPHARM.classes.image_stack import ImageStack
12 | import SPHARM.lib.transformation as tr
13 | 
14 | 
15 | @ddt
16 | class TestSurfaceClass(unittest.TestCase):
17 | 
18 |     @data(
19 |         '0, 0, -20, 0.216057\n-8, -2, -18, 0.216057\n'
20 |         '-8, 0, -18, 0.216057\n-8, 2, -18, 0.216057\n-6, -4, -18, 0.216057\n'
21 |         '-6, -2, -18, 0.216057\n-6, 0, -18, 0.216057\n-6, 2, -18, 0.216057\n',
22 |         ' X Y Z Name\n'
23 |         '0 0.0 0.0 -10.0 ../Data/SyntheticCells/input_test/case1/cells1473770615/ContourCell0_0.216056749\n'
24 |         '1 -4.0 -1.0 -9.0 ../Data/SyntheticCells/input_test/case1/cells1473770615/ContourCell0_0.216056749\n'
25 |         '2 -4.0 0.0 -9.0 ../Data/SyntheticCells/input_test/case1/cells1473770615/ContourCell0_0.216056749\n'
26 |         '3 -4.0 1.0 -9.0 ../Data/SyntheticCells/input_test/case1/cells1473770615/ContourCell0_0.216056749'
27 |     )
28 |     def test_init_and_save(self, coords):
29 |         os.makedirs('data/test_data')
30 |         f = open('data/test_data/surface.txt', 'w')
31 |         f.write(coords)
32 |         f.close()
33 |         surf = Surface(filename='data/test_data/surface.txt')
34 |         for coord in ['x', 'y', 'z']:
35 |             self.assertIsNotNone(surf.__dict__[coord])
36 |         shutil.rmtree('data/test_data/')
37 | 
38 |     def test_read_surface_and_save_as_stack(self):
39 |         img = ImageStack(filename='', load=False)
40 |         img.data = np.zeros([10, 100, 100])
41 |         img.data[2:7, 10:-10, 10:-10] = 1
42 |         voxel_size = [4, 0.3824, 0.3824]
43 |         img.extract_surfaces('data/test_data/surfaces/',
44 |                              voxel_size=voxel_size, reconstruct=False)
45 |         surf = Surface(filename='data/test_data/surfaces/_Cell00001.csv')
46 |         for coord in ['x', 'y', 'z']:
47 |             self.assertIsNotNone(surf.__dict__[coord])
48 | 
49 |         surf.save_as_stack(filename='data/test_data/stack.tif', voxel_size=0.5)
50 |         self.assertEqual(os.path.exists('data/test_data/stack.tif'), True)
51 |         self.assertEqual(os.path.exists('data/test_data/stack.txt'), True)
52 |         shutil.rmtree('data/test_data/')
53 | 
54 |     def test_transforms(self):
55 |         img = ImageStack(filename='', load=False)
56 |         img.data = np.zeros([10, 100, 100])
57 |         img.data[2:7, 10:-10, 10:-10] = 1
58 |         voxel_size = [4, 0.3824, 0.3824]
59 |         img.extract_surfaces('data/test_data/surfaces/',
60 |                              voxel_size=voxel_size, reconstruct=False)
61 |         surf = Surface(filename='data/test_data/surfaces/_Cell00001.csv')
62 |         surf.to_spherical()
63 |         x, y, z = tr.spherical_to_cart(surf.R, surf.theta, surf.phi)
64 |         self.assertAlmostEqual(np.sum(np.abs(surf.x - x)), 0, 7)
65 |         self.assertAlmostEqual(np.sum(np.abs(surf.y - y)), 0, 7)
66 |         self.assertAlmostEqual(np.sum(np.abs(surf.z - z)), 0, 7)
67 |         shutil.rmtree('data/test_data/')
68 | 
69 |     @data(
70 |         10,
71 |         13,
72 |         25
73 |     )
74 |     def test_interpolate(self, grid_size):
75 |         img = ImageStack(filename='', load=False)
76 |         img.data = np.zeros([10, 100, 100])
77 |         img.data[2:7, 10:-10, 10:-10] = 1
78 |         voxel_size = [4, 0.3824, 0.3824]
79 |         img.extract_surfaces('data/test_data/surfaces/',
80 |                              voxel_size=voxel_size, reconstruct=False)
81 |         surf = Surface(filename='data/test_data/surfaces/_Cell00001.csv')
82 |         surf.centrate()
83 |         surf.to_spherical()
84 |         grid = surf.interpolate(grid_size=grid_size)
85 |         self.assertEqual(len(grid), grid_size)
86 |         shutil.rmtree('data/test_data/')
87 | 
88 |     def test_spharm_transform(self):
89 |         img = ImageStack(filename='', load=False)
90 |         img.data = np.zeros([100, 100, 100])
91 |         img.data[48:52, 48:52, 48:52] = 1.
92 |         voxel_size = 0.3
93 |         img.extract_surfaces('data/test_data/surfaces/',
94 |                              voxel_size=voxel_size, reconstruct=True)
95 |         surf = Surface(filename='data/test_data/surfaces/_Cell00001.csv')
96 |         surf.centrate()
97 |         surf.to_spherical()
98 |         grid = surf.interpolate(grid_size=10)
99 |         surf.compute_spharm(grid_size=10)
100 |         ngrid = surf.inverse_spharm()
101 |         self.assertAlmostEqual(np.mean(np.abs(ngrid - grid)), 0, 1)
102 |         shutil.rmtree('data/test_data/')
103 | 
104 |     def test_spharm_transform_norm(self):
105 |         img = ImageStack(filename='', load=False)
106 |         img.data = np.zeros([100, 100, 100])
107 |         img.data[48:52, 48:52, 48:52] = 1.
108 |         voxel_size = 0.3
109 |         img.extract_surfaces('data/test_data/surfaces/',
110 |                              voxel_size=voxel_size, reconstruct=True)
111 |         surf = Surface(filename='data/test_data/surfaces/_Cell00001.csv')
112 |         surf.centrate()
113 |         surf.to_spherical()
114 |         grid = surf.interpolate(grid_size=10)
115 |         surf.compute_spharm(grid_size=10, normalize=True, normalization_method='mean-radius')
116 |         ngrid = surf.inverse_spharm()
117 |         grid = grid / np.mean(grid)
118 |         self.assertAlmostEqual(np.mean(np.abs(ngrid - grid)), 0, 1)
119 |         shutil.rmtree('data/test_data/')
120 | 
121 |     def test05_plot(self):
122 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
123 |         fn = path + 'data/synthetic_cell.txt'
124 |         surf = Surface(filename=fn)
125 |         with warnings.catch_warnings():
126 |             warnings.simplefilter("ignore")
127 |             mesh = surf.plot_points()
128 |         os.makedirs('data/test_data')
129 |         mesh.save('data/test_data/points_3D.png', size=(200, 200))
130 | 
131 |         surf.centrate()
132 |         surf.to_spherical()
133 |         surf.Rgrid = surf.interpolate(grid_size=100)
134 | 
135 |         with warnings.catch_warnings():
136 |             warnings.simplefilter("ignore")
137 |             mesh = surf.plot_surface(points=False)
138 |         mesh.save('data/test_data/surface_3D.png', size=(200, 200))
139 | 
140 |         with warnings.catch_warnings():
141 |             warnings.simplefilter("ignore")
142 |             mesh = surf.plot_surface(points=True)
143 |         mesh.save('data/test_data/surface_with_points_3D.png', size=(200, 200))
144 |         self.assertEqual(os.path.exists('data/test_data/surface_3D.png'), True)
145 |         self.assertEqual(os.path.exists('data/test_data/points_3D.png'), True)
146 |         self.assertEqual(os.path.exists('data/test_data/surface_with_points_3D.png'), True)
147 |         shutil.rmtree('data/test_data/')
148 | 
149 |     def test07_spharm_inverse_less(self):
150 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
151 |         fn = path + 'data/synthetic_cell.txt'
152 |         surf = Surface(filename=fn)
153 |         surf.centrate()
154 |         surf.to_spherical()
155 |         surf.compute_spharm(grid_size=100)
156 |         surf.inverse_spharm()
157 |         os.makedirs('data/test_data')
158 | 
159 |         with warnings.catch_warnings():
160 |             warnings.simplefilter("ignore")
161 |             surf.plot_surface(points=True).save('data/test_data/surface_inverse_all.png', size=(200, 200))
162 |             surf.inverse_spharm(lmax=30)
163 |             surf.plot_surface(points=True).save('data/test_data/surface_inverse_30.png', size=(200, 200))
164 |             surf.inverse_spharm(lmax=10)
165 |             surf.plot_surface(points=True).save('data/test_data/surface_inverse_10.png', size=(200, 200))
166 |         self.assertEqual(os.path.exists('data/test_data/surface_inverse_all.png'), True)
167 |         self.assertEqual(os.path.exists('data/test_data/surface_inverse_30.png'), True)
168 |         self.assertEqual(os.path.exists('data/test_data/surface_inverse_10.png'), True)
169 |         shutil.rmtree('data/test_data/')
170 | 
171 | 
172 | if __name__ == '__main__':
173 |     unittest.main()
174 | 
175 | 
176 | 
177 | 
178 | 
179 | 
180 | 
181 | 
182 | 
183 | 
184 | 
185 | 
186 | 
187 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_time_spectrum.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import os
4 | import shutil
5 | import numpy as np
6 | from ddt import ddt
7 | 
8 | from SPHARM.classes.spectrum import Spectrum
9 | from SPHARM.classes.time_spectrum import TimeSpectrum
10 | 
11 | 
12 | @ddt
13 | class TestTimeSpectrumClass(unittest.TestCase):
14 | 
15 |     def test_add_spectrum_and_plotting(self):
16 |         tsp = TimeSpectrum()
17 | 
18 |         sp = Spectrum()
19 |         sp.from_surface(surface=np.ones([10, 10]))
20 |         sp.convert_to_csv()
21 |         for i in range(3):
22 |             tsp.add_spectrum(sp, timepoint=i*20)
23 |         sp = Spectrum()
24 |         surf = np.ones([10, 10])
25 |         surf[3:4, 4:8] = 10
26 |         sp.from_surface(surface=surf)
27 |         sp.convert_to_csv()
28 |         for i in range(2):
29 |             tsp.add_spectrum(sp, timepoint=i*20+60)
30 | 
31 |         sp = Spectrum()
32 |         sp.from_surface(surface=np.ones([10, 10]))
33 |         sp.convert_to_csv()
34 |         for i in range(3):
35 |             tsp.add_spectrum(sp, timepoint=i*20+100)
36 | 
37 |         self.assertEqual(len(tsp.spectra), 8)
38 |         tsp.save_to_csv('data/test_data/time_spectrum.csv')
39 |         self.assertEqual(os.path.exists('data/test_data/time_spectrum.csv'), True)
40 |         pl = tsp.time_heatmap()
41 |         pl.savefig('data/test_data/time_heatmap.png')
42 |         self.assertEqual(os.path.exists('data/test_data/time_heatmap.png'), True)
43 | 
44 |         tsp.compute_derivative()
45 |         pl = tsp.derivative_heatmap()
46 |         pl.savefig('data/test_data/derivative_heatmap.png')
47 |         self.assertEqual(os.path.exists('data/test_data/derivative_heatmap.png'), True)
48 | 
49 |         pl = tsp.plot_mean_abs_derivative()
50 |         pl.savefig('data/test_data/mean_abs_derivative.png')
51 |         self.assertEqual(os.path.exists('data/test_data/mean_abs_derivative.png'), True)
52 |         shutil.rmtree('data/test_data/')
53 | 
54 |     def test_fourier(self):
55 |         sp = Spectrum()
56 |         sp.from_surface(surface=np.ones([10, 10]))
57 |         sp.convert_to_csv()
58 |         tsp = TimeSpectrum()
59 |         for i in range(10):
60 |             tsp.add_spectrum(sp)
61 |         tsp.fourier_analysis()
62 |         tsp.save_frequencies_to_csv('data/test_data/time_spectrum_freq.csv')
63 |         self.assertEqual(os.path.exists('data/test_data/time_spectrum_freq.csv'), True)
64 |         pl = tsp.frequency_heatmap()
65 |         pl.savefig('data/test_data/frequency_heatmap.png')
66 |         self.assertEqual(os.path.exists('data/test_data/frequency_heatmap.png'), True)
67 |         shutil.rmtree('data/test_data/')
68 | 
69 |     def test_feature_vector(self):
70 |         surf = np.ones([10, 10])
71 |         sp = Spectrum()
72 |         sp.from_surface(surface=surf)
73 |         sp.convert_to_csv()
74 | 
75 |         surf[3:4, 4:8] = 10
76 |         sp2 = Spectrum()
77 |         sp2.from_surface(surface=surf)
78 |         sp2.convert_to_csv()
79 | 
80 |         tsp = TimeSpectrum()
81 |         tsp.add_spectrum(sp)
82 |         tsp.add_spectrum(sp2)
83 |         tsp.add_spectrum(sp)
84 |         tsp.compute_derivative()
85 |         self.assertEqual(len(tsp.return_feature_vector(cutoff=2)), 3)
86 | 
87 | 
88 | if __name__ == '__main__':
89 |     unittest.main()
90 | 
91 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_transformation.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import numpy as np
4 | from ddt import ddt, data
5 | import SPHARM.lib.transformation as tr
6 | 
7 | 
8 | @ddt
9 | class TestTransformation(unittest.TestCase):
10 | 
11 |     @data(
12 |         ([2, 908, -234, 0.3], [654, -6, -64, 2], [-32, 243, 24, -22]),
13 |     )
14 |     def test_spherical_transform(self, coords):
15 |         x, y, z = coords
16 |         r, theta, phi = tr.cart_to_spherical(x, y, z)
17 |         x1, y1, z1 = tr.spherical_to_cart(r, theta, phi)
18 |         self.assertAlmostEqual(sum(abs(x - x1)), 0, 10)
19 |         self.assertAlmostEqual(sum(abs(y - y1)), 0, 10)
20 |         self.assertAlmostEqual(sum(abs(z - z1)), 0, 10)
21 | 
22 |     @data(
23 | 
24 |         (1, 0, 0),
25 |         (0, 1, 0),
26 |         (0, 0, 1),
27 |         (1, 1, 3)
28 |     )
29 |     def test_rotation_single(self, coords):
30 |         x, y, z = coords
31 |         r, theta, phi = tr.cart_to_spherical(x, y, z)
32 |         x, y, z = tr.rotate_spherical(x, y, z, -theta, -(phi - np.pi))
33 |         self.assertAlmostEqual(x, 0, 10)
34 |         self.assertAlmostEqual(y, 0, 10)
35 |         self.assertGreater(z, 0)
36 | 
37 |     @data(
38 | 
39 |         (0, np.pi/2, [[0, 0, -2, 1.9, 0, 0],
40 |                       [1, -0.9, 0, 0, 0, 0],
41 |                       [0, 0, 0, 0, 3, -2.9]]),
42 |     )
43 |     def test_rotation_complex(self, case):
44 |         theta, phi, coord = case
45 |         x = [1, -0.9, 0, 0, 0, 0]
46 |         y = [0, 0, 2, -1.9, 0, 0]
47 |         z = [0, 0, 0, 0, 3, -2.9]
48 |         x0, y0, z0 = coord
49 |         x, y, z = tr.rotate_spherical(x, y, z, theta, phi)
50 |         self.assertAlmostEqual(sum(abs(x - np.array(x0))), 0, 10)
51 |         self.assertAlmostEqual(sum(abs(y - np.array(y0))), 0, 10)
52 |         self.assertAlmostEqual(sum(abs(z - np.array(z0))), 0, 10)
53 | 
54 | 
55 | if __name__ == '__main__':
56 |     unittest.main()
57 | 
--------------------------------------------------------------------------------
/SPHARM/tests/test_vrml.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | 
3 | import os
4 | import pandas as pd
5 | from ddt import ddt
6 | import shutil
7 | import site
8 | 
9 | import SPHARM.lib.vrml_parse as vr
10 | 
11 | 
12 | @ddt
13 | class TestVRML(unittest.TestCase):
14 | 
15 |     def test_extract_node_names(self):
16 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
17 |         vr.extract_node_names(path + 'data/test_vrml.vrml', 'data/test_data/test_vrml.txt')
18 |         f = open('data/test_data/test_vrml.txt')
19 |         st = f.readlines()
20 |         f.close()
21 |         self.assertEqual(len(st), 1362)
22 | 
23 |     def test_extract_node_names_batch(self):
24 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
25 |         vr.extract_node_names_batch(path + 'data/vrml/', 'data/test_data/vrml/node_names/')
26 |         files = os.listdir('data/test_data/vrml/node_names/')
27 |         self.assertEqual(len(files), 2)
28 |         shutil.rmtree('data/test_data/')
29 | 
30 |     def test_extract_coordinates(self):
31 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
32 |         vr.extract_coordinates(path + 'data/test_vrml.vrml', 'data/test_data/test_vrml_coord.csv')
33 |         stat = pd.read_csv('data/test_data/test_vrml_coord.csv', sep='\t', index_col=0)
34 |         self.assertEqual(len(stat), 858)
35 |         shutil.rmtree('data/test_data/')
36 | 
37 |     def test_extract_coordinates_batch(self):
38 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
39 |         vr.extract_coordinates_batch(path + 'data/vrml/', 'data/test_data/vrml/coordinates/')
40 |         files = os.listdir('data/test_data/vrml/coordinates/')
41 |         self.assertEqual(len(files), 2)
42 |         shutil.rmtree('data/test_data/')
43 | 
44 |     def test_combine_with_tracks(self):
45 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
46 |         vr.combine_with_track_data(inputfile=path + 'data/LN_deconv_set4_small.csv',
47 |                                    trackfile=path + 'data/LN_deconv_set4_Detailed.xlsx',
48 |                                    outputfile='data/test_data/LN_deconv_set4_tracked.csv')
49 |         stat = pd.read_csv('data/test_data/LN_deconv_set4_tracked.csv', sep='\t', index_col=0)
50 |         self.assertEqual('TrackID' in stat.columns, True)
51 |         shutil.rmtree('data/test_data/')
52 | 
53 |     def test_combine_with_tracks_batch(self):
54 |         path = site.getsitepackages()[0] + '/SPHARM/tests/'
55 |         vr.combine_with_track_data_batch(inputfolder=path + 'data/wrl/',
56 |                                          trackfolder=path + 'data/track_files/',
57 |                                          outputfolder='data/test_data/vrml/tracked/')
58 |         files = os.listdir('data/test_data/vrml/tracked/')
59 |         self.assertEqual(len(files), 1)
60 |         shutil.rmtree('data/test_data/')
61 | 
62 | if __name__ == '__main__':
63 |     unittest.main()
64 | 
65 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | 
3 | setup(
4 |     name='dynamic_spharm',  # This is the name of your PyPI-package.
5 |     version='1.0',  # python versioneer
6 |     url="https://github.com/applied-systems-biology/Dynamic_SPHARM/",
7 |     author="Anna Medyukhina",
8 |     author_email='anna.medyukhina@gmail.com',
9 |     packages=['SPHARM', 'SPHARM.classes', 'SPHARM.lib'],
10 |     package_data={'': ['tests/data/*', 'tests/data/surfaces/*', 'tests/data/synthetic_cells/*',
11 |                        'tests/data/track_files/LN/*',
12 |                        'tests/data/vrml/*', 'tests/data/wrl/LN/*']},
13 |     include_package_data=True,
14 |     license='BSD-3-Clause',
15 | 
16 |     install_requires=[
17 |         'scikit-image',
18 |         'pandas',
19 |         'numpy',
20 |         'seaborn',
21 |         'scipy',
22 |         'ddt',
23 |         'pyshtools',
24 |         'mayavi',
25 |         'helper_lib',
26 |         'vtk'
27 |     ],
28 |     dependency_links=[
29 |         "https://github.com/applied-systems-biology/HelperLib/releases/",
30 |     ],
31 | )
32 | 
--------------------------------------------------------------------------------