├── MANIFEST.in ├── setup.cfg ├── figures ├── fourhists.png ├── jointplot.png └── wolframcmap.png ├── requirements.txt ├── nmmn ├── __init__.py ├── ml.py ├── astro.py ├── finance.py ├── bayes.py ├── fermi.py ├── lsd.py ├── misc.py ├── dsp.py ├── peakdetect.py └── grmhd.py ├── setup.py ├── LICENSE ├── docs ├── index.rst ├── nmmn.rst ├── ngc3031.ssd ├── ngc3031.adaf ├── ngc3031.jet ├── make.bat ├── Makefile └── conf.py ├── README.md └── examples.md /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md -------------------------------------------------------------------------------- /figures/fourhists.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rsnemmen/nmmn/HEAD/figures/fourhists.png -------------------------------------------------------------------------------- /figures/jointplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rsnemmen/nmmn/HEAD/figures/jointplot.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | scipy 3 | matplotlib>=2.0.2 4 | uncertainties 5 | aplpy 6 | astropy -------------------------------------------------------------------------------- /figures/wolframcmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rsnemmen/nmmn/HEAD/figures/wolframcmap.png -------------------------------------------------------------------------------- /nmmn/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Miscellaneous modules for: 3 | 4 | * `astro`: astronomy 5 | * `dsp`: signal processing 6 | * `lsd`: misc. operations on arrays, lists, dictionaries and sets 7 | * `stats`: statistical methods 8 | * `plots`: custom plots 9 | * `fermi`: Fermi LAT analysis methods 10 | * `bayes`: Bayesian tools for dealing with posterior distributions 11 | * `grmhd`: tools for dealing with GRMHD numerical simulations 12 | * `finance`: financial market 13 | 14 | These are modules I wrote which I find useful -- for whatever reason -- in my research. 15 | 16 | .. 
moduleauthor:: Rodrigo Nemmen 17 | """ -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | with open('README.md') as f: 4 | readme = f.read() 5 | 6 | #with open('LICENSE') as f: 7 | # license = f.read() 8 | 9 | setup( 10 | name='nmmn', 11 | version='1.3.6', 12 | description='Miscellaneous methods for data science and astronomy', 13 | long_description=readme, 14 | long_description_content_type='text/markdown', 15 | author='Rodrigo Nemmen', 16 | author_email='rodrigo.nemmen@iag.usp.br', 17 | url='https://github.com/rsnemmen/nmmn', 18 | download_url = 'https://github.com/rsnemmen/nmmn/archive/1.3.6.tar.gz', 19 | license="MIT License", 20 | keywords = ['science', 'statistics', 'signal-processing', 'numerical-methods', 'astronomy', 'numerical-simulations', 'astrophysics', 'mhd', 'grmhd'], # arbitrary keywords 21 | packages=find_packages(exclude=('tests', 'docs')) 22 | ) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 Rodrigo Nemmen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /nmmn/ml.py: -------------------------------------------------------------------------------- 1 | """ 2 | Machine learning methods 3 | ========================= 4 | 5 | """ 6 | 7 | import numpy as np 8 | #from . import misc 9 | 10 | 11 | 12 | 13 | 14 | def AUCmulti(y_true, y_score): 15 | """ 16 | Computes the area under the ROC curve for multiclass classification models. 17 | Useful for evaluating the performance of such a model. 18 | 19 | Assume `y_true` contains the true labels and `y_score` contains predicted probabilities 20 | for each class. 21 | 22 | :param y_true: 1D array listing the labels 23 | :param y_score: multidimensional array of predicted probabilities 24 | 25 | Example: AUC for a classification involving 7 labels and 10 instances. 26 | 27 | # Mock data 28 | ytrue=np.array([6, 2, 6, 6, 6, 6, 5, 1, 5, 0]) 29 | y_score=np.array([[0.11, 0.04, 0. , 0. , 0.03, 0.12, 0.69], 30 | [0. , 0.03, 0.76, 0. , 0. , 0.01, 0.13], 31 | [0.05, 0.01, 0. , 0. , 0. , 0.27, 0.63], 32 | [0.09, 0.01, 0. , 0. , 0. , 0.47, 0.43], 33 | [0.09, 0. , 0.01, 0. 
, 0.08, 0.51, 0.31], 34 | [0.03, 0.53, 0. , 0. , 0.03, 0.17, 0.21], 35 | [0.17, 0.07, 0.01, 0. , 0.03, 0.36, 0.32], 36 | [0.08, 0.3 , 0.09, 0. , 0.05, 0.16, 0.26], 37 | [0.01, 0.01, 0. , 0. , 0.01, 0.6 , 0.33], 38 | [0. , 0.04, 0.08, 0.01, 0. , 0.37, 0.41]]) 39 | 40 | AUCmulti(ytrue, y_score) 41 | """ 42 | from sklearn.metrics import roc_auc_score 43 | from sklearn.preprocessing import label_binarize 44 | 45 | # Binarize the labels for a multi-class problem 46 | y_true = label_binarize(y_true, classes=range(y_score.shape[1])) 47 | 48 | # Compute the macro-averaged AUC, treating each class one-vs-rest 49 | auc = roc_auc_score(y_true, y_score, multi_class='ovr') 50 | 51 | return auc -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. nmmn documentation master file, created by 2 | sphinx-quickstart on Sun Sep 11 23:53:27 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | `nmmn <https://github.com/rsnemmen/nmmn>`_ 7 | ================================================== 8 | 9 | Tools for astronomy, data analysis, time series, numerical simulations and more! These are modules I wrote which I find useful -- for whatever reason -- in my research. 10 | 11 | List of submodules available: 12 | 13 | * `astro`: astronomy 14 | * `dsp`: signal processing 15 | * `lsd`: misc. operations on arrays, lists, dictionaries and sets 16 | * `stats`: statistical methods 17 | * `plots`: custom plots 18 | * `fermi`: Fermi LAT analysis methods 19 | * `bayes`: Bayesian tools for dealing with posterior distributions 20 | * `grmhd`: tools for dealing with GRMHD numerical simulations 21 | * `sed`: tools for dealing with spectral energy distributions (SEDs) 22 | * `finance`: financial market 23 | 24 | `Code available on Github <https://github.com/rsnemmen/nmmn>`_. 25 | 26 | Usage 27 | ------- 28 | 29 | Example 1: Remove all `nan` and `inf` (:math:`\infty`) elements from a numpy array. 30 | 31 | >>> import nmmn.lsd, numpy 32 | >>> x=numpy.array([1,2,numpy.nan,numpy.inf]) 33 | >>> xok=nmmn.lsd.delweird(x) 34 | 35 | Example 2: Reads SED generated by `grmonty <https://github.com/rsnemmen/grmonty>`_. 36 | 37 | >>> import nmmn.sed 38 | >>> s=nmmn.sed.SED() 39 | >>> s.grmonty('grmonty.spec') 40 | >>> plot(s.lognu, s.ll) 41 | 42 | Now it is easy to compute the bolometric luminosity: `s.bol()`. 43 | 44 | 45 | Contents 46 | --------- 47 | 48 | .. toctree:: 49 | :maxdepth: 4 50 | 51 | nmmn 52 | 53 | 54 | Todo 55 | ----- 56 | 57 | - [ ] need more examples 58 | 59 | 60 | Indices and tables 61 | -------------------- 62 | 63 | * :ref:`genindex` 64 | * :ref:`modindex` 65 | * :ref:`search` 66 | 67 | -------------------------------------------------------------------------------- /docs/nmmn.rst: -------------------------------------------------------------------------------- 1 | nmmn package 2 | ============ 3 | 4 | Submodules 5 | ---------- 6 | 7 | nmmn.astro module 8 | ----------------- 9 | 10 | .. automodule:: nmmn.astro 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | nmmn.bayes module 16 | ----------------- 17 | 18 | .. automodule:: nmmn.bayes 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | nmmn.dsp module 24 | --------------- 25 | 26 | .. automodule:: nmmn.dsp 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | nmmn.fermi module 32 | ----------------- 33 | 34 | ..
automodule:: nmmn.fermi 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | nmmn.grmhd module 40 | ----------------- 41 | 42 | .. automodule:: nmmn.grmhd 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | nmmn.lsd module 48 | --------------- 49 | 50 | .. automodule:: nmmn.lsd 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | nmmn.misc module 56 | ---------------- 57 | 58 | .. automodule:: nmmn.misc 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | nmmn.plots module 64 | ----------------- 65 | 66 | .. automodule:: nmmn.plots 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | nmmn.sed module 72 | ----------------- 73 | 74 | .. automodule:: nmmn.sed 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | nmmn.stats module 80 | ----------------- 81 | 82 | .. automodule:: nmmn.stats 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | 88 | nmmn.finance module 89 | ----------------- 90 | 91 | .. automodule:: nmmn.finance 92 | :members: 93 | :undoc-members: 94 | :show-inheritance: 95 | 96 | 97 | 98 | Module contents 99 | --------------- 100 | 101 | .. automodule:: nmmn 102 | :members: 103 | :undoc-members: 104 | :show-inheritance: 105 | -------------------------------------------------------------------------------- /docs/ngc3031.ssd: -------------------------------------------------------------------------------- 1 | 11.100000000000000 36.017685861971771 2 | 11.199999999999999 36.307327567174362 3 | 11.300000000000001 36.594085292851958 4 | 11.400000000000000 36.877319732214310 5 | 11.500000000000000 37.156076186643354 6 | 11.600000000000000 37.429141616066964 7 | 11.699999999999999 37.694985779161705 8 | 11.800000000000001 37.951706847440967 9 | 11.900000000000000 38.197007924847782 10 | 12.000000000000000 38.428260284785061 11 | 12.100000000000000 38.642751658693832 12 | 12.199999999999999 38.838247135972246 13 | 12.300000000000001 39.014006134290568 14 | 12.400000000000000 39.171455079156694 15 | 12.500000000000000 39.314890976304170 16 | 12.600000000000000 39.449804092324705 17 | 12.699999999999999 39.580560170548637 18 | 12.800000000000001 39.709221750022159 19 | 12.900000000000000 39.835809306095456 20 | 13.000000000000000 39.935927726722177 21 | 13.100000000000000 40.079959196903992 22 | 13.199999999999999 40.195041708646855 23 | 13.300000000000001 40.302862553116050 24 | 13.400000000000000 40.400644526174787 25 | 13.500000000000000 40.484533727249769 26 | 13.600000000000000 40.549115656773274 27 | 13.699999999999999 40.587766821619688 28 | 13.800000000000001 40.590439305923937 29 | 13.900000000000000 40.544618063556868 30 | 14.000000000000000 40.433771481303602 31 | 14.100000000000000 40.236925228858389 32 | 14.199999999999999 39.927704428223493 33 | 14.300000000000001 39.473620070218864 34 | 14.400000000000000 38.834404847062842 35 | 14.500000000000000 37.960113576240509 36 | 14.600000000000000 36.788273524854112 37 | 14.699999999999999 35.240525582835126 38 | 14.800000000000001 33.218560078056392 39 | 14.900000000000000 30.598733788894730 40 | 15.000000000000000 27.225526874836792 41 | 15.100000000000001 22.903395353330627 42 | 15.199999999999999 17.386182796662606 43 | 15.300000000000001 12.799923187207902 44 | 15.400000000000000 4.5869235623401181 45 | 15.500000000000000 -6.6141491596869093 46 | 15.600000000000001 -20.819034413519130 47 | 15.699999999999999 -38.805495595905782 48 | 15.800000000000001 -61.552678811380119 49 | 15.900000000000000 
-90.293255972795720 50 | 16.000000000000000 -126.27803907998658 51 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | `nmmn` package 2 | ================ 3 | 4 | Tools for astronomy, data analysis, time series, numerical simulations, gamma-ray astronomy and more! These are modules I wrote which I find useful—for whatever reason—in my research. 5 | 6 | List of modules available ([more info here](http://rsnemmen.github.io/nmmn/)): 7 | 8 | * `astro`: astronomy 9 | * `dsp`: signal processing 10 | * `lsd`: misc. operations on arrays, lists, dictionaries and sets 11 | * `stats`: statistical methods 12 | * [`sed`: spectral energy distributions](./docs/SEDs.ipynb) 13 | * `plots`: custom plots 14 | * `fermi`: Fermi LAT analysis methods 15 | * `bayes`: Bayesian tools for dealing with posterior distributions 16 | * `grmhd`: tools for dealing with GRMHD numerical simulations 17 | 18 | Very basic [documentation](http://rsnemmen.github.io/nmmn/) for the package. Generated with Sphinx. 19 | 20 | # Installation 21 | 22 | You have a couple of options to install the module: 23 | 24 | ### 1. Install using `pip`: 25 | 26 | ``` 27 | pip install nmmn 28 | ``` 29 | 30 | 31 | ### 2. Install the module on the system’s python library path: 32 | 33 | ``` 34 | git clone https://github.com/rsnemmen/nmmn.git 35 | cd nmmn 36 | python setup.py install 37 | ``` 38 | 39 | ### 3. Install the package with a symlink, so that changes to the source files will be immediately available: 40 | 41 | ``` 42 | git clone https://github.com/rsnemmen/nmmn.git 43 | cd nmmn 44 | python setup.py develop 45 | ``` 46 | 47 | This last method is preferred if you want the latest, bleeding-edge updates in the repo. You may need to run the last command with `sudo`. 48 | 49 | ## Updating 50 | 51 | If you installed with `pip` (method 1), to upgrade the package to the latest stable version use 52 | 53 | pip install --upgrade nmmn 54 | 55 | If you installed with the `setup.py` script and the `develop` option (method 3), use 56 | 57 | cd /path/to/nmmn 58 | git pull 59 | 60 | # Usage 61 | 62 | First import the specific module that you want to use: 63 | 64 | import nmmn.lsd 65 | 66 | Then call the method you need. For example, to remove all `nan` and `inf` elements from a `numpy` array: 67 | 68 | ```python 69 | import numpy as np 70 | 71 | # generates some array with nan and inf 72 | x=np.array([1,2,np.nan,np.inf]) 73 | 74 | # removes strange elements 75 | xok=nmmn.lsd.delweird(x) 76 | ``` 77 | 78 | For more examples, please refer to the [examples doc](examples.md). 79 | 80 | # TODO 81 | 82 | * [x] need more examples of how to use the modules 83 | * [x] add IFU data cubes method (refer to [ifscube](https://ifscube.readthedocs.io/en/latest/)) 84 | 85 | # License 86 | 87 | See `LICENSE` file. 88 | 89 | If you have suggestions of improvements, by all means please contribute with a pull request! :) 90 | 91 | The MIT License (MIT). Copyright (c) 2020 [Rodrigo Nemmen](http://rodrigonemmen.com) 92 | 93 | [Visit the author's web page](https://rodrigonemmen.com/) and/or follow him on twitter ([@nemmen](https://twitter.com/nemmen)). 
-------------------------------------------------------------------------------- /examples.md: -------------------------------------------------------------------------------- 1 | Examples: how to use the `nmmn` module 2 | ====================================== 3 | 4 | # Array operations 5 | 6 | Example 1: Remove all `nan` and `inf` elements from a `numpy` array. 7 | 8 | ```python 9 | import nmmn.lsd, numpy 10 | x=numpy.array([1,2,numpy.nan,numpy.inf]) 11 | xok=nmmn.lsd.delweird(x) 12 | ``` 13 | 14 | => propagate errors 15 | 16 | => propagate complex error distributions (e.g. asymmetric error bars) 17 | 18 | 19 | 20 | # Spectral energy distributions 21 | 22 | Check out the [jupyter notebook `SEDs.ipynb`](./docs/SEDs.ipynb) which has a tutorial illustrating how to perform several operations on SEDs: reading, computing bolometric luminosity, radio-loudness, adding SEDs, computing average SEDs. 23 | 24 | Here is just one simple example. 25 | 26 | Example 1: Reads SED generated by [`grmonty`](https://github.com/rsnemmen/grmonty). 27 | 28 | ```python 29 | import nmmn.sed 30 | s=nmmn.sed.SED() 31 | s.grmonty('grmonty.spec') 32 | plot(s.lognu, s.ll) 33 | ``` 34 | 35 | Now it is easy to compute the bolometric luminosity: `s.bol()`. 36 | 37 | => plot SED with pretty axis 38 | 39 | # Plots 40 | 41 | Example 1: Make a 2D kernel density distribution plot, along with the 1D histograms. 42 | 43 | ```python 44 | import nmmn.plots 45 | # define your 1D arrays X and Y with the points 46 | nmmn.plots.jointplot(X,Y,xlabel='$\log \ r_{\\rm tr}$', ylabel='$\log \ \dot{m}$') 47 | ``` 48 | 49 | ![2D kernel density distribution](./figures/jointplot.png) 50 | 51 | 52 | Example 2: Use the colormap of Wolfram Mathematica for plotting images. `var` contains a 2D array. 53 | 54 | ```python 55 | import nmmn.plots 56 | wolframcmap=nmmn.plots.wolframcmap() 57 | # define var with the image 58 | imshow(var, cmap=wolframcmap) 59 | ``` 60 | 61 | ![Image plotted with matplotlib and using Wolfram's colormap](./figures/wolframcmap.png) 62 | 63 | Note that there is also a method here for using MATLAB's parula colormap. For more examples of colormaps including Turbo, check out [this notebook](https://gist.github.com/rsnemmen/5c451783db51489ae10d0992babd06ba). 64 | 65 | Example 3: Plot four histograms in the same figure. 66 | 67 | ```python 68 | import nmmn.plots 69 | # define your 4 variables x1, x2, x3 and x4 that will be plotted as histograms 70 | nmmn.plots.fourhists(x1,x2,x3,x4,-3,0,'BL Lacs','FSRQs','Blazars','GRBs','$\log \epsilon_{\\rm rad}$',fig=2,fontsize=15,bins1=15,bins2=15,bins3=15,bins4=15) 71 | ``` 72 | 73 | ![Four histograms in the same figure](./figures/fourhists.png) 74 | 75 | => plot linear fit with confidence band 76 | 77 | 78 | # Statistics 79 | 80 | Example 1: Given the Pearson correlation coefficient `r`, what is the p-value for the null hypothesis of no correlation? 81 | 82 | ```python 83 | import nmmn.stats 84 | 85 | # let's say r was computed from arrays x,y 86 | r=0.4 87 | 88 | # compute p-value 89 | p=nmmn.stats.r2p(r,x.size) 90 | 91 | print(p) 92 | ``` 93 | 94 | Example 2: Given the p-value, what is the statistical confidence for rejecting the null hypothesis, in standard deviations (i.e. in sigmas)? 95 | 96 | nmmn.stats.p2sig(p)
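
# Astronomy

Example 1: Convert an angular size on the sky into a linear size with `nmmn.astro.arcsec2pc`. A minimal sketch; the distance and angular size below are just illustrative values.

```python
import nmmn.astro

# linear size (in pc) subtended by 1 arcsec at a distance of 15 Mpc
size=nmmn.astro.arcsec2pc(d=15., a=1.)  # about 73 pc
```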
-------------------------------------------------------------------------------- /nmmn/astro.py: -------------------------------------------------------------------------------- 1 | """ 2 | Astrophysical routines 3 | ========================= 4 | 5 | """ 6 | 7 | 8 | import numpy 9 | import scipy.optimize 10 | 11 | 12 | 13 | 14 | def dist2z(d): 15 | """ 16 | Converts luminosity distance to redshift by numerically solving for the 17 | redshift z at which the luminosity distance matches the input d. 18 | 19 | Input is assumed float. 20 | """ 21 | import cosmolopy 22 | 23 | # x here is the unknown redshift 24 | f = lambda x: d-cosmolopy.distance.luminosity_distance(x,**cosmolopy.fidcosmo) 25 | 26 | z = scipy.optimize.fsolve(f, 0.01) 27 | return z 28 | 29 | 30 | 31 | 32 | def mjy(lognu,ll,dist,llerr=None): 33 | """ 34 | Converts log(nu/Hz), log(nu Lnu [erg/s]), error in log(nuLnu) to 35 | log(lambda/micron), log(Fnu/mJy), error in log(Fnu). 36 | The input units are CGS. 37 | 38 | Usage: 39 | If you have errors in the flux: 40 | 41 | >>> lamb,fnu,ferr=mjy(xdata,ydata,dist,yerr) 42 | 43 | If you do not have errors in the flux: 44 | 45 | >>> lamb,fnu=mjy(xdata,ydata,dist) 46 | 47 | :param dist: distance in Mpc 48 | """ 49 | import uncertainties.unumpy as unumpy 50 | 51 | c=29979245800. # speed of light in CGS 52 | dist=dist*3.085677581e24 # Mpc -> cm 53 | 54 | nu=10**lognu 55 | lamb=c/nu*1e4 # cm -> micron 56 | if llerr is not None: 57 | lllerr=unumpy.uarray(ll,llerr) 58 | else: 59 | lllerr=ll 60 | lnuerr=10**lllerr/nu 61 | fluxerr=lnuerr/(1e-26*4.*numpy.pi*dist**2) # Lnu (erg/s/Hz) -> Fnu (mJy) 62 | if llerr is not None: 63 | fluxerr=unumpy.log10(fluxerr) 64 | return numpy.log10(lamb),unumpy.nominal_values(fluxerr),unumpy.std_devs(fluxerr) 65 | else: 66 | return numpy.log10(lamb),numpy.log10(fluxerr) 67 | 68 | 69 | 70 | 71 | 72 | 73 | def arcsec2pc(d=15.,a=1.): 74 | """ 75 | Given the input angular size and distance to the object, computes 76 | the corresponding linear size in pc. 77 | 78 | :param d: distance in Mpc 79 | :param a: angular size in arcsec 80 | :returns: linear size in pc 81 | """ 82 | 83 | # convert arcsec to radians 84 | a=a*4.848e-6 85 | # convert distance to pc instead of Mpc 86 | d=d*1e6 87 | 88 | return d*numpy.tan(a) 89 | 90 | 91 | 92 | def freq(T): 93 | """ 94 | Convert array of periods in days to frequencies in Hz. 95 | """ 96 | return 1./T/86400. 97 | 98 | 99 | 100 | def period(freq): 101 | """ 102 | Convert array of frequencies to periods. 103 | """ 104 | return 1./freq 105 | 106 | 107 | 108 | 109 | class Constants: 110 | """ 111 | Defines a set of useful constants in CGS. 112 | :: 113 | 114 | const=nmmn.astro.Constants() 115 | E=mass*const.c**2 116 | """ 117 | 118 | def __init__(self): 119 | # Fundamental constants 120 | self.G = 6.673E-8 121 | self.c = 29979245800 122 | self.kb = 1.380649e-16 # Boltzmann constant 123 | self.h = 6.6261e-27 # Planck constant 124 | self.sigma = 0.56704e-4 # Stefan-Boltzmann constant 125 | 126 | # Astronomical and time units 127 | self.solarmass = 1.99e33 128 | self.year = 31556926 129 | 130 | -------------------------------------------------------------------------------- /nmmn/finance.py: -------------------------------------------------------------------------------- 1 | """ 2 | Financial market methods 3 | ========================= 4 | """ 5 | 6 | import numpy as np 7 | import yfinance as yf 8 | 9 | 10 | 11 | 12 | def candle(fig,data,legend=None): 13 | """ 14 | Convenient function to plot candle sticks. 15 | 16 | :param fig: figure object created with plotly (cf.
example below) 17 | :param data: stock time series imported with yfinance (Pandas) 18 | :param legend: plot title 19 | 20 | Example: Candle stick plot for Microsoft stocks 21 | 22 | >>> import plotly.graph_objs as go 23 | >>> import yfinance 24 | >>> fig=go.Figure() 25 | >>> msft=yfinance.download(tickers='MSFT', period='1y', interval='1d') 26 | >>> candle(fig,msft) 27 | >>> fig.show() 28 | """ 29 | import plotly.graph_objs as go 30 | 31 | fig.add_trace(go.Candlestick(x=data.index, open=data['Open'], high=data['High'], 32 | low=data['Low'], close=data['Close'], name=legend)) 33 | 34 | def normalize(x1,x2): 35 | """ 36 | Given two tickers, this method normalizes them such that you can 37 | plot them together. One possible usage of this method is to compare 38 | the stock price of the same company in different exchanges—e.g. NASDAQ 39 | and B3—and see how they compare. 40 | 41 | :param x1: yfinance stock time series #1 42 | :param x2: yfinance stock time series #2 43 | :returns: x2 stock data normalized to the same scale as x1 44 | 45 | Example: 46 | 47 | >>> adbe=yf.download(tickers='ADBE', period='3mo', interval='1d') 48 | >>> adbeBR=yf.download(tickers='ADBE34.SA', period='3mo', interval='1d') 49 | >>> adbe['Close'].plot(label='US') 50 | >>> x=normalize(adbe,adbeBR) 51 | >>> x.plot(label='BR') 52 | >>> legend() 53 | >>> title('Adobe') 54 | >>> grid() 55 | """ 56 | return x2['Close']/x2['Close'][0]*x1['Close'][0] 57 | 58 | def returns(ticker,dt='ytd',t0=None): 59 | """ 60 | Convenient method for retrieving the returns of a stock over a given 61 | time period. 62 | 63 | :param ticker: the stock ticker 64 | :param dt: the period covered ending at "now". Possible options: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max. OPTIONAL, default is year to date. 65 | :param t0: the initial date in the format '2021-03-17'. If t0 is specified, then dt should not be. 66 | :returns: the percentage return of the stock in the specified period 67 | 68 | # Examples: 69 | 70 | Returns from Small Cap BR ETF since July 11th 2014: 71 | 72 | >>> returns('SMAL11.SA',t0='2014-07-11') 73 | 74 | Returns from VALE3 in the last two years: 75 | 76 | >>> returns('VALE3.SA','2y') 77 | """ 78 | from datetime import date 79 | 80 | if t0 is None: 81 | data=yf.download(tickers=ticker, period=dt, interval='1d', progress=False) 82 | else: 83 | today = date.today() 84 | data=yf.download(tickers=ticker, start=t0, end=today.strftime("%Y-%m-%d"), progress=False) 85 | 86 | r=(data['Close'][-1]/data['Close'][0]-1)*100 87 | 88 | return round(r,1) 89 | 90 | 91 | 92 | def returnsTS(x): 93 | """ 94 | Given a stock, this method returns the stock percentage returns as a time series.
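Example (an illustrative sketch; VALE3 is simply the sample ticker used elsewhere in this module):

>>> vale=yf.download(tickers='VALE3.SA', period='1y', interval='1d')
>>> r=returnsTS(vale)
>>> r.plot()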
95 | 96 | :param x: yfinance stock time series 97 | :returns: stock time series of percentage returns 98 | 99 | """ 100 | return (x['Close']/x['Close'][0]-1)*100 101 | 102 | -------------------------------------------------------------------------------- /docs/ngc3031.adaf: -------------------------------------------------------------------------------- 1 | 0.912000E+01 0.328940E+02 2 | 0.924000E+01 0.331729E+02 3 | 0.936000E+01 0.334526E+02 4 | 0.948000E+01 0.337334E+02 5 | 0.960000E+01 0.340619E+02 6 | 0.972000E+01 0.343420E+02 7 | 0.984000E+01 0.346222E+02 8 | 0.996000E+01 0.349041E+02 9 | 0.100800E+02 0.351856E+02 10 | 0.102000E+02 0.354646E+02 11 | 0.103200E+02 0.357217E+02 12 | 0.104400E+02 0.360018E+02 13 | 0.105600E+02 0.362800E+02 14 | 0.106800E+02 0.365561E+02 15 | 0.108000E+02 0.368297E+02 16 | 0.109200E+02 0.371016E+02 17 | 0.110400E+02 0.373706E+02 18 | 0.111600E+02 0.376368E+02 19 | 0.112800E+02 0.379000E+02 20 | 0.114000E+02 0.381600E+02 21 | 0.115200E+02 0.384161E+02 22 | 0.116400E+02 0.386684E+02 23 | 0.117600E+02 0.389155E+02 24 | 0.118800E+02 0.391572E+02 25 | 0.120000E+02 0.393939E+02 26 | 0.121200E+02 0.396205E+02 27 | 0.122400E+02 0.398382E+02 28 | 0.123600E+02 0.400523E+02 29 | 0.124800E+02 0.402314E+02 30 | 0.126000E+02 0.403639E+02 31 | 0.127200E+02 0.404473E+02 32 | 0.128400E+02 0.404612E+02 33 | 0.129600E+02 0.403683E+02 34 | 0.130800E+02 0.401625E+02 35 | 0.132000E+02 0.398816E+02 36 | 0.133200E+02 0.395550E+02 37 | 0.134400E+02 0.392148E+02 38 | 0.135600E+02 0.389521E+02 39 | 0.136800E+02 0.387782E+02 40 | 0.138000E+02 0.389018E+02 41 | 0.139200E+02 0.391070E+02 42 | 0.140400E+02 0.392471E+02 43 | 0.141600E+02 0.394813E+02 44 | 0.142800E+02 0.395306E+02 45 | 0.144000E+02 0.397601E+02 46 | 0.145200E+02 0.399024E+02 47 | 0.146400E+02 0.399393E+02 48 | 0.147600E+02 0.400663E+02 49 | 0.148800E+02 0.401387E+02 50 | 0.150000E+02 0.402564E+02 51 | 0.151200E+02 0.403065E+02 52 | 0.152400E+02 0.402834E+02 53 | 0.153600E+02 0.404068E+02 54 | 0.154800E+02 0.403767E+02 55 | 0.156000E+02 0.402624E+02 56 | 0.157200E+02 0.402286E+02 57 | 0.158400E+02 0.401976E+02 58 | 0.159600E+02 0.401058E+02 59 | 0.160800E+02 0.400137E+02 60 | 0.162000E+02 0.399221E+02 61 | 0.163200E+02 0.398217E+02 62 | 0.164400E+02 0.397412E+02 63 | 0.165600E+02 0.396770E+02 64 | 0.166800E+02 0.396398E+02 65 | 0.168000E+02 0.396649E+02 66 | 0.169200E+02 0.397127E+02 67 | 0.170400E+02 0.397783E+02 68 | 0.171600E+02 0.398477E+02 69 | 0.172800E+02 0.398982E+02 70 | 0.174000E+02 0.399556E+02 71 | 0.175200E+02 0.400046E+02 72 | 0.176400E+02 0.400364E+02 73 | 0.177600E+02 0.400660E+02 74 | 0.178800E+02 0.400819E+02 75 | 0.180000E+02 0.400867E+02 76 | 0.181200E+02 0.400841E+02 77 | 0.182400E+02 0.400586E+02 78 | 0.183600E+02 0.400345E+02 79 | 0.184800E+02 0.400046E+02 80 | 0.186000E+02 0.399620E+02 81 | 0.187200E+02 0.399254E+02 82 | 0.188400E+02 0.398837E+02 83 | 0.189600E+02 0.398502E+02 84 | 0.190800E+02 0.398199E+02 85 | 0.192000E+02 0.397778E+02 86 | 0.193200E+02 0.397645E+02 87 | 0.194400E+02 0.397617E+02 88 | 0.195600E+02 0.397693E+02 89 | 0.196800E+02 0.397702E+02 90 | 0.198000E+02 0.397906E+02 91 | 0.199200E+02 0.398058E+02 92 | 0.200400E+02 0.398188E+02 93 | 0.201600E+02 0.398183E+02 94 | 0.202800E+02 0.398005E+02 95 | 0.204000E+02 0.397740E+02 96 | 0.205200E+02 0.397245E+02 97 | 0.206400E+02 0.396547E+02 98 | 0.207600E+02 0.395600E+02 99 | 0.208800E+02 0.394709E+02 100 | 0.210000E+02 0.393094E+02 101 | -------------------------------------------------------------------------------- /docs/ngc3031.jet: 
-------------------------------------------------------------------------------- 1 | 0.820000E+01 0.351119E+02 2 | 0.830000E+01 0.352127E+02 3 | 0.840000E+01 0.353141E+02 4 | 0.850000E+01 0.354153E+02 5 | 0.860000E+01 0.355157E+02 6 | 0.870000E+01 0.356166E+02 7 | 0.880000E+01 0.357175E+02 8 | 0.890000E+01 0.358181E+02 9 | 0.900000E+01 0.359181E+02 10 | 0.910000E+01 0.360186E+02 11 | 0.920000E+01 0.361189E+02 12 | 0.930000E+01 0.362185E+02 13 | 0.940000E+01 0.363193E+02 14 | 0.950000E+01 0.364225E+02 15 | 0.960000E+01 0.365468E+02 16 | 0.970000E+01 0.366993E+02 17 | 0.980000E+01 0.368670E+02 18 | 0.990000E+01 0.370351E+02 19 | 0.100000E+02 0.371988E+02 20 | 0.101000E+02 0.373386E+02 21 | 0.102000E+02 0.374666E+02 22 | 0.103000E+02 0.375885E+02 23 | 0.104000E+02 0.377013E+02 24 | 0.105000E+02 0.378096E+02 25 | 0.106000E+02 0.379133E+02 26 | 0.107000E+02 0.380127E+02 27 | 0.108000E+02 0.381058E+02 28 | 0.109000E+02 0.381922E+02 29 | 0.110000E+02 0.382723E+02 30 | 0.111000E+02 0.383465E+02 31 | 0.112000E+02 0.384159E+02 32 | 0.113000E+02 0.384795E+02 33 | 0.114000E+02 0.385389E+02 34 | 0.115000E+02 0.385936E+02 35 | 0.116000E+02 0.386430E+02 36 | 0.117000E+02 0.386888E+02 37 | 0.118000E+02 0.387295E+02 38 | 0.119000E+02 0.387657E+02 39 | 0.120000E+02 0.387969E+02 40 | 0.121000E+02 0.388237E+02 41 | 0.122000E+02 0.388460E+02 42 | 0.123000E+02 0.388639E+02 43 | 0.124000E+02 0.388783E+02 44 | 0.125000E+02 0.388894E+02 45 | 0.126000E+02 0.388979E+02 46 | 0.127000E+02 0.389043E+02 47 | 0.128000E+02 0.389089E+02 48 | 0.129000E+02 0.389117E+02 49 | 0.130000E+02 0.389129E+02 50 | 0.131000E+02 0.389129E+02 51 | 0.132000E+02 0.389117E+02 52 | 0.133000E+02 0.389098E+02 53 | 0.134000E+02 0.389071E+02 54 | 0.135000E+02 0.389041E+02 55 | 0.136000E+02 0.389008E+02 56 | 0.137000E+02 0.388972E+02 57 | 0.138000E+02 0.388934E+02 58 | 0.139000E+02 0.388895E+02 59 | 0.140000E+02 0.388853E+02 60 | 0.141000E+02 0.388808E+02 61 | 0.142000E+02 0.388763E+02 62 | 0.143000E+02 0.388715E+02 63 | 0.144000E+02 0.388665E+02 64 | 0.145000E+02 0.388613E+02 65 | 0.146000E+02 0.388560E+02 66 | 0.147000E+02 0.388505E+02 67 | 0.148000E+02 0.388449E+02 68 | 0.149000E+02 0.388391E+02 69 | 0.150000E+02 0.388331E+02 70 | 0.151000E+02 0.388269E+02 71 | 0.152000E+02 0.388207E+02 72 | 0.153000E+02 0.388143E+02 73 | 0.154000E+02 0.388077E+02 74 | 0.155000E+02 0.388010E+02 75 | 0.156000E+02 0.387941E+02 76 | 0.157000E+02 0.387871E+02 77 | 0.158000E+02 0.387800E+02 78 | 0.159000E+02 0.387727E+02 79 | 0.160000E+02 0.387652E+02 80 | 0.161000E+02 0.387576E+02 81 | 0.162000E+02 0.387498E+02 82 | 0.163000E+02 0.387418E+02 83 | 0.164000E+02 0.387337E+02 84 | 0.165000E+02 0.387253E+02 85 | 0.166000E+02 0.387168E+02 86 | 0.167000E+02 0.387080E+02 87 | 0.168000E+02 0.386990E+02 88 | 0.169000E+02 0.386899E+02 89 | 0.170000E+02 0.386806E+02 90 | 0.171000E+02 0.386711E+02 91 | 0.172000E+02 0.386615E+02 92 | 0.173000E+02 0.386517E+02 93 | 0.174000E+02 0.386418E+02 94 | 0.175000E+02 0.386318E+02 95 | 0.176000E+02 0.386218E+02 96 | 0.177000E+02 0.386117E+02 97 | 0.178000E+02 0.386016E+02 98 | 0.179000E+02 0.385914E+02 99 | 0.180000E+02 0.385812E+02 100 | 0.181000E+02 0.385710E+02 101 | 0.182000E+02 0.385607E+02 102 | 0.183000E+02 0.385504E+02 103 | 0.184000E+02 0.385400E+02 104 | 0.185000E+02 0.385295E+02 105 | 0.186000E+02 0.385188E+02 106 | 0.187000E+02 0.385079E+02 107 | 0.188000E+02 0.384969E+02 108 | 0.189000E+02 0.384856E+02 109 | 0.190000E+02 0.384739E+02 110 | 0.191000E+02 0.384618E+02 111 | 0.192000E+02 0.384491E+02 112 | 0.193000E+02 
0.384357E+02 113 | 0.194000E+02 0.384215E+02 114 | 0.195000E+02 0.384062E+02 115 | 0.196000E+02 0.383895E+02 116 | 0.197000E+02 0.383710E+02 117 | 0.198000E+02 0.383504E+02 118 | 0.199000E+02 0.383270E+02 119 | 0.200000E+02 0.383002E+02 120 | 0.201000E+02 0.382692E+02 121 | 0.202000E+02 0.382329E+02 122 | 0.203000E+02 0.381906E+02 123 | 0.204000E+02 0.381414E+02 124 | 0.205000E+02 0.380860E+02 125 | 0.206000E+02 0.380442E+02 126 | 0.207000E+02 0.379812E+02 127 | 0.208000E+02 0.379074E+02 128 | 0.209000E+02 0.378249E+02 129 | 0.210000E+02 0.377260E+02 130 | 0.211000E+02 0.376051E+02 131 | -------------------------------------------------------------------------------- /nmmn/bayes.py: -------------------------------------------------------------------------------- 1 | """ 2 | Methods for dealing with Bayesian statistics 3 | ============================================== 4 | 5 | e.g. priors, posteriors, joint density plots. 6 | 7 | Right now the module is focused around PyMC, but I am migrating 8 | to emcee. 9 | 10 | .. todo:: plot confidence/credibility interval of a model 11 | """ 12 | 13 | import numpy, pylab, scipy, scipy.stats 14 | 15 | 16 | 17 | 18 | 19 | def joint_density(X, Y, bounds=None): 20 | """ 21 | Plots joint distribution of variables. 22 | Inherited from method in src/graphics.py module in project 23 | git://github.com/aflaxman/pymc-example-tfr-hdi.git 24 | """ 25 | if bounds: 26 | X_min, X_max, Y_min, Y_max = bounds 27 | else: 28 | X_min = X.min() 29 | X_max = X.max() 30 | Y_min = Y.min() 31 | Y_max = Y.max() 32 | 33 | pylab.plot(X, Y, linestyle='none', marker='o', color='green', mec='green', alpha=.2, zorder=-99) 34 | 35 | gkde = scipy.stats.gaussian_kde([X, Y]) 36 | x,y = pylab.mgrid[X_min:X_max:(X_max-X_min)/25.,Y_min:Y_max:(Y_max-Y_min)/25.] 37 | z = pylab.array(gkde.evaluate([x.flatten(), y.flatten()])).reshape(x.shape) 38 | pylab.contour(x, y, z, linewidths=2) 39 | 40 | pylab.axis([X_min, X_max, Y_min, Y_max]) 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | def allplot(xb,yb,bins=30,fig=1,xlabel='x',ylabel='y'): 49 | """ 50 | Input: 51 | X,Y : objects referring to the variables produced by PyMC that you want 52 | to analyze. Example: X=M.theta, Y=M.slope. 53 | 54 | Inherited from Tommy LE BLANC's code at astroplotlib|STSCI. 55 | """ 56 | #X,Y=xb.trace(),yb.trace() 57 | X,Y=xb,yb 58 | 59 | #pylab.rcParams.update({'font.size': fontsize}) 60 | fig=pylab.figure(fig) 61 | pylab.clf() 62 | 63 | gs = pylab.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1,3], wspace=0.07, hspace=0.07) 64 | scat=pylab.subplot(gs[2]) 65 | histx=pylab.subplot(gs[0], sharex=scat) 66 | histy=pylab.subplot(gs[3], sharey=scat) 67 | #scat=fig.add_subplot(2,2,3) 68 | #histx=fig.add_subplot(2,2,1, sharex=scat) 69 | #histy=fig.add_subplot(2,2,4, sharey=scat) 70 | 71 | # Scatter plot 72 | scat.plot(X, Y,linestyle='none', marker='o', color='green', mec='green',alpha=.2, zorder=-99) 73 | 74 | gkde = scipy.stats.gaussian_kde([X, Y]) 75 | x,y = numpy.mgrid[X.min():X.max():(X.max()-X.min())/25.,Y.min():Y.max():(Y.max()-Y.min())/25.] 
] 76 | z = numpy.array(gkde.evaluate([x.flatten(), y.flatten()])).reshape(x.shape) 77 | scat.contour(x, y, z, linewidths=2) 78 | scat.set_xlabel(xlabel) 79 | scat.set_ylabel(ylabel) 80 | 81 | # X-axis histogram 82 | histx.hist(X, bins, histtype='stepfilled') 83 | pylab.setp(histx.get_xticklabels(), visible=False) # no X label 84 | #histx.xaxis.set_major_formatter(pylab.NullFormatter()) # no X label 85 | 86 | # Y-axis histogram 87 | histy.hist(Y, bins, histtype='stepfilled', orientation='horizontal') 88 | pylab.setp(histy.get_yticklabels(), visible=False) # no Y label 89 | #histy.yaxis.set_major_formatter(pylab.NullFormatter()) # no Y label 90 | 91 | #pylab.minorticks_on() 92 | #pylab.subplots_adjust(hspace=0.1) 93 | #pylab.subplots_adjust(wspace=0.1) 94 | pylab.draw() 95 | pylab.show() 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | def jointplotx(X,Y,xlabel=None,ylabel=None,binsim=40,binsh=20,binscon=15): 106 | """ 107 | Plots the joint distribution of posteriors for X and Y, including the 1D 108 | histograms showing the median and standard deviations. Uses simple method 109 | for drawing the confidence contours compared to jointplot (which is wrong). 110 | 111 | The work that went in creating this method is shown, step by step, in 112 | the ipython notebook "error contours.ipynb". Sources of inspiration: 113 | - http://python4mpia.github.io/intro/quick-tour.html 114 | 115 | Usage: 116 | >>> jointplotx(M.rtr.trace(),M.mdot.trace(),xlabel='$\log \ r_{\\rm tr}$', ylabel='$\log \ \dot{m}$') 117 | """ 118 | # Generates 2D histogram for image 119 | histt, xt, yt = numpy.histogram2d(X, Y, bins=[binsim,binsim], density=False) 120 | histt = numpy.transpose(histt) # Beware: numpy switches axes, so switch back. 121 | 122 | # assigns correct proportions to subplots 123 | fig=pylab.figure() 124 | gs = pylab.GridSpec(2, 2, width_ratios=[3,1], height_ratios=[1,3], wspace=0.001, hspace=0.001) 125 | con=pylab.subplot(gs[2]) 126 | histx=pylab.subplot(gs[0], sharex=con) 127 | histy=pylab.subplot(gs[3], sharey=con) 128 | 129 | # Image 130 | con.imshow(histt,extent=[xt[0],xt[-1], yt[0],yt[-1]],origin='lower',cmap=pylab.cm.gray_r,aspect='auto') 131 | 132 | # Overplot with error contours 1,2 sigma 133 | # Contour plot 134 | histdata, x, y = numpy.histogram2d(X, Y, bins=[binscon,binscon], density=False) 135 | histdata = numpy.transpose(histdata) # Beware: numpy switches axes, so switch back. 136 | pmax = histdata.max() 137 | cs=con.contour(histdata, levels=[0.68*pmax,0.05*pmax], extent=[x[0],x[-1], y[0],y[-1]], colors=['black','blue']) 138 | # use dictionary in order to assign your own labels to the contours.
139 | #fmtdict = {s[0]:r'$1\sigma$',s[1]:r'$2\sigma$'} 140 | #con.clabel(cs, fmt=fmtdict, inline=True, fontsize=20) 141 | if xlabel!=None: con.set_xlabel(xlabel) 142 | if ylabel!=None: con.set_ylabel(ylabel) 143 | 144 | # X-axis histogram 145 | histx.hist(X, binsh, histtype='stepfilled',facecolor='lightblue') 146 | pylab.setp(histx.get_xticklabels(), visible=False) # no X label 147 | pylab.setp(histx.get_yticklabels(), visible=False) # no Y label 148 | # Vertical lines with median and 1sigma confidence 149 | yax=histx.set_ylim() 150 | histx.plot([numpy.median(X),numpy.median(X)],[yax[0],yax[1]],'k-',linewidth=2) # median 151 | xsd=scipy.stats.scoreatpercentile(X, [15.87,84.13]) 152 | histx.plot([xsd[0],xsd[0]],[yax[0],yax[1]],'k--') # -1sd 153 | histx.plot([xsd[-1],xsd[-1]],[yax[0],yax[1]],'k--') # +1sd 154 | 155 | # Y-axis histogram 156 | histy.hist(Y, binsh, histtype='stepfilled', orientation='horizontal',facecolor='lightyellow') 157 | pylab.setp(histy.get_yticklabels(), visible=False) # no Y label 158 | pylab.setp(histy.get_xticklabels(), visible=False) # no X label 159 | # Vertical lines with median and 1sigma confidence 160 | xax=histy.set_xlim() 161 | histy.plot([xax[0],xax[1]],[numpy.median(Y),numpy.median(Y)],'k-',linewidth=2) # median 162 | ysd=scipy.stats.scoreatpercentile(Y, [15.87,84.13]) 163 | histy.plot([xax[0],xax[1]],[ysd[0],ysd[0]],'k--') # -1sd 164 | histy.plot([xax[0],xax[1]],[ysd[-1],ysd[-1]],'k--') # +1sd 165 | 166 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. 
coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 1>NUL 2>NUL 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\nmmn.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\nmmn.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 
157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
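# e.g. "make html SPHINXOPTS='-W'" turns Sphinx warnings into errors (an illustrative override; any variable below can be set this way)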
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | #BUILDDIR = _build 9 | BUILDDIR = /Users/nemmen/Dropbox/codes/python/nmmn-ghpages 10 | 11 | 12 | # User-friendly check for sphinx-build 13 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 14 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 15 | endif 16 | 17 | # Internal variables. 18 | PAPEROPT_a4 = -D latex_paper_size=a4 19 | PAPEROPT_letter = -D latex_paper_size=letter 20 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | # the i18n builder cannot share the environment and doctrees with the others 22 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 23 | 24 | .PHONY: help 25 | help: 26 | @echo "Please use \`make ' where is one of" 27 | @echo " html to make standalone HTML files" 28 | @echo " dirhtml to make HTML files named index.html in directories" 29 | @echo " singlehtml to make a single large HTML file" 30 | @echo " pickle to make pickle files" 31 | @echo " json to make JSON files" 32 | @echo " htmlhelp to make HTML files and a HTML help project" 33 | @echo " qthelp to make HTML files and a qthelp project" 34 | @echo " applehelp to make an Apple Help Book" 35 | @echo " devhelp to make HTML files and a Devhelp project" 36 | @echo " epub to make an epub" 37 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 38 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 39 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 40 | @echo " text to make text files" 41 | @echo " man to make manual pages" 42 | @echo " texinfo to make Texinfo files" 43 | @echo " info to make Texinfo files and run them through makeinfo" 44 | @echo " gettext to make PO message catalogs" 45 | @echo " changes to make an overview of all changed/added/deprecated items" 46 | @echo " xml to make Docutils-native XML files" 47 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 48 | @echo " linkcheck to check all external links for integrity" 49 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 50 | @echo " coverage to run coverage check of the documentation (if enabled)" 51 | 52 | .PHONY: clean 53 | clean: 54 | rm -rf $(BUILDDIR)/* 55 | 56 | .PHONY: html 57 | html: 58 | #$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 59 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR) 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)." 62 | 63 | 64 | .PHONY: dirhtml 65 | dirhtml: 66 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 67 | @echo 68 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 69 | 70 | .PHONY: singlehtml 71 | singlehtml: 72 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 73 | @echo 74 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 75 | 76 | .PHONY: pickle 77 | pickle: 78 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 79 | @echo 80 | @echo "Build finished; now you can process the pickle files." 81 | 82 | .PHONY: json 83 | json: 84 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 85 | @echo 86 | @echo "Build finished; now you can process the JSON files." 
87 | 88 | .PHONY: htmlhelp 89 | htmlhelp: 90 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 91 | @echo 92 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 93 | ".hhp project file in $(BUILDDIR)/htmlhelp." 94 | 95 | .PHONY: qthelp 96 | qthelp: 97 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 98 | @echo 99 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 100 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 101 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nmmn.qhcp" 102 | @echo "To view the help file:" 103 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nmmn.qhc" 104 | 105 | .PHONY: applehelp 106 | applehelp: 107 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 108 | @echo 109 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 110 | @echo "N.B. You won't be able to view it unless you put it in" \ 111 | "~/Library/Documentation/Help or install it in your application" \ 112 | "bundle." 113 | 114 | .PHONY: devhelp 115 | devhelp: 116 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 117 | @echo 118 | @echo "Build finished." 119 | @echo "To view the help file:" 120 | @echo "# mkdir -p $$HOME/.local/share/devhelp/nmmn" 121 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/nmmn" 122 | @echo "# devhelp" 123 | 124 | .PHONY: epub 125 | epub: 126 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 127 | @echo 128 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 129 | 130 | .PHONY: latex 131 | latex: 132 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 133 | @echo 134 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 135 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 136 | "(use \`make latexpdf' here to do that automatically)." 137 | 138 | .PHONY: latexpdf 139 | latexpdf: 140 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 141 | @echo "Running LaTeX files through pdflatex..." 142 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 143 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 144 | 145 | .PHONY: latexpdfja 146 | latexpdfja: 147 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 148 | @echo "Running LaTeX files through platex and dvipdfmx..." 149 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 150 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 151 | 152 | .PHONY: text 153 | text: 154 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 155 | @echo 156 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 157 | 158 | .PHONY: man 159 | man: 160 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 161 | @echo 162 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 163 | 164 | .PHONY: texinfo 165 | texinfo: 166 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 167 | @echo 168 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 169 | @echo "Run \`make' in that directory to run these through makeinfo" \ 170 | "(use \`make info' here to do that automatically)." 171 | 172 | .PHONY: info 173 | info: 174 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 175 | @echo "Running Texinfo files through makeinfo..." 176 | make -C $(BUILDDIR)/texinfo info 177 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
178 | 179 | .PHONY: gettext 180 | gettext: 181 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 182 | @echo 183 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 184 | 185 | .PHONY: changes 186 | changes: 187 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 188 | @echo 189 | @echo "The overview file is in $(BUILDDIR)/changes." 190 | 191 | .PHONY: linkcheck 192 | linkcheck: 193 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 194 | @echo 195 | @echo "Link check complete; look for any errors in the above output " \ 196 | "or in $(BUILDDIR)/linkcheck/output.txt." 197 | 198 | .PHONY: doctest 199 | doctest: 200 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 201 | @echo "Testing of doctests in the sources finished, look at the " \ 202 | "results in $(BUILDDIR)/doctest/output.txt." 203 | 204 | .PHONY: coverage 205 | coverage: 206 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 207 | @echo "Testing of coverage in the sources finished, look at the " \ 208 | "results in $(BUILDDIR)/coverage/python.txt." 209 | 210 | .PHONY: xml 211 | xml: 212 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 213 | @echo 214 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 215 | 216 | .PHONY: pseudoxml 217 | pseudoxml: 218 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 219 | @echo 220 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 221 | -------------------------------------------------------------------------------- /nmmn/fermi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Methods to handle Fermi LAT data 3 | ===================================== 4 | 5 | Handle output of Fermi analysis. 6 | """ 7 | 8 | import os, sys 9 | import pylab, numpy 10 | 11 | 12 | 13 | 14 | 15 | 16 | def getconfig(configfile="analysis.conf"): 17 | """ 18 | Reads config file and gets parameters. 19 | """ 20 | import configobj 21 | config = configobj.ConfigObj(configfile) 22 | 23 | # gets useful things 24 | folder = config['out'] 25 | cmapfile=folder+"/"+config['target']['name']+'_'+config['file']['tag']+'_CountMap.fits' 26 | modelmap=folder+"/"+config['target']['name']+'_'+config['file']['tag']+'_ModelMap.fits' 27 | residuals=folder+"/"+config['target']['name']+'_Residual_Model_cmap.fits' 28 | images = [residuals, modelmap, cmapfile] 29 | tsdir=folder+"/TSMap/" 30 | tsfits=folder+"/"+config['target']['name']+'_'+config['file']['tag']+'_TSMap.npz' # tsmap filename 31 | 32 | # create dictionary with useful values 33 | useful = {'folder': folder, 'cmapfile': cmapfile, 'modelmap':modelmap, 'residuals':residuals, 'images':images, 'tsdir':tsdir, 'tsfits':tsfits} 34 | 35 | return config, useful 36 | 37 | 38 | 39 | 40 | 41 | def plotMaps(configfile="analysis.conf",cmap=None): 42 | """ 43 | Given the directory where the results of an Enrico analysis are 44 | located (enrico_sed), this method plots the different count maps 45 | (observed, model and residuals). 46 | 47 | This is the fixed version of the script plotMaps.py from the Enrico 48 | distribution. 49 | 50 | :param configdir: directory with "/" at the end that points to the place 51 | where Enrico output files are located. 52 | :returns: image file SOURCE_Maps.png in the source folder. 
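Example (illustrative; assumes the current directory holds an Enrico analysis.conf and the corresponding output maps):

>>> import nmmn.fermi
>>> nmmn.fermi.plotMaps('analysis.conf')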
53 | """ 54 | import astropy, astropy.io.fits, astropy.wcs 55 | import aplpy 56 | 57 | config,c=getconfig(configfile) 58 | 59 | def set_hgps_style(f): 60 | """Set HGPS style for a f = aplpy.FITSFigure""" 61 | f.ticks.set_xspacing(2) 62 | f.ticks.set_yspacing(2) 63 | f.ticks.set_linewidth(1.5) 64 | f.tick_labels.set_xformat('dd') 65 | f.tick_labels.set_yformat('dd') 66 | f.tick_labels.set_style('colons') 67 | f.axis_labels.set_xtext('Right Ascension (deg)') 68 | f.axis_labels.set_ytext('Declination (deg)') 69 | 70 | # Determine image center and width / height 71 | dpi = 2000 72 | header = astropy.io.fits.getheader(c['cmapfile']) 73 | wcs = astropy.wcs.WCS(header) 74 | header['NAXIS1'] / dpi 75 | header['NAXIS2'] / dpi 76 | lon, lat = header['NAXIS1'] / 2., header['NAXIS2'] / 2. 77 | x_center, y_center = wcs.wcs_pix2world(lon, lat, 0) 78 | radius = header['CDELT2'] * header['NAXIS2'] / 2. 79 | 80 | # Computing the sub-figure sizes is surprisingly hard 81 | figsize=(5, 15) 82 | figure = pylab.figure(figsize=figsize) 83 | axis_ratio = figsize[0] / float(figsize[1]) 84 | edge_margin_x = 0.12 85 | edge_margin_y = edge_margin_x * axis_ratio 86 | edge_margin_x_up = 0.01 87 | edge_margin_y_up = edge_margin_x_up * axis_ratio 88 | inner_margin_x = 0.1 89 | inner_margin_y = inner_margin_x * axis_ratio 90 | size_x = (1 - edge_margin_x - edge_margin_x_up) 91 | size_y = (1 - edge_margin_y - edge_margin_y_up - 2 * inner_margin_y) / 3 92 | 93 | for i, image in enumerate(c['images']): 94 | subplot = [edge_margin_x, edge_margin_y + i * (size_y + inner_margin_y), size_x, size_y] 95 | f = aplpy.FITSFigure(image, figure=figure, subplot=subplot) 96 | f.recenter(x_center, y_center, 0.95 * radius) 97 | set_hgps_style(f) 98 | f.show_colorscale(stretch='power', exponent=1, cmap=cmap) 99 | #f.show_colorbar() 100 | if i==0: f.show_markers(x_center, y_center, edgecolor='Black', s=400.) 101 | if i==1: 102 | f.show_markers(x_center, y_center, edgecolor='White', s=400.) 103 | f.show_regions(c['folder']+'/Roi_model.reg') 104 | if i==2: f.show_markers(x_center, y_center, edgecolor='Black', s=400.) 105 | 106 | plotfile =c['folder']+"/"+config['target']['name']+"_Maps.png" 107 | print('Writing {}'.format(plotfile)) 108 | figure.savefig(plotfile) 109 | 110 | 111 | def plotMap(counts,out='tmp.png',cmap=None,roi=None): 112 | """ 113 | This method plots some count map. Inherited from the method plotMaps 114 | above. 115 | 116 | :param counts: FITS file with counts to be plotted 117 | :param cmap: desired colormap for image 118 | :param roi: DS9 region file if present, otherwise leave None 119 | :param out: output filename and format 120 | :returns: image file 'tmp.png' in the source folder. 121 | """ 122 | import astropy, astropy.io.fits, astropy.wcs 123 | import aplpy 124 | 125 | def set_hgps_style(f): 126 | """Set HGPS style for a f = aplpy.FITSFigure""" 127 | f.ticks.set_xspacing(2) 128 | f.ticks.set_yspacing(2) 129 | f.ticks.set_linewidth(1.5) 130 | f.tick_labels.set_xformat('dd') 131 | f.tick_labels.set_yformat('dd') 132 | f.tick_labels.set_style('colons') 133 | f.axis_labels.set_xtext('Right Ascension (deg)') 134 | f.axis_labels.set_ytext('Declination (deg)') 135 | 136 | # Determine image center and width / height 137 | dpi = 2000 138 | header = astropy.io.fits.getheader(counts) 139 | wcs = astropy.wcs.WCS(header) 140 | header['NAXIS1'] / dpi 141 | header['NAXIS2'] / dpi 142 | lon, lat = header['NAXIS1'] / 2., header['NAXIS2'] / 2. 
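# (pixel -> sky conversion happens next: the central pixel computed above is converted to world coordinates, and the recenter radius is half of the image height in degrees)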
143 | x_center, y_center = wcs.wcs_pix2world(lon, lat, 0) 144 | radius = header['CDELT2'] * header['NAXIS2'] / 2. 145 | 146 | f = aplpy.FITSFigure(counts) 147 | f.recenter(x_center, y_center, 0.95 * radius) 148 | set_hgps_style(f) 149 | f.show_colorscale(stretch='power', exponent=1, cmap=cmap) 150 | f.show_colorbar() 151 | f.show_markers(x_center, y_center, edgecolor='White', s=400.) 152 | if roi is not None: f.show_regions(roi) 153 | 154 | print('Writing {}'.format(out)) 155 | pylab.savefig(out) 156 | 157 | 158 | 159 | 160 | 161 | 162 | def plotTSmap(configfile="analysis.conf") : 163 | """ From Enrico/tsmap.py. 164 | 165 | Gather the results of the evaluation of 166 | each pixel and fill a fits file""" 167 | 168 | # gets parameters from the Enrico configuration file 169 | config,c=getconfig(configfile) 170 | 171 | # Read the cmap produced before to get the grid for the TS map 172 | npix = int(config['TSMap']['npix']) 173 | ts=numpy.zeros((npix,npix)) 174 | ra=ts.copy() 175 | dec=ts.copy() 176 | 177 | # read the results for each pixel 178 | for i in range(npix): 179 | for j in range(npix): 180 | try : 181 | lines = open(c['tsdir']+PixelFile(i,j),"r").readlines() 182 | tsval = float(lines[0].split()[2]) # TS value 183 | raval = float(lines[0].split()[0]) # RA 184 | decval = float(lines[0].split()[1]) # dec 185 | except : 186 | print("Cannot find, open or read "+c['tsdir']+PixelFile(i,j)) 187 | tsval, raval, decval = 0., 0., 0. 188 | ts[i,j] = tsval 189 | ra[i,j] = raval 190 | dec[i,j] = decval 191 | 192 | 193 | # save file 194 | numpy.savez(c['tsfits'], TS=ts,ra=ra,dec=dec) 195 | print("TS Map saved in "+c['tsfits']) 196 | 197 | return ra,dec,ts 198 | 199 | 200 | 201 | 202 | def PixelFile(i,j): 203 | """ return the name of a file where the result of 1 pixel 204 | evaluation will be stored 205 | 206 | From enrico/tsmap.py. 207 | """ 208 | return 'Pixel_'+str(i)+'_'+str(j) 209 | 210 | 211 | 212 | 213 | 214 | def converttime(y): 215 | """ 216 | Converts from MET Fermi time to a python datetime tuple. 217 | 218 | >>> converttime(386817828) 219 | 220 | returns datetime.datetime(2013, 4, 5, 1, 23, 48). 221 | 222 | :param y: a float or array of floats 223 | :returns: a datetime structure (or list) with the date corresponding to the input float or array 224 | 225 | y is the amount of seconds since 2001.0 UTC (the Fermi mission elapsed time reference). 226 | Beware that leap seconds are ignored in this conversion. 227 | 228 | References: 229 | 230 | - http://stackoverflow.com/questions/19305991/convert-fractional-years-to-a-real-date-in-python 231 | - http://heasarc.gsfc.nasa.gov/cgi-bin/Tools/xTime/xTime.pl 232 | """ 233 | import datetime 234 | 235 | t0=2001 236 | 237 | if numpy.size(y)==1: # if input is float 238 | day_one = datetime.datetime(t0,1,1) # initial time MET 239 | d = datetime.timedelta(seconds=y) # dt since t0 240 | date = d + day_one 241 | else: # if input is list/array 242 | date=[] 243 | 244 | for i, yi in enumerate(y): 245 | day_one = datetime.datetime(t0,1,1) 246 | d = datetime.timedelta(seconds=yi) 247 | date.append(d + day_one) 248 | 249 | return date 250 | 251 | 252 | 253 | 254 | 255 | def starttime(t): 256 | """ 257 | Fixes the start time of observations taken with LAT. This assumes that the 258 | time array in your light curve is in days already, and the beginning of 259 | your analysis coincides with t0 for LAT.
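A hedged example (made-up light-curve times, in days since the start of the analysis):

>>> import numpy
>>> t = numpy.array([0., 30., 60.])
>>> tdays, tyears = starttime(t)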
260 | 261 | :param t: array with input times given in days, with t0 coinciding with the beginning of LAT 262 | :returns: t (days), t (years) 263 | """ 264 | import pendulum 265 | 266 | # start of observations by Fermi 267 | tp=pendulum.datetime(2008,8,4,15,43,37)-pendulum.datetime(2008,1,1) # requires pendulum>=2 268 | t0=tp.days # how many days since 2008 for the beginning of Fermi observations 269 | 270 | t=t-t[0]+t0 # fixes t array 271 | ty=2008.+t/365. # time in years 272 | 273 | return t,ty 274 | 275 | 276 | -------------------------------------------------------------------------------- /nmmn/lsd.py: -------------------------------------------------------------------------------- 1 | """ 2 | LSD operations = lists, sets, dictionaries (and arrays) 3 | ========================================================= 4 | """ 5 | 6 | import numpy 7 | import scipy 8 | import scipy.stats 9 | 10 | 11 | def cmset_and(x,y): 12 | """ 13 | Usage: 14 | 15 | >>> cmset_and(x,y) 16 | 17 | returns the index of the elements of array x which are also present in the 18 | array y. 19 | 20 | This is equivalent to using the IDL command 21 | 22 | >>> botha=cmset_op(namea, 'AND', nameb, /index) 23 | 24 | i.e. performs the same thing as the IDL routine `cmset_op <http://cow.physics.wisc.edu/~craigm/idl/idl.html>`_. 25 | """ 26 | 27 | idel=[] # list of indexes of x elements which are also in y 28 | i=0 29 | for xx in x: 30 | if xx in y: idel.append(i) 31 | i=i+1 32 | 33 | return idel 34 | 35 | 36 | 37 | 38 | def cmsetsort_and(x,y): 39 | """ 40 | Usage: 41 | 42 | >>> cmsetsort_and(x,y) 43 | 44 | returns the index of the elements of array x which are also present in the 45 | array y. 46 | 47 | The resulting elements have the same order as the ones in y. For 48 | instance, if you run 49 | 50 | >>> i=cmsetsort_and(x,y) 51 | >>> x[i]==y 52 | 53 | this will return an array of True values, whereas with cmset_and it is 54 | not guaranteed that all elements of x[i] and y would match. 55 | 56 | Inherited from :func:`nmmn.lsd.cmset_and`. 57 | """ 58 | 59 | idel=[] # list of indexes of x elements which are also in y 60 | i=0 61 | for yy in y: 62 | i=numpy.where(x==yy) 63 | idel.append( i[0].item() ) 64 | 65 | return idel 66 | 67 | 68 | 69 | 70 | def cmset_not(x,y): 71 | """ 72 | Usage: 73 | 74 | >>> cmset_not(x,y) 75 | 76 | returns the index of the elements of array x which are not present in the 77 | array y. 78 | 79 | This is equivalent to using the IDL command 80 | SET = CMSET_OP(A, 'AND', /NOT2, B, /INDEX) ; A but not B 81 | i.e. performs the same thing as the IDL routine cmset_op from 82 | http://cow.physics.wisc.edu/~craigm/idl/idl.html. 83 | """ 84 | 85 | idel=[] # list of indexes of x elements which are NOT in y 86 | i=0 87 | for xx in x: 88 | if xx not in y: idel.append(i) 89 | i=i+1 90 | 91 | return idel 92 | 93 | 94 | 95 | def nanzero(x): 96 | """ 97 | Set nan elements to zero in the array. 98 | """ 99 | # Index of nan elements 100 | i=numpy.where(numpy.isnan(x)==True) 101 | 102 | y=x.copy() 103 | y[i]=0. 104 | #y[i]=1e-20 105 | 106 | # Returns the array with the nan elements set to zero 107 | return y 108 | 109 | 110 | 111 | 112 | def delnan(x): 113 | """ 114 | Remove nan elements from the array. 115 | """ 116 | # Index of nan elements 117 | i=numpy.where(numpy.isnan(x)==True) 118 | 119 | # Removes the nan elements 120 | return numpy.delete(x,i) 121 | 122 | 123 | 124 | 125 | def delweird(x): 126 | """ 127 | Remove nan or inf elements from the array.
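A hedged example with made-up values:

>>> import numpy
>>> x = numpy.array([1., numpy.nan, 2., numpy.inf])
>>> delweird(x)
array([1., 2.])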
128 | """ 129 | # Index of nan elements 130 | i=numpy.where( (numpy.isnan(x)==True) | (numpy.isinf(x)==True) ) 131 | 132 | # Removes the nan elements 133 | return numpy.delete(x,i) 134 | 135 | 136 | 137 | def findnan(x): 138 | """ 139 | Return index of nan elements in the array. 140 | """ 141 | # Index of nan elements 142 | i=numpy.where(numpy.isnan(x)==True) 143 | 144 | return i 145 | 146 | 147 | 148 | 149 | 150 | def replacevals(x,minval): 151 | """ 152 | Replace all values in array x for which abs(x)<=minval with x=sign(x)*minval. 153 | """ 154 | i=numpy.where(numpy.abs(x)<=minval) 155 | y=x.copy() 156 | y[i]=numpy.sign(y[i])*minval 157 | 158 | return y 159 | 160 | 161 | 162 | 163 | def search(xref, x): 164 | """ 165 | Search for the element in an array x with the value nearest xref. 166 | Piece of code based on http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array 167 | 168 | >>> i=search(xref, x) 169 | 170 | :param xref: input number, array or list of reference values 171 | :param x: input array 172 | :returns: index of the x-elements with values nearest to xref: 173 | """ 174 | if numpy.size(xref)==1: 175 | i=(numpy.abs(x-xref)).argmin() 176 | else: 177 | i=[] 178 | 179 | for y in xref: 180 | i.append( (numpy.abs(x-y)).argmin() ) 181 | 182 | return i 183 | 184 | 185 | 186 | def sortindex(x,**kwargs): 187 | """ 188 | Returns the list of indexes, ordered according to the numerical value of each 189 | element of x. 190 | 191 | :param x: input array or list. 192 | :returns: list of element indexes. 193 | """ 194 | f=lambda i: x[i] 195 | 196 | return sorted( range(numpy.size(x)) , key=f,**kwargs) 197 | 198 | 199 | 200 | 201 | def norm(x1,x2=None): 202 | """ 203 | Normalizes x1. If also given as input x2, then normalizes x1 to x2. 204 | 205 | :param x1: input array 206 | :param x2: optional 207 | :returns: normalized x1 208 | """ 209 | if x2 is None: 210 | return x1/x1.max() 211 | else: 212 | return x1*x2.max()/x1.max() 213 | 214 | 215 | 216 | 217 | 218 | def uarray(x,errx): 219 | """ 220 | With the new releases of the uncertainties and astropy.io.ascii (0.2.3, the 221 | replacement of asciitable), if I try to create an uncertainties array with 222 | the column of a table imported with ascii I run into trouble. For instance, 223 | if I use the sequence of commands below: 224 | 225 | >>> import astropy.io.ascii as asciitable 226 | >>> raff= asciitable.read('data/rafferty06.dat') 227 | >>> m,errm=raff['mass'],raff['errm'] 228 | >>> mass=unumpy.uarray(m,errm) 229 | >>> x=0.2*mass 230 | 231 | I get the error message: 232 | 233 | >>> TypeError: unsupported operand type(s) for *: 'float' and 'Column' 234 | 235 | which I can only assume is due to the new way ascii handles tables. 236 | 237 | I created this method to use as a replacement for unumpy.uarray that handles 238 | the tables created with astropy.io.ascii. 239 | 240 | Usage is the same as uncertainties.unumpy.uarray. 241 | 242 | :type x,errx: arrays created with astropy.io.ascii. 243 | :returns: uncertainties array. 244 | """ 245 | import uncertainties.unumpy as unumpy 246 | 247 | x=numpy.array(x) 248 | errx=numpy.array(errx) 249 | 250 | return unumpy.uarray(x,errx) 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | def bootstrap(v): 259 | """ 260 | Constructs Monte Carlo simulated data set using the 261 | Bootstrap algorithm. 262 | 263 | Usage: 264 | 265 | >>> bootstrap(x) 266 | 267 | where x is either an array or a list of arrays. 
If it is a 268 | list, the code returns the corresponding list of bootstrapped 269 | arrays assuming that the same position in these arrays maps the 270 | same "physical" object. 271 | 272 | Rodrigo Nemmen, http://goo.gl/8S1Oo 273 | """ 274 | if isinstance(v, list): 275 | vboot=[] # list of bootstrapped arrays 276 | n=v[0].size 277 | iran=scipy.stats.randint.rvs(0,n,size=n) # Array of random indexes 278 | for x in v: vboot.append(x[iran]) 279 | else: # if v is an array, not a list of arrays 280 | n=v.size 281 | iran=scipy.stats.randint.rvs(0,n,size=n) # Array of random indexes 282 | vboot=v[iran] 283 | 284 | return vboot 285 | 286 | 287 | 288 | 289 | def regrid(x,y,z,xnew,ynew,method='cubic'): 290 | """ 291 | Regrid 1D arrays (x,y,z) -- where z is some scalar field mapped at positions 292 | x,y -- to a 2d array Z defined in the cartesian grids xnew,ynew (1D arrays with 293 | new grid). 294 | 295 | For the interpolation method, choose nearest, linear or cubic. 296 | 297 | >>> rho=regrid(d.x,d.y,d.rho,xnew,ynew) 298 | 299 | .. todo:: need to create a 3d version of this method, paving the road for the 3d simulations. 300 | """ 301 | import scipy.interpolate 302 | 303 | # regrid the data to a nice cartesian grid 304 | Z = scipy.interpolate.griddata((x, y), z, (xnew[None,:], ynew[:,None]), method=method) 305 | 306 | # get rid of NaNs 307 | return nanzero(Z) 308 | 309 | 310 | 311 | 312 | def crop(z, x,y, xmin, xmax, ymin, ymax, all=False): 313 | """ 314 | Crops the image or 2D array, leaving only pixels inside the region 315 | you define. 316 | 317 | >>> Znew,Xnew,Ynew = crop(Z, X, Y, 0,10,-20,20, all=True) 318 | 319 | where X,Y are 1D or 2D arrays, and Z is a 2D array. 320 | 321 | :param z: 2d array 322 | :param x,y: 1d or 2d arrays. In the latter case, they should have the same shape as z 323 | :param all: should I return cropped Z,X,Y or only Z? 324 | :returns: Z_cropped, X_cropped, Y_cropped 325 | """ 326 | if x.ndim==1: # if x,y are 1D 327 | # Index tuples with elements that will be selected along each dimension 328 | i=numpy.where((x>=xmin) & (x<=xmax)) # x 329 | j=numpy.where((y>=ymin) & (y<=ymax)) # y 330 | 331 | # Defines new x and y arrays 332 | xnew,ynew=x[i],y[j] 333 | 334 | i,j=i[0],j[0] # tuples -> arrays (for matrix slicing below) 335 | znew=z[j[0]:j[-1],i[0]:i[-1]] # CAREFUL with the ordering of the indexes! 336 | elif x.ndim==2: # if x,y are 2D 337 | i=numpy.where((x[0,:]>=xmin) & (x[0,:]<=xmax)) 338 | j=numpy.where((y[:,0]>=ymin) & (y[:,0]<=ymax)) 339 | i,j=i[0],j[0] 340 | 341 | xnew=x[j[0]:j[-1],i[0]:i[-1]] 342 | ynew=y[j[0]:j[-1],i[0]:i[-1]] 343 | znew=z[j[0]:j[-1],i[0]:i[-1]] 344 | else: 345 | print("Dimensions of the input arrays are inconsistent") 346 | return 347 | 348 | if all==False: 349 | return znew 350 | else: 351 | return znew,xnew,ynew 352 | 353 | 354 | 355 | 356 | 357 | def arrAvg(alist): 358 | """ 359 | Given a list of 1D or 2D arrays, this method computes their average, 360 | returning an array with the same shape as the input. 361 | 362 | :param alist: list of arrays 363 | :returns: average, std. dev. -- arrays with the same shape as the input arrays 364 | 365 | Usage: 366 | 367 | >>> avg=arrAvg([x,y,z]) 368 | """ 369 | if alist[0].ndim==2: 370 | # join arrays together, creating 3D arrays where the third dimension is e.g.
time 371 | # or whatever index for the different arrays you want to average 372 | arr=numpy.stack(alist,axis=2) 373 | 374 | # performs the average 375 | return numpy.mean(arr,axis=2), numpy.std(arr,axis=2) 376 | elif alist[0].ndim==1: 377 | arr=numpy.stack(alist,axis=1) 378 | return numpy.mean(arr,axis=1), numpy.std(arr,axis=1) 379 | else: 380 | print("Dimensionality not supported") 381 | 382 | 383 | 384 | 385 | def string2float(s): 386 | """ 387 | Converts from an array of strings to floats. 388 | 389 | >>> string2float('28122014') 390 | 391 | returns 28122014.0. 392 | 393 | :param s: a string or list/array of strings 394 | :returns: a numpy array of floats 395 | """ 396 | if numpy.size(s)==1: # if input is a single string 397 | out=float(s) 398 | else: # if input is list/array 399 | out=[] 400 | 401 | for i, si in enumerate(s): 402 | out.append(float(si)) 403 | 404 | out=numpy.array(out) 405 | 406 | return out 407 | 408 | 409 | 410 | 411 | def allEqual(x): 412 | """ 413 | Check if all elements in an array are equal. 414 | """ 415 | from itertools import groupby 416 | 417 | g = groupby(x) 418 | 419 | return next(g, True) and not next(g, False) -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # nmmn documentation build configuration file, created by 4 | # sphinx-quickstart on Sun Sep 11 23:53:27 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | 19 | 20 | 21 | 22 | # http://read-the-docs.readthedocs.io/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules 23 | """ 24 | from mock import Mock as MagicMock 25 | 26 | class Mock(MagicMock): 27 | @classmethod 28 | def __getattr__(cls, name): 29 | return Mock() 30 | 31 | MOCK_MODULES = ['pymc'] 32 | sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) 33 | """ 34 | 35 | 36 | 37 | 38 | 39 | # If extensions (or modules to document with autodoc) are in another directory, 40 | # add these directories to sys.path here. If the directory is relative to the 41 | # documentation root, use os.path.abspath to make it absolute, like shown here. 42 | #sys.path.insert(0, os.path.abspath('.')) 43 | 44 | # -- General configuration ------------------------------------------------ 45 | 46 | # If your documentation needs a minimal Sphinx version, state it here. 47 | #needs_sphinx = '1.0' 48 | 49 | # Add any Sphinx extension module names here, as strings. They can be 50 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 51 | # ones. 52 | extensions = [ 53 | 'sphinx.ext.autodoc', 54 | 'sphinx.ext.todo', 55 | 'sphinx.ext.viewcode', 56 | 'sphinx.ext.mathjax', 57 | ] 58 | 59 | # Add any paths that contain templates here, relative to this directory. 60 | templates_path = ['_templates'] 61 | 62 | # The suffix(es) of source filenames. 63 | # You can specify multiple suffixes as a list of strings: 64 | # source_suffix = ['.rst', '.md'] 65 | source_suffix = '.rst' 66 | 67 | # The encoding of source files. 68 | #source_encoding = 'utf-8-sig' 69 | 70 | # The master toctree document.
71 | master_doc = 'index' 72 | 73 | # General information about the project. 74 | project = u'nmmn' 75 | copyright = u'2016, Author' 76 | author = u'Rodrigo Nemmen' 77 | 78 | # The version info for the project you're documenting, acts as replacement for 79 | # |version| and |release|, also used in various other places throughout the 80 | # built documents. 81 | # 82 | # The short X.Y version. 83 | version = u'' 84 | # The full version, including alpha/beta/rc tags. 85 | release = u'' 86 | 87 | # The language for content autogenerated by Sphinx. Refer to documentation 88 | # for a list of supported languages. 89 | # 90 | # This is also used if you do content translation via gettext catalogs. 91 | # Usually you set "language" from the command line for these cases. 92 | language = 'en' 93 | 94 | # There are two options for replacing |today|: either, you set today to some 95 | # non-false value, then it is used: 96 | #today = '' 97 | # Else, today_fmt is used as the format for a strftime call. 98 | #today_fmt = '%B %d, %Y' 99 | 100 | # List of patterns, relative to source directory, that match files and 101 | # directories to ignore when looking for source files. 102 | exclude_patterns = ['_build'] 103 | 104 | # The reST default role (used for this markup: `text`) to use for all 105 | # documents. 106 | #default_role = None 107 | 108 | # If true, '()' will be appended to :func: etc. cross-reference text. 109 | #add_function_parentheses = True 110 | 111 | # If true, the current module name will be prepended to all description 112 | # unit titles (such as .. function::). 113 | #add_module_names = True 114 | 115 | # If true, sectionauthor and moduleauthor directives will be shown in the 116 | # output. They are ignored by default. 117 | #show_authors = False 118 | 119 | # The name of the Pygments (syntax highlighting) style to use. 120 | pygments_style = 'sphinx' 121 | 122 | # A list of ignored prefixes for module index sorting. 123 | #modindex_common_prefix = [] 124 | 125 | # If true, keep warnings as "system message" paragraphs in the built documents. 126 | #keep_warnings = False 127 | 128 | # If true, `todo` and `todoList` produce output, else they produce nothing. 129 | todo_include_todos = True 130 | 131 | 132 | # -- Options for HTML output ---------------------------------------------- 133 | 134 | # The theme to use for HTML and HTML Help pages. See the documentation for 135 | # a list of builtin themes. 136 | html_theme = 'alabaster' 137 | 138 | # Theme options are theme-specific and customize the look and feel of a theme 139 | # further. For a list of options available for each theme, see the 140 | # documentation. 141 | #html_theme_options = {} 142 | 143 | # Add any paths that contain custom themes here, relative to this directory. 144 | #html_theme_path = [] 145 | 146 | # The name for this set of Sphinx documents. If None, it defaults to 147 | # " v documentation". 148 | #html_title = None 149 | 150 | # A shorter title for the navigation bar. Default is the same as html_title. 151 | #html_short_title = None 152 | 153 | # The name of an image file (relative to this directory) to place at the top 154 | # of the sidebar. 155 | #html_logo = None 156 | 157 | # The name of an image file (within the static path) to use as favicon of the 158 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 159 | # pixels large. 160 | #html_favicon = None 161 | 162 | # Add any paths that contain custom static files (such as style sheets) here, 163 | # relative to this directory. 
They are copied after the builtin static files, 164 | # so a file named "default.css" will overwrite the builtin "default.css". 165 | html_static_path = ['_static'] 166 | 167 | # Add any extra paths that contain custom files (such as robots.txt or 168 | # .htaccess) here, relative to this directory. These files are copied 169 | # directly to the root of the documentation. 170 | #html_extra_path = [] 171 | 172 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 173 | # using the given strftime format. 174 | #html_last_updated_fmt = '%b %d, %Y' 175 | 176 | # If true, SmartyPants will be used to convert quotes and dashes to 177 | # typographically correct entities. 178 | #html_use_smartypants = True 179 | 180 | # Custom sidebar templates, maps document names to template names. 181 | #html_sidebars = {} 182 | 183 | # Additional templates that should be rendered to pages, maps page names to 184 | # template names. 185 | #html_additional_pages = {} 186 | 187 | # If false, no module index is generated. 188 | #html_domain_indices = True 189 | 190 | # If false, no index is generated. 191 | #html_use_index = True 192 | 193 | # If true, the index is split into individual pages for each letter. 194 | #html_split_index = False 195 | 196 | # If true, links to the reST sources are added to the pages. 197 | #html_show_sourcelink = True 198 | 199 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 200 | #html_show_sphinx = True 201 | 202 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 203 | #html_show_copyright = True 204 | 205 | # If true, an OpenSearch description file will be output, and all pages will 206 | # contain a tag referring to it. The value of this option must be the 207 | # base URL from which the finished HTML is served. 208 | #html_use_opensearch = '' 209 | 210 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 211 | #html_file_suffix = None 212 | 213 | # Language to be used for generating the HTML full-text search index. 214 | # Sphinx supports the following languages: 215 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 216 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 217 | #html_search_language = 'en' 218 | 219 | # A dictionary with options for the search language support, empty by default. 220 | # Now only 'ja' uses this config value 221 | #html_search_options = {'type': 'default'} 222 | 223 | # The name of a javascript file (relative to the configuration directory) that 224 | # implements a search results scorer. If empty, the default will be used. 225 | #html_search_scorer = 'scorer.js' 226 | 227 | # Output file base name for HTML help builder. 228 | htmlhelp_basename = 'nmmndoc' 229 | 230 | # -- Options for LaTeX output --------------------------------------------- 231 | 232 | latex_elements = { 233 | # The paper size ('letterpaper' or 'a4paper'). 234 | #'papersize': 'letterpaper', 235 | 236 | # The font size ('10pt', '11pt' or '12pt'). 237 | #'pointsize': '10pt', 238 | 239 | # Additional stuff for the LaTeX preamble. 240 | #'preamble': '', 241 | 242 | # Latex figure (float) alignment 243 | #'figure_align': 'htbp', 244 | } 245 | 246 | # Grouping the document tree into LaTeX files. List of tuples 247 | # (source start file, target name, title, 248 | # author, documentclass [howto, manual, or own class]). 
249 | latex_documents = [ 250 | (master_doc, 'nmmn.tex', u'nmmn Documentation', 251 | u'Author', 'manual'), 252 | ] 253 | 254 | # The name of an image file (relative to this directory) to place at the top of 255 | # the title page. 256 | #latex_logo = None 257 | 258 | # For "manual" documents, if this is true, then toplevel headings are parts, 259 | # not chapters. 260 | #latex_use_parts = False 261 | 262 | # If true, show page references after internal links. 263 | #latex_show_pagerefs = False 264 | 265 | # If true, show URL addresses after external links. 266 | #latex_show_urls = False 267 | 268 | # Documents to append as an appendix to all manuals. 269 | #latex_appendices = [] 270 | 271 | # If false, no module index is generated. 272 | #latex_domain_indices = True 273 | 274 | 275 | # -- Options for manual page output --------------------------------------- 276 | 277 | # One entry per manual page. List of tuples 278 | # (source start file, name, description, authors, manual section). 279 | man_pages = [ 280 | (master_doc, 'nmmn', u'nmmn Documentation', 281 | [author], 1) 282 | ] 283 | 284 | # If true, show URL addresses after external links. 285 | #man_show_urls = False 286 | 287 | 288 | # -- Options for Texinfo output ------------------------------------------- 289 | 290 | # Grouping the document tree into Texinfo files. List of tuples 291 | # (source start file, target name, title, author, 292 | # dir menu entry, description, category) 293 | texinfo_documents = [ 294 | (master_doc, 'nmmn', u'nmmn Documentation', 295 | author, 'nmmn', 'One line description of project.', 296 | 'Miscellaneous'), 297 | ] 298 | 299 | # Documents to append as an appendix to all manuals. 300 | #texinfo_appendices = [] 301 | 302 | # If false, no module index is generated. 303 | #texinfo_domain_indices = True 304 | 305 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 306 | #texinfo_show_urls = 'footnote' 307 | 308 | # If true, do not generate a @detailmenu in the "Top" node's menu. 309 | #texinfo_no_detailmenu = False 310 | 311 | 312 | # -- Options for Epub output ---------------------------------------------- 313 | 314 | # Bibliographic Dublin Core info. 315 | epub_title = project 316 | epub_author = author 317 | epub_publisher = author 318 | epub_copyright = copyright 319 | 320 | # The basename for the epub file. It defaults to the project name. 321 | #epub_basename = project 322 | 323 | # The HTML theme for the epub output. Since the default themes are not 324 | # optimized for small screen space, using the same theme for HTML and epub 325 | # output is usually not wise. This defaults to 'epub', a theme designed to save 326 | # visual space. 327 | #epub_theme = 'epub' 328 | 329 | # The language of the text. It defaults to the language option 330 | # or 'en' if the language is not set. 331 | #epub_language = '' 332 | 333 | # The scheme of the identifier. Typical schemes are ISBN or URL. 334 | #epub_scheme = '' 335 | 336 | # The unique identifier of the text. This can be a ISBN number 337 | # or the project homepage. 338 | #epub_identifier = '' 339 | 340 | # A unique identification for the text. 341 | #epub_uid = '' 342 | 343 | # A tuple containing the cover image and cover page html template filenames. 344 | #epub_cover = () 345 | 346 | # A sequence of (type, uri, title) tuples for the guide element of content.opf. 347 | #epub_guide = () 348 | 349 | # HTML files that should be inserted before the pages created by sphinx. 350 | # The format is a list of tuples containing the path and title. 
351 | #epub_pre_files = [] 352 | 353 | # HTML files that should be inserted after the pages created by sphinx. 354 | # The format is a list of tuples containing the path and title. 355 | #epub_post_files = [] 356 | 357 | # A list of files that should not be packed into the epub file. 358 | epub_exclude_files = ['search.html'] 359 | 360 | # The depth of the table of contents in toc.ncx. 361 | #epub_tocdepth = 3 362 | 363 | # Allow duplicate toc entries. 364 | #epub_tocdup = True 365 | 366 | # Choose between 'default' and 'includehidden'. 367 | #epub_tocscope = 'default' 368 | 369 | # Fix unsupported image types using the Pillow. 370 | #epub_fix_images = False 371 | 372 | # Scale large images. 373 | #epub_max_image_width = 0 374 | 375 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 376 | #epub_show_urls = 'inline' 377 | 378 | # If false, no index is generated. 379 | #epub_use_index = True 380 | -------------------------------------------------------------------------------- /nmmn/misc.py: -------------------------------------------------------------------------------- 1 | """ 2 | Miscellaneous methods 3 | ====================== 4 | """ 5 | 6 | import numpy 7 | 8 | 9 | 10 | # COORDINATE TRANSFORMATIONS 11 | # =========================== 12 | # 13 | # 14 | 15 | def pol2cart(r, th): 16 | """ 17 | Converts from polar to cartesian coordinates. 18 | 19 | >>> x,y=pol2cart(r,th) 20 | """ 21 | x = r * numpy.cos(th) 22 | y = r * numpy.sin(th) 23 | return x, y 24 | 25 | 26 | def sph2cart(r, th): 27 | """ 28 | Converts from spherical polar to cartesian coordinates. 29 | 30 | >>> x,y=sph2cart(r,th) 31 | """ 32 | # spherical polar angle to polar 33 | th=-(th-numpy.pi/2.) 34 | 35 | x = r * numpy.cos(th) 36 | y = r * numpy.sin(th) 37 | return x, y 38 | 39 | 40 | 41 | def cart2pol(x, y): 42 | """ 43 | Converts from cartesian to polar coordinates. 44 | 45 | >>> r,t=cart2pol(x,y) 46 | """ 47 | r = numpy.sqrt(x**2 + y**2) 48 | t = numpy.arctan2(y, x) 49 | return r, t 50 | 51 | 52 | def cart2sph(x, y): 53 | """ 54 | Converts from cartesian to spherical polar coordinates; 55 | poles are at theta=0, equator at theta=90deg. 56 | 57 | >>> r,t=cart2sph(x,y) 58 | """ 59 | r = numpy.sqrt(x**2 + y**2) 60 | t = numpy.pi/2.-numpy.arctan2(y, x) 61 | return r, t 62 | 63 | 64 | 65 | 66 | def vel_p2c(th,vr,vth): 67 | """ 68 | Computes the cartesian components of a velocity vector which 69 | is expressed in polar coordinates, i.e. applies a change of 70 | basis. See for example discussion after eq. 4 in 71 | https://ocw.mit.edu/courses/aeronautics-and-astronautics/16-07-dynamics-fall-2009/lecture-notes/MIT16_07F09_Lec05.pdf 72 | 73 | Returns: vx, vy 74 | """ 75 | vx=vr*numpy.cos(th)-vth*numpy.sin(th) 76 | vy=vr*numpy.sin(th)+vth*numpy.cos(th) 77 | 78 | return vx, vy 79 | 80 | 81 | 82 | 83 | def vel_c2p(th,vx,vy): 84 | """ 85 | Computes the polar components of a velocity vector which 86 | is expressed in cartesian coordinates. 87 | 88 | Returns: vr, vth 89 | """ 90 | vr=vx*numpy.cos(th)+vy*numpy.sin(th) 91 | vth=-vx*numpy.sin(th)+vy*numpy.cos(th) 92 | 93 | return vr, vth 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | def evalfun(fun,x,par): 111 | """ 112 | Evaluates the function fun at each element of the array x, for the parameters 113 | provided in the array/list par. fun is assumed to return a scalar. Returns an 114 | array with fun evaluated at x. See example below.
115 | 116 | Usage: 117 | 118 | >>> p=array([1,2,3]) 119 | 120 | i.e. x=1 and the parameters are par=[2,3]; then 121 | 122 | >>> fun(p) 123 | 124 | returns a scalar 125 | 126 | >>> x=linspace(0,10,50) 127 | 128 | >>> evalfun(fun,x,[2,3]) 129 | 130 | evaluates fun at the array x and returns an array. 131 | 132 | v1 Dec. 2011 133 | """ 134 | y=numpy.zeros_like(x) 135 | 136 | for i in range(x.size): 137 | # Array consisting of [x[i], par1, par2, ...] 138 | p=numpy.concatenate(([x[i]],par)) 139 | 140 | # Function evaluated 141 | y[i]=fun(p) 142 | 143 | return y 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | def savepartial(x,y,z,obsx,obsy,obsz,outfile): 152 | """ 153 | Exports data file for partial correlation analysis with cens_tau.f. 154 | cens_tau.f quantifies the correlation between X and Y eliminating the 155 | effect of a third variable Z. I patched the Fortran code available from 156 | http://astrostatistics.psu.edu/statcodes/sc_regression.html. 157 | 158 | Method arguments: 159 | x,y,z = arrays with data for partial correlation 160 | obs? = arrays of integers. 1 if there is a genuine measurement 161 | available and 0 if there is only an upper limit i.e. censored data. 162 | 163 | In the case of this study, X=Pjet, Y=Lgamma, Z=distance. 164 | 165 | The structure of the resulting datafile is: 166 | logPjet detected? logLgamma detected? logDist detected? 167 | where the distance is in Mpc. 168 | 169 | Example: 170 | 171 | >>> savepartial(all.kp,all.lg,log10(all.d),ones_like(all.kp),ones_like(all.kp),ones_like(all.kp),'partialdata.dat') 172 | 173 | 174 | v1 Sep. 2011 175 | """ 176 | numpy.savetxt(outfile,numpy.transpose((x,obsx,y,obsy,z,obsz)),fmt='%10.4f %i %10.4f %i %10.4f %i') 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | def log2lin(dlogyl,dlogyu,logy): 186 | """ 187 | From a given uncertainty in log-space (dex) and the value of y, calculates the 188 | error in linear space. 189 | 190 | Returns a sequence with the lower and upper arrays with errors. 191 | """ 192 | # Upper error bar 193 | dyu=10**logy*(10.**dlogyu-1.) 194 | 195 | # Lower error bar 196 | dyl=-10**logy*(10.**-dlogyl-1.) 197 | 198 | return dyl, dyu 199 | 200 | 201 | 202 | 203 | 204 | def lin2log(dyl,dyu,logy): 205 | """ 206 | From a given uncertainty in linear space and the value of y, calculates the 207 | error in log space. 208 | 209 | Returns a sequence with the lower and upper arrays with errors. 210 | """ 211 | # Upper error bar 212 | dlogyu=-logy+numpy.log10(10.**logy+dyu) 213 | 214 | # Lower error bar 215 | dlogyl=+logy-numpy.log10(10.**logy-dyl) 216 | 217 | return dlogyl, dlogyu 218 | 219 | 220 | 221 | 222 | 223 | def whichbces(bces): 224 | """ 225 | Given the 'bces' string selector, returns an integer which tells the 226 | location of the BCES fitting results in the arrays returned by 227 | the bces* methods. 228 | """ 229 | # Selects the appropriate BCES fitting method 230 | import sys 231 | 232 | if bces=='ort': 233 | i=3 234 | elif bces=='y|x': 235 | i=0 236 | elif bces=='x|y': 237 | i=1 238 | elif bces=='bis': 239 | i=2 240 | else: 241 | sys.exit("Invalid BCES method selected! Please select bis, ort, y|x or x|y.") 242 | 243 | return i 244 | 245 | 246 | 247 | 248 | 249 | def mathcontour(x,y,errx,erry,cov): 250 | """ 251 | Suppose I want to draw the error ellipses for two parameters 'x' and 'y' 252 | with 1 s.d. uncertainties and the covariance between these uncertainties. 253 | I have a mathematica notebook that does that: 'chisq contour plot.nb'.
254 | 255 | This method takes the values 'x,y,errx,erry,cov' and outputs Mathematica 256 | code that I can just copy and paste in the appropriate notebook in order 257 | to draw the ellipses. 258 | """ 259 | print("x0=",x,";") 260 | print("y0=",y,";") 261 | print("\[Sigma]x=",errx,";") 262 | print("\[Sigma]y=",erry,";") 263 | print("\[Sigma]xy=",cov,";") 264 | 265 | 266 | 267 | 268 | 269 | 270 | # Methods related to datetime tuples 271 | # ==================================== 272 | # 273 | def convertyear(y): 274 | """ 275 | Converts from decimal year to a python datetime tuple. 276 | 277 | >>> convertyear(2012.9) 278 | 279 | returns datetime.datetime(2012, 11, 24, 12, 0, 0, 3). 280 | 281 | :param y: a float or array of floats 282 | :returns: a datetime structure (or list) with the date corresponding to the input float or array 283 | 284 | Reference: http://stackoverflow.com/questions/19305991/convert-fractional-years-to-a-real-date-in-python 285 | """ 286 | import datetime 287 | 288 | if numpy.size(y)==1: # if input is float 289 | year = int(y) 290 | d = datetime.timedelta(days=(y - year)*365) 291 | day_one = datetime.datetime(year,1,1) 292 | date = d + day_one 293 | else: # if input is list/array 294 | date=[] 295 | 296 | for i, yi in enumerate(y): 297 | year = int(yi) 298 | d = datetime.timedelta(days=(yi - year)*365) 299 | day_one = datetime.datetime(year,1,1) 300 | date.append(d + day_one) 301 | 302 | return date 303 | 304 | 305 | def string2year(s): 306 | """ 307 | Converts from a string in the format DDMMYYYY to a python datetime tuple. 308 | 309 | >>> string2year('28122014') 310 | 311 | returns datetime.datetime(2014, 12, 28, 0, 0). 312 | 313 | :param s: a string or list of strings 314 | :returns: a datetime structure (or list) with the date corresponding to the input string(s) 315 | 316 | Reference: https://docs.python.org/2/library/datetime.html#strftime-strptime-behavior 317 | """ 318 | import datetime 319 | 320 | if numpy.size(s)==1: # if input is a string 321 | date=datetime.datetime.strptime(str(s),'%d%m%Y') 322 | else: # if input is list/array 323 | date=[] 324 | 325 | for i, si in enumerate(s): 326 | date.append(datetime.datetime.strptime(str(si),'%d%m%Y')) 327 | 328 | return date 329 | 330 | 331 | 332 | def date2dec(date): 333 | """ 334 | Convert a python datetime tuple to decimal year. 335 | 336 | Inspired by http://stackoverflow.com/a/6451892/793218. 337 | """ 338 | import datetime 339 | import time 340 | 341 | def sinceEpoch(date): # returns seconds since epoch 342 | return time.mktime(date.timetuple()) 343 | 344 | def getyear(date): # returns decimal year for a datetime tuple 345 | year = date.year 346 | startOfThisYear = datetime.datetime(year=year, month=1, day=1) 347 | startOfNextYear = datetime.datetime(year=year+1, month=1, day=1) 348 | yearElapsed = sinceEpoch(date) - sinceEpoch(startOfThisYear) 349 | yearDuration = sinceEpoch(startOfNextYear) - sinceEpoch(startOfThisYear) 350 | fraction = yearElapsed/yearDuration 351 | return date.year + fraction 352 | 353 | if numpy.size(date)==1: 354 | year=getyear(date) 355 | else: 356 | year=[] 357 | for datei in date: 358 | year.append(getyear(datei)) 359 | 360 | return numpy.array(year) 361 | 362 | 363 | def timeIndex(obj,datefield='date'): 364 | """ 365 | Given a pandas dataframe with a 'date' column as strings, this 366 | converts them to the datetime format and makes it the df index.
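A hedged sketch (made-up column names and values):

>>> import pandas as pd
>>> df = pd.DataFrame({'date': ['2016-01-01', '2016-02-01'], 'flux': [1., 2.]})
>>> timeIndex(df)   # modifies df in place; df.index is now a DatetimeIndex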
367 | """ 368 | import pandas as pd 369 | 370 | obj[datefield]=pd.to_datetime(obj[datefield]) 371 | obj.set_index(datefield, inplace=True) 372 | 373 | 374 | 375 | 376 | 377 | 378 | 379 | 380 | def runsave(cmd,log): 381 | """ 382 | Executes command cmd and saves its standard output as log 383 | """ 384 | import subprocess 385 | 386 | # executes command 387 | p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) 388 | out,err=p.communicate() 389 | 390 | # saves output in a diagnostic file 391 | text=open(log,"w") 392 | text.write(str(out)) 393 | text.close() 394 | 395 | 396 | 397 | def scinotation(x,n=2): 398 | """ 399 | Displays a number in scientific notation. 400 | 401 | :param x: number 402 | :param n: number of significant digits to display 403 | """ 404 | import decimal 405 | fmt='%.'+str(n)+'E' 406 | s= fmt % decimal.Decimal(str(x)) 407 | 408 | return s 409 | 410 | 411 | 412 | 413 | 414 | 415 | def readmodel(file,m=1e8): 416 | """ 417 | Auxiliary method to rescale the BHBH simulation time series from Gold+14 and 418 | compare with the data. 419 | :: 420 | 421 | tphys12, t12, y12 = readmodel('Gold paper/1,2nc mdot bin.csv') 422 | 423 | :returns: time in years, time in code units, signal 424 | """ 425 | from . import astro, dsp 426 | import astropy.io.ascii as ascii 427 | import scipy.signal 428 | 429 | data = ascii.read(file) 430 | tmod,ymod=data['col1'],data['col2'] 431 | 432 | # cleans TS (remove duplicate times, regular dt for CWT later) 433 | tmod,ymod=dsp.uneven2even(tmod,ymod) 434 | 435 | # detrends data 436 | ymoddet=scipy.signal.detrend(ymod) 437 | 438 | # physical times in years 439 | const=astro.Constants() 440 | tphys=tmod*1000*const.G*m*const.solarmass/const.c**3/const.year 441 | 442 | return tphys, tmod, ymoddet 443 | 444 | 445 | 446 | 447 | def adjustmodel(file,stretch,translate,obs): 448 | """ 449 | Auxiliary method to read and normalize the BHBH simulation TS from Gold+14 450 | to the observations. 451 | :: 452 | 453 | tmod,smod=adjustmodel('/Users/nemmen/work/projects/fermi/ngc1275/Gold paper/1,2nc mdot bin.csv',5.6,2008.6,y) 454 | 455 | obs is the data you want to normalize the model to. 456 | 457 | :returns: time in years, time in code units, signal 458 | """ 459 | from . import lsd 460 | import astropy.io.ascii as ascii 461 | import scipy.signal 462 | 463 | data = ascii.read(file) 464 | tmod,ymod=data['col1'],data['col2'] 465 | 466 | # detrends and normalize data 467 | ymoddet=scipy.signal.detrend(ymod) 468 | ymod=lsd.norm(ymoddet,scipy.signal.detrend(obs)) 469 | 470 | # physical times (scaled to data) 471 | tphys=(tmod-tmod[0])*stretch+translate 472 | 473 | # BH mass implied by the scaling, in solar masses 474 | # Careful with units: CGS then converted to Msun 475 | mass=1e-3*4.04e38*(tphys[-1]-tphys[0])/(tmod[-1]-tmod[0])*31556926./1.99e33 476 | print('M = ',mass,' Msun') 477 | 478 | return tphys, ymod 479 | 480 | 481 | 482 | 483 | 484 | 485 | 486 | def paperimpact(citations,dt,impactfactor): 487 | """ 488 | Given a journal `impactfactor` and a given time interval `dt`, this 489 | computes the expected number of citations for a given paper 490 | published in that journal over `dt`. It then gives the ratio of your 491 | paper's citation over the predicted value, to give you an idea 492 | whether your paper is above or below average. 
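For instance (hedged, made-up numbers), a paper with 40 citations, 2.5 years after publication in a journal with impact factor 5, gives:

>>> paperimpact(40, 2.5, 5.)
Journal expected number of citations = 12.5
Actual citations/expected citations 3.2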
493 | 494 | :param citations: number of citations for your paper 495 | :param dt: time since your paper was published in years 496 | :param impactfactor: impact factor for the journal where the paper was published 497 | """ 498 | print("Journal expected number of citations =",dt*impactfactor) 499 | 500 | print("Actual citations/expected citations",citations/(dt*impactfactor)) 501 | 502 | 503 | 504 | 505 | def findPATH(filename,envVar="PYTHONPATH"): 506 | """ 507 | Given a PATH or PYTHONPATH environment variable, find the full path of a file 508 | among different options. From https://stackoverflow.com/a/1124851/793218 509 | 510 | :param filename: file for which full path is desired 511 | :param envVar: environment variable with path to be searched 512 | :returns: string with full path to file 513 | 514 | Example: 515 | 516 | >>> fullpath=findPATH("fastregrid.cl") 517 | """ 518 | import os 519 | 520 | for p in os.environ[envVar].split(":"): 521 | for r,d,f in os.walk(p): 522 | for files in f: 523 | if files == filename: 524 | return os.path.join(r,files) 525 | 526 | 527 | 528 | def mario(): 529 | """ 530 | Displays a nice Super Mario. :) 531 | 532 | Analogous to \mario in LaTeX. 533 | """ 534 | from IPython.display import Image 535 | from IPython.core.display import HTML 536 | return Image(url= "https://banner2.kisspng.com/20180410/kye/kisspng-new-super-mario-bros-u-super-mario-64-8-bit-5acd5c8ba05651.6908995015234080116568.jpg") 537 | 538 | 539 | 540 | def linefrompoints(x1,y1,x2,y2): 541 | """ 542 | Given two points, returns the coefficients of the straight line passing through them. 543 | 544 | :returns: values of A,B such that y=A*x+B 545 | """ 546 | A=(y2-y1)/(x2-x1) 547 | B=y1-A*x1 548 | 549 | return A, B -------------------------------------------------------------------------------- /nmmn/dsp.py: -------------------------------------------------------------------------------- 1 | """ 2 | Signal processing 3 | =================== 4 | 5 | Mostly time series. 6 | """ 7 | 8 | 9 | import numpy 10 | import pylab 11 | import scipy.signal 12 | 13 | 14 | 15 | 16 | def peaks(y,x=None,what=0,**args): 17 | """ 18 | Detects the peaks in the time series given by Y (and X if provided). 19 | 20 | :param x,y: time series input arrays 21 | :param what: select what you want -- max/0 or min/1 peaks returned 22 | :returns: xmax,ymax -- the x and y values of the peaks in the data. 23 | """ 24 | from . import peakdetect 25 | 26 | peaks=peakdetect.peakdetect(y,x,**args) 27 | 28 | if what==0 or what=="max": 29 | return zip(*peaks[0]) 30 | else: 31 | return zip(*peaks[1]) 32 | 33 | 34 | 35 | 36 | 37 | def smooth(x,window_len=11,window='hanning'): 38 | """ 39 | Smooth the data using a window with requested size. 40 | Copied from http://wiki.scipy.org/Cookbook/SignalSmooth 41 | 42 | This method is based on the convolution of a scaled window with the signal. 43 | The signal is prepared by introducing reflected copies of the signal 44 | (with the window size) in both ends so that transient parts are minimized 45 | in the beginning and end part of the output signal. 46 | 47 | :param x: the input signal 48 | :param window_len: the dimension of the smoothing window; should be an odd integer 49 | :param window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'; a flat window will produce a moving average smoothing. 50 | 51 | :returns: the smoothed signal 52 | 53 | Example 54 | 55 | >>> t=arange(-2,2,0.1) 56 | >>> x=sin(t)+randn(len(t))*0.1 57 | >>> y=smooth(x) 58 | 59 | ..
seealso:: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve, scipy.signal.lfilter 60 | 61 | .. note:: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. 62 | 63 | .. todo:: the window parameter could be the window itself if an array instead of a string 64 | """ 65 | 66 | if x.ndim != 1: 67 | raise ValueError("smooth only accepts 1 dimension arrays.") 68 | 69 | if x.size < window_len: 70 | raise ValueError("Input vector needs to be bigger than window size.") 71 | 72 | if window_len<3: 73 | return x 74 | 75 | if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: 76 | raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'") 77 | 78 | s=numpy.r_[x[window_len-1:0:-1],x,x[-1:-window_len:-1]] 79 | #print(len(s)) 80 | if window == 'flat': #moving average 81 | w=numpy.ones(window_len,'d') 82 | else: 83 | w=getattr(numpy, window)(window_len) 84 | 85 | y=numpy.convolve(w/w.sum(),s,mode='valid') 86 | return y 87 | 88 | 89 | 90 | 91 | def smoothxy(x,y,*arg,**args): 92 | """ 93 | :param x: "time" in the time series 94 | :param y: y variable 95 | :returns: smoothed y and corresponding x arrays 96 | """ 97 | return smooth(x,*arg,**args), smooth(y,*arg,**args) 98 | 99 | 100 | 101 | 102 | def varsig(f,df): 103 | """ 104 | Quantifies significance of variability of time series, taking into 105 | account the uncertainties. 106 | 107 | Given the time series signal *y* and the corresponding uncertainties 108 | :math:`\sigma y`, the test statistics *T* is defined as 109 | 110 | .. math:: T \equiv \\frac{y_{i+1}-y_i}{\sigma y_i}. 111 | 112 | *T* will give the significance of variability at each point in time, 113 | in standard deviations of the preceding noise point. 114 | 115 | **Usage:** 116 | 117 | Computes TS and plots time series, highlighting the 1sigma variability 118 | interval in gray: 119 | 120 | >>> q=varsig(flux,errflux) 121 | >>> step(t,q,'o-') 122 | >>> fill_between(t, -2*ones_like(t), 2*ones_like(t), alpha=0.3, facecolor='gray') 123 | 124 | .. todo:: generalize to arbitrary windows of time 125 | .. todo:: propagate uncertainty 126 | """ 127 | q=numpy.zeros_like(f) 128 | for i,fi in enumerate(f): 129 | if i>0: 130 | dy=fi-f[i-1] 131 | q[i]=dy/df[i-1] 132 | 133 | return q 134 | 135 | 136 | 137 | 138 | 139 | def ls(t,z,plot=False): 140 | """ 141 | Computes and plots the Lomb-Scargle periodogram for a given timeseries. 142 | Returns arrays with periods and LS spectral power.
143 | 144 | >>> p,power=ls(time,values,plot=False) 145 | 146 | :returns: periods, spectral power 147 | """ 148 | import scipy.signal 149 | 150 | tbin = t[1] - t[0] # time bin 151 | f = numpy.linspace(0.001, 3./tbin, 10000) 152 | z = scipy.signal.detrend(z) 153 | 154 | # periodogram 155 | pgram = scipy.signal.lombscargle(t,z,f) 156 | 157 | # Lomb-Scargle spectral power 158 | period=1./(f/(2*numpy.pi)) 159 | spower=(numpy.sqrt(4.*(pgram/len(t)))/numpy.mean(numpy.sqrt(4.*(pgram/len(t)))))**2 160 | 161 | # most significant periodicity 162 | #i=spower.argmax() # finds index of maximum power 163 | #pbest=period[i] 164 | #spmax=spower[i] 165 | #print("Spectral peak found at t="+str(pbest)+", "+str(spmax)) 166 | 167 | # plot 168 | if plot==True: 169 | #pylab.figure(figsize=(16,8)) 170 | pylab.title("Lomb-Scargle ") 171 | pylab.ylabel('Spectral Power') 172 | pylab.xlabel('Period [days]') 173 | pylab.xscale('log') 174 | pylab.plot(period, spower) 175 | 176 | return period,spower 177 | 178 | 179 | def ls_spectra(t,var,n=200,thres=0.1,smooth=0): 180 | """ 181 | Computes the Lomb-Scargle power spectrum, finds peaks, and produces arrays for 182 | plotting images showing the spectral lines. 183 | 184 | :param t: array of times 185 | :param var: array of signal 186 | :param n: number of vertical elements in image that will be created showing spectral lines 187 | :param thres: threshold parameter for finding peaks in time series 188 | :param smooth: number of points in the smoothing window. If 0, no smoothing 189 | 190 | Usage: ``N,P,l,pp,power=ls_spectra(t,y,thres=0.3)`` 191 | 192 | Returns the following variables: 193 | 194 | - ``N``: 2d number of vertical elements for plotting the spectra 195 | - ``P``: 2d periods 196 | - ``l``: 2d power spectra for plotting images 197 | - ``pp``: periods corresponding to peaks (peak period) in power spectrum 198 | - ``power``: peaks in power spectrum 199 | """ 200 | import peakutils 201 | 202 | p,power=ls(t,var) 203 | 204 | # smooth periodogram if requested 205 | if (smooth!=0): 206 | p,power=smoothxy(p,power,smooth) 207 | 208 | T, P = numpy.meshgrid(range(n), p) 209 | 210 | # spectral lines 211 | lines = numpy.ones([1, n]) * power[:, None] 212 | 213 | # peaks 214 | ipeak = peakutils.indexes(power,thres=thres) 215 | 216 | return T.T, P.T, lines.T, p[ipeak], power[ipeak] 217 | 218 | 219 | 220 | 221 | 222 | 223 | def error_resampler(errors): 224 | """ 225 | For use with ``pandas``. 226 | 227 | Method for performing the proper ``mean`` resampling of the *uncertainties* (error bars) 228 | in the time series with ``pandas``. Note that doing a simple resampling 229 | will fail to propagate uncertainties, since the error in the mean goes as 230 | 231 | .. math:: \sigma=\frac{1}{n}\sqrt{\sum_n \sigma_n^2} 232 | 233 | Example: Resamples the errors with 30 day averages: 234 | :: 235 | 236 | # df['errflux'] has the 1sigma uncertainties 237 | err=df['errflux'].resample('30d').apply(nmmn.dsp.error_resampler) 238 | 239 | # plot y-values (df['flux']) with errors (err) 240 | df['flux'].resample('30d').mean().plot(yerr=err) 241 | """ 242 | err=errors**2 243 | 244 | return numpy.sqrt(err.sum())/err.size 245 | 246 | 247 | 248 | def sumts(t1,t2,y1,y2,n=1): 249 | """ 250 | Given two time series, this method returns their sum. 251 | 252 | n = number of points in the final TS, as a multiple of the sum of points 253 | in both TS. 254 | 255 | Steps: 256 | 257 | 1. defines a uniform t-array with the number of total elements of the two arrays 258 | 2. interpolates both TS in this new t-array 259 | 3. 
sums their values 260 | """ 261 | # new number of elements 262 | points=int(n*(t1.size+t2.size)) 263 | # lower and upper ranges 264 | tmin=numpy.min(numpy.hstack((t1,t2))) 265 | tmax=numpy.max(numpy.hstack((t1,t2))) 266 | 267 | # Defines the new array of interpolated times 268 | tnew=numpy.linspace(tmin,tmax,points) 269 | # interpolates the two TS 270 | y1new=numpy.interp(tnew,t1,y1) 271 | y2new=numpy.interp(tnew,t2,y2) 272 | # sums the two TS 273 | ynew=y1new+y2new 274 | 275 | return tnew,ynew 276 | 277 | 278 | 279 | def dt(t): 280 | """ 281 | Computes difference between consecutive points in the time series. 282 | 283 | :param t: input times 284 | :returns: list of differences t[i]-t[i-1] 285 | """ 286 | dtt=[] 287 | for i in range(t.size): 288 | if (i>0): 289 | dtt.append(t[i]-t[i-1]) 290 | 291 | return dtt 292 | 293 | 294 | def uneven2even(t,y): 295 | """ 296 | Given an uneven timeseries (TS) with multiple values defined at the same 297 | time, this method will convert it into an evenly sampled 298 | timeseries with a dt as close as possible to the actual dt, removing 299 | duplicate-t values. 300 | 301 | Example: suppose you want to compute the CWT for an unevenly sampled 302 | time series. Since the CWT does not support uneven TS, you can first 303 | call this method to regularize your TS and then compute the CWT. 304 | 305 | Algorithm: 306 | 307 | - remove duplicate times 308 | - create a regular grid using the best dt 309 | - interpolate the new TS on the previous one 310 | 311 | :param t: input times 312 | :param y: input value 313 | """ 314 | import scipy.interpolate 315 | 316 | # remove items with same t 317 | # ========================== 318 | # gets successive dt for all points 319 | dtarr=[] 320 | idel=[] # list of indexes for elements that will be removed 321 | for i in range(t.size): 322 | if (i>0): 323 | dt=t[i]-t[i-1] 324 | if (dt==0): # if they have dt==0, store their index and later remove them 325 | idel.append(i) 326 | else: 327 | dtarr.append(dt) # stores only dt!=0 328 | 329 | # Find out optimal value of dt for the new TS 330 | dt=numpy.mean(dtarr) 331 | 332 | # Removes elements with same t 333 | tuniq=numpy.delete(t,idel) 334 | yuniq=numpy.delete(y,idel) 335 | 336 | # Does linear interpolation on new TS 337 | # ====================================== 338 | # new regular grid, as close as possible to original one 339 | tnew=numpy.arange(t[0],t[-1],dt) 340 | 341 | # interpolation 342 | f = scipy.interpolate.interp1d(tuniq, yuniq) 343 | ynew = f(tnew) 344 | 345 | return tnew,ynew 346 | 347 | 348 | 349 | 350 | 351 | # Wavelet methods 352 | # ================== 353 | 354 | 355 | def cwt(t,sig): 356 | 357 | """ 358 | Given the time and flux (sig), this method computes a continuous 359 | wavelet transform (CWT). 360 | 361 | .. warning:: Deprecated. 362 | """ 363 | import matplotlib 364 | import matplotlib.pyplot as plt 365 | import obspy.signal.tf_misfit 366 | import scipy.signal 367 | 368 | 369 | tbin = t[1] - t[0] 370 | freqLC = numpy.fft.fftfreq(len(t),d=tbin) 371 | fluxo = sig 372 | 373 | sig = scipy.signal.detrend(sig) 374 | fig = plt.figure() 375 | f_min = 5./t.max() 376 | f_max = 1000*freqLC.max() # only goes up to the maximum FFT frequency, since it makes no sense to look for frequencies beyond the time resolution 377 | gs0 = matplotlib.gridspec.GridSpec(2, 2,hspace=0,wspace=0) #GridSpec specifies the geometry of the grid that a subplot will be placed.
379 |     # The horizontal and vertical space between graphics can be adjusted with "hspace" and "wspace"
380 |     ax = fig.add_subplot(gs0[0])
381 |     dt = 0.001*tbin
382 |     CWT = obspy.signal.tf_misfit.cwt(sig,dt,6,f_min,f_max,nf=1000)
383 |     x, y = numpy.meshgrid(t, numpy.logspace(numpy.log10(f_min), numpy.log10(f_max), CWT.shape[0]))
384 |     NormCWT = (numpy.abs(CWT)**2)/numpy.var(fluxo)
385 | 
386 | 
387 |     return x, 1000./y, NormCWT
388 | 
389 | 
390 | 
391 | 
392 | def reconstruct(wa,i=None):
393 |     """
394 |     Method to reconstruct a time series (TS) signal based on its CWT.
395 | 
396 |     :param wa: wavelet object generated with the `wavelets` module
397 |     :param i: index array with the elements that will be excluded from the signal reconstruction, i.e. the elements indicated by ``i`` will be zeroed in the CWT complex array
398 |     :returns: full reconstructed TS, reconstructed CWT power array, reconstructed CWT complex array, detrended reconstructed TS
399 | 
400 |     Examples:
401 | 
402 |     1. Compute the CWT for the TS in ``var``:
403 |     ::
404 | 
405 |         import wavelets
406 | 
407 |         # compute CWT
408 |         wa = wavelets.WaveletAnalysis(var, dt=dt)
409 | 
410 |         # time and period 2D arrays
411 |         T, S = numpy.meshgrid(wa.time, wa.scales)
412 | 
413 |     2. Reconstruct the signal, keeping only periods between 1000 and 2000 (everything outside this range is zeroed):
414 |     ::
415 | 
416 |         j=where((S<1000) | (S>2000))
417 |         rec,power,cwtrec,recdet=nmmn.dsp.reconstruct(wa,j)
418 | 
419 |     3. Plot the reconstructed signal along with the data:
420 |     ::
421 | 
422 |         subplot(2,1,1)
423 |         plot(t,flux,label='original')
424 |         plot(t,rec,label='reconstructed signal',lw=5)
425 | 
426 |         subplot(2,1,2)
427 |         plot(t,recdet,'r')
428 |         title('Pure reconstructed signal')
429 | 
430 |     """
431 |     wavecut=wa.wavelet_transform
432 |     if i is not None: wavecut[i]=0
433 | 
434 |     # reconstructed signal
435 |     rec=wa.reconstruction(wave=wavecut)
436 | 
437 |     # find the linear trend of the signal
438 |     import scipy.stats
439 |     a, b, r, p, err = scipy.stats.linregress(wa.time,wa.data)
440 | 
441 |     # detrend the reconstructed signal
442 |     import scipy.signal
443 |     recdet=scipy.signal.detrend(rec)
444 | 
445 |     return recdet+a*wa.time+b,numpy.abs(wavecut)**2,wavecut,recdet
446 | 
447 | 
448 | 
449 | def reconstruct_period(wa,P1,P2):
450 |     """
451 |     Returns the reconstructed, detrended signal from the CWT, considering only the interval
452 |     between the two given periods.
453 |     """
454 |     T, P = numpy.meshgrid(wa.time, wa.scales)
455 | 
456 |     i=numpy.where((P<P1) | (P>P2))
457 |     xrec,powerrec,cwtrec,xrecdet=reconstruct(wa,i)
458 |     return xrecdet
459 | 
460 | 
461 | 
462 | 
463 | def cwt_spectra(t,var,dj=0.01,n=200,thres=0.1):
464 |     """
465 |     Computes the CWT power spectrum, finds peaks and produces arrays for
466 |     plotting images showing the spectral lines. Note that the CWT power spectrum
467 |     is an average over time of the CWT power array; therefore, it is a smoothed-out
468 |     version of a Fourier spectrum.
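    (This time average of the wavelet power is often called the global wavelet spectrum.)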
469 | 
470 |     :param t: array of times
471 |     :param var: array of signal
472 |     :param n: number of vertical elements in the image that will be created showing the spectral lines
473 |     :param thres: threshold parameter for finding peaks in the time series
474 | 
475 |     Usage: ``N,P,l,pp,power=cwt_spectra(t,y,thres=0.3)``
476 | 
477 |     Returns the following variables:
478 | 
479 |     - ``N``: 2d number of vertical elements for plotting the spectra
480 |     - ``P``: 2d periods
481 |     - ``l``: 2d power spectra for plotting images
482 |     - ``pp``: periods corresponding to peaks (peak periods) in the power spectrum
483 |     - ``power``: peaks in the CWT power spectrum
484 |     """
485 |     import peakutils, wavelets
486 | 
487 |     dt=t[1]-t[0] # bin size
488 |     wa = wavelets.WaveletAnalysis(var, dt=dt,dj=dj)
489 |     T, P = numpy.meshgrid(range(n), wa.scales)
490 |     cwtpower=wa.wavelet_power
491 | 
492 |     # spectral lines
493 |     z=cwtpower.mean(axis=1)
494 |     lines = numpy.ones([1, n]) * z[:, None]
495 | 
496 |     # peaks
497 |     ipeak = peakutils.indexes(z,thres=thres)
498 | 
499 |     return T.T, P.T, lines.T, wa.scales[ipeak], z[ipeak]
500 | 
501 | 
502 | 
--------------------------------------------------------------------------------
/nmmn/peakdetect.py:
--------------------------------------------------------------------------------
1 | """
2 | Peak detection in time series
3 | ===================================
4 | 
5 | Originally downloaded from https://gist.github.com/sixtenbe.
6 | However, now I can't find the original source for the code anymore.
7 | """
8 | 
9 | import numpy as np
10 | from math import pi, log
11 | import pylab
12 | from scipy import fft, ifft
13 | from scipy.optimize import curve_fit
14 | 
15 | #i = 10000
16 | #x = np.linspace(0, 3.5 * pi, i)
17 | #y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06*np.random.randn(i))
18 | 
19 | 
20 | def _datacheck_peakdetect(x_axis, y_axis):
21 |     if x_axis is None:
22 |         x_axis = range(len(y_axis))
23 | 
24 |     if len(y_axis) != len(x_axis):
25 |         raise ValueError('Input vectors y_axis and x_axis must have same length')
26 | 
27 |     # needs to be a numpy array
28 |     y_axis = np.array(y_axis)
29 |     x_axis = np.array(x_axis)
30 |     return x_axis, y_axis
31 | 
32 | def _peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):
33 |     """
34 |     Performs the actual parabola fitting for the peakdetect_parabole function.
35 | 
36 |     keyword arguments:
37 |     raw_peaks -- A list of either the maximum or the minimum peaks, as given
38 |         by the peakdetect_zero_crossing function, with the index used as x-axis
39 |     x_axis -- A numpy array of all the x values
40 |     y_axis -- A numpy array of all the y values
41 |     points -- How many points around the peak should be used during curve
42 |         fitting, must be odd.
43 | 
44 |     return -- A list giving all the peaks and the fitted waveform, format:
45 |         [[x, y, [fitted_x, fitted_y]]]
46 | 
47 |     """
48 |     func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m
49 |     fitted_peaks = []
50 |     for peak in raw_peaks:
51 |         index = peak[0]
52 |         x_data = x_axis[index - points // 2: index + points // 2 + 1]
53 |         y_data = y_axis[index - points // 2: index + points // 2 + 1]
54 |         # get a first approximation of tau (peak position in time)
55 |         tau = x_axis[index]
56 |         # get a first approximation of peak amplitude
57 |         m = peak[1]
58 | 
59 |         # build list of approximations
60 |         # k = -m as first approximation?
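        # (the model is y = k*(x - tau)**2 + m: at x = tau the value is m and
        # k sets the curvature, so k = -m is a crude starting curvature, i.e.
        # a peak of height m that would decay to zero one x-unit away, which
        # is usually close enough for curve_fit to converge)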
61 |         p0 = (-m, tau, m)
62 |         popt, pcov = curve_fit(func, x_data, y_data, p0)
63 |         # retrieve tau and m, i.e. the x and y values of the peak
64 |         x, y = popt[1:3]
65 | 
66 |         # create a high resolution data set for the fitted waveform
67 |         x2 = np.linspace(x_data[0], x_data[-1], points * 10)
68 |         y2 = func(x2, *popt)
69 | 
70 |         fitted_peaks.append([x, y, [x2, y2]])
71 | 
72 |     return fitted_peaks
73 | 
74 | 
75 | def peakdetect(y_axis, x_axis = None, lookahead = 200, delta=0):
76 |     """
77 |     Converted from/based on a MATLAB script at:
78 |     http://billauer.co.il/peakdet.html
79 | 
80 |     Function for detecting local maxima and minima in a signal.
81 |     Discovers peaks by searching for values which are surrounded by lower
82 |     or larger values for maxima and minima respectively.
83 | 
84 |     keyword arguments:
85 |     y_axis -- A list containing the signal over which to find peaks
86 |     x_axis -- (optional) An x-axis whose values correspond to the y_axis list
87 |         and is used in the return to specify the position of the peaks. If
88 |         omitted an index of the y_axis is used. (default: None)
89 |     lookahead -- (optional) distance to look ahead from a peak candidate to
90 |         determine if it is the actual peak (default: 200)
91 |         '(samples / period) / f' where '4 >= f >= 1.25' might be a good value
92 |     delta -- (optional) this specifies a minimum difference between a peak and
93 |         the following points, before a peak may be considered a peak. Useful
94 |         to hinder the function from picking up false peaks towards the end of
95 |         the signal. To work well delta should be set to delta >= RMSnoise * 5.
96 |         (default: 0)
97 |         when omitted, the delta function causes a 20% decrease in speed;
98 |         used correctly it can double the speed of the function
99 | 
100 |     return -- two lists [max_peaks, min_peaks] containing the positive and
101 |         negative peaks respectively. Each cell of the lists contains a tuple
102 |         of: (position, peak_value);
103 |         to get the average peak value do np.mean(max_peaks, 0)[1] on the
104 |         results; to unpack one of the lists into x, y coordinates do:
105 |         x, y = zip(*max_peaks)
106 |     """
107 |     max_peaks = []
108 |     min_peaks = []
109 |     dump = [] # used to pop the first hit, which almost always is false
110 | 
111 |     # check input data
112 |     x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
113 |     # store data length for later use
114 |     length = len(y_axis)
115 | 
116 | 
117 |     # perform some checks
118 |     if lookahead < 1:
119 |         raise ValueError("Lookahead must be '1' or above in value")
120 |     if not (np.isscalar(delta) and delta >= 0):
121 |         raise ValueError("delta must be a positive number")
122 | 
123 |     # maxima and minima candidates are temporarily stored in
124 |     # mx and mn respectively
125 |     mn, mx = np.Inf, -np.Inf
126 | 
127 |     # only detect a peak if there is a 'lookahead' amount of points after it
128 |     for index, (x, y) in enumerate(zip(x_axis[:-lookahead],
129 |                                        y_axis[:-lookahead])):
130 |         if y > mx:
131 |             mx = y
132 |             mxpos = x
133 |         if y < mn:
134 |             mn = y
135 |             mnpos = x
136 | 
137 |         #### look for max ####
138 |         if y < mx-delta and mx != np.Inf:
139 |             # maxima peak candidate found
140 |             # look ahead in signal to ensure that this is a peak and not jitter
141 |             if y_axis[index:index+lookahead].max() < mx:
142 |                 max_peaks.append([mxpos, mx])
143 |                 dump.append(True)
144 |                 # set algorithm to only find minima now
145 |                 mx = np.Inf
146 |                 mn = np.Inf
147 |                 if index+lookahead >= length:
148 |                     # end is within lookahead; no more peaks can be found
149 |                     break
150 |                 continue
151 |             #else: # this slows things down
152 |             #    mx = ahead
153 |             #    mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)]
154 | 
155 |         #### look for min ####
156 |         if y > mn+delta and mn != -np.Inf:
157 |             # minima peak candidate found
158 |             # look ahead in signal to ensure that this is a peak and not jitter
159 |             if y_axis[index:index+lookahead].min() > mn:
160 |                 min_peaks.append([mnpos, mn])
161 |                 dump.append(False)
162 |                 # set algorithm to only find maxima now
163 |                 mn = -np.Inf
164 |                 mx = -np.Inf
165 |                 if index+lookahead >= length:
166 |                     # end is within lookahead; no more peaks can be found
167 |                     break
168 |             #else: # this slows things down
169 |             #    mn = ahead
170 |             #    mnpos = x_axis[np.where(y_axis[index:index+lookahead]==mn)]
171 | 
172 | 
173 |     # remove the false hit on the first value of the y_axis
174 |     try:
175 |         if dump[0]:
176 |             max_peaks.pop(0)
177 |         else:
178 |             min_peaks.pop(0)
179 |         del dump
180 |     except IndexError:
181 |         # no peaks were found; should the function return empty lists?
182 |         pass
183 | 
184 |     return [max_peaks, min_peaks]
185 | 
186 | 
187 | def peakdetect_fft(y_axis, x_axis, pad_len = 5):
188 |     """
189 |     Performs an FFT calculation on the data and zero-pads the result to
190 |     increase the time domain resolution after performing the inverse FFT,
191 |     then sends the data to the 'peakdetect' function for peak
192 |     detection.
193 | 
194 |     Omitting the x_axis is forbidden as it would make the resulting x_axis
195 |     value silly if it was returned as the index 50.234 or similar.
196 | 
197 |     Will find at least 1 less peak than the 'peakdetect_zero_crossing'
198 |     function, but should result in a more precise value of the peak as
199 |     the resolution has been increased. Some peaks are lost in an attempt to
200 |     minimize spectral leakage by calculating the fft between two zero
201 |     crossings for a whole number of signal periods.
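    (Zero-padding the spectrum interpolates the signal in the time domain:
    e.g. a segment of 1000 samples padded to 8192 FFT bins comes back from
    the ifft as 8192 samples spanning the same time interval, i.e. roughly
    8x finer time resolution.)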
202 | 203 | The biggest time eater in this function is the ifft and thereafter it's 204 | the 'peakdetect' function which takes only half the time of the ifft. 205 | Speed improvementd could include to check if 2**n points could be used for 206 | fft and ifft or change the 'peakdetect' to the 'peakdetect_zero_crossing', 207 | which is maybe 10 times faster than 'peakdetct'. The pro of 'peakdetect' 208 | is that it resutls in one less lost peak. It should also be noted that the 209 | time used by the ifft function can change greatly depending on the input. 210 | 211 | keyword arguments: 212 | y_axis -- A list containg the signal over which to find peaks 213 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 214 | in the return to specify the postion of the peaks. 215 | pad_len -- (optional) By how many times the time resolution should be 216 | increased by, e.g. 1 doubles the resolution. The amount is rounded up 217 | to the nearest 2 ** n amount (default: 5) 218 | 219 | return -- two lists [max_peaks, min_peaks] containing the positive and 220 | negative peaks respectively. Each cell of the lists contains a tupple 221 | of: (position, peak_value) 222 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 223 | results to unpack one of the lists into x, y coordinates do: 224 | x, y = zip(*tab) 225 | """ 226 | # check input data 227 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 228 | zero_indices = zero_crossings(y_axis, window = 11) 229 | #select a n amount of periods 230 | last_indice = - 1 - (1 - len(zero_indices) & 1) 231 | # Calculate the fft between the first and last zero crossing 232 | # this method could be ignored if the begining and the end of the signal 233 | # are discardable as any errors induced from not using whole periods 234 | # should mainly manifest in the beginning and the end of the signal, but 235 | # not in the rest of the signal 236 | fft_data = fft(y_axis[zero_indices[0]:zero_indices[last_indice]]) 237 | padd = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:] 238 | n = lambda x: int(log(x)/log(2)) + 1 239 | # padds to 2**n amount of samples 240 | fft_padded = padd(list(fft_data), 2 ** 241 | n(len(fft_data) * pad_len) - len(fft_data)) 242 | 243 | # There is amplitude decrease directly proportional to the sample increase 244 | sf = len(fft_padded) / float(len(fft_data)) 245 | # There might be a leakage giving the result an imaginary component 246 | # Return only the real component 247 | y_axis_ifft = ifft(fft_padded).real * sf #(pad_len + 1) 248 | x_axis_ifft = np.linspace( 249 | x_axis[zero_indices[0]], x_axis[zero_indices[last_indice]], 250 | len(y_axis_ifft)) 251 | # get the peaks to the interpolated waveform 252 | max_peaks, min_peaks = peakdetect(y_axis_ifft, x_axis_ifft, 500, 253 | delta = abs(np.diff(y_axis).max() * 2)) 254 | #max_peaks, min_peaks = peakdetect_zero_crossing(y_axis_ifft, x_axis_ifft) 255 | 256 | # store one 20th of a period as waveform data 257 | data_len = int(np.diff(zero_indices).mean()) / 10 258 | data_len += 1 - data_len & 1 259 | 260 | 261 | fitted_wave = [] 262 | for peaks in [max_peaks, min_peaks]: 263 | peak_fit_tmp = [] 264 | index = 0 265 | for peak in peaks: 266 | index = np.where(x_axis_ifft[index:]==peak[0])[0][0] + index 267 | x_fit_lim = x_axis_ifft[index - data_len // 2: 268 | index + data_len // 2 + 1] 269 | y_fit_lim = y_axis_ifft[index - data_len // 2: 270 | index + data_len // 2 + 1] 271 | 272 | peak_fit_tmp.append([x_fit_lim, y_fit_lim]) 273 | 
fitted_wave.append(peak_fit_tmp) 274 | 275 | #pylab.plot(range(len(fft_data)), fft_data) 276 | #pylab.show() 277 | 278 | pylab.plot(x_axis, y_axis) 279 | pylab.hold(True) 280 | pylab.plot(x_axis_ifft, y_axis_ifft) 281 | #for max_p in max_peaks: 282 | # pylab.plot(max_p[0], max_p[1], 'xr') 283 | pylab.show() 284 | return [max_peaks, min_peaks] 285 | 286 | 287 | def peakdetect_parabole(y_axis, x_axis, points = 9): 288 | """ 289 | Function for detecting local maximas and minmias in a signal. 290 | Discovers peaks by fitting the model function: y = k (x - tau) ** 2 + m 291 | to the peaks. The amount of points used in the fitting is set by the 292 | points argument. 293 | 294 | Omitting the x_axis is forbidden as it would make the resulting x_axis 295 | value silly if it was returned as index 50.234 or similar. 296 | 297 | will find the same amount of peaks as the 'peakdetect_zero_crossing' 298 | function, but might result in a more precise value of the peak. 299 | 300 | keyword arguments: 301 | y_axis -- A list containg the signal over which to find peaks 302 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 303 | in the return to specify the postion of the peaks. 304 | points -- (optional) How many points around the peak should be used during 305 | curve fitting, must be odd (default: 9) 306 | 307 | return -- two lists [max_peaks, min_peaks] containing the positive and 308 | negative peaks respectively. Each cell of the lists contains a list 309 | of: (position, peak_value) 310 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 311 | results to unpack one of the lists into x, y coordinates do: 312 | x, y = zip(*max_peaks) 313 | """ 314 | # check input data 315 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 316 | # make the points argument odd 317 | points += 1 - points % 2 318 | #points += 1 - int(points) & 1 slower when int conversion needed 319 | 320 | # get raw peaks 321 | max_raw, min_raw = peakdetect_zero_crossing(y_axis) 322 | 323 | # define output variable 324 | max_peaks = [] 325 | min_peaks = [] 326 | 327 | max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points) 328 | min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points) 329 | 330 | max_peaks = map(lambda x: [x[0], x[1]], max_) 331 | max_fitted = map(lambda x: x[-1], max_) 332 | min_peaks = map(lambda x: [x[0], x[1]], min_) 333 | min_fitted = map(lambda x: x[-1], min_) 334 | 335 | 336 | #pylab.plot(x_axis, y_axis) 337 | #pylab.hold(True) 338 | #for max_p, max_f in zip(max_peaks, max_fitted): 339 | # pylab.plot(max_p[0], max_p[1], 'x') 340 | # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2) 341 | #for min_p, min_f in zip(min_peaks, min_fitted): 342 | # pylab.plot(min_p[0], min_p[1], 'x') 343 | # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2) 344 | #pylab.show() 345 | 346 | return [max_peaks, min_peaks] 347 | 348 | 349 | def peakdetect_sine(y_axis, x_axis, points = 9, lock_frequency = False): 350 | """ 351 | Function for detecting local maximas and minmias in a signal. 352 | Discovers peaks by fitting the model function: 353 | y = A * sin(2 * pi * f * x - tau) to the peaks. The amount of points used 354 | in the fitting is set by the points argument. 355 | 356 | Omitting the x_axis is forbidden as it would make the resulting x_axis 357 | value silly if it was returned as index 50.234 or similar. 358 | 359 | will find the same amount of peaks as the 'peakdetect_zero_crossing' 360 | function, but might result in a more precise value of the peak. 
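    Note that with the pi/2 phase shift the model is effectively
    y = A * cos(2 * pi * Hz * (x - tau)), so the fitted tau is the x
    position of the peak itself and A is its height.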
361 | 362 | The function might have some problems if the sine wave has a 363 | non-negligible total angle i.e. a k*x component, as this messes with the 364 | internal offset calculation of the peaks, might be fixed by fitting a 365 | k * x + m function to the peaks for offset calculation. 366 | 367 | keyword arguments: 368 | y_axis -- A list containg the signal over which to find peaks 369 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 370 | in the return to specify the postion of the peaks. 371 | points -- (optional) How many points around the peak should be used during 372 | curve fitting, must be odd (default: 9) 373 | lock_frequency -- (optional) Specifies if the frequency argument of the 374 | model function should be locked to the value calculated from the raw 375 | peaks or if optimization process may tinker with it. (default: False) 376 | 377 | return -- two lists [max_peaks, min_peaks] containing the positive and 378 | negative peaks respectively. Each cell of the lists contains a tupple 379 | of: (position, peak_value) 380 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 381 | results to unpack one of the lists into x, y coordinates do: 382 | x, y = zip(*tab) 383 | """ 384 | # check input data 385 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 386 | # make the points argument odd 387 | points += 1 - points % 2 388 | #points += 1 - int(points) & 1 slower when int conversion needed 389 | 390 | # get raw peaks 391 | max_raw, min_raw = peakdetect_zero_crossing(y_axis) 392 | 393 | # define output variable 394 | max_peaks = [] 395 | min_peaks = [] 396 | 397 | # get global offset 398 | offset = np.mean([np.mean(max_raw, 0)[1], np.mean(min_raw, 0)[1]]) 399 | # fitting a k * x + m function to the peaks might be better 400 | #offset_func = lambda x, k, m: k * x + m 401 | 402 | # calculate an approximate frequenzy of the signal 403 | Hz = [] 404 | for raw in [max_raw, min_raw]: 405 | if len(raw) > 1: 406 | peak_pos = [x_axis[index] for index in zip(*raw)[0]] 407 | Hz.append(np.mean(np.diff(peak_pos))) 408 | Hz = 1 / np.mean(Hz) 409 | 410 | # model function 411 | # if cosine is used then tau could equal the x position of the peak 412 | # if sine were to be used then tau would be the first zero crossing 413 | if lock_frequency: 414 | func = lambda x, A, tau: A * np.sin(2 * pi * Hz * (x - tau) + pi / 2) 415 | else: 416 | func = lambda x, A, Hz, tau: A * np.sin(2 * pi * Hz * (x - tau) + 417 | pi / 2) 418 | #func = lambda x, A, Hz, tau: A * np.cos(2 * pi * Hz * (x - tau)) 419 | 420 | 421 | #get peaks 422 | fitted_peaks = [] 423 | for raw_peaks in [max_raw, min_raw]: 424 | peak_data = [] 425 | for peak in raw_peaks: 426 | index = peak[0] 427 | x_data = x_axis[index - points // 2: index + points // 2 + 1] 428 | y_data = y_axis[index - points // 2: index + points // 2 + 1] 429 | # get a first approximation of tau (peak position in time) 430 | tau = x_axis[index] 431 | # get a first approximation of peak amplitude 432 | A = peak[1] 433 | 434 | # build list of approximations 435 | if lock_frequency: 436 | p0 = (A, tau) 437 | else: 438 | p0 = (A, Hz, tau) 439 | 440 | # subtract offset from waveshape 441 | y_data -= offset 442 | popt, pcov = curve_fit(func, x_data, y_data, p0) 443 | # retrieve tau and A i.e x and y value of peak 444 | x = popt[-1] 445 | y = popt[0] 446 | 447 | # create a high resolution data set for the fitted waveform 448 | x2 = np.linspace(x_data[0], x_data[-1], points * 10) 449 | y2 = func(x2, *popt) 450 | 451 | # add the 
offset to the results 452 | y += offset 453 | y2 += offset 454 | y_data += offset 455 | 456 | peak_data.append([x, y, [x2, y2]]) 457 | 458 | fitted_peaks.append(peak_data) 459 | 460 | # structure date for output 461 | max_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[0]) 462 | max_fitted = map(lambda x: x[-1], fitted_peaks[0]) 463 | min_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[1]) 464 | min_fitted = map(lambda x: x[-1], fitted_peaks[1]) 465 | 466 | 467 | #pylab.plot(x_axis, y_axis) 468 | #pylab.hold(True) 469 | #for max_p, max_f in zip(max_peaks, max_fitted): 470 | # pylab.plot(max_p[0], max_p[1], 'x') 471 | # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2) 472 | #for min_p, min_f in zip(min_peaks, min_fitted): 473 | # pylab.plot(min_p[0], min_p[1], 'x') 474 | # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2) 475 | #pylab.show() 476 | 477 | return [max_peaks, min_peaks] 478 | 479 | 480 | def peakdetect_sine_locked(y_axis, x_axis, points = 9): 481 | """ 482 | Convinience function for calling the 'peakdetect_sine' function with 483 | the lock_frequency argument as True. 484 | 485 | keyword arguments: 486 | y_axis -- A list containg the signal over which to find peaks 487 | x_axis -- A x-axis whose values correspond to the y_axis list and is used 488 | in the return to specify the postion of the peaks. 489 | points -- (optional) How many points around the peak should be used during 490 | curve fitting, must be odd (default: 9) 491 | 492 | return -- see 'peakdetect_sine' 493 | """ 494 | return peakdetect_sine(y_axis, x_axis, points, True) 495 | 496 | 497 | def peakdetect_zero_crossing(y_axis, x_axis = None, window = 11): 498 | """ 499 | Function for detecting local maximas and minmias in a signal. 500 | Discovers peaks by dividing the signal into bins and retrieving the 501 | maximum and minimum value of each the even and odd bins respectively. 502 | Division into bins is performed by smoothing the curve and finding the 503 | zero crossings. 504 | 505 | Suitable for repeatable signals, where some noise is tolerated. Excecutes 506 | faster than 'peakdetect', although this function will break if the offset 507 | of the signal is too large. It should also be noted that the first and 508 | last peak will probably not be found, as this function only can find peaks 509 | between the first and last zero crossing. 510 | 511 | keyword arguments: 512 | y_axis -- A list containg the signal over which to find peaks 513 | x_axis -- (optional) A x-axis whose values correspond to the y_axis list 514 | and is used in the return to specify the postion of the peaks. If 515 | omitted an index of the y_axis is used. (default: None) 516 | window -- the dimension of the smoothing window; should be an odd integer 517 | (default: 11) 518 | 519 | return -- two lists [max_peaks, min_peaks] containing the positive and 520 | negative peaks respectively. 
Each cell of the lists contains a tupple 521 | of: (position, peak_value) 522 | to get the average peak value do: np.mean(max_peaks, 0)[1] on the 523 | results to unpack one of the lists into x, y coordinates do: 524 | x, y = zip(*tab) 525 | """ 526 | # check input data 527 | x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis) 528 | 529 | zero_indices = zero_crossings(y_axis, window = window) 530 | period_lengths = np.diff(zero_indices) 531 | 532 | bins_y = [y_axis[index:index + diff] for index, diff in 533 | zip(zero_indices, period_lengths)] 534 | bins_x = [x_axis[index:index + diff] for index, diff in 535 | zip(zero_indices, period_lengths)] 536 | 537 | even_bins_y = bins_y[::2] 538 | odd_bins_y = bins_y[1::2] 539 | even_bins_x = bins_x[::2] 540 | odd_bins_x = bins_x[1::2] 541 | hi_peaks_x = [] 542 | lo_peaks_x = [] 543 | 544 | #check if even bin contains maxima 545 | if abs(even_bins_y[0].max()) > abs(even_bins_y[0].min()): 546 | hi_peaks = [bin.max() for bin in even_bins_y] 547 | lo_peaks = [bin.min() for bin in odd_bins_y] 548 | # get x values for peak 549 | for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, hi_peaks): 550 | hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 551 | for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, lo_peaks): 552 | lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 553 | else: 554 | hi_peaks = [bin.max() for bin in odd_bins_y] 555 | lo_peaks = [bin.min() for bin in even_bins_y] 556 | # get x values for peak 557 | for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, hi_peaks): 558 | hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 559 | for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, lo_peaks): 560 | lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]]) 561 | 562 | max_peaks = [[x, y] for x,y in zip(hi_peaks_x, hi_peaks)] 563 | min_peaks = [[x, y] for x,y in zip(lo_peaks_x, lo_peaks)] 564 | 565 | return [max_peaks, min_peaks] 566 | 567 | 568 | def _smooth(x, window_len=11, window='hanning'): 569 | """ 570 | smooth the data using a window of the requested size. 571 | 572 | This method is based on the convolution of a scaled window on the signal. 573 | The signal is prepared by introducing reflected copies of the signal 574 | (with the window size) in both ends so that transient parts are minimized 575 | in the begining and end part of the output signal. 576 | 577 | input: 578 | x: the input signal 579 | window_len: the dimension of the smoothing window; should be an odd 580 | integer 581 | window: the type of window from 'flat', 'hanning', 'hamming', 582 | 'bartlett', 'blackman' 583 | flat window will produce a moving average smoothing. 
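    (The signal is extended by window_len - 1 reflected samples at each end,
    so the 'valid' convolution returns len(x) + window_len - 1 samples;
    callers such as zero_crossings trim the result back to the original
    length.)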
584 | 
585 |     output:
586 |         the smoothed signal
587 | 
588 |     example:
589 | 
590 |     t = np.linspace(-2, 2, 41)
591 |     x = np.sin(t) + np.random.randn(len(t)) * 0.1
592 |     y = _smooth(x)
593 | 
594 |     see also:
595 | 
596 |     numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
597 |     numpy.convolve, scipy.signal.lfilter
598 | 
599 |     TODO: the window parameter could be the window itself if a list instead of
600 |     a string
601 |     """
602 |     if x.ndim != 1:
603 |         raise ValueError("smooth only accepts 1 dimension arrays.")
604 | 
605 |     if x.size < window_len:
606 |         raise ValueError("Input vector needs to be bigger than window size.")
607 | 
608 |     if window_len<3:
609 |         return x
610 | 
611 |     if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
612 |         raise ValueError("Window is not one of '{0}', '{1}', '{2}', '{3}', '{4}'".format(
613 |             *('flat', 'hanning', 'hamming', 'bartlett', 'blackman')))
614 | 
615 |     s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
616 |     #print(len(s))
617 |     if window == 'flat': # moving average
618 |         w = np.ones(window_len,'d')
619 |     else:
620 |         w = getattr(np, window)(window_len) # e.g. np.hanning(window_len); safer than eval
621 | 
622 |     y = np.convolve(w / w.sum(), s, mode = 'valid')
623 |     return y
624 | 
625 | 
626 | def zero_crossings(y_axis, window = 11):
627 |     """
628 |     Algorithm to find zero crossings. Smooths the curve and finds the
629 |     zero-crossings by looking for a sign change.
630 | 
631 | 
632 |     keyword arguments:
633 |     y_axis -- A list containing the signal over which to find zero-crossings
634 |     window -- the dimension of the smoothing window; should be an odd integer
635 |         (default: 11)
636 | 
637 |     return -- the index for each zero-crossing
638 |     """
639 |     # smooth the curve
640 |     length = len(y_axis)
641 |     x_axis = np.asarray(range(length), int)
642 | 
643 |     # discard tail of smoothed signal
644 |     y_axis = _smooth(y_axis, window)[:length]
645 |     zero_crossings = np.where(np.diff(np.sign(y_axis)))[0]
646 |     indices = [x_axis[index] for index in zero_crossings]
647 | 
648 |     # check if zero-crossings are valid
649 |     diff = np.diff(indices)
650 |     if diff.std() / diff.mean() > 0.2:
651 |         print(diff.std() / diff.mean())
652 |         print(np.diff(indices))
653 |         raise ValueError("False zero-crossings found, indicates problem {0} or {1}".format(
654 |             "with smoothing window", "with offset"))
655 |     # check if any zero crossings were found
656 |     if len(zero_crossings) < 1:
657 |         raise ValueError("No zero crossings found")
658 | 
659 |     return indices
660 |     # used this to test the fft function's sensitivity to spectral leakage
661 |     #return indices + np.asarray(30 * np.random.randn(len(indices)), int)
662 | 
663 | ############################ Frequency calculation ############################
664 | # diff = np.diff(indices)
665 | # time_p_period = diff.mean()
666 | #
667 | # if diff.std() / time_p_period > 0.1:
668 | #     raise ValueError(
669 | #         "smoothing window too small, false zero-crossing found")
670 | #
671 | # #return frequency
672 | # return 1.0 / time_p_period
673 | ##############################################################################
674 | 
675 | 
676 | 
677 | 
678 | 
679 | def _test_zero():
680 |     _max, _min = peakdetect_zero_crossing(y,x)
681 | def _test():
682 |     _max, _min = peakdetect(y,x, delta=0.30)
683 | 
684 | 
685 | def _test_graph():
686 |     i = 10000
687 |     x = np.linspace(0,3.7*pi,i)
688 |     y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06 *
689 |         np.random.randn(i))
690 |     y *= -1
691 |     x = range(i)
692 | 
693 |     _max, _min = peakdetect(y,x,750, 0.30)
694 |     xm = [p[0] for p in _max]
695
| ym = [p[1] for p in _max] 696 | xn = [p[0] for p in _min] 697 | yn = [p[1] for p in _min] 698 | 699 | plot = pylab.plot(x,y) 700 | pylab.hold(True) 701 | pylab.plot(xm, ym, 'r+') 702 | pylab.plot(xn, yn, 'g+') 703 | 704 | _max, _min = peak_det_bad.peakdetect(y, 0.7, x) 705 | xm = [p[0] for p in _max] 706 | ym = [p[1] for p in _max] 707 | xn = [p[0] for p in _min] 708 | yn = [p[1] for p in _min] 709 | pylab.plot(xm, ym, 'y*') 710 | pylab.plot(xn, yn, 'k*') 711 | pylab.show() 712 | 713 | 714 | 715 | if __name__ == "__main__": 716 | from math import pi 717 | import pylab 718 | 719 | i = 10000 720 | x = np.linspace(0,3.7*pi,i) 721 | y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06 * 722 | np.random.randn(i)) 723 | y *= -1 724 | 725 | _max, _min = peakdetect(y, x, 750, 0.30) 726 | xm = [p[0] for p in _max] 727 | ym = [p[1] for p in _max] 728 | xn = [p[0] for p in _min] 729 | yn = [p[1] for p in _min] 730 | 731 | plot = pylab.plot(x, y) 732 | pylab.hold(True) 733 | pylab.plot(xm, ym, 'r+') 734 | pylab.plot(xn, yn, 'g+') 735 | 736 | 737 | pylab.show() -------------------------------------------------------------------------------- /nmmn/grmhd.py: -------------------------------------------------------------------------------- 1 | """ 2 | Dealing with (GR)(R)(M)HD simulations 3 | ======================================== 4 | 5 | - RAISHIN 6 | - Pluto 7 | - HARM 8 | 9 | See jupyter notebooks "grmhd*" for examples on how to use this 10 | module. 11 | 12 | TODO: 13 | 14 | - [ ] incorporate Pluto class from pluto-tools 15 | - [ ] 16 | """ 17 | 18 | import numpy, scipy 19 | import tqdm 20 | import numpy as np 21 | 22 | 23 | 24 | 25 | class Raishin: 26 | """ 27 | Class that reads a RAISHIN VTK datafile and converts to numpy format. 28 | 29 | Attributes of the object: 30 | 31 | - x,y,z: 1D position arrays for the mesh 32 | - rho: 1D density array 33 | - vx,vy,vz: 1D velocity arrays 34 | - p: 1D pressure 35 | - bx,by,bz: 1D magnetic field arrays 36 | - b2: 1D B^2 37 | 38 | Define an empty object: 39 | 40 | >>> o=nmmn.grmhd.Raishin() 41 | 42 | Reads data from a VTK file, new attributes rho, p, vx, bx etc: 43 | 44 | >>> o.vtk("ok200.vtk") 45 | 46 | Saves data as an ASCII file with columns corresponding to variables: 47 | 48 | >>> o.savetxt("ok200.dat") 49 | """ 50 | 51 | #def __init__(self): 52 | # does nothing for now 53 | 54 | 55 | 56 | def vtk(self, vtkfile): 57 | """ 58 | Given a VTK file created with the RAISHIN GRMHD code, this reads the 59 | data as numpy arrays. 
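    The parsing is regex-based: the file is scanned once, boolean flags track
    which variable block (density, pressure, util^x, ..., bz) is currently
    being read, and the numbers are accumulated as strings before a final
    numpy.fromstring conversion.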
60 | """ 61 | import re 62 | 63 | f = open(vtkfile,"r") 64 | #newf=open("tmp.dat","w") # file that will hold coordinates 65 | 66 | # booleans that will tell the code when to stop reading 67 | # data for a given variable: 68 | # boold for density, boolp for pressure etc 69 | boolxyz,boold,boolp,boolvx,boolvy,boolvz,boolb2,boolbx,boolby,boolbz=False,False,False,False,False,False,False,False,False,False 70 | strx,stry,strz,strd,strp,strvx,strvy,strvz,strb2,strbx,strby,strbz='','','','','','','','','','','','' # string that holds values 71 | 72 | for line in f: 73 | # gets dimensions 74 | if re.search(r'DIMENSIONS\s+\d+\s+\d+',line): 75 | s=re.findall(r'\s*\d+\s*',line.rstrip('\n')) 76 | self.nx=int(s[0]) 77 | self.ny=int(s[1]) 78 | self.nz=int(s[2]) 79 | boolxyz=True 80 | 81 | # gets arrays 82 | # these lines are important to tell python when to stop reading shit 83 | # it must be sequential 84 | if 'density' in line: 85 | boolxyz=False 86 | boold=True 87 | if 'pressure' in line: 88 | boold=False 89 | boolp=True 90 | if 'LorentzW1' in line: 91 | boolp=False 92 | if 'util^x' in line: 93 | boolvx=True 94 | if 'util^y' in line: 95 | boolvx=False 96 | boolvy=True 97 | if 'util^z' in line: 98 | boolvy=False 99 | boolvz=True 100 | if 'b^2' in line: 101 | boolvz=False 102 | boolb2=True 103 | if 'bx' in line: 104 | boolb2=False 105 | boolbx=True 106 | if 'by' in line: 107 | boolbx=False 108 | boolby=True 109 | if 'bz' in line: 110 | boolby=False 111 | boolbz=True 112 | 113 | if boolxyz==True and re.search(r'-?\d+\.\d+E?[-+]?\d+\s+-?\d+\.\d+E?[-+]?\d+\s+-?\d+\.\d+E?[-+]?\d+',line): 114 | s=re.findall(r'\s*-?\d+\.\d+E?[-+]?\d+\s*',line.rstrip('\n')) 115 | strx=strx+s[0] 116 | stry=stry+s[1] 117 | strz=strz+s[2] 118 | if boold==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 119 | strd=strd+line 120 | if boolp==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 121 | strp=strp+line 122 | if boolvx==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 123 | strvx=strvx+line 124 | if boolvy==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 125 | strvy=strvy+line 126 | if boolvz==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 127 | strvz=strvz+line 128 | if boolb2==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 129 | strb2=strb2+line 130 | if boolbx==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 131 | strbx=strbx+line 132 | if boolby==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 133 | strby=strby+line 134 | if boolbz==True and re.search(r'-?\d+\.\d+E?[-+]?\d+',line): 135 | strbz=strbz+line 136 | 137 | # gets numpy arrays finally 138 | self.x=numpy.fromstring(strx, sep='\n') 139 | self.y=numpy.fromstring(stry, sep='\n') 140 | self.z=numpy.fromstring(strz, sep='\n') 141 | self.rho=numpy.fromstring(strd, sep='\n') 142 | self.p=numpy.fromstring(strp, sep='\n') 143 | self.vx=numpy.fromstring(strvx, sep='\n') 144 | self.vy=numpy.fromstring(strvy, sep='\n') 145 | self.vz=numpy.fromstring(strvz, sep='\n') 146 | self.b2=numpy.fromstring(strb2, sep='\n') 147 | self.bx=numpy.fromstring(strbx, sep='\n') 148 | self.by=numpy.fromstring(strby, sep='\n') 149 | self.bz=numpy.fromstring(strbz, sep='\n') 150 | 151 | # reads mesh positions 152 | #self.x,self.y,self.z= numpy.loadtxt('tmp.dat',unpack=True,usecols=(0,1,2)) 153 | 154 | # close files 155 | f.close() 156 | #newf.close() 157 | 158 | 159 | 160 | 161 | def savetxt(self,outfile): 162 | """Saves data as ASCII file """ 163 | numpy.savetxt(outfile,numpy.transpose((self.x,self.y,self.z,self.rho,self.p,self.vx,self.vy,self.vz,self.bx,self.by,self.bz))) 164 | 
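    # A sketch (not part of the original module): reading back a file written
    # by savetxt; the column order follows the numpy.transpose call above:
    #
    #   x,y,z,rho,p,vx,vy,vz,bx,by,bz = numpy.loadtxt("ok200.dat", unpack=True)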
165 | 166 | def savehdf5(self,outfile): 167 | """ 168 | Exports data as compressed HDF5. 7x less space than ASCII. 169 | """ 170 | import h5py 171 | 172 | with h5py.File(outfile, 'w') as hf: 173 | grid=hf.create_group('grid') 174 | grid.create_dataset('x', data=self.x, compression="gzip", compression_opts=9) 175 | grid.create_dataset('y', data=self.y, compression="gzip", compression_opts=9) 176 | grid.create_dataset('z', data=self.z, compression="gzip", compression_opts=9) 177 | 178 | fields=hf.create_group('fields') 179 | fields.create_dataset('density', data=self.rho, compression="gzip", compression_opts=9) 180 | fields.create_dataset('pressure', data=self.p, compression="gzip", compression_opts=9) 181 | fields.create_dataset('vx', data=self.vx, compression="gzip", compression_opts=9) 182 | fields.create_dataset('vy', data=self.vy, compression="gzip", compression_opts=9) 183 | fields.create_dataset('vz', data=self.vz, compression="gzip", compression_opts=9) 184 | fields.create_dataset('bx', data=self.bx, compression="gzip", compression_opts=9) 185 | fields.create_dataset('by', data=self.by, compression="gzip", compression_opts=9) 186 | fields.create_dataset('bz', data=self.bz, compression="gzip", compression_opts=9) 187 | 188 | 189 | def savenumpy(self,outfile): 190 | """ 191 | Save data as binary Numpy file .npz. 3x less space than ASCII. 192 | """ 193 | numpy.savez(outfile,x=self.x,y=self.y,z=self.z,rho=self.rho,p=self.p,vx=self.vx,vy=self.vy,vz=self.vz,bx=self.bx,by=self.by,bz=self.bz) 194 | 195 | 196 | 197 | def regridAll(self,nboost=5): 198 | """ 199 | Regrid all RAISHIN data to a nice cartesian grid for plotting with 200 | python. 201 | 202 | :param nboost: factor of increase of number of grid points compared to 203 | previous grid 204 | 205 | Usage: 206 | 207 | >>> d=nmmn.grmhd.Raishin() 208 | >>> d.vtk('ok100.vtk') 209 | >>> d.regridAll() 210 | 211 | Gets interpolated rho: 212 | 213 | >>> print(d.xc) 214 | 215 | TODO: 216 | - 3D version 217 | - parallel version 218 | """ 219 | #import lsd 220 | from . import lsd # py3 221 | 222 | # create two new arrays with spatial grid, with more points than the 223 | # original grid 224 | nxnew=self.nx*nboost 225 | nynew=self.ny*nboost 226 | nznew=self.nz*nboost 227 | xnew=numpy.linspace(self.x.min(),round(self.x.max()),nxnew) 228 | ynew=numpy.linspace(self.y.min(),round(self.y.max()),nynew) 229 | znew=numpy.linspace(self.z.min(),round(self.z.max()),nznew) 230 | 231 | # 'c' is added to 2D array values 232 | self.xc,self.yc=numpy.meshgrid(xnew,ynew) # 2D 233 | self.xc1d,self.yc1d,self.zc1d=xnew,ynew,znew # 1D 234 | 235 | # bottleneck, 236 | self.rhoc=lsd.regrid(self.x,self.y,self.rho,xnew,ynew) 237 | self.pc=lsd.regrid(self.x,self.y,self.p,xnew,ynew) 238 | self.vxc=lsd.regrid(self.x,self.y,self.vx,xnew,ynew) 239 | self.vyc=lsd.regrid(self.x,self.y,self.vy,xnew,ynew) 240 | self.vzc=lsd.regrid(self.x,self.y,self.vz,xnew,ynew) 241 | self.bxc=lsd.regrid(self.x,self.y,self.bx,xnew,ynew) 242 | self.byc=lsd.regrid(self.x,self.y,self.by,xnew,ynew) 243 | self.bzc=lsd.regrid(self.x,self.y,self.bz,xnew,ynew) 244 | 245 | self.bc=numpy.sqrt(self.bxc**2+self.byc**2) 246 | self.vc=numpy.sqrt(self.vxc**2+self.vyc**2) 247 | 248 | 249 | 250 | def regrid(self,var,nboost=5): 251 | """ 252 | Regrid one specific RAISHIN array to a nice cartesian grid for 253 | plotting with python. Note that RAISHIN's output is already in 254 | cartesian coordinates. 255 | 256 | :param var: array to be regridded e.g. 
d.rho 257 | :param nboost: factor of increase of number of grid points compared to 258 | previous grid 259 | 260 | Usage: 261 | 262 | >>> d=nmmn.grmhd.Raishin() 263 | >>> d.vtk('ok100.vtk') 264 | >>> d.regrid(d.rho) 265 | 266 | TODO: 267 | - 3D version 268 | - parallel version 269 | """ 270 | #import lsd 271 | from . import lsd 272 | 273 | # create two new arrays with spatial grid, with more points than the 274 | # original grid 275 | nxnew=self.nx*nboost 276 | nynew=self.ny*nboost 277 | xnew=numpy.linspace(self.x.min(),round(self.x.max()),nxnew) 278 | ynew=numpy.linspace(self.y.min(),round(self.y.max()),nynew) 279 | 280 | # 'c' is added to 2D array values 281 | self.xc,self.yc=numpy.meshgrid(xnew,ynew) # 2D 282 | self.xc1d,self.yc1d=xnew,ynew # 1D 283 | 284 | # bottleneck, 285 | return lsd.regrid(self.x,self.y,var,xnew,ynew) 286 | 287 | 288 | def regridsome(self,listarr,nboost=5): 289 | """ 290 | Regrid the selected arrays in the RAISHIN data to a nice cartesian 291 | grid for plotting with python. Regridding only some of the arrays 292 | will, of course, speed up things. 293 | 294 | :param listarr: list of strings specifying the arrays to be regridded. 295 | Options are: rho, p, v, b 296 | :param nboost: factor of increase of number of grid points compared to 297 | previous grid 298 | 299 | Usage: 300 | 301 | >>> d=nmmn.grmhd.Raishin() 302 | >>> d.vtk('ok100.vtk') 303 | >>> d.regridsome(['rho','v']) 304 | 305 | TODO: 306 | - 3D version 307 | - parallel version 308 | """ 309 | #import lsd 310 | from . import lsd # py3 311 | 312 | # create two new arrays with spatial grid, with more points than the 313 | # original grid 314 | nxnew=self.nx*nboost 315 | nynew=self.ny*nboost 316 | xnew=numpy.linspace(self.x.min(),round(self.x.max()),nxnew) 317 | ynew=numpy.linspace(self.y.min(),round(self.y.max()),nynew) 318 | 319 | # 'c' is added to 2D array values 320 | self.xc,self.yc=numpy.meshgrid(xnew,ynew) # 2D 321 | self.xc1d,self.yc1d=xnew,ynew # 1D 322 | 323 | # bottleneck 324 | if 'rho' in listarr: 325 | self.rhoc=lsd.regrid(self.x,self.y,self.rho,xnew,ynew) 326 | if 'p' in listarr: 327 | self.pc=lsd.regrid(self.x,self.y,self.p,xnew,ynew) 328 | if 'v' in listarr: 329 | self.vxc=lsd.regrid(self.x,self.y,self.vx,xnew,ynew) 330 | self.vyc=lsd.regrid(self.x,self.y,self.vy,xnew,ynew) 331 | self.vzc=lsd.regrid(self.x,self.y,self.vz,xnew,ynew) 332 | self.vc=numpy.sqrt(self.vxc**2+self.vyc**2) 333 | if 'b' in listarr: 334 | self.bxc=lsd.regrid(self.x,self.y,self.bx,xnew,ynew) 335 | self.byc=lsd.regrid(self.x,self.y,self.by,xnew,ynew) 336 | self.bzc=lsd.regrid(self.x,self.y,self.bz,xnew,ynew) 337 | self.bc=numpy.sqrt(self.bxc**2+self.byc**2) 338 | 339 | 340 | 341 | 342 | def yt2d(self): 343 | """ 344 | Converts 2d arrays from raishin to the 3d format that is understood 345 | by the yt package. Make sure you used regridAll first. 
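    Each regridded 2D array (e.g. ``rhoc``) becomes a 3D array of shape
    (nx, ny, 1), which is the uniform-grid layout that yt can ingest.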
346 | 
347 |         Inspired by this example: http://stackoverflow.com/questions/7372316/how-to-make-a-2d-numpy-array-a-3d-array
348 |         """
349 |         self.x3d=self.xc.T[..., numpy.newaxis]
350 |         self.y3d=self.yc.T[..., numpy.newaxis]
351 | 
352 |         self.rho3d=self.rhoc.T[..., numpy.newaxis]
353 |         self.p3d=self.pc.T[..., numpy.newaxis]
354 | 
355 |         self.vx3d=self.vxc.T[..., numpy.newaxis]
356 |         self.vy3d=self.vyc.T[..., numpy.newaxis]
357 |         self.vz3d=self.vzc.T[..., numpy.newaxis]
358 |         self.v3d=self.vc.T[..., numpy.newaxis]
359 | 
360 |         self.bx3d=self.bxc.T[..., numpy.newaxis]
361 |         self.by3d=self.byc.T[..., numpy.newaxis]
362 |         self.bz3d=self.bzc.T[..., numpy.newaxis]
363 |         self.b3d=self.bc.T[..., numpy.newaxis]
364 | 
365 | 
366 | 
367 | 
368 | 
369 | 
370 | 
371 | 
372 | 
373 | 
374 | class Harm:
375 |     """
376 |     Class that reads a HARM dump datafile and converts it to numpy format
377 |     for plotting with matplotlib, mayavi etc. Heavily inspired/copied from
378 |     harm_script.py.
379 | 
380 |     Reads data from a harm dump file:
381 | 
382 |     >>> d=nmmn.grmhd.Harm("dump019")
383 | 
384 |     The `d` object will then have several attributes related to the
385 |     dump physical fields, grid and coordinate information.
386 | 
387 |     Inspect densities:
388 | 
389 |     >>> d.rho
390 | 
391 |     Convert arrays to cartesian coordinates for plotting with matplotlib (this also sets ``d.x`` and ``d.y``):
392 | 
393 |     >>> rhonew=d.cartesian(d.rho)
394 |     >>> pcolormesh(d.x,d.y,log10(rhonew))
395 |     """
396 | 
397 |     def __init__(self, dump=None, gdump='gdump'):
398 |         """
399 |         TODO:
400 |         - [ ] input snapshot number instead of filename
401 |         """
402 |         self.dump=dump
403 |         self.gdump=gdump
404 | 
405 |         if dump is not None:
406 |             # read grid information
407 |             self.read_file(gdump,type="gdump")
408 | 
409 |             # read dump file
410 |             self.read_file(dump,type="dump")
411 |         else:
412 |             print("Please provide a dump file.")
413 | 
414 | 
415 | 
416 |     def read_file(self,dump,type=None,savedump=True,saverdump=False,noround=False):
417 |         """
418 |         High-level function that reads either MPI or serial gdump's
419 |         """
420 |         import os,sys
421 |         import glob
422 | 
423 |         if type is None:
424 |             if dump.startswith("dump"):
425 |                 type = "dump"
426 |                 print("Reading a dump file %s ..." % dump)
427 |             elif dump.startswith("gdump2"):
428 |                 type = "gdump2"
429 |                 print("Reading a gdump2 file %s ..." % dump)
430 |             elif dump.startswith("gdump"):
431 |                 type = "gdump"
432 |                 print("Reading a gdump file %s ..." % dump)
433 |             elif dump.startswith("rdump"):
434 |                 type = "rdump"
435 |                 print("Reading a rdump file %s ..." % dump)
436 |             elif dump.startswith("fdump"):
437 |                 type = "fdump"
438 |                 print("Reading a fdump file %s ..."
% dump) 439 | else: 440 | print("Couldn't guess dump type; assuming it is a data dump") 441 | type = "dump" 442 | 443 | #normal dump 444 | if os.path.isfile(dump): 445 | headerline = self.read_header(dump, returnheaderline = True) 446 | gd = self.read_body(dump,nx=self.N1+2*self.N1G,ny=self.N2+2*self.N2G,nz=self.N3+2*self.N3G,noround=1) 447 | if noround: 448 | res = self.data_assign( gd,type=type,nx=self.N1+2*self.N1G,ny=self.N2+2*self.N2G,nz=self.N3+2*self.N3G) 449 | else: 450 | res = self.data_assign(myfloat(gd),type=type,nx=self.N1+2*self.N1G,ny=self.N2+2*self.N2G,nz=self.N3+2*self.N3G) 451 | return res 452 | 453 | #MPI-type dump that is spread over many files 454 | else: 455 | flist = np.sort(glob.glob(dump + "_[0-9][0-9][0-9][0-9]" )) 456 | if len(flist) == 0: 457 | print( "Could not find %s or its MPI counterpart" % dump ) 458 | return 459 | sys.stdout.write( "Reading %s (%d files)" % (dump, len(flist)) ) 460 | sys.stdout.flush() 461 | ndots = 10 462 | dndot = len(flist)/ndots 463 | if dndot == 0: dndot = 1 464 | for i,fname in enumerate(flist): 465 | #print( "Reading file %d out of %d..." % (i,len(flist)) ) 466 | #header for each file might be different, so read each 467 | header = read_header(fname,issilent=1) 468 | if header is None: 469 | print( "Error reading header of %s, aborting..." % fname ) 470 | return 471 | lgd = read_body(fname,nx=N1+2*N1G,ny=N2+2*N2G,nz=N3+2*N3G) 472 | #this gives an array of dimensions (-1,N1,N2,N3)+potentially ghost cells 473 | if 0 == i: 474 | #create full array: of dimensions, (-1,nx,ny,nz) 475 | fgd = np.zeros( (lgd.shape[0], nx+2*N1G, ny+2*N2G, nz+2*N3G), dtype=np.float32) 476 | if not type == "rdump": 477 | #construct full indices: ti, tj, tk 478 | #fti,ftj,ftk = mgrid[0:nx,0:ny,0:nz] 479 | lti,ltj,ltk = lgd[0:3,:,:].view(); 480 | lti = np.int64(lti) 481 | ltj = np.int64(ltj) 482 | ltk = np.int64(ltk) 483 | fgd[:,lti+N1G,ltj+N2G,ltk+N3G] = lgd[:,:,:,:] 484 | else: 485 | print(starti,startj,startk) 486 | fgd[:,starti:starti+N1+2*N1G,startj:startj+N2+2*N2G,startk:startk+N3+2*N3G] = lgd[:,:,:,:] 487 | del lgd 488 | if i%dndot == 0: 489 | sys.stdout.write(".") 490 | sys.stdout.flush() 491 | res = data_assign(fgd,type=type,nx=nx+2*N1G,ny=ny+2*N2G,nz=nz+2*N3G) 492 | if savedump: 493 | #if the full dump file does not exist, create it 494 | dumpfullname = dump 495 | if (type == "dump" or type == "gdump") and not os.path.isfile(dumpfullname): 496 | sys.stdout.write("Saving full dump to %s..." % dumpfullname) 497 | sys.stdout.flush() 498 | header[1] = header[4] #N1 = nx 499 | header[2] = header[5] #N2 = ny 500 | header[3] = header[6] #N3 = nz 501 | fout = open( dumpfullname, "wb" ) 502 | #join header items with " " (space) as a glue 503 | #see http://stackoverflow.com/questions/12377473/python-write-versus-writelines-and-concatenated-strings 504 | #write it out with a new line char at the end 505 | fout.write(" ".join(header) + "\n") 506 | fout.flush() 507 | os.fsync(fout.fileno()) 508 | #reshape the dump content 509 | gd1 = fgd.transpose(1,2,3,0) 510 | gd1.tofile(fout) 511 | fout.close() 512 | print( " done!" 
) 513 | if res is not None: 514 | return res 515 | return res 516 | 517 | def read_header(self,dump,issilent=True,returnheaderline=False): 518 | """Read the header for the dump file""" 519 | # I am replacing all global variables below as attributes 520 | # of the object 521 | """ 522 | global t,nx,ny,nz,N1,N2,N3,N1G,N2G,N3G,starti,startj,startk,_dx1,_dx2,_dx3,a,gam,Rin,Rout,hslope,R0,ti,tj,tk,x1,x2,x3,r,h,ph,gcov,gcon,gdet,drdx,gn3,gv3,guu,gdd,dxdxp, games, startx1, startx2, startx3, tf, NPR, DOKTOT, BL 523 | global fractheta 524 | global fracphi 525 | global rbr 526 | global npow2 527 | global cpow2 528 | """ 529 | 530 | #read image 531 | fin = open( dump, "rb" ) 532 | headerline = fin.readline() 533 | header = headerline.split() 534 | nheadertot = len(header) 535 | fin.close() 536 | # Creates object attributes 537 | if not dump.startswith("dumps/rdump"): 538 | if not issilent: print( "dump header: len(header) = %d" % len(header) ) 539 | nheader = 57 540 | n = 0 541 | self.t = myfloat(np.float64(header[n])); n+=1 542 | #per tile resolution 543 | self.N1 = int(header[n]); n+=1 544 | self.N2 = int(header[n]); n+=1 545 | self.N3 = int(header[n]); n+=1 546 | #total resolution 547 | self.nx = int(header[n]); n+=1 548 | self.ny = int(header[n]); n+=1 549 | self.nz = int(header[n]); n+=1 550 | #numbers of ghost cells 551 | self.N1G = int(header[n]); n+=1 552 | self.N2G = int(header[n]); n+=1 553 | self.N3G = int(header[n]); n+=1 554 | self.startx1 = myfloat(float(header[n])); n+=1 555 | self.startx2 = myfloat(float(header[n])); n+=1 556 | self.startx3 = myfloat(float(header[n])); n+=1 557 | self._dx1=myfloat(float(header[n])); n+=1 558 | self._dx2=myfloat(float(header[n])); n+=1 559 | self._dx3=myfloat(float(header[n])); n+=1 560 | self.tf=myfloat(float(header[n])); n+=1 561 | self.nstep=myfloat(float(header[n])); n+=1 562 | self.a=myfloat(float(header[n])); n+=1 563 | self.gam=myfloat(float(header[n])); n+=1 564 | self.cour=myfloat(float(header[n])); n+=1 565 | self.DTd=myfloat(float(header[n])); n+=1 566 | self.DTl=myfloat(float(header[n])); n+=1 567 | self.DTi=myfloat(float(header[n])); n+=1 568 | self.DTr=myfloat(float(header[n])); n+=1 569 | self.DTr01=myfloat(float(header[n])); n+=1 570 | self.dump_cnt=myfloat(float(header[n])); n+=1 571 | self.image_cnt=myfloat(float(header[n])); n+=1 572 | self.rdump_cnt=myfloat(float(header[n])); n+=1 573 | self.rdump01_cnt=myfloat(float(header[n])); n+=1 574 | self.dt=myfloat(float(header[n])); n+=1 575 | self.lim=myfloat(float(header[n])); n+=1 576 | self.failed=myfloat(float(header[n])); n+=1 577 | self.Rin=myfloat(float(header[n])); n+=1 578 | self.Rout=myfloat(float(header[n])); n+=1 579 | self.hslope=myfloat(float(header[n])); n+=1 580 | self.R0=myfloat(float(header[n])); n+=1 581 | self.NPR=int(header[n]); n+=1 582 | self.DOKTOT=int(header[n]); n+=1 583 | self.fractheta = myfloat(header[n]); n+=1 584 | self.fracphi = myfloat(header[n]); n+=1 585 | self.rbr = myfloat(header[n]); n+=1 586 | self.npow2 = myfloat(header[n]); n+=1 587 | self.cpow2 = myfloat(header[n]); n+=1 588 | self.BL = myfloat(header[n]); n+=1 589 | else: 590 | print("rdump header") 591 | nheader = 46 592 | n = 0 593 | #per tile resolution 594 | self.N1 = int(header[n]); n+=1 595 | self.N2 = int(header[n]); n+=1 596 | self.N3 = int(header[n]); n+=1 597 | #total resolution 598 | self.nx = int(header[n]); n+=1 599 | self.ny = int(header[n]); n+=1 600 | self.nz = int(header[n]); n+=1 601 | #numbers of ghost cells 602 | self.N1G = int(header[n]); n+=1 603 | self.N2G = int(header[n]); 
n+=1 604 | self.N3G = int(header[n]); n+=1 605 | #starting indices 606 | self.starti = int(header[n]); n+=1 607 | self.startj = int(header[n]); n+=1 608 | self.startk = int(header[n]); n+=1 609 | self.t = myfloat(header[n]); n+=1 610 | self.tf = myfloat(header[n]); n+=1 611 | self.nstep = int(header[n]); n+=1 612 | self.a = myfloat(header[n]); n+=1 613 | self.gam = myfloat(header[n]); n+=1 614 | self.game = myfloat(header[n]); n+=1 615 | self.game4 = myfloat(header[n]); n+=1 616 | self.game5 = myfloat(header[n]); n+=1 617 | self.cour = myfloat(header[n]); n+=1 618 | self.DTd = myfloat(header[n]); n+=1 619 | self.DTl = myfloat(header[n]); n+=1 620 | self.DTi = myfloat(header[n]); n+=1 621 | self.DTr = myfloat(header[n]); n+=1 622 | self.DTr01 = myfloat(header[n]); n+=1 623 | self.dump_cnt = myfloat(header[n]); n+=1 624 | self.image_cnt = myfloat(header[n]); n+=1 625 | self.rdump_cnt = myfloat(header[n]); n+=1 626 | self.rdump01_cnt=myfloat(float(header[n])); n+=1 627 | self.dt = myfloat(header[n]); n+=1 628 | self.lim = myfloat(header[n]); n+=1 629 | self.failed = myfloat(header[n]); n+=1 630 | self.Rin = myfloat(header[n]); n+=1 631 | self.Rout = myfloat(header[n]); n+=1 632 | self.hslope = myfloat(header[n]); n+=1 633 | self.R0 = myfloat(header[n]); n+=1 634 | self.fractheta = myfloat(header[n]); n+=1 635 | self.fracphi = myfloat(header[n]); n+=1 636 | self.rbr = myfloat(header[n]); n+=1 637 | self.npow2 = myfloat(header[n]); n+=1 638 | self.cpow2 = myfloat(header[n]); n+=1 639 | self.tdump = myfloat(header[n]); n+=1 640 | self.trdump = myfloat(header[n]); n+=1 641 | self.timage = myfloat(header[n]); n+=1 642 | self.tlog = myfloat(header[n]); n+=1 643 | 644 | if n != nheader or n != nheadertot: 645 | print("Wrong number of elements in header: nread = %d, nexpected = %d, nototal = %d: incorrect format?"% (n, nheader, nheadertot) ) 646 | return headerline 647 | if returnheaderline: 648 | return headerline 649 | else: 650 | return header 651 | 652 | def read_body(self,dump,nx=None,ny=None,nz=None,noround=False): 653 | fin = open( dump, "rb" ) 654 | header = fin.readline() 655 | if dump.startswith("dumps/rdump"): 656 | dtype = np.float64 657 | body = np.fromfile(fin,dtype=dtype,count=-1) 658 | gd = body.view().reshape((self.nx,self.ny,self.nz,-1), order='C') 659 | if noround: 660 | gd=gd.transpose(3,0,1,2) 661 | else: 662 | gd=myfloat(gd.transpose(3,0,1,2)) 663 | elif dump.startswith("dumps/gdump2"): 664 | dtype = np.float64 665 | body = np.fromfile(fin,dtype=dtype,count=-1) 666 | gd = body.view().reshape((self.nx,self.ny,self.nz,-1), order='C') 667 | if noround: 668 | gd=gd.transpose(3,0,1,2) 669 | else: 670 | gd=myfloat(gd.transpose(3,0,1,2)) 671 | elif dump.startswith("dumps/fdump"): 672 | dtype = np.int64 673 | body = np.fromfile(fin,dtype=dtype,count=-1) 674 | gd = body.view().reshape((-1,self.nz,self.ny,self.nx), order='F') 675 | gd=myfloat(gd.transpose(0,3,2,1)) 676 | else: 677 | dtype = np.float32 678 | body = np.fromfile(fin,dtype=dtype,count=-1) 679 | gd = body.view().reshape((-1,self.nz,self.ny,self.nx), order='F') 680 | gd=myfloat(gd.transpose(0,3,2,1)) 681 | return gd 682 | 683 | def data_assign(self,gd,type=None,**kwargs): 684 | if type is None: 685 | print("Please specify data type") 686 | return 687 | if type == "gdump": 688 | self.gdump_assign(gd,**kwargs) 689 | return None 690 | elif type == "gdump2": 691 | self.gdump2_assign(gd,**kwargs) 692 | return None 693 | elif type == "dump": 694 | self.dump_assign(gd,**kwargs) 695 | return None 696 | elif type == "rdump": 697 | gd = 
self.rdump_assign(gd,**kwargs) 698 | return gd 699 | elif type == "fdump": 700 | gd = self.fdump_assign(gd,**kwargs) 701 | return gd 702 | else: 703 | print("Unknown data type: %s" % type) 704 | return gd 705 | 706 | def gdump_assign(self,gd,**kwargs): 707 | #global t,nx,ny,nz,N1,N2,N3,_dx1,_dx2,_dx3,a,gam,Rin,Rout,hslope,R0,ti,tj,tk,x1,x2,x3,r,h,ph,gcov,gcon,gdet,drdx,gn3,gv3,guu,gdd,dxdxp, games 708 | self.nx = kwargs.pop("nx",self.nx) 709 | self.ny = kwargs.pop("ny",self.ny) 710 | self.nz = kwargs.pop("nz",self.nz) 711 | self.ti,self.tj,self.tk,self.x1,self.x2,self.x3,self.r,self.h,self.ph = gd[0:9,:,:].view(); n = 9 712 | self.gv3 = gd[n:n+16].view().reshape((4,4,self.nx,self.ny,self.nz),order='F').transpose(1,0,2,3,4); n+=16 713 | self.gn3 = gd[n:n+16].view().reshape((4,4,self.nx,self.ny,self.nz),order='F').transpose(1,0,2,3,4); n+=16 714 | self.gcov = self.gv3 715 | self.gcon = self.gn3 716 | self.guu = self.gn3 717 | self.gdd = self.gv3 718 | self.gdet = gd[n]; n+=1 719 | self.drdx = gd[n:n+16].view().reshape((4,4,self.nx,self.ny,self.nz),order='F').transpose(1,0,2,3,4); n+=16 720 | self.dxdxp = self.drdx 721 | if n != gd.shape[0]: 722 | print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]) ) 723 | return 1 724 | return 0 725 | 726 | def gdump2_assign(self,gd,**kwargs): 727 | #global t,nx,ny,nz,N1,N2,N3,_dx1,_dx2,_dx3,a,gam,Rin,Rout,hslope,R0,ti,tj,tk,x1,x2,x3,gdet,games,rf1,hf1,phf1,rf2,hf2,phf2,rf3,hf3,phf3,rcorn,hcord,phcorn,re1,he1,phe1,re2,he2,phe2,re3,he3,phe3 728 | self.nx = kwargs.pop("nx",self.nx) 729 | self.ny = kwargs.pop("ny",self.ny) 730 | self.nz = kwargs.pop("nz",self.nz) 731 | self.ti,self.tj,self.tk,self.x1,self.x2,self.x3 = gd[0:6,:,:].view(); n = 6 732 | self.rf1,self.hf1,self.phf1,self.rf2,self.hf2,self.phf2,self.rf3,self.hf3,self.phf3 = gd[0:9,:,:].view(); n += 9 733 | self.rcorn,self.hcord,self.phcorn,self.rcent,self.hcent,self.phcen = gd[0:6,:,:].view(); n += 6 734 | self.re1,self.he1,self.phe1,self.re2,self.he2,self.phe2,self.re3,self.he3,self.phe3 = gd[0:9,:,:].view(); n += 9 735 | self.gdet = gd[n]; n+=1 736 | if n != gd.shape[0]: 737 | print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" 
741 | #read in a dump file
742 | def dump_assign(self,gd,**kwargs):
743 |     #global t,nx,ny,nz,_dx1,_dx2,_dx3,gam,hslope,a,R0,Rin,Rout,ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug,vu,B,pg,cs2,Sden,U,gdetB,divb,uu,ud,bu,bd,v1m,v1p,v2m,v2p,gdet,bsq,gdet,alpha,rhor, ktot, pg
744 |     self.nx = kwargs.pop("nx",self.nx)
745 |     self.ny = kwargs.pop("ny",self.ny)
746 |     self.nz = kwargs.pop("nz",self.nz)
747 |     self.ti,self.tj,self.tk,self.x1,self.x2,self.x3,self.r,self.h,self.ph,self.rho,self.ug = gd[0:11,:,:].view(); n = 11
748 |     self.pg = (self.gam-1)*self.ug
749 |     #lrho=np.log10(self.rho)
750 |     self.vu=np.zeros_like(gd[0:4])
751 |     self.B=np.zeros_like(gd[0:4])
752 |     self.vu[1:4] = gd[n:n+3]; n+=3
753 |     self.B[1:4] = gd[n:n+3]; n+=3
754 |     #if total entropy equation is evolved (on by default)
755 |     if self.DOKTOT == 1:
756 |         self.ktot = gd[n]; n+=1
757 |     self.divb = gd[n]; n+=1
758 |     self.uu = gd[n:n+4]; n+=4
759 |     self.ud = gd[n:n+4]; n+=4
760 |     self.bu = gd[n:n+4]; n+=4
761 |     self.bd = gd[n:n+4]; n+=4
762 |     self.bsq = mdot(self.bu,self.bd)
763 |     self.v1m,self.v1p,self.v2m,self.v2p,self.v3m,self.v3p=gd[n:n+6]; n+=6
764 |     self.gdet=gd[n]; n+=1
765 |     self.rhor = 1+(1-self.a**2)**0.5
766 |     if hasattr(self, 'guu'):
767 |         #if "guu" in globals():
768 |         #lapse
769 |         self.alpha = (-self.guu[0,0])**(-0.5)
770 |     if n != gd.shape[0]:
771 |         print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]))
772 |         return 1
773 |     return 0
774 | 
775 | def rdump_assign(self,gd,**kwargs):
776 |     #global t,nx,ny,nz,_dx1,_dx2,_dx3,gam,hslope,a,R0,Rin,Rout,ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug,vu,B,pg,cs2,Sden,U,gdetB,divb,uu,ud,bu,bd,v1m,v1p,v2m,v2p,gdet,bsq,gdet,alpha,rhor, ktot, Ttot, game, qisosq, pflag, qisodotb, kel, uelvar, Tel4, Tel5,Teldis, Tels, kel4, kel5,ugel,ugeldis, ugcon, sel, ugscon, ugel4, ugel5,stot, uelvar, Telvar, Tsel, sel, ugels, games, phi, keldis, phihat,csphib,lrho
777 |     self.nx = kwargs.pop("nx",self.nx)
778 |     self.ny = kwargs.pop("ny",self.ny)
779 |     self.nz = kwargs.pop("nz",self.nz)
780 |     n = 0
781 |     self.rho = gd[n]; n+=1
782 |     self.ug = gd[n]; n+=1
783 |     self.vu=np.zeros_like(gd[0:4])
784 |     self.B=np.zeros_like(gd[0:4])
785 |     self.vu[1:4] = gd[n:n+3]; n+=3
786 |     self.B[1:4] = gd[n:n+3]; n+=3
787 |     # if n != gd.shape[0]:
788 |     #     print("rd: WARNING: nread = %d < ntot = %d: incorrect format?" % (n, gd.shape[0]))
789 |     #     return 1
790 |     return gd
791 | 
792 | def fdump_assign(self,gd,**kwargs):
793 |     #global t,nx,ny,nz,_dx1,_dx2,_dx3,gam,hslope,a,R0,Rin,Rout,ti,tj,tk,x1,x2,x3,r,h,ph,rho,ug,vu,B,pg,cs2,Sden,U,gdetB,divb,uu,ud,bu,bd,v1m,v1p,v2m,v2p,gdet,bsq,gdet,alpha,rhor, ktot, Ttot, game, qisosq, pflag, qisodotb, kel, uelvar, Tel4, Tel5,Teldis, Tels, kel4, kel5,ugel,ugeldis, ugcon, sel, ugscon, ugel4, ugel5,stot, uelvar, Telvar, Tsel, sel, ugels, games, phi, keldis, phihat,csphib,lrho,fail
794 |     self.nx = kwargs.pop("nx",self.nx)
795 |     self.ny = kwargs.pop("ny",self.ny)
796 |     self.nz = kwargs.pop("nz",self.nz)
797 |     self.fail = gd
798 |     return gd
799 | 
800 | 
801 | 
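With a dump assigned, common diagnostics follow directly from the attributes set above. A sketch; `sim` is an illustrative instance name, and the factor of 2 assumes the Heaviside-Lorentz convention p_mag = b^2/2 used in HARM-type codes:

    beta = 2*sim.pg/sim.bsq     # plasma beta, with pg = (gam-1)*ug
    sigma = sim.bsq/sim.rho     # magnetization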
802 | def cartesian_sasha(self,yourvar,k=0,xy=1,xcoord=None,ycoord=None,symmx=0,mirrorx=0,mirrory=0):
803 |     """
804 |     This was adapted from `plc` at `harm_script.py`, atchekho/harmpi.
805 |     Need to understand this better.
806 | 
807 |     - k: 3D slice (if applicable)
808 |     """
809 |     r=self.r
810 |     h=self.h
811 |     ph=self.ph
812 | 
813 |     # avoids trouble
814 |     myvar=yourvar.copy()
815 | 
816 |     if np.abs(xy)==1:
817 |         if xcoord is None: xcoord = r * np.sin(h)
818 |         if ycoord is None: ycoord = r * np.cos(h)
819 |         if mirrory: ycoord *= -1
820 |         if mirrorx: xcoord *= -1
821 |     if xcoord is not None and ycoord is not None:
822 |         xcoord = xcoord[:,:,None] if xcoord.ndim == 2 else xcoord[:,:,k:k+1]
823 |         ycoord = ycoord[:,:,None] if ycoord.ndim == 2 else ycoord[:,:,k:k+1]
824 |     if np.abs(xy)==1 and symmx:
825 |         if myvar.ndim == 2:
826 |             myvar = myvar[:,:,None]  # add a singleton phi axis
827 |             myvar=np.concatenate((myvar[:,::-1],myvar),axis=1)
828 |             xcoord=np.concatenate((-xcoord[:,::-1],xcoord),axis=1)
829 |             ycoord=np.concatenate((ycoord[:,::-1],ycoord),axis=1)
830 |         else:
831 |             if myvar.shape[-1] > 1:
832 |                 symmk = (k+self.nz//2)%self.nz
833 |             else:
834 |                 symmk = k
835 |             myvar=np.concatenate((myvar[:,self.ny-1:self.ny,k:k+1],myvar[:,::-1,symmk:symmk+1],myvar[:,:,k:k+1]),axis=1)
836 |             xcoord=np.concatenate((xcoord[:,self.ny-1:self.ny,k:k+1],-xcoord[:,::-1],xcoord),axis=1)
837 |             ycoord=np.concatenate((ycoord[:,self.ny-1:self.ny,k:k+1],ycoord[:,::-1],ycoord),axis=1)
838 |     elif np.abs(xy) == 2 and symmx:
839 |         #if fracphi == 0.5 done in a robust way
840 |         if self.fracphi < 0.75:  # header fracphi; upstream harm_script computes this with get_fracphi()
841 |             r1 = np.concatenate((r,r,r[...,0:1]),axis=2)
842 |             ph1 = np.concatenate((ph,ph+np.pi,ph[...,0:1]+2*np.pi),axis=2)
843 |             myvar = np.concatenate((myvar,myvar,myvar[...,0:1]),axis=2)
844 |         else:
845 |             r1 = np.concatenate((r,r[...,0:1]),axis=2)
846 |             ph1 = np.concatenate((ph,ph[...,0:1]+2*np.pi),axis=2)
847 |             myvar = np.concatenate((myvar,myvar[...,0:1]),axis=2)
848 |         xcoord=(r1*np.cos(ph1))[:,self.ny//2,:,None]
849 |         ycoord=(r1*np.sin(ph1))[:,self.ny//2,:,None]
850 |         myvar = myvar[:,self.ny//2,:,None]
851 |     else:
852 |         if myvar.ndim == 2:
853 |             myvar = myvar[:,:,None]
854 |         else:
855 |             myvar = myvar[:,:,k:k+1]
856 | 
857 |     return myvar[:,:,0],xcoord[:,:,0],ycoord[:,:,0]  # the slicing above reduces axis 2 to a single plane
858 | 
859 | 
860 | def cartesian(self,myvar=None):
861 |     """
862 |     Computes cartesian coordinates x, y (and z, if 3D) from the spherical grid and stores them as attributes.
863 | 
864 |     Arguments:
865 | 
866 |     - myvar: optional array; if given, returns its 2D (first phi) slice
867 |     """
868 |     r=self.r
869 |     th=self.h
870 |     phi=self.ph
871 | 
872 |     if self.rho.shape[2]>1: # 3D
873 |         self.x=r*numpy.sin(th)*numpy.cos(phi)
874 |         self.y=r*numpy.sin(th)*numpy.sin(phi)
875 |         self.z=r*numpy.cos(th)
876 |     else: # 2D
877 |         x=r*numpy.sin(th)
878 |         y=r*numpy.cos(th)
879 | 
880 |         self.x=x[:,:,0]
881 |         self.y=y[:,:,0]
882 | 
883 |     if myvar is not None:
884 |         return myvar[:,:,0]
885 | 
886 | 
887 | 
888 | 
889 | 
890 | 
891 | 
892 | 
893 | 
894 | 
895 | 
896 | 
897 | 
898 | def myfloat(f,acc=1): # Sasha
899 |     """ acc=1 means np.float32, acc=2 means np.float64 """
900 |     if acc==1:
901 |         return( np.float32(f) )
902 |     else:
903 |         return( np.float64(f) )
904 | 
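A sketch of a quick density map built on `cartesian` for a 2D run; the matplotlib calls are illustrative and not part of the module, and `sim` is again a hypothetical instance:

    import numpy
    import matplotlib.pyplot as plt
    sim.cartesian()      # fills sim.x, sim.y with r*sin(th), r*cos(th)
    plt.pcolormesh(sim.x, sim.y, numpy.log10(sim.rho[:,:,0]), shading='auto')
    plt.colorbar()
    plt.show()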
905 | def mdot(a,b):
906 |     """
907 |     Computes a contraction of two tensors/vectors. Assumes
908 |     the following structure: tensor[m,n,i,j,k] OR vector[m,i,j,k],
909 |     where i,j,k are spatial indices and m,n are variable indices.
910 |     """
911 |     if (a.ndim == 3 and b.ndim == 3) or (a.ndim == 4 and b.ndim == 4):
912 |         c = (a*b).sum(0)
913 |     elif a.ndim == 5 and b.ndim == 4:
914 |         c = np.empty(np.maximum(a[:,0,:,:,:].shape,b.shape),dtype=b.dtype)
915 |         for i in range(a.shape[0]):
916 |             c[i,:,:,:] = (a[i,:,:,:,:]*b).sum(0)
917 |     elif a.ndim == 4 and b.ndim == 5:
918 |         c = np.empty(np.maximum(b[0,:,:,:,:].shape,a.shape),dtype=a.dtype)
919 |         for i in range(b.shape[1]):
920 |             c[i,:,:,:] = (a*b[:,i,:,:,:]).sum(0)
921 |     elif a.ndim == 5 and b.ndim == 5:
922 |         c = np.empty((a.shape[0],b.shape[1],a.shape[2],a.shape[3],max(a.shape[4],b.shape[4])),dtype=a.dtype)
923 |         for i in range(c.shape[0]):
924 |             for j in range(c.shape[1]):
925 |                 c[i,j,:,:,:] = (a[i,:,:,:,:]*b[:,j,:,:,:]).sum(0)
926 |     elif a.ndim == 5 and b.ndim == 6:
927 |         c = np.empty((a.shape[0],b.shape[1],b.shape[2],max(a.shape[2],b.shape[3]),max(a.shape[3],b.shape[4]),max(a.shape[4],b.shape[5])),dtype=a.dtype)
928 |         for mu in range(c.shape[0]):
929 |             for k in range(c.shape[1]):
930 |                 for l in range(c.shape[2]):
931 |                     c[mu,k,l,:,:,:] = (a[mu,:,:,:,:]*b[:,k,l,:,:,:]).sum(0)
932 |     else:
933 |         raise Exception('mdot', 'wrong dimensions')
934 |     return c
935 | 
936 | 
937 | 
938 | def fixminus(x):
939 |     """
940 |     Replaces nonphysical, negative values in array *x* with their
941 |     absolute values. Returns the modified array; does not
942 |     touch the original array.
943 |     """
944 |     i=numpy.where(x<0)
945 |     z=x.copy()
946 |     z[i]=numpy.abs(x[i])
947 | 
948 |     return z
949 | 
950 | 
951 | 
952 | 
953 | def wolframplot(infile,outfile,script="/Users/nemmen/work/software/mathematica/raishin.wl"):
954 |     """
955 |     Makes a pretty plot of the density field and B vector field of RAISHIN
956 |     data using the Wolfram Language.
957 | 
958 |     Make sure you point to the appropriate Wolfram script.
959 | 
960 |     - infile: input RAISHIN ASCII file generated with Raishin.vtk above, e.g. ok200.dat
961 |     - outfile: output file to be produced, e.g. ok200.png
962 |     """
963 |     import subprocess
964 |     cmd="wolframscript -script "+script+" "+infile+" "+outfile
965 |     subprocess.call(cmd.split())
966 | 
967 | 
968 | 
969 | 
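A self-contained sanity check of `mdot` and `fixminus` with mock arrays; the shapes and values are illustrative only:

    import numpy as np
    from nmmn.grmhd import mdot, fixminus

    nx, ny, nz = 8, 8, 1
    g = np.zeros((4,4,nx,ny,nz))                 # mock Minkowski metric, tensor[m,n,i,j,k]
    g[0,0], g[1,1], g[2,2], g[3,3] = -1, 1, 1, 1
    u = np.zeros((4,nx,ny,nz)); u[0] = 1         # mock 4-velocity, vector[m,i,j,k]
    print(mdot(g, u)[0].mean())                  # -1.0, i.e. u_0 = g_00 u^0
    print(fixminus(np.array([-1., 2., -3.])))    # [1. 2. 3.]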
970 | def regridFast(self, n=None, xlim = None):
971 |     """
972 |     Transforms a mesh in arbitrary coordinates (e.g. nonuniform elements)
973 |     into a uniform grid in the same coordinates. Uses a C function to
974 |     speed things up.
975 | 
976 |     One has to be particularly careful below about using a polar angle
977 |     (-pi/2 < polar angle < pi/2)
996 |     if xlim is None:
997 |         xlim = self.x1.max()
998 |     gmtry = self.pp.geometry
999 | 
1000 |     # figures out optimal size of cartesian grid
1001 |     if n is None:
1002 |         n=self.optimalgrid()
1003 | 
1004 |     # let's avoid dealing with arrays which are too large
1005 |     if n>3000:
1006 |         n=3000
1007 | 
1008 |     if gmtry == "SPHERICAL" or gmtry == "CYLINDRICAL":
1009 |         xnew=numpy.linspace(0, xlim, n)
1010 |         ynew=numpy.linspace(-xlim, xlim, n)
1011 |     else:
1012 |         xnew=numpy.linspace(-xlim, xlim, n)
1013 |         ynew=numpy.linspace(-xlim, xlim, n)
1014 | 
1015 |     rho=numpy.zeros((n,n))
1016 |     vx=numpy.zeros((n,n))
1017 |     vy=numpy.zeros((n,n))
1018 |     vz=numpy.zeros((n,n)) # vphi
1019 |     p=rho.copy()
1020 | 
1021 |     if gmtry == "SPHERICAL":
1022 |         fastregrid.regrid(xnew, ynew, r, th, self.rho, self.p, self.v1, self.v2, self.v3, rho, p, vx, vy, vz)
1023 |     else: #polar case for bondi
1024 |         print("Geometry not supported. Improve the method.")
1025 | 
1026 |     # coordinate arrays
1027 |     obj.x1,obj.x2=xnew,ynew # cartesian coords, 1D
1028 |     obj.X1,obj.X2=numpy.meshgrid(xnew,ynew) # cartesian coords, 2D
1029 |     obj.r, obj.th = nmmn.misc.cart2pol(xnew, ynew) # polar coords, 1D
1030 |     obj.R, obj.TH = numpy.meshgrid(obj.r,obj.th) # polar coords, 2D
1031 |     obj.rsp, obj.thsp = obj.r, numpy.pi/2.-obj.th # spherical polar angle, 1D
1032 |     obj.RSP, obj.THSP = numpy.meshgrid(obj.rsp,obj.thsp) # spherical polar coords, 2D
1033 | 
1034 |     # velocities
1035 |     obj.v1,obj.v2,obj.v3 = vx.T,vy.T,vz.T # Cartesian components
1036 |     obj.vr, obj.vth = nmmn.misc.vel_c2p(obj.TH,obj.v1,obj.v2) # polar components
1037 |     obj.speed = numpy.sqrt(obj.v1**2+obj.v2**2+obj.v3**2)
1038 | 
1039 |     # fluid variables
1040 |     obj.gamma=self.gamma
1041 |     obj.rho,obj.p=rho.T,p.T
1042 |     obj.entropy=numpy.log(obj.p/obj.rho**obj.gamma)
1043 |     obj.am=obj.v3*obj.R*numpy.sin(obj.THSP) # specific a. m., vphi*r*sin(theta)
1044 |     obj.Be=obj.speed**2/2.+obj.gamma*obj.p/((obj.gamma-1.)*obj.rho)-1./obj.R # Bernoulli function
1045 |     obj.Omega=obj.v3/obj.R # angular velocity
1046 | 
1047 |     # misc info
1048 |     obj.regridded=True # flag to tell whether the object was previously regridded
1049 |     obj.t=self.t
1050 |     obj.frame=self.frame
1051 |     obj.mdot=self.mdot
1052 |     obj.mass=self.mass
1053 | 
1054 |     return obj
1055 | 
1056 | 
1057 | 
1058 | 
--------------------------------------------------------------------------------