├── .DS_Store ├── .gitignore ├── .idea ├── encodings.xml ├── libraries │ └── R_User_Library.xml ├── misc.xml ├── modules.xml ├── pycecream.iml ├── vcs.xml └── workspace.xml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── README_dream.md ├── build └── lib │ └── pycecream │ ├── __init__.py │ ├── cream_f90.f90 │ └── creaminpar.par ├── dist └── pycecream-1.5.5.tar.gz ├── docs ├── _build │ ├── pydocmd │ │ ├── index.md │ │ └── pycecream.md │ └── site │ │ ├── 404.html │ │ ├── css │ │ ├── theme.css │ │ └── theme_extra.css │ │ ├── fonts │ │ ├── Lato │ │ │ ├── lato-bold.eot │ │ │ ├── lato-bold.ttf │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-bolditalic.eot │ │ │ ├── lato-bolditalic.ttf │ │ │ ├── lato-bolditalic.woff │ │ │ ├── lato-bolditalic.woff2 │ │ │ ├── lato-italic.eot │ │ │ ├── lato-italic.ttf │ │ │ ├── lato-italic.woff │ │ │ ├── lato-italic.woff2 │ │ │ ├── lato-regular.eot │ │ │ ├── lato-regular.ttf │ │ │ ├── lato-regular.woff │ │ │ └── lato-regular.woff2 │ │ ├── RobotoSlab │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ ├── roboto-slab-v7-regular.woff2 │ │ │ └── roboto-slab.eot │ │ ├── fontawesome-webfont.eot │ │ ├── fontawesome-webfont.svg │ │ ├── fontawesome-webfont.ttf │ │ ├── fontawesome-webfont.woff │ │ └── fontawesome-webfont.woff2 │ │ ├── img │ │ └── favicon.ico │ │ ├── index.html │ │ ├── js │ │ ├── jquery-2.1.1.min.js │ │ ├── modernizr-2.8.3.min.js │ │ └── theme.js │ │ ├── pycecream │ │ └── index.html │ │ ├── search.html │ │ ├── search │ │ ├── lunr.js │ │ ├── main.js │ │ ├── search_index.json │ │ └── worker.js │ │ ├── sitemap.xml │ │ └── sitemap.xml.gz ├── autogen.py ├── build_docs.sh ├── deploy_docs.sh ├── examples │ └── pyceream_test.py ├── pydocmd.yml └── serve_docs.sh ├── examples ├── .ipynb_checkpoints │ ├── 
test_pycecream-checkpoint.ipynb │ └── test_pycecream_tophat_0lag-checkpoint.ipynb ├── fitinfo_fit_figures__1.pdf ├── fluxflux.pdf ├── page_0_lcplot_fit_figures__1.pdf ├── page_1_lcplot_fit_figures__1.pdf ├── resamp_ ├── test_pycecream.ipynb ├── test_pycecream.py ├── test_pycecream_background_polynomials.py ├── test_pycecream_files │ ├── test_pycecream_12_0.png │ ├── test_pycecream_6_3.png │ ├── test_pycecream_6_4.png │ ├── test_pycecream_6_6.png │ └── test_pycecream_6_8.png ├── test_pycecream_tophat_0lag.ipynb ├── test_pycecream_tophat_0lag.md └── test_pycecream_tophat_0lag_files │ ├── test_pycecream_tophat_0lag_6_1.png │ ├── test_pycecream_tophat_0lag_6_2.png │ ├── test_pycecream_tophat_0lag_6_4.png │ └── test_pycecream_tophat_0lag_6_6.png ├── misc ├── merge_test.py ├── run_lc.py ├── test_parallel.py └── test_pycecream.py ├── pycecream.egg-info ├── PKG-INFO ├── SOURCES.txt ├── dependency_links.txt ├── not-zip-safe ├── requires.txt └── top_level.txt ├── pycecream ├── .DS_Store ├── __init__.py ├── __init__.pyc ├── cream_f90.f90 ├── creaminpar.par ├── creammod.mod └── modules │ ├── cream_lcplot.py │ ├── cream_plotlibrary.py │ ├── myconvolve.py │ ├── mydisksim.py │ ├── myedlum.py │ ├── myfake.py │ ├── myfake_amp.py │ ├── mylcgen.py │ ├── myplank.py │ ├── myrandom.py │ ├── myresample.py │ ├── mytemp0.py │ └── mytfb_quick.py ├── requirements.txt └── setup.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/.DS_Store -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | pycecream/.DS_Store 3 | pycecream_test/.DS_Store 4 | .idea 5 | .idea/ 6 | .ipynb_checkpoints 7 | .ipynb_checkpoints/ 8 | .idea/workspace 9 | *venv* 10 | *scratch_* 11 | *simulation_files* 12 | *output_2020* 13 | *.pickle 14 | 
*simulation_dir* 15 | *.pdf 16 | __pycache__/ -------------------------------------------------------------------------------- /.idea/encodings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | -------------------------------------------------------------------------------- /.idea/libraries/R_User_Library.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 7 | -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /.idea/pycecream.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 16 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /.idea/workspace.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 12 | 13 | 15 | 16 | 17 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | $USER_HOME$/.subversion 41 | 42 | 43 | 44 | 45 | 1560762203620 46 | 55 | 56 | 57 | 58 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to 
Pycecream 2 | 3 | 4 | ## Suggesting new features / Reporting An Issue 5 | 6 | First, check to see if there's an existing issue/pull request for the 7 | bug/feature. All issues are at https://github.com/dstarkey23/pycecream/issues and pull reqs are at 8 | https://github.com/dstarkey23/pycecream/pull. 9 | 10 | If there isn't an existing issue there, please file an issue. The 11 | ideal report includes: 12 | 13 | - A description of the problem/suggestion. 14 | - How to recreate the bug (including the version on your python interpreter). 15 | - If possible, create a pull request with a (failing) test case 16 | demonstrating what's wrong. This makes the process for fixing bugs 17 | quicker & gets issues resolved sooner. 18 | 19 | ## Setting up your environment 20 | 21 | First, clone the repo, then `cd` into the repo. 22 | 23 | ```bash 24 | $ git@github.com:dstarkey23/pycecream.git 25 | $ cd pycecream 26 | ``` 27 | 28 | create a new virtual environment 29 | ```bash 30 | $ python3.7 -m venv venv 31 | ``` 32 | 33 | activate your environment 34 | ```bash 35 | $ . venv/bin/activate 36 | ``` 37 | 38 | install the required dependencies 39 | ``` 40 | $ pip install -e .[tests] 41 | ``` 42 | 43 | run tests 44 | ``` 45 | $ python setup.py test 46 | ``` 47 | 48 | serve documentation 49 | ```bash 50 | $ ./docs/serve_docs.sh 51 | ``` 52 | …and view the docs at http://localhost:8000 in your web browser. 53 | 54 | 55 | :tada: Now you're ready to create a new branch, add a feature or fix a bug, then send us a pull request! :tada: 56 | 57 | ## Contributing Code 58 | 59 | A good pull request: 60 | - Is clear. 61 | - Follows the existing style of the code base (PEP-8). 62 | - Has comments included as needed. 63 | - A test case that demonstrates the previous flaw that now passes with 64 | the included patch, or demonstrates the newly added feature. 
65 | 66 | 67 | 68 | ## Style guide 69 | 70 | #### Commit message formatting 71 | We adopt the [Conventional Commits](https://www.conventionalcommits.org) convention to format commit messages. 72 | 73 | 74 | #### Documentation 75 | We're using [Pydocmd](https://github.com/NiklasRosenstein/pydoc-markdown) 76 | to automatically generate docs. 77 | 78 | Documentation should follow the [Google Documentation Style Guide](https://developers.google.com/style/api-reference-comments) 79 | 80 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PyceCREAM 2 | 3 | Here is a python implementation of my accretion disc and emission line lightcurve-fitting code (previously CREAM). This guide briefly covers generating synthetic data and calling a new pycecream object to ingest and fit the accretion disc model (or emission line model) to a set of input light curves. I also demonstrate how to access the output of the pycecream fit. The output includes the fitted light curves, any new light curve data points after merging, fitted response functions and parameter MCMC chain histories for the disc and/or tophat response parameters. 4 | 5 | 6 | The following documentation details the use of pycecream for either ... 7 | 8 | 1) Continuum Reverberation Mapping: Fitting the AGN lamppost model to continuum light curves for continuum reverberation mapping. 
9 | 10 | 2) Line Reverberation Mapping: Fitting a tophat lag function to line light curves for line reverberation mapping 11 | 12 | 3) [Multi-telescope lightcurve merging](https://github.com/dstarkey23/pycecream/blob/master/README_dream.md): Another application of this code merges light curves taken from multiple telescopes, models the calibration differences between these light curves and produces a single combined light curve. This implementation is known as DREAM. Please see [here](https://github.com/dstarkey23/pycecream/blob/master/README_dream.md) for DREAM merging instructions. 13 | 14 | 15 | 16 | Most of these features are used in some form or another from a previous f90 version of this code (CREAM) in the following literature 17 | 18 | * Grier et al in prep 19 | * Grier et al 2018 https://iopscience.iop.org/article/10.3847/1538-4357/aa98dc/pdf 20 | * Starkey et al 2017 https://ui.adsabs.harvard.edu/#abs/arXiv:1611.06051 21 | * Starkey et al 2016 https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162 22 | 23 | Please send questions to ds207@st-andrews.ac.uk. 24 | 25 | #### Docs available [here](https://dstarkey23.github.io/pycecream/) 26 | 27 | 28 | ## Requirements & Installation 29 | 30 | #### Install Fortran compiler (see macports / wget) 31 | I use Gfortran. If you have an alternate (e.g ifort), 32 | please indicate the standard command used to call 33 | the fortran compiler using the ```fortran_caller``` argument 34 | (default ```fortran_caller = gfortran```). 35 | 36 | #### Install Pycecream 37 | ``` 38 | pip install pycecream 39 | ``` 40 | 41 | 42 | ## Section 1 (Optional): Generate test synthetic light curves 43 | 44 | In this example we generate 4 disk light curves and 2 emission-line light curves modelled as a top-hat with a 20-day lag. The code below generates a list where each index contains an Nx3 numpy array for each light curve. 
The 3 vertical axes for each light curve are the time, flux and noise respectively (query synthetic_data['echo light curves'][0] for an example of the format required when inputting your own light curve data). 45 | 46 | The example below combines continuum and line light curves and illustrates a case in which you may have two of the same emission line (and so want to fit with the same response function model) but observed from different telescopes that require separate noise models. 47 | 48 | 49 | ```python 50 | import astropy_stark.myfake as mf 51 | import matplotlib.pylab as plt 52 | 53 | ''' 54 | mf.myfake arguments are 55 | 56 | wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), 57 | 58 | snr: set the signal-to-noise relative to light curve rms 59 | 60 | cadence: set the mean cadence 61 | 62 | top hat centroid: set the centroid for the top-hat (I think that's what this does but the line lag 63 | thing is still newish so I'm used to just making continuum light curves) 64 | ''' 65 | 66 | 67 | synthetic_data = mf.myfake( 68 | [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0], 69 | [50.0,50.0,10.0,50.0,50,10.], 70 | [1.0,1.0,2.0,1.0,1.0,3.0], 71 | thcent = 20.0 72 | ) 73 | 74 | '''This recovers the synthetic data''' 75 | dat = synthetic_data['echo light curves'] 76 | ``` 77 | 78 | ## Section 2: Set-up and run PyceCREAM 79 | 80 | 81 | 82 | 83 | ```python 84 | import pycecream 85 | 86 | #instantiate a pycecream object 87 | a = pycecream.pycecream() 88 | 89 | ''' 90 | If you use a fortran compiler other than gfortran please indicate here. 91 | I just re-enter gfortran here for demonstration purposes even though 92 | this is unnecessary as gfortran is the default argument. 93 | ''' 94 | a.fortran_caller = 'gfortran' 95 | 96 | 97 | 98 | '''Choose an output directory in which to save the results. 99 | This will be a new directory that you have not previously created (pycecream will make it automatically). 
100 | 101 | NOTE: Each new cream simulation must have a new name for the "output_directory" argument below 102 | otherwise an exception is raised. This is to prevent accidentally overwriting previous simulations. 103 | I might change this in a future version 104 | ''' 105 | a.project_folder = 'fit_synthetic_lightcurves' 106 | 107 | 108 | 109 | ''' 110 | Add the light curves to be modeled. Inputs should be a 3 column numpy 111 | array of `time`, `flux`, `error`. 112 | In this case we are using the "dat" output 113 | from the synthetic data above. 114 | ''' 115 | a.add_lc(dat[0], name = 'continuum 4000') 116 | a.add_lc(dat[1], name = 'continuum 5000') 117 | a.add_lc(dat[2], name = 'continuum 5000 (b)') 118 | a.add_lc(dat[3], name = 'continuum 7000') 119 | 120 | #If adding a line light curve, must indicate using the "kind" argument 121 | a.add_lc(dat[4],name='test line 1',kind='line') 122 | 123 | #If we want the same line response function model, set "share_previous_lag"=True 124 | a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True) 125 | 126 | 127 | 128 | ''' 129 | specify the number of MCMC iterations. Normally at least several thousand are necessary but shorter numbers 130 | can be used just to check everything is working, as is done here. 131 | ''' 132 | a.N_iterations=40 133 | 134 | ''' 135 | specify the step sizes for the fit parameters. 136 | Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. 137 | ''' 138 | a.p_accretion_rate_step = 0.1 139 | 140 | ''' 141 | Check the input settings are ok prior to running 142 | ''' 143 | print(a.lightcurve_input_params) 144 | 145 | ''' 146 | RUN! specify ncores (default = 1) to parallelise with 1 chain per core 147 | ''' 148 | a.run(ncores = 4) 149 | ``` 150 | 151 | ## Section 3: Outputs 152 | 153 | The main outputs are stored in two dataframes. 154 | 155 | ### 1) output_lightcurves = a.get_light_curve_fits(): 156 | This is a dictionary of 3 data frames. 
157 | 158 | 1.1) output_lightcurves['model']: standard time, model, error envelope for each file 159 | 160 | 1.2) output_lightcurves['merged model'] AS above but with the error bars, vertical and horizontal scalings applied relative to the reference model. Not sure but I think the reference model defaults to the first occurrence of a particular wavelength in the order that it was added in self.add_lc 161 | 162 | 1.3) output_lightcurves['merged data'] DICTIONARY (since the input data light curves can be different sizes) The same transformations but applied to the input light curve data. useful if using cream only to merge the original light curves from different telescopes to a new scale for further study elsewhere 163 | 164 | ### 2) output_chains = a.get_MCMC_chains(): 165 | These are the MCMC chains for each parameter. 166 | 167 | 168 | 169 | ```python 170 | ''' 171 | Get the mcmc chains and output fits. 172 | Each of these arguments comes with a "location" argument where you can point to a 173 | previous simulation and recover the outputs. 174 | If this is left blank we default to the current simulation 175 | ''' 176 | output_chains = a.get_MCMC_chains(location = None) 177 | output_lightcurves = a.get_light_curve_fits(location = None) 178 | 179 | 180 | ''' 181 | NEW: 11/12/2019 Now the fourier chains are available as a pandas 182 | dataframe. 183 | Stats on the sine and cosine parameters are also available for each 184 | frequency accessible in the `fourier_stats` dictionary element of this 185 | `get_MCMC_fourier_chains` function. 186 | ''' 187 | output_fourier_chains = a.get_MCMC_fourier_chains(location=None) 188 | fourier_chains = output_fourier_chains['fourier_chains'] 189 | fourier_stats = output_fourier_chains['fourier_stats'] 190 | 191 | 192 | ''' 193 | make figures of the fit, posterior, light curves etc. file prefix tells the code where you want to save the output. 194 | The figure plotting is somewhat primitive and is a relic of when I still used cream. 
You may prefer to use your own 195 | output figures with the output of the "get_MCMC_chains" and "get_light_curve_fits" functions above. 196 | ''' 197 | a.plot_results(file_prefix='fit_figures') 198 | 199 | 200 | 201 | 202 | ''' 203 | figures can also be made on an indivdual basis with axes objects returned from python plotting functions 204 | ''' 205 | #plot the fitted light curves. 206 | a.plot_lightcurves() 207 | plt.show() 208 | 209 | 210 | #plot the driving light curve 211 | a.plot_driver() 212 | plt.show() 213 | 214 | 215 | #plot the parameter trace plots 216 | a.plot_trace() 217 | plt.show() 218 | 219 | 220 | #plot the covariance parameter plot for the disc parameters 221 | a.plot_posterior() 222 | plt.show() 223 | 224 | 225 | ``` 226 | 227 | 228 | 229 | -------------------------------------------------------------------------------- /README_dream.md: -------------------------------------------------------------------------------- 1 | # PyceCREAM - DREAM 2 | 3 | 4 | Reverberation Mapping requires telescope images to be converted to light curves. These images are often taken 5 | from multiple telescopes over several months with unique calibration anomalies between the telescopes. 6 | These calibration anomalies are often visible in the final combined light curve. 7 | 8 | ##Example Figure showing lighcurves with calibration problem 9 | 10 | Often the individual light curves are rescaled to a reference mean and standard deviation 11 | prior to merging. 12 | 13 | Here Pycecream is used to combine light curves in a more sophisticated way by rescaling each light curve 14 | to pycecreams random walk model fit. In addition to correcting the calibration fluxes, 15 | dream also modifies the input error bars with a multiplicative (f) and additive (V) parameter. 
16 | 17 | 18 | \sigma^{2} = \left ( f \sigma_0 \right )^{2} + V 19 | 20 | 21 | ## Installation (requires Python 3) 22 | 23 | `pip install pycecream` 24 | 25 | 26 | ## g-band merging example 27 | 28 | The example below shows how to use dream to merge example g-band light curves 29 | from 5 telescopes and access the merged output. 30 | 31 | ```python 32 | import pycecream as pc 33 | import pickle 34 | 35 | #initialise dream instance 36 | dream = pc.dream(Niterations = 200) 37 | 38 | #add each light curve ('dat' should be a N x 3 array of time, flux, errorbar) 39 | #errorbar_variance, errorbar_rescale should be True to optimise the 'f' and 'V' error bar parameters 40 | dream.add_lc(dat1, 'g-band 1', errorbar_variance=True, errorbar_rescale=True) 41 | dream.add_lc(dat2, 'g-band 2', errorbar_variance=True, errorbar_rescale=True) 42 | dream.add_lc(dat3, 'g-band 3', errorbar_variance=True, errorbar_rescale=True) 43 | dream.add_lc(dat4, 'g-band 4', errorbar_variance=True, errorbar_rescale=True) 44 | dream.add_lc(dat5, 'g-band 5', errorbar_variance=True, errorbar_rescale=True) 45 | 46 | 47 | 48 | #run the simulation 49 | dream.run() 50 | 51 | 52 | #access the input lightcurves 53 | input = dream.lcinput 54 | 55 | #access the combined merged light curve 56 | merged_combined = dream.lc_combined 57 | 58 | #access the individual (but rescaled light curves) 59 | merged_individual = dream.lc_merged_individual 60 | 61 | 62 | 63 | #OPTIONAL: Save the output for later 64 | os.system('rm ' + picklefile) 65 | pickle_out = open(picklefile, "wb") 66 | pickle.dump(dream, pickle_out) 67 | pickle_out.close() 68 | 69 | ``` 70 | 71 | 72 | -------------------------------------------------------------------------------- /build/lib/pycecream/creaminpar.par: -------------------------------------------------------------------------------- 1 | ./hermine_lc 2 | !!! path to main folder (dirpath) folder string subfolders of agn with the light curves !! set to ./fake/ to generate fake data 3 | F F !!! 
save the BOF and pspec plots as separate files and save big parameter file (takes lots of space) 4 | !!! save plot frequency (if -ve, display plots to screen and backs up every -value iterations) 5 | 20 6 | 4 !! how many backups to keep 7 | 8 | -0.01,-0.007,0.0 !!! (flo,df,start0) (0.02, -0.007, 1.0 pre 14/apr/16 values) Enter lowest and frequency spacing in cycles/day (if -ve, code decides for df uses flo by default), start0 = 1 if you want fourier amplitudes to start at 0 9 | -20 !!!(NW) number of Fourier frequencies (if -ve program choses), (if 1 start fourier amplitudes at 0 else code guesses) 10 | 0.5 !!! (whi) Enter highest frequency in cycles/day (if NW -ve above then this determines NW. If both -ve, program chooses) 11 | 100 !!! (nits) Default number of iterations (nits) (code cuts off when converged) 12 | 2 !!! (AT) NUmber of acceptances before doubling stepsizes 13 | 5 !!! (RT) Number of rejections before halving step sizes 14 | -10.0 50.0 !!! (min and maxtau) lag limits 15 | 0.2 !!! (dtaugrid) if -ve, automatically determined, but may want to control manually if having resolution problems 16 | 17 | !!!!!!! Fourier scaling 18 | 0.00001 !! starting fourier scaling (default0.001) if -ve, code uses power law slope to scale terms 19 | 20 | !!!!!!! Error bar expansion 21 | F !! sigexpand (Set true to allow error bars to expand) 22 | 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 !! Default expansion factor sigexpandparm (NLC +1 f xray data) 23 | 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 !! default logarithmic stepsize in error bar expansion (sigexpandsteplog) 24 | 25 | !!!!!!! Delay parameters 26 | 0.1 !!! black hole efficiency (eta) 27 | 0.0 !!! disk albedo (alb) 28 | 0.0 !!! height profile constant of proportionality (konst) 29 | 0.0 !!! power index f height proportionality (alpha) 30 | 3.0 !!! innner radius (rs) 31 | 1.0 !!! lamppost height (rs) 32 | 1.0 !!! 
black hole accretion rate (umdot) 33 | 1.12e8 !!! black hole mmdot (umbh) 34 | -6.0 -0.1 !!! prior on mmdot (mean and sd of gaussian (in log 10 units, set sd -ve to exclude prior) 35 | 0.01 !!! black hole mdot scaling [log10scaling] 36 | 37 | !! Inclination parameters 38 | 0.0 !!! starting inclination (degrees) 60 degrees 39 | 0.0 !!! scaling cos(theta) 40 | -50.0 20.0 !!! Enter deginc0 and a for inclination prior - takes form (1 / (1+(cosinc/cosinc0)^a)) set deginc0 -ve to exclude 41 | 42 | !!!!!!! PSPEC PRIOR PARMS 43 | F !! implement break in power spectrum (T or F) 44 | -0.001 !!! (p0mean) mean value of p0 45 | 0.0 !!!was -1before 26/10/2014 (siglogp0) uncertainty in logp0 (for prior prior is not yet logarithmic make sigp0=p0mean*siglogp0) (14th may set to 0.001) 46 | 0.00 !!! (p0steplog) logarithmic stepsize of p0 47 | 0.1 !!! (meanf0) mean value of f0 (cycles / day) 48 | 0.0000000 !!! (siglogw0) uncertainty in log w0 ( for prior set -ve to not include prior) 49 | 0.00 !!! (w0steplog) logarithmic step size of w0 50 | 2.0 !!! (meanalpha) mean (-ve) value of alpha (meanalpha) 51 | 0.0 !!! (sigalpha) uncertainty in alpha (sigalph) for the prior (set -ve to not include prior) 52 | 0.0 !!! (alphascale) scaling of alpha parameter 53 | 2.0 !!! (meanbeta) mean (-ve) value of beta (meanbeta) 54 | 0.0 !!! (sigbeta) uncertainty in beta (sigbeta) for the prior (set -ve to not include prior) 55 | 0.0 !!! (betascale) scaling of beta parameter 56 | 57 | !!!!!!! Stretch and offset parms (starting values inside code just the sclaings here) 58 | 0.0 !!! (stretchscale) 59 | 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 !!! (gal flux real array containing the steady state host component 60 | 0.1 !!! (galscale) !! log10 61 | 62 | !!!!!! luminosity distance and scaling (version 2 only) 63 | 0.0, 0.3, 0.7 !!! redshift of target, omega_m, omega_l 64 | 500.0 !!! 
Luminosity distance (MPC) 65 | 0.01 !!! dlscale (in log10 units) 66 | 0.0168 !!! MW extinction parameter in ra and dec of target 67 | 1.0 !!! Default AGN extinction parameter E(B-V) 68 | 1.0 !!! default logarithmic stepping ebmvagnsteplog 69 | 70 | !!!!!! temperature radius parms, scaling (log10) (version 3 only) 71 | 0.75 !!!!! viscous tr parameter (P(NPtridx)) 72 | 0.75 !!!!! irradiation tr parameter 73 | 0.0 !!!!! logarithmic scaling of tv 74 | 0.0 !!!!! logarithmic scaling of ta parms 75 | -5.0 !!!!! UR0 (rs) if negative then in light days 76 | 0.1 !!!!! logarithmic scaling of ur0 77 | 0.1 !!!!! offset scale (mjy) 78 | 79 | F !!!!! Skip convolve (Only set this to Y if you are running a test to fit just a driving light curve (for estimating power spectrum properties) then not interested in convolution or echo lightcurves) 80 | -------------------------------------------------------------------------------- /dist/pycecream-1.5.5.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/dist/pycecream-1.5.5.tar.gz -------------------------------------------------------------------------------- /docs/_build/pydocmd/index.md: -------------------------------------------------------------------------------- 1 | # PyceCREAM 2 | 3 | Here is a python implementation of my accretion disc and emission line lightcurve-fitting code (previously CREAM). This guide briefly covers generating synthetic data and calling a new pycecream object to ingest and fit the accretion disc model (or emission line model) to a set of input light curves. I also demonstrate how to access the output of the pycecream fit. The output includes the fitted light curves, any new light curve data points after merging, fitted response functions and parameter MCMC chain histories for the disc and/or tophat response parameters. 
4 | 5 | Most of these features are used in some form or another from a previous f90 version of this code (CREAM) in the following literature 6 | 7 | * Grier et al in prep 8 | * Grier et al 2018 https://iopscience.iop.org/article/10.3847/1538-4357/aa98dc/pdf 9 | * Starkey et al 2017 https://ui.adsabs.harvard.edu/#abs/arXiv:1611.06051 10 | * Starkey et al 2016 https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162 11 | 12 | Please send questions to ds207@st-andrews.ac.uk. 13 | 14 | #### Docs available [here](https://dstarkey23.github.io/pycecream/) 15 | 16 | 17 | ## Requirements & Installation 18 | 19 | #### Install Fortran compiler (see macports / wget) 20 | I use Gfortran. If you have an alternate (e.g ifort), 21 | please indicate the standard command used to call 22 | the fortran compiler using the ```fortran_caller``` argument 23 | (default ```fortran_caller = gfortran```). 24 | 25 | #### Install Pycecream 26 | ``` 27 | pip install pycecream 28 | ``` 29 | 30 | 31 | ## Section 1 (Optional): Generate test synthetic light curves 32 | 33 | In this example we generate 4 disk light curves and 2 emission-line light curves modelled as a top-hat with a 20-day lag. The code below generates a list where each index contains an Nx3 numpy array for each light curve. The 3 vertical axis for each light curve are the time, flux and noise respectively (query synthetic_data['echo lightcurves'][0] for an example of the format required when inputting your own light curve data). 34 | 35 | The example below combines continuum and line light curves and illustrates a case in which you may have two of the same emission line (and so want to fit with the same response function model) but observed from different telescopes that require seperate noise models. 
36 | 37 | 38 | ```python 39 | import astropy_stark.myfake as mf 40 | import matplotlib.pylab as plt 41 | 42 | ''' 43 | mf.myfake arguments are 44 | 45 | wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), 46 | 47 | snr: set the signal-to-noise relative to light curve rms 48 | 49 | cadence:set the mean cadence 50 | 51 | top hat centroid: set the centroid for the top-hat (I think thats what this does but the line lag 52 | thing is still newish so Im used to just making continuum light curve) 53 | ''' 54 | 55 | 56 | synthetic_data = mf.myfake( 57 | [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0], 58 | [50.0,50.0,10.0,50.0,50,10.], 59 | [1.0,1.0,2.0,1.0,1.0,3.0], 60 | thcent = 20.0 61 | ) 62 | 63 | '''This recovers the synthetic data''' 64 | dat = synthetic_data['echo light curves'] 65 | ``` 66 | 67 | ## Section 2: Set-up and run PyceCREAM 68 | 69 | 70 | 71 | 72 | ```python 73 | import pycecream 74 | 75 | #instantiate a pycecream object 76 | a = pycecream.pycecream() 77 | 78 | ''' 79 | If you use a fortran compiler other than gfortran please indicate here. 80 | I just re-enter gfortran here for demonstration purposes even though 81 | this is unecassary as gfortran is the default argument. 82 | ''' 83 | a.fortran_caller = 'gfortran' 84 | 85 | 86 | 87 | '''Choose an output directory in which to save the results. 88 | This will be a new directory that you have not previously created (pycecream will make it automatically). 89 | 90 | NOTE: Each new cream simulation must have a new name for "output_directory argument below 91 | otherwise an excpetion is raised. This is to prevent accidentally overwriting previous simulations. 92 | I might change this in a future version 93 | ''' 94 | a.project_folder = 'fit_synthetic_lightcurves' 95 | 96 | 97 | 98 | ''' 99 | Add the light curves to be modeled. Inputs should be a 3 column numpy 100 | array of `time`, `flux`, `error`. 
101 | In this case we are using the "dat" output 102 | from the synthetic data above. 103 | ''' 104 | a.add_lc(dat[0], name = 'continuum 4000') 105 | a.add_lc(dat[1], name = 'continuum 5000') 106 | a.add_lc(dat[2], name = 'continuum 5000 (b)') 107 | a.add_lc(dat[3], name = 'continuum 7000') 108 | 109 | #If adding a line light curve, must indicate using the "kind" argument 110 | a.add_lc(dat[4],name='test line 1',kind='line') 111 | 112 | #If we want the same line response function model, set "share_previous_lag"=True 113 | a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True) 114 | 115 | 116 | 117 | ''' 118 | specify the numnber of MCMC iterations. Normally at least several thousand are necessary but shorter numbers 119 | can be used just to check everything is working is done here. 120 | ''' 121 | a.N_iterations=40 122 | 123 | ''' 124 | specify the step sizes for the fit parameters. 125 | Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. 126 | ''' 127 | a.p_accretion_rate_step = 0.1 128 | 129 | ''' 130 | Check the input settings are ok prior to running 131 | ''' 132 | print(a.lightcurve_input_params) 133 | 134 | ''' 135 | RUN! specify ncores (default = 1) to parallelise with 1 chain per core 136 | ''' 137 | a.run(ncores = 4) 138 | ``` 139 | 140 | ## Section 3: Outputs 141 | 142 | The main outputs are stored in two dataframes. 143 | 144 | ### 1) output_lightcurves = a.get_light_curve_fits(): 145 | This a dictionary of 3 data frames. 146 | 147 | 1.1) output_lightcurves['model']: standard time, model, error envelope for each file 148 | 149 | 1.2) output_lightcurves['merged model'] AS above but with the error bars, vertical and horrizontal scalings applied relative to the reference model. 
Not sure but I think the reference model defaults to the first occurence of a particular wavelength in the order that it was added in self.add_lc 150 | 151 | 1.3) output_lightcurves['merged data'] DICTIONARY (since the input data light curves can be different sizes) The same transformations but applied to the input light curve data. useful if using cream only to merge the orriginal light curves from different telescopes to a new scale for further study elsewhere 152 | 153 | ### 2) output_chains = a.get_MCMC_chains(): 154 | These are the MCMC chains for each parameter. 155 | 156 | 157 | 158 | ```python 159 | ''' 160 | Get the mcmc chains and output fits. 161 | Each of these arguments come with a "location" argument where you can point to a 162 | previous simulation and recover the outputs. 163 | If this is left blank we default to the current simulation 164 | ''' 165 | output_chains = a.get_MCMC_chains(location = None) 166 | output_lightcurves = a.get_light_curve_fits(location = None) 167 | 168 | 169 | ''' 170 | NEW: 11/12/2019 Now the fourier chains are available as a pandas 171 | dataframe. 172 | Stats on the sine and cosine parameters are also available for each 173 | freuqency accessible in the `fourier_stats` dictionary element of this 174 | `get_MCMC_fourier_chains` function. 175 | ''' 176 | output_fourier_chains = a.get_MCMC_fourier_chains(location=None) 177 | fourier_chains = output_fourier_chains['fourier_chains'] 178 | fourier_stats = output_fourier_chains['fourier_stats'] 179 | 180 | 181 | ''' 182 | make figures of the fit, posterior, light curves etc. file prefix tells the code where you want to save the output. 183 | The figure plotting is somewhat primitive and is a relic of when I still used cream. You may prefer to use your own 184 | output figures with the output of the "get_MCMC_chains" and "get_light_curve_fits" functions above. 
185 | ''' 186 | a.plot_results(file_prefix='fit_figures') 187 | 188 | 189 | 190 | 191 | ''' 192 | figures can also be made on an indivdual basis with axes objects returned from python plotting functions 193 | ''' 194 | #plot the fitted light curves. 195 | a.plot_lightcurves() 196 | plt.show() 197 | 198 | 199 | #plot the driving light curve 200 | a.plot_driver() 201 | plt.show() 202 | 203 | 204 | #plot the parameter trace plots 205 | a.plot_trace() 206 | plt.show() 207 | 208 | 209 | #plot the covariance parameter plot for the disc parameters 210 | a.plot_posterior() 211 | plt.show() 212 | 213 | 214 | ``` 215 | 216 | 217 | 218 | -------------------------------------------------------------------------------- /docs/_build/pydocmd/pycecream.md: -------------------------------------------------------------------------------- 1 |

pycecream

2 | 3 | 4 | -------------------------------------------------------------------------------- /docs/_build/site/404.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | pycecream 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 |
29 | 30 | 31 | 54 | 55 |
56 | 57 | 58 | 62 | 63 | 64 |
65 |
66 |
67 |
    68 |
  • Docs »
  • 69 | 70 | 71 |
  • 72 | 73 |
  • 74 |
75 | 76 |
77 |
78 |
79 |
80 | 81 | 82 |

404

83 | 84 |

Page not found

85 | 86 | 87 |
88 |
89 | 101 | 102 |
103 |
104 | 105 |
106 | 107 |
108 | 109 |
110 | 111 | 112 | GitHub 113 | 114 | 115 | 116 | 117 |
118 | 119 | 120 | 121 | 126 | 127 | 128 | 129 | -------------------------------------------------------------------------------- /docs/_build/site/css/theme_extra.css: -------------------------------------------------------------------------------- 1 | /* 2 | * Wrap inline code samples otherwise they shoot of the side and 3 | * can't be read at all. 4 | * 5 | * https://github.com/mkdocs/mkdocs/issues/313 6 | * https://github.com/mkdocs/mkdocs/issues/233 7 | * https://github.com/mkdocs/mkdocs/issues/834 8 | */ 9 | .rst-content code { 10 | white-space: pre-wrap; 11 | word-wrap: break-word; 12 | padding: 2px 5px; 13 | } 14 | 15 | /** 16 | * Make code blocks display as blocks and give them the appropriate 17 | * font size and padding. 18 | * 19 | * https://github.com/mkdocs/mkdocs/issues/855 20 | * https://github.com/mkdocs/mkdocs/issues/834 21 | * https://github.com/mkdocs/mkdocs/issues/233 22 | */ 23 | .rst-content pre code { 24 | white-space: pre; 25 | word-wrap: normal; 26 | display: block; 27 | padding: 12px; 28 | font-size: 12px; 29 | } 30 | 31 | /* 32 | * Fix link colors when the link text is inline code. 33 | * 34 | * https://github.com/mkdocs/mkdocs/issues/718 35 | */ 36 | a code { 37 | color: #2980B9; 38 | } 39 | a:hover code { 40 | color: #3091d1; 41 | } 42 | a:visited code { 43 | color: #9B59B6; 44 | } 45 | 46 | /* 47 | * The CSS classes from highlight.js seem to clash with the 48 | * ReadTheDocs theme causing some code to be incorrectly made 49 | * bold and italic. 50 | * 51 | * https://github.com/mkdocs/mkdocs/issues/411 52 | */ 53 | pre .cs, pre .c { 54 | font-weight: inherit; 55 | font-style: inherit; 56 | } 57 | 58 | /* 59 | * Fix some issues with the theme and non-highlighted code 60 | * samples. Without and highlighting styles attached the 61 | * formatting is broken. 
62 | * 63 | * https://github.com/mkdocs/mkdocs/issues/319 64 | */ 65 | .rst-content .no-highlight { 66 | display: block; 67 | padding: 0.5em; 68 | color: #333; 69 | } 70 | 71 | 72 | /* 73 | * Additions specific to the search functionality provided by MkDocs 74 | */ 75 | 76 | .search-results { 77 | margin-top: 23px; 78 | } 79 | 80 | .search-results article { 81 | border-top: 1px solid #E1E4E5; 82 | padding-top: 24px; 83 | } 84 | 85 | .search-results article:first-child { 86 | border-top: none; 87 | } 88 | 89 | form .search-query { 90 | width: 100%; 91 | border-radius: 50px; 92 | padding: 6px 12px; /* csslint allow: box-model */ 93 | border-color: #D1D4D5; 94 | } 95 | 96 | /* 97 | * Improve inline code blocks within admonitions. 98 | * 99 | * https://github.com/mkdocs/mkdocs/issues/656 100 | */ 101 | .rst-content .admonition code { 102 | color: #404040; 103 | border: 1px solid #c7c9cb; 104 | border: 1px solid rgba(0, 0, 0, 0.2); 105 | background: #f8fbfd; 106 | background: rgba(255, 255, 255, 0.7); 107 | } 108 | 109 | /* 110 | * Account for wide tables which go off the side. 111 | * Override borders to avoid wierdness on narrow tables. 
112 | * 113 | * https://github.com/mkdocs/mkdocs/issues/834 114 | * https://github.com/mkdocs/mkdocs/pull/1034 115 | */ 116 | .rst-content .section .docutils { 117 | width: 100%; 118 | overflow: auto; 119 | display: block; 120 | border: none; 121 | } 122 | 123 | td, th { 124 | border: 1px solid #e1e4e5 !important; /* csslint allow: important */ 125 | border-collapse: collapse; 126 | } 127 | -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bold.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bold.ttf -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bold.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bolditalic.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-italic.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-italic.ttf -------------------------------------------------------------------------------- 
/docs/_build/site/fonts/Lato/lato-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-regular.ttf -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-regular.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/Lato/lato-regular.woff2 
-------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.ttf -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- 
/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /docs/_build/site/fonts/RobotoSlab/roboto-slab.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/RobotoSlab/roboto-slab.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/_build/site/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/_build/site/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/_build/site/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/_build/site/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/docs/_build/site/img/favicon.ico -------------------------------------------------------------------------------- /docs/_build/site/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | pycecream 13 | 14 | 15 | 16 | 17 | 18 | 19 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
36 | 37 | 38 | 85 | 86 |
87 | 88 | 89 | 93 | 94 | 95 |
96 |
97 |
98 | 111 | 112 |
113 |
114 |
115 |
116 | 117 |

PyceCREAM

118 |

Here is a python implementation of my accretion disc and emission line lightcurve-fitting code (previously CREAM). This guide briefly covers generating synthetic data and calling a new pycecream object to ingest and fit the accretion disc model (or emission line model) to a set of input light curves. I also demonstrate how to access the output of the pycecream fit. The output includes the fitted light curves, any new light curve data points after merging, fitted response functions and parameter MCMC chain histories for the disc and/or tophat response parameters.

119 |

Most of these features are used in some form or another from a previous f90 version of this code (CREAM) in the following literature

120 |
    121 |
  • Grier et al in prep
  • 122 |
  • Grier et al 2018 https://iopscience.iop.org/article/10.3847/1538-4357/aa98dc/pdf
  • 123 |
  • Starkey et al 2017 https://ui.adsabs.harvard.edu/#abs/arXiv:1611.06051
  • 124 |
  • Starkey et al 2016 https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162
  • 125 |
126 |

Please send questions to ds207@st-andrews.ac.uk.

127 |

Docs available here

128 |

Requirements & Installation

129 |

Install Fortran compiler (see macports / wget)

130 |

I use Gfortran. If you have an alternate (e.g. ifort), 131 | please indicate the standard command used to call 132 | the fortran compiler using the fortran_caller argument 133 | (default fortran_caller = gfortran).

134 |

Install Pycecream

135 |
pip install pycecream
136 | 
137 | 138 |

Section 1 (Optional): Generate test synthetic light curves

139 |

In this example we generate 4 disk light curves and 2 emission-line light curves modelled as a top-hat with a 20-day lag. The code below generates a list where each index contains an Nx3 numpy array for each light curve. The 3 vertical axes for each light curve are the time, flux and noise respectively (query synthetic_data['echo light curves'][0] for an example of the format required when inputting your own light curve data).

140 |

The example below combines continuum and line light curves and illustrates a case in which you may have two of the same emission line (and so want to fit with the same response function model) but observed from different telescopes that require separate noise models.

141 |
import astropy_stark.myfake as mf
142 | import matplotlib.pylab as plt
143 | 
144 | '''
145 | mf.myfake arguments are
146 | 
147 | wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response),
148 | 
149 | snr: set the signal-to-noise relative to light curve rms
150 | 
151 | cadence:set the mean cadence
152 | 
153 | top hat centroid: set the centroid for the top-hat (I think that's what this does but the line lag 
154 | thing is still newish so I'm used to just making continuum light curves)
155 | '''
156 | 
157 | 
158 | synthetic_data = mf.myfake(
159 |     [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0],
160 |     [50.0,50.0,10.0,50.0,50,10.],
161 |     [1.0,1.0,2.0,1.0,1.0,3.0],
162 |     thcent = 20.0
163 | )
164 | 
165 | '''This recovers the synthetic data'''
166 | dat = synthetic_data['echo light curves']
167 | 
168 | 169 |

Section 2: Set-up and run PyceCREAM

170 |
import pycecream
171 | 
172 | #instantiate a pycecream object
173 | a = pycecream.pycecream()
174 | 
175 | '''
176 | If you use a fortran compiler other than gfortran please indicate here.
177 | I just re-enter gfortran here for demonstration purposes even though 
178 | this is unnecessary as gfortran is the default argument.
179 | '''
180 | a.fortran_caller = 'gfortran'
181 | 
182 | 
183 | 
184 | '''Choose an output directory in which to save the results. 
185 | This will be a new directory that you have not previously created (pycecream will make it automatically).
186 | 
187 | NOTE: Each new cream simulation must have a new name for the "output_directory" argument below 
188 | otherwise an exception is raised. This is to prevent accidentally overwriting previous simulations. 
189 | I might change this in a future version 
190 | '''
191 | a.project_folder = 'fit_synthetic_lightcurves'
192 | 
193 | 
194 | 
195 | '''
196 | Add the light curves to be modeled. Inputs should be a 3 column numpy
197 |  array of `time`, `flux`, `error`. 
198 | In this case we are using the "dat" output 
199 | from the synthetic data above.
200 | '''
201 | a.add_lc(dat[0], name = 'continuum 4000')
202 | a.add_lc(dat[1], name = 'continuum 5000')
203 | a.add_lc(dat[2], name = 'continuum 5000 (b)')
204 | a.add_lc(dat[3], name = 'continuum 7000')
205 | 
206 | #If adding a line light curve, must indicate using the "kind" argument
207 | a.add_lc(dat[4],name='test line 1',kind='line')
208 | 
209 | #If we want the same line response function model, set "share_previous_lag"=True
210 | a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True)
211 | 
212 | 
213 | 
214 | '''
215 | specify the number of MCMC iterations. Normally at least several thousand are necessary but shorter numbers 
216 | can be used just to check everything is working, as is done here.
217 | '''
218 | a.N_iterations=40
219 | 
220 | '''
221 | specify the step sizes for the fit parameters. 
222 | Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year.
223 | '''
224 | a.p_accretion_rate_step = 0.1
225 | 
226 | '''
227 | Check the input settings are ok prior to running
228 | '''
229 | print(a.lightcurve_input_params)
230 | 
231 | '''
232 | RUN! specify ncores (default = 1) to parallelise with 1 chain per core
233 | '''
234 | a.run(ncores = 4)
235 | 
236 | 237 |

Section 3: Outputs

238 |

The main outputs are stored in two dataframes.

239 |

1) output_lightcurves = a.get_light_curve_fits():

240 |

This a dictionary of 3 data frames.

241 |
1.1) output_lightcurves['model']: standard time, model, error envelope for each file
242 | 
243 | 1.2) output_lightcurves['merged model'] As above but with the error bars, vertical and horizontal scalings applied relative to the reference model. Not sure but I think the reference model defaults to the first occurrence of a particular wavelength in the order that it was added in self.add_lc
244 | 
245 | 1.3) output_lightcurves['merged data'] DICTIONARY (since the input data light curves can be different sizes) The same transformations but applied to the input light curve data. Useful if using cream only to merge the original light curves from different telescopes to a new scale for further study elsewhere
246 | 
247 |

2) output_chains = a.get_MCMC_chains():

248 |

These are the MCMC chains for each parameter.

249 |
'''
250 | Get the mcmc chains and output fits. 
251 | Each of these arguments come with a "location" argument where you can point to a 
252 | previous simulation and recover the outputs. 
253 | If this is left blank we default to the current simulation
254 | '''
255 | output_chains = a.get_MCMC_chains(location = None)
256 | output_lightcurves = a.get_light_curve_fits(location = None)
257 | 
258 | 
259 | '''
260 | NEW: 11/12/2019 Now the fourier chains are available as a pandas 
261 | dataframe.
262 | Stats on the sine and cosine parameters are also available for each 
263 | freuqency accessible in the `fourier_stats` dictionary element of this
264 | `get_MCMC_fourier_chains` function.
265 | '''
266 | output_fourier_chains = a.get_MCMC_fourier_chains(location=None)
267 | fourier_chains = output_fourier_chains['fourier_chains']
268 | fourier_stats = output_fourier_chains['fourier_stats']
269 | 
270 | 
271 | '''
272 | make figures of the fit, posterior, light curves etc. file prefix tells the code where you want to save the output.
273 | The figure plotting is somewhat primitive and is a relic of when I still used cream. You may prefer to use your own
274 | output figures with the output of the "get_MCMC_chains" and "get_light_curve_fits" functions above.
275 | '''
276 | a.plot_results(file_prefix='fit_figures')
277 | 
278 | 
279 | 
280 | 
281 | '''
282 | figures can also be made on an individual basis with axes objects returned from python plotting functions
283 | '''
284 | #plot the fitted light curves.
285 | a.plot_lightcurves()
286 | plt.show()
287 | 
288 | 
289 | #plot the driving light curve
290 | a.plot_driver()
291 | plt.show()
292 | 
293 | 
294 | #plot the parameter trace plots
295 | a.plot_trace()
296 | plt.show()
297 | 
298 | 
299 | #plot the covariance parameter plot for the disc parameters
300 | a.plot_posterior()
301 | plt.show()
302 | 
303 | 
304 | 
305 | 306 |
307 |
308 | 327 | 328 |
329 |
330 | 331 |
332 | 333 |
334 | 335 |
336 | 337 | 338 | GitHub 339 | 340 | 341 | 342 | Next » 343 | 344 | 345 |
346 | 347 | 348 | 349 | 354 | 355 | 356 | 357 | 358 | 362 | -------------------------------------------------------------------------------- /docs/_build/site/js/modernizr-2.8.3.min.js: -------------------------------------------------------------------------------- 1 | window.Modernizr=function(e,t,n){function r(e){b.cssText=e}function o(e,t){return r(S.join(e+";")+(t||""))}function a(e,t){return typeof e===t}function i(e,t){return!!~(""+e).indexOf(t)}function c(e,t){for(var r in e){var o=e[r];if(!i(o,"-")&&b[o]!==n)return"pfx"==t?o:!0}return!1}function s(e,t,r){for(var o in e){var i=t[e[o]];if(i!==n)return r===!1?e[o]:a(i,"function")?i.bind(r||t):i}return!1}function u(e,t,n){var r=e.charAt(0).toUpperCase()+e.slice(1),o=(e+" "+k.join(r+" ")+r).split(" ");return a(t,"string")||a(t,"undefined")?c(o,t):(o=(e+" "+T.join(r+" ")+r).split(" "),s(o,t,n))}function l(){p.input=function(n){for(var r=0,o=n.length;o>r;r++)j[n[r]]=!!(n[r]in E);return j.list&&(j.list=!(!t.createElement("datalist")||!e.HTMLDataListElement)),j}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),p.inputtypes=function(e){for(var r,o,a,i=0,c=e.length;c>i;i++)E.setAttribute("type",o=e[i]),r="text"!==E.type,r&&(E.value=x,E.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(o)&&E.style.WebkitAppearance!==n?(g.appendChild(E),a=t.defaultView,r=a.getComputedStyle&&"textfield"!==a.getComputedStyle(E,null).WebkitAppearance&&0!==E.offsetHeight,g.removeChild(E)):/^(search|tel)$/.test(o)||(r=/^(url|email)$/.test(o)?E.checkValidity&&E.checkValidity()===!1:E.value!=x)),P[e[i]]=!!r;return P}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var d,f,m="2.8.3",p={},h=!0,g=t.documentElement,v="modernizr",y=t.createElement(v),b=y.style,E=t.createElement("input"),x=":)",w={}.toString,S=" -webkit- -moz- -o- -ms- ".split(" "),C="Webkit Moz O ms",k=C.split(" "),T=C.toLowerCase().split(" 
"),N={svg:"http://www.w3.org/2000/svg"},M={},P={},j={},$=[],D=$.slice,F=function(e,n,r,o){var a,i,c,s,u=t.createElement("div"),l=t.body,d=l||t.createElement("body");if(parseInt(r,10))for(;r--;)c=t.createElement("div"),c.id=o?o[r]:v+(r+1),u.appendChild(c);return a=["­",'"].join(""),u.id=v,(l?u:d).innerHTML+=a,d.appendChild(u),l||(d.style.background="",d.style.overflow="hidden",s=g.style.overflow,g.style.overflow="hidden",g.appendChild(d)),i=n(u,e),l?u.parentNode.removeChild(u):(d.parentNode.removeChild(d),g.style.overflow=s),!!i},z=function(t){var n=e.matchMedia||e.msMatchMedia;if(n)return n(t)&&n(t).matches||!1;var r;return F("@media "+t+" { #"+v+" { position: absolute; } }",function(t){r="absolute"==(e.getComputedStyle?getComputedStyle(t,null):t.currentStyle).position}),r},A=function(){function e(e,o){o=o||t.createElement(r[e]||"div"),e="on"+e;var i=e in o;return i||(o.setAttribute||(o=t.createElement("div")),o.setAttribute&&o.removeAttribute&&(o.setAttribute(e,""),i=a(o[e],"function"),a(o[e],"undefined")||(o[e]=n),o.removeAttribute(e))),o=null,i}var r={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return e}(),L={}.hasOwnProperty;f=a(L,"undefined")||a(L.call,"undefined")?function(e,t){return t in e&&a(e.constructor.prototype[t],"undefined")}:function(e,t){return L.call(e,t)},Function.prototype.bind||(Function.prototype.bind=function(e){var t=this;if("function"!=typeof t)throw new TypeError;var n=D.call(arguments,1),r=function(){if(this instanceof r){var o=function(){};o.prototype=t.prototype;var a=new o,i=t.apply(a,n.concat(D.call(arguments)));return Object(i)===i?i:a}return t.apply(e,n.concat(D.call(arguments)))};return r}),M.flexbox=function(){return u("flexWrap")},M.flexboxlegacy=function(){return u("boxDirection")},M.canvas=function(){var 
e=t.createElement("canvas");return!(!e.getContext||!e.getContext("2d"))},M.canvastext=function(){return!(!p.canvas||!a(t.createElement("canvas").getContext("2d").fillText,"function"))},M.webgl=function(){return!!e.WebGLRenderingContext},M.touch=function(){var n;return"ontouchstart"in e||e.DocumentTouch&&t instanceof DocumentTouch?n=!0:F(["@media (",S.join("touch-enabled),("),v,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(e){n=9===e.offsetTop}),n},M.geolocation=function(){return"geolocation"in navigator},M.postmessage=function(){return!!e.postMessage},M.websqldatabase=function(){return!!e.openDatabase},M.indexedDB=function(){return!!u("indexedDB",e)},M.hashchange=function(){return A("hashchange",e)&&(t.documentMode===n||t.documentMode>7)},M.history=function(){return!(!e.history||!history.pushState)},M.draganddrop=function(){var e=t.createElement("div");return"draggable"in e||"ondragstart"in e&&"ondrop"in e},M.websockets=function(){return"WebSocket"in e||"MozWebSocket"in e},M.rgba=function(){return r("background-color:rgba(150,255,150,.5)"),i(b.backgroundColor,"rgba")},M.hsla=function(){return r("background-color:hsla(120,40%,100%,.5)"),i(b.backgroundColor,"rgba")||i(b.backgroundColor,"hsla")},M.multiplebgs=function(){return r("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(b.background)},M.backgroundsize=function(){return u("backgroundSize")},M.borderimage=function(){return u("borderImage")},M.borderradius=function(){return u("borderRadius")},M.boxshadow=function(){return u("boxShadow")},M.textshadow=function(){return""===t.createElement("div").style.textShadow},M.opacity=function(){return o("opacity:.55"),/^0.55$/.test(b.opacity)},M.cssanimations=function(){return u("animationName")},M.csscolumns=function(){return u("columnCount")},M.cssgradients=function(){var e="background-image:",t="gradient(linear,left top,right bottom,from(#9f9),to(white));",n="linear-gradient(left top,#9f9, white);";return r((e+"-webkit- 
".split(" ").join(t+e)+S.join(n+e)).slice(0,-e.length)),i(b.backgroundImage,"gradient")},M.cssreflections=function(){return u("boxReflect")},M.csstransforms=function(){return!!u("transform")},M.csstransforms3d=function(){var e=!!u("perspective");return e&&"webkitPerspective"in g.style&&F("@media (transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(t){e=9===t.offsetLeft&&3===t.offsetHeight}),e},M.csstransitions=function(){return u("transition")},M.fontface=function(){var e;return F('@font-face {font-family:"font";src:url("https://")}',function(n,r){var o=t.getElementById("smodernizr"),a=o.sheet||o.styleSheet,i=a?a.cssRules&&a.cssRules[0]?a.cssRules[0].cssText:a.cssText||"":"";e=/src/i.test(i)&&0===i.indexOf(r.split(" ")[0])}),e},M.generatedcontent=function(){var e;return F(["#",v,"{font:0/0 a}#",v,':after{content:"',x,'";visibility:hidden;font:3px/1 a}'].join(""),function(t){e=t.offsetHeight>=3}),e},M.video=function(){var e=t.createElement("video"),n=!1;try{(n=!!e.canPlayType)&&(n=new Boolean(n),n.ogg=e.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),n.h264=e.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),n.webm=e.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,""))}catch(r){}return n},M.audio=function(){var e=t.createElement("audio"),n=!1;try{(n=!!e.canPlayType)&&(n=new Boolean(n),n.ogg=e.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),n.mp3=e.canPlayType("audio/mpeg;").replace(/^no$/,""),n.wav=e.canPlayType('audio/wav; codecs="1"').replace(/^no$/,""),n.m4a=(e.canPlayType("audio/x-m4a;")||e.canPlayType("audio/aac;")).replace(/^no$/,""))}catch(r){}return n},M.localstorage=function(){try{return localStorage.setItem(v,v),localStorage.removeItem(v),!0}catch(e){return!1}},M.sessionstorage=function(){try{return 
sessionStorage.setItem(v,v),sessionStorage.removeItem(v),!0}catch(e){return!1}},M.webworkers=function(){return!!e.Worker},M.applicationcache=function(){return!!e.applicationCache},M.svg=function(){return!!t.createElementNS&&!!t.createElementNS(N.svg,"svg").createSVGRect},M.inlinesvg=function(){var e=t.createElement("div");return e.innerHTML="",(e.firstChild&&e.firstChild.namespaceURI)==N.svg},M.smil=function(){return!!t.createElementNS&&/SVGAnimate/.test(w.call(t.createElementNS(N.svg,"animate")))},M.svgclippaths=function(){return!!t.createElementNS&&/SVGClipPath/.test(w.call(t.createElementNS(N.svg,"clipPath")))};for(var H in M)f(M,H)&&(d=H.toLowerCase(),p[d]=M[H](),$.push((p[d]?"":"no-")+d));return p.input||l(),p.addTest=function(e,t){if("object"==typeof e)for(var r in e)f(e,r)&&p.addTest(r,e[r]);else{if(e=e.toLowerCase(),p[e]!==n)return p;t="function"==typeof t?t():t,"undefined"!=typeof h&&h&&(g.className+=" "+(t?"":"no-")+e),p[e]=t}return p},r(""),y=E=null,function(e,t){function n(e,t){var n=e.createElement("p"),r=e.getElementsByTagName("head")[0]||e.documentElement;return n.innerHTML="x",r.insertBefore(n.lastChild,r.firstChild)}function r(){var e=y.elements;return"string"==typeof e?e.split(" "):e}function o(e){var t=v[e[h]];return t||(t={},g++,e[h]=g,v[g]=t),t}function a(e,n,r){if(n||(n=t),l)return n.createElement(e);r||(r=o(n));var a;return a=r.cache[e]?r.cache[e].cloneNode():p.test(e)?(r.cache[e]=r.createElem(e)).cloneNode():r.createElem(e),!a.canHaveChildren||m.test(e)||a.tagUrn?a:r.frag.appendChild(a)}function i(e,n){if(e||(e=t),l)return e.createDocumentFragment();n=n||o(e);for(var a=n.frag.cloneNode(),i=0,c=r(),s=c.length;s>i;i++)a.createElement(c[i]);return a}function c(e,t){t.cache||(t.cache={},t.createElem=e.createElement,t.createFrag=e.createDocumentFragment,t.frag=t.createFrag()),e.createElement=function(n){return y.shivMethods?a(n,e,t):t.createElem(n)},e.createDocumentFragment=Function("h,f","return function(){var 
n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+r().join().replace(/[\w\-]+/g,function(e){return t.createElem(e),t.frag.createElement(e),'c("'+e+'")'})+");return n}")(y,t.frag)}function s(e){e||(e=t);var r=o(e);return!y.shivCSS||u||r.hasCSS||(r.hasCSS=!!n(e,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||c(e,r),e}var u,l,d="3.7.0",f=e.html5||{},m=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,h="_html5shiv",g=0,v={};!function(){try{var e=t.createElement("a");e.innerHTML="",u="hidden"in e,l=1==e.childNodes.length||function(){t.createElement("a");var e=t.createDocumentFragment();return"undefined"==typeof e.cloneNode||"undefined"==typeof e.createDocumentFragment||"undefined"==typeof e.createElement}()}catch(n){u=!0,l=!0}}();var y={elements:f.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output progress section summary template time video",version:d,shivCSS:f.shivCSS!==!1,supportsUnknownElements:l,shivMethods:f.shivMethods!==!1,type:"default",shivDocument:s,createElement:a,createDocumentFragment:i};e.html5=y,s(t)}(this,t),p._version=m,p._prefixes=S,p._domPrefixes=T,p._cssomPrefixes=k,p.mq=z,p.hasEvent=A,p.testProp=function(e){return c([e])},p.testAllProps=u,p.testStyles=F,p.prefixed=function(e,t,n){return t?u(e,t,n):u(e,"pfx")},g.className=g.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(h?" 
js "+$.join(" "):""),p}(this,this.document); -------------------------------------------------------------------------------- /docs/_build/site/js/theme.js: -------------------------------------------------------------------------------- 1 | /* sphinx_rtd_theme version 0.4.1 | MIT license */ 2 | /* Built 20180727 10:07 */ 3 | require=function n(e,i,t){function o(s,a){if(!i[s]){if(!e[s]){var l="function"==typeof require&&require;if(!a&&l)return l(s,!0);if(r)return r(s,!0);var c=new Error("Cannot find module '"+s+"'");throw c.code="MODULE_NOT_FOUND",c}var u=i[s]={exports:{}};e[s][0].call(u.exports,function(n){var i=e[s][1][n];return o(i||n)},u,u.exports,n,e,i,t)}return i[s].exports}for(var r="function"==typeof require&&require,s=0;s"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var i=n(this);expand=n(''),expand.on("click",function(n){return e.toggleCurrent(i),n.stopPropagation(),!1}),i.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}i.length>0&&($(".wy-menu-vertical .current").removeClass("current"),i.addClass("current"),i.closest("li.toctree-l1").addClass("current"),i.closest("li.toctree-l1").parent().addClass("current"),i.closest("li.toctree-l1").addClass("current"),i.closest("li.toctree-l2").addClass("current"),i.closest("li.toctree-l3").addClass("current"),i.closest("li.toctree-l4").addClass("current"))}catch(o){console.log("Error expanding nav for anchor",o)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,i=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav,StickyNav:e.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],i=0;i 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | pycecream - pycecream 13 | 14 | 15 | 16 | 17 | 18 | 19 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
36 | 37 | 38 | 61 | 62 |
63 | 64 | 65 | 69 | 70 | 71 |
72 |
73 |
74 | 87 | 88 |
89 |
90 |
91 |
92 | 93 |

pycecream

94 | 95 |
96 |
97 |
98 | 99 | 105 | 106 | 107 |
108 | 109 |
110 | 111 | 112 |
113 | 114 | Built with MkDocs using a theme provided by Read the Docs. 115 |
116 | 117 |
118 |
119 | 120 |
121 | 122 |
123 | 124 |
125 | 126 | 127 | GitHub 128 | 129 | 130 | « Previous 131 | 132 | 133 | 134 |
135 | 136 | 137 | 138 | 143 | 144 | 145 | 146 | -------------------------------------------------------------------------------- /docs/_build/site/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | pycecream 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 |
29 | 30 | 31 | 54 | 55 |
56 | 57 | 58 | 62 | 63 | 64 |
65 |
66 |
67 |
    68 |
  • Docs »
  • 69 | 70 | 71 |
  • 72 | 73 |
  • 74 |
75 | 76 |
77 |
78 |
79 |
80 | 81 | 82 |

Search Results

83 | 84 | 88 | 89 |
90 | Searching... 91 |
92 | 93 | 94 |
95 |
96 |
97 | 98 | 99 |
100 | 101 |
102 | 103 | 104 |
105 | 106 | Built with MkDocs using a theme provided by Read the Docs. 107 |
108 | 109 |
110 |
111 | 112 |
113 | 114 |
115 | 116 |
117 | 118 | 119 | GitHub 120 | 121 | 122 | 123 | 124 |
125 | 126 | 127 | 128 | 133 | 134 | 135 | 136 | -------------------------------------------------------------------------------- /docs/_build/site/search/main.js: -------------------------------------------------------------------------------- 1 | function getSearchTermFromLocation() { 2 | var sPageURL = window.location.search.substring(1); 3 | var sURLVariables = sPageURL.split('&'); 4 | for (var i = 0; i < sURLVariables.length; i++) { 5 | var sParameterName = sURLVariables[i].split('='); 6 | if (sParameterName[0] == 'q') { 7 | return decodeURIComponent(sParameterName[1].replace(/\+/g, '%20')); 8 | } 9 | } 10 | } 11 | 12 | function joinUrl (base, path) { 13 | if (path.substring(0, 1) === "/") { 14 | // path starts with `/`. Thus it is absolute. 15 | return path; 16 | } 17 | if (base.substring(base.length-1) === "/") { 18 | // base ends with `/` 19 | return base + path; 20 | } 21 | return base + "/" + path; 22 | } 23 | 24 | function formatResult (location, title, summary) { 25 | return ''; 26 | } 27 | 28 | function displayResults (results) { 29 | var search_results = document.getElementById("mkdocs-search-results"); 30 | while (search_results.firstChild) { 31 | search_results.removeChild(search_results.firstChild); 32 | } 33 | if (results.length > 0){ 34 | for (var i=0; i < results.length; i++){ 35 | var result = results[i]; 36 | var html = formatResult(result.location, result.title, result.summary); 37 | search_results.insertAdjacentHTML('beforeend', html); 38 | } 39 | } else { 40 | search_results.insertAdjacentHTML('beforeend', "

No results found

"); 41 | } 42 | } 43 | 44 | function doSearch () { 45 | var query = document.getElementById('mkdocs-search-query').value; 46 | if (query.length > 2) { 47 | if (!window.Worker) { 48 | displayResults(search(query)); 49 | } else { 50 | searchWorker.postMessage({query: query}); 51 | } 52 | } else { 53 | // Clear results for short queries 54 | displayResults([]); 55 | } 56 | } 57 | 58 | function initSearch () { 59 | var search_input = document.getElementById('mkdocs-search-query'); 60 | if (search_input) { 61 | search_input.addEventListener("keyup", doSearch); 62 | } 63 | var term = getSearchTermFromLocation(); 64 | if (term) { 65 | search_input.value = term; 66 | doSearch(); 67 | } 68 | } 69 | 70 | function onWorkerMessage (e) { 71 | if (e.data.allowSearch) { 72 | initSearch(); 73 | } else if (e.data.results) { 74 | var results = e.data.results; 75 | displayResults(results); 76 | } 77 | } 78 | 79 | if (!window.Worker) { 80 | console.log('Web Worker API not supported'); 81 | // load index in main thread 82 | $.getScript(joinUrl(base_url, "search/worker.js")).done(function () { 83 | console.log('Loaded worker'); 84 | init(); 85 | window.postMessage = function (msg) { 86 | onWorkerMessage({data: msg}); 87 | }; 88 | }).fail(function (jqxhr, settings, exception) { 89 | console.error('Could not load worker.js'); 90 | }); 91 | } else { 92 | // Wrap search in a web worker 93 | var searchWorker = new Worker(joinUrl(base_url, "search/worker.js")); 94 | searchWorker.postMessage({init: true}); 95 | searchWorker.onmessage = onWorkerMessage; 96 | } 97 | -------------------------------------------------------------------------------- /docs/_build/site/search/search_index.json: -------------------------------------------------------------------------------- 1 | {"config":{"lang":["en"],"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"PyceCREAM Here is a python implementation of my accretion disc and emission line lightcurve-fitting code (previously 
CREAM). This guide briefly covers generating synthetic data and calling a new pycecream object to ingest and fit the accretion disc model (or emission line model) to a set of input light curves. I also demonstrate how to access the output of the pycecream fit. The output includes the fitted light curves, any new light curve data points after merging, fitted response functions and parameter MCMC chain histories for the disc and/or tophat response parameters. Most of these features are used in some form or another from a previous f90 version of this code (CREAM) in the following literature Grier et al in prep Grier et al 2018 https://iopscience.iop.org/article/10.3847/1538-4357/aa98dc/pdf Starkey et al 2017 https://ui.adsabs.harvard.edu/#abs/arXiv:1611.06051 Starkey et al 2016 https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162 Please send questions to ds207@st-andrews.ac.uk. Docs available here Requirements & Installation Install Fortran compiler (see macports / wget) I use Gfortran. If you have an alternate (e.g ifort), please indicate the standard command used to call the fortran compiler using the fortran_caller argument (default fortran_caller = gfortran ). Install Pycecream pip install pycecream Section 1 (Optional): Generate test synthetic light curves In this example we generate 4 disk light curves and 2 emission-line light curves modelled as a top-hat with a 20-day lag. The code below generates a list where each index contains an Nx3 numpy array for each light curve. The 3 vertical axis for each light curve are the time, flux and noise respectively (query synthetic_data['echo lightcurves'][0] for an example of the format required when inputting your own light curve data). The example below combines continuum and line light curves and illustrates a case in which you may have two of the same emission line (and so want to fit with the same response function model) but observed from different telescopes that require seperate noise models. 
import astropy_stark.myfake as mf import matplotlib.pylab as plt ''' mf.myfake arguments are wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), snr: set the signal-to-noise relative to light curve rms cadence:set the mean cadence top hat centroid: set the centroid for the top-hat (I think thats what this does but the line lag thing is still newish so Im used to just making continuum light curve) ''' synthetic_data = mf.myfake( [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0], [50.0,50.0,10.0,50.0,50,10.], [1.0,1.0,2.0,1.0,1.0,3.0], thcent = 20.0 ) '''This recovers the synthetic data''' dat = synthetic_data['echo light curves'] Section 2: Set-up and run PyceCREAM import pycecream #instantiate a pycecream object a = pycecream.pycecream() ''' If you use a fortran compiler other than gfortran please indicate here. I just re-enter gfortran here for demonstration purposes even though this is unecassary as gfortran is the default argument. ''' a.fortran_caller = 'gfortran' '''Choose an output directory in which to save the results. This will be a new directory that you have not previously created (pycecream will make it automatically). NOTE: Each new cream simulation must have a new name for \"output_directory argument below otherwise an excpetion is raised. This is to prevent accidentally overwriting previous simulations. I might change this in a future version ''' a.project_folder = 'fit_synthetic_lightcurves' ''' Add the light curves to be modeled. Inputs should be a 3 column numpy array of `time`, `flux`, `error`. In this case we are using the \"dat\" output from the synthetic data above. 
''' a.add_lc(dat[0], name = 'continuum 4000') a.add_lc(dat[1], name = 'continuum 5000') a.add_lc(dat[2], name = 'continuum 5000 (b)') a.add_lc(dat[3], name = 'continuum 7000') #If adding a line light curve, must indicate using the \"kind\" argument a.add_lc(dat[4],name='test line 1',kind='line') #If we want the same line response function model, set \"share_previous_lag\"=True a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True) ''' specify the numnber of MCMC iterations. Normally at least several thousand are necessary but shorter numbers can be used just to check everything is working is done here. ''' a.N_iterations=40 ''' specify the step sizes for the fit parameters. Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. ''' a.p_accretion_rate_step = 0.1 ''' Check the input settings are ok prior to running ''' print(a.lightcurve_input_params) ''' RUN! specify ncores (default = 1) to parallelise with 1 chain per core ''' a.run(ncores = 4) Section 3: Outputs The main outputs are stored in two dataframes. 1) output_lightcurves = a.get_light_curve_fits(): This a dictionary of 3 data frames. 1.1) output_lightcurves['model']: standard time, model, error envelope for each file 1.2) output_lightcurves['merged model'] AS above but with the error bars, vertical and horrizontal scalings applied relative to the reference model. Not sure but I think the reference model defaults to the first occurence of a particular wavelength in the order that it was added in self.add_lc 1.3) output_lightcurves['merged data'] DICTIONARY (since the input data light curves can be different sizes) The same transformations but applied to the input light curve data. useful if using cream only to merge the orriginal light curves from different telescopes to a new scale for further study elsewhere 2) output_chains = a.get_MCMC_chains(): These are the MCMC chains for each parameter. ''' Get the mcmc chains and output fits. 
Each of these arguments come with a \"location\" argument where you can point to a previous simulation and recover the outputs. If this is left blank we default to the current simulation ''' output_chains = a.get_MCMC_chains(location = None) output_lightcurves = a.get_light_curve_fits(location = None) ''' NEW: 11/12/2019 Now the fourier chains are available as a pandas dataframe. Stats on the sine and cosine parameters are also available for each freuqency accessible in the `fourier_stats` dictionary element of this `get_MCMC_fourier_chains` function. ''' output_fourier_chains = a.get_MCMC_fourier_chains(location=None) fourier_chains = output_fourier_chains['fourier_chains'] fourier_stats = output_fourier_chains['fourier_stats'] ''' make figures of the fit, posterior, light curves etc. file prefix tells the code where you want to save the output. The figure plotting is somewhat primitive and is a relic of when I still used cream. You may prefer to use your own output figures with the output of the \"get_MCMC_chains\" and \"get_light_curve_fits\" functions above. ''' a.plot_results(file_prefix='fit_figures') ''' figures can also be made on an indivdual basis with axes objects returned from python plotting functions ''' #plot the fitted light curves. a.plot_lightcurves() plt.show() #plot the driving light curve a.plot_driver() plt.show() #plot the parameter trace plots a.plot_trace() plt.show() #plot the covariance parameter plot for the disc parameters a.plot_posterior() plt.show()","title":"Home"},{"location":"#pycecream","text":"Here is a python implementation of my accretion disc and emission line lightcurve-fitting code (previously CREAM). This guide briefly covers generating synthetic data and calling a new pycecream object to ingest and fit the accretion disc model (or emission line model) to a set of input light curves. I also demonstrate how to access the output of the pycecream fit. 
The output includes the fitted light curves, any new light curve data points after merging, fitted response functions and parameter MCMC chain histories for the disc and/or tophat response parameters. Most of these features are used in some form or another from a previous f90 version of this code (CREAM) in the following literature Grier et al in prep Grier et al 2018 https://iopscience.iop.org/article/10.3847/1538-4357/aa98dc/pdf Starkey et al 2017 https://ui.adsabs.harvard.edu/#abs/arXiv:1611.06051 Starkey et al 2016 https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162 Please send questions to ds207@st-andrews.ac.uk.","title":"PyceCREAM"},{"location":"#docs-available-here","text":"","title":"Docs available here"},{"location":"#requirements-installation","text":"","title":"Requirements & Installation"},{"location":"#install-fortran-compiler-see-macports-wget","text":"I use Gfortran. If you have an alternate (e.g ifort), please indicate the standard command used to call the fortran compiler using the fortran_caller argument (default fortran_caller = gfortran ).","title":"Install Fortran compiler (see macports / wget)"},{"location":"#install-pycecream","text":"pip install pycecream","title":"Install Pycecream"},{"location":"#section-1-optional-generate-test-synthetic-light-curves","text":"In this example we generate 4 disk light curves and 2 emission-line light curves modelled as a top-hat with a 20-day lag. The code below generates a list where each index contains an Nx3 numpy array for each light curve. The 3 vertical axis for each light curve are the time, flux and noise respectively (query synthetic_data['echo lightcurves'][0] for an example of the format required when inputting your own light curve data). 
The example below combines continuum and line light curves and illustrates a case in which you may have two of the same emission line (and so want to fit with the same response function model) but observed from different telescopes that require seperate noise models. import astropy_stark.myfake as mf import matplotlib.pylab as plt ''' mf.myfake arguments are wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), snr: set the signal-to-noise relative to light curve rms cadence:set the mean cadence top hat centroid: set the centroid for the top-hat (I think thats what this does but the line lag thing is still newish so Im used to just making continuum light curve) ''' synthetic_data = mf.myfake( [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0], [50.0,50.0,10.0,50.0,50,10.], [1.0,1.0,2.0,1.0,1.0,3.0], thcent = 20.0 ) '''This recovers the synthetic data''' dat = synthetic_data['echo light curves']","title":"Section 1 (Optional): Generate test synthetic light curves"},{"location":"#section-2-set-up-and-run-pycecream","text":"import pycecream #instantiate a pycecream object a = pycecream.pycecream() ''' If you use a fortran compiler other than gfortran please indicate here. I just re-enter gfortran here for demonstration purposes even though this is unecassary as gfortran is the default argument. ''' a.fortran_caller = 'gfortran' '''Choose an output directory in which to save the results. This will be a new directory that you have not previously created (pycecream will make it automatically). NOTE: Each new cream simulation must have a new name for \"output_directory argument below otherwise an excpetion is raised. This is to prevent accidentally overwriting previous simulations. I might change this in a future version ''' a.project_folder = 'fit_synthetic_lightcurves' ''' Add the light curves to be modeled. Inputs should be a 3 column numpy array of `time`, `flux`, `error`. 
In this case we are using the \"dat\" output from the synthetic data above. ''' a.add_lc(dat[0], name = 'continuum 4000') a.add_lc(dat[1], name = 'continuum 5000') a.add_lc(dat[2], name = 'continuum 5000 (b)') a.add_lc(dat[3], name = 'continuum 7000') #If adding a line light curve, must indicate using the \"kind\" argument a.add_lc(dat[4],name='test line 1',kind='line') #If we want the same line response function model, set \"share_previous_lag\"=True a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True) ''' specify the numnber of MCMC iterations. Normally at least several thousand are necessary but shorter numbers can be used just to check everything is working is done here. ''' a.N_iterations=40 ''' specify the step sizes for the fit parameters. Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. ''' a.p_accretion_rate_step = 0.1 ''' Check the input settings are ok prior to running ''' print(a.lightcurve_input_params) ''' RUN! specify ncores (default = 1) to parallelise with 1 chain per core ''' a.run(ncores = 4)","title":"Section 2: Set-up and run PyceCREAM"},{"location":"#section-3-outputs","text":"The main outputs are stored in two dataframes.","title":"Section 3: Outputs"},{"location":"#1-output_lightcurves-aget_light_curve_fits","text":"This a dictionary of 3 data frames. 1.1) output_lightcurves['model']: standard time, model, error envelope for each file 1.2) output_lightcurves['merged model'] AS above but with the error bars, vertical and horrizontal scalings applied relative to the reference model. Not sure but I think the reference model defaults to the first occurence of a particular wavelength in the order that it was added in self.add_lc 1.3) output_lightcurves['merged data'] DICTIONARY (since the input data light curves can be different sizes) The same transformations but applied to the input light curve data. 
useful if using cream only to merge the orriginal light curves from different telescopes to a new scale for further study elsewhere","title":"1) output_lightcurves = a.get_light_curve_fits():"},{"location":"#2-output_chains-aget_mcmc_chains","text":"These are the MCMC chains for each parameter. ''' Get the mcmc chains and output fits. Each of these arguments come with a \"location\" argument where you can point to a previous simulation and recover the outputs. If this is left blank we default to the current simulation ''' output_chains = a.get_MCMC_chains(location = None) output_lightcurves = a.get_light_curve_fits(location = None) ''' NEW: 11/12/2019 Now the fourier chains are available as a pandas dataframe. Stats on the sine and cosine parameters are also available for each freuqency accessible in the `fourier_stats` dictionary element of this `get_MCMC_fourier_chains` function. ''' output_fourier_chains = a.get_MCMC_fourier_chains(location=None) fourier_chains = output_fourier_chains['fourier_chains'] fourier_stats = output_fourier_chains['fourier_stats'] ''' make figures of the fit, posterior, light curves etc. file prefix tells the code where you want to save the output. The figure plotting is somewhat primitive and is a relic of when I still used cream. You may prefer to use your own output figures with the output of the \"get_MCMC_chains\" and \"get_light_curve_fits\" functions above. ''' a.plot_results(file_prefix='fit_figures') ''' figures can also be made on an indivdual basis with axes objects returned from python plotting functions ''' #plot the fitted light curves. 
a.plot_lightcurves() plt.show() #plot the driving light curve a.plot_driver() plt.show() #plot the parameter trace plots a.plot_trace() plt.show() #plot the covariance parameter plot for the disc parameters a.plot_posterior() plt.show()","title":"2) output_chains = a.get_MCMC_chains():"},{"location":"pycecream/","text":"pycecream","title":"pycecream"}]} -------------------------------------------------------------------------------- /docs/_build/site/search/worker.js: -------------------------------------------------------------------------------- 1 | var base_path = 'function' === typeof importScripts ? '.' : '/search/'; 2 | var allowSearch = false; 3 | var index; 4 | var documents = {}; 5 | var lang = ['en']; 6 | var data; 7 | 8 | function getScript(script, callback) { 9 | console.log('Loading script: ' + script); 10 | $.getScript(base_path + script).done(function () { 11 | callback(); 12 | }).fail(function (jqxhr, settings, exception) { 13 | console.log('Error: ' + exception); 14 | }); 15 | } 16 | 17 | function getScriptsInOrder(scripts, callback) { 18 | if (scripts.length === 0) { 19 | callback(); 20 | return; 21 | } 22 | getScript(scripts[0], function() { 23 | getScriptsInOrder(scripts.slice(1), callback); 24 | }); 25 | } 26 | 27 | function loadScripts(urls, callback) { 28 | if( 'function' === typeof importScripts ) { 29 | importScripts.apply(null, urls); 30 | callback(); 31 | } else { 32 | getScriptsInOrder(urls, callback); 33 | } 34 | } 35 | 36 | function onJSONLoaded () { 37 | data = JSON.parse(this.responseText); 38 | var scriptsToLoad = ['lunr.js']; 39 | if (data.config && data.config.lang && data.config.lang.length) { 40 | lang = data.config.lang; 41 | } 42 | if (lang.length > 1 || lang[0] !== "en") { 43 | scriptsToLoad.push('lunr.stemmer.support.js'); 44 | if (lang.length > 1) { 45 | scriptsToLoad.push('lunr.multi.js'); 46 | } 47 | for (var i=0; i < lang.length; i++) { 48 | if (lang[i] != 'en') { 49 | scriptsToLoad.push(['lunr', lang[i], 
/**
 * Called once lunr (and any language plugins) are loaded.
 * Builds or loads the search index from the fetched search_index.json
 * payload, then signals the page that searching is allowed.
 */
function onScriptsLoaded () {
  console.log('All search scripts loaded, building Lunr index...');
  if (data.config && data.config.separator && data.config.separator.length) {
    lunr.tokenizer.separator = new RegExp(data.config.separator);
  }
  if (data.index) {
    // A pre-built index was shipped in search_index.json: load it directly.
    index = lunr.Index.load(data.index);
    data.docs.forEach(function (doc) {
      documents[doc.location] = doc;
    });
    console.log('Lunr pre-built index loaded, search ready');
  } else {
    // No pre-built index: build one client-side from the raw documents.
    index = lunr(function () {
      if (lang.length === 1 && lang[0] !== "en" && lunr[lang[0]]) {
        this.use(lunr[lang[0]]);
      } else if (lang.length > 1) {
        this.use(lunr.multiLanguage.apply(null, lang)); // spread operator not supported in all browsers: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Spread_operator#Browser_compatibility
      }
      this.field('title');
      this.field('text');
      this.ref('location');

      for (var i = 0; i < data.docs.length; i++) {
        var doc = data.docs[i];
        this.add(doc);
        documents[doc.location] = doc;
      }
    });
    console.log('Lunr index built, search ready');
  }
  allowSearch = true;
  postMessage({allowSearch: allowSearch});
}

/**
 * Kick off the asynchronous fetch of search_index.json; onJSONLoaded
 * continues the pipeline once the response arrives.
 */
function init () {
  var oReq = new XMLHttpRequest();
  oReq.addEventListener("load", onJSONLoaded);
  // BUG FIX: base_path already ends with a separator ('.' or '/search/'),
  // so the original "base_path + '/search_index.json'" produced a double
  // slash ('/search//search_index.json') in the non-worker code path.
  var index_path = base_path + 'search_index.json';
  if ('function' === typeof importScripts) {
    // Inside a worker, scripts resolve relative to worker.js itself.
    index_path = 'search_index.json';
  }
  oReq.open("GET", index_path);
  oReq.send();
}
import ast
import os
import pathlib


# Adapted from
# https://github.com/keras-team/keras/blob/0a0ac3fa5462cf4a72636ca4498a0a82ac91fc32/docs/autogen.py

def get_module_docstring(filepath):
    """Extract the module docstring of a Python source file.

    Parameters
    ----------
    filepath : str or os.PathLike
        Path to the ``.py`` file to inspect.

    Returns
    -------
    tuple of (str, int)
        The raw module docstring (``''`` when the module has none) and the
        1-based line number on which the docstring ends (``0`` when absent),
        i.e. how many leading source lines the docstring occupies.
    """
    source = pathlib.Path(filepath).read_text(encoding='utf-8')
    tree = ast.parse(source, filename=str(filepath))
    docstring = ast.get_docstring(tree, clean=False)
    if docstring is None:
        print('Could not get the docstring from ' + str(filepath))
        return '', 0
    # end_lineno of the first statement (the docstring expression) is the
    # exact number of lines to skip when copying the rest of the file.
    # The previous implementation returned co_firstlineno of the compiled
    # module, which is always 1, so the tail of a multi-line docstring was
    # duplicated inside the generated code block.
    return docstring, tree.body[0].end_lineno


def copy_examples(examples_dir, destination_dir):
    """Copy example scripts into the documentation tree as Markdown.

    Each ``.py`` file in *examples_dir* becomes a ``.md`` file in
    *destination_dir* whose body is the module docstring (treated as
    Markdown) followed by the remaining source in a fenced code block.

    Parameters
    ----------
    examples_dir : str
        Directory containing the example ``.py`` scripts.
    destination_dir : str
        Output directory; created (with parents) if it does not exist.
    """
    # parents=True: the default destination is nested ('_build/pydocmd/...'),
    # and mkdir without parents fails when intermediate dirs are missing.
    pathlib.Path(destination_dir).mkdir(parents=True, exist_ok=True)
    for file in os.listdir(examples_dir):
        if not file.endswith('.py'):
            continue
        module_path = os.path.join(examples_dir, file)
        docstring, docstring_end = get_module_docstring(module_path)
        # file[:-2] keeps the trailing dot: 'example.py' -> 'example.md'.
        destination_file = os.path.join(destination_dir, file[:-2] + 'md')
        with open(destination_file, 'w', encoding='utf-8') as f_out, \
                open(module_path, 'r', encoding='utf-8') as f_in:

            f_out.write(docstring + '\n\n')

            # Skip the docstring lines. next(f_in, None) tolerates files
            # that end right after the docstring (the original raised
            # StopIteration there).
            for _ in range(docstring_end):
                next(f_in, None)

            f_out.write('```python\n')
            # The first remaining line is often blank -- drop it so the
            # code block does not start with an empty line.
            line = next(f_in, '')
            if line != '\n':
                f_out.write(line)

            # Copy the rest of the file verbatim.
            for line in f_in:
                f_out.write(line)
            f_out.write('```')


if __name__ == "__main__":
    print(os.getcwd())
    copy_examples("./docs/examples", "./_build/pydocmd/examples")
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/fitinfo_fit_figures__1.pdf -------------------------------------------------------------------------------- /examples/fluxflux.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/fluxflux.pdf -------------------------------------------------------------------------------- /examples/page_0_lcplot_fit_figures__1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/page_0_lcplot_fit_figures__1.pdf -------------------------------------------------------------------------------- /examples/page_1_lcplot_fit_figures__1.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/page_1_lcplot_fit_figures__1.pdf -------------------------------------------------------------------------------- /examples/resamp_: -------------------------------------------------------------------------------- 1 | 5.115000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 2 | 5.235000000000000142e+01 0.000000000000000000e+00 0.000000000000000000e+00 3 | 5.335000000000000142e+01 0.000000000000000000e+00 0.000000000000000000e+00 4 | 5.455000000000000426e+01 0.000000000000000000e+00 0.000000000000000000e+00 5 | 5.575000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 6 | 5.700000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 7 | 5.805000000000000426e+01 0.000000000000000000e+00 0.000000000000000000e+00 8 | 5.905000000000000426e+01 0.000000000000000000e+00 0.000000000000000000e+00 9 
| 6.015000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 10 | 6.120000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 11 | 6.205000000000000426e+01 0.000000000000000000e+00 0.000000000000000000e+00 12 | 6.320000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 13 | 6.465000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 14 | 6.579999999999999716e+01 0.000000000000000000e+00 0.000000000000000000e+00 15 | 6.665000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 16 | 6.785000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 17 | 6.875000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 18 | 6.965000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 19 | 7.079999999999999716e+01 0.000000000000000000e+00 0.000000000000000000e+00 20 | 7.179999999999999716e+01 0.000000000000000000e+00 0.000000000000000000e+00 21 | 7.279999999999999716e+01 0.000000000000000000e+00 0.000000000000000000e+00 22 | 7.360000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 23 | 7.454999999999999716e+01 0.000000000000000000e+00 0.000000000000000000e+00 24 | 7.560000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 25 | 7.645000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 26 | 7.740000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 27 | 7.845000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 28 | 7.975000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 29 | 8.110000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 30 | 8.230000000000001137e+01 0.000000000000000000e+00 0.000000000000000000e+00 31 | 8.315000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 32 | 8.430000000000001137e+01 0.000000000000000000e+00 0.000000000000000000e+00 33 | 8.550000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 34 
| 8.665000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 35 | 8.750000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 36 | 8.835000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 37 | 8.945000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 38 | 9.065000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 39 | 9.165000000000000568e+01 0.000000000000000000e+00 0.000000000000000000e+00 40 | 9.285000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 41 | 9.385000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 42 | 9.480000000000001137e+01 0.000000000000000000e+00 0.000000000000000000e+00 43 | 9.595000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 44 | 9.685000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 45 | 9.810000000000000853e+01 0.000000000000000000e+00 0.000000000000000000e+00 46 | 9.900000000000000000e+01 0.000000000000000000e+00 0.000000000000000000e+00 47 | 9.995000000000000284e+01 0.000000000000000000e+00 0.000000000000000000e+00 48 | 1.008500000000000085e+02 0.000000000000000000e+00 0.000000000000000000e+00 49 | 1.020500000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 50 | 1.030500000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 51 | 1.039000000000000057e+02 0.000000000000000000e+00 0.000000000000000000e+00 52 | 1.052500000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 53 | 1.063500000000000085e+02 0.000000000000000000e+00 0.000000000000000000e+00 54 | 1.074500000000000028e+02 0.000000000000000000e+00 0.000000000000000000e+00 55 | 1.084000000000000057e+02 0.000000000000000000e+00 0.000000000000000000e+00 56 | 1.094000000000000057e+02 0.000000000000000000e+00 0.000000000000000000e+00 57 | 1.104000000000000057e+02 0.000000000000000000e+00 0.000000000000000000e+00 58 | 1.113500000000000085e+02 0.000000000000000000e+00 0.000000000000000000e+00 59 
| 1.125000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 60 | 1.136000000000000085e+02 0.000000000000000000e+00 0.000000000000000000e+00 61 | 1.148000000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 62 | 1.158000000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 63 | 1.167000000000000028e+02 0.000000000000000000e+00 0.000000000000000000e+00 64 | 1.178000000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 65 | 1.189000000000000057e+02 0.000000000000000000e+00 0.000000000000000000e+00 66 | 1.202000000000000028e+02 0.000000000000000000e+00 0.000000000000000000e+00 67 | 1.212500000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 68 | 1.221000000000000085e+02 0.000000000000000000e+00 0.000000000000000000e+00 69 | 1.230000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 70 | 1.240000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 71 | 1.252000000000000028e+02 0.000000000000000000e+00 0.000000000000000000e+00 72 | 1.261500000000000057e+02 0.000000000000000000e+00 0.000000000000000000e+00 73 | 1.278500000000000085e+02 0.000000000000000000e+00 0.000000000000000000e+00 74 | 1.290000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 75 | 1.299500000000000171e+02 0.000000000000000000e+00 0.000000000000000000e+00 76 | 1.313000000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 77 | 1.322500000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 78 | 1.335999999999999943e+02 0.000000000000000000e+00 0.000000000000000000e+00 79 | 1.345500000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 80 | 1.357500000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 81 | 1.370000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 82 | 1.380500000000000114e+02 0.000000000000000000e+00 0.000000000000000000e+00 83 | 1.390000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 84 
| 1.400000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 85 | 1.408499999999999943e+02 0.000000000000000000e+00 0.000000000000000000e+00 86 | 1.419500000000000171e+02 0.000000000000000000e+00 0.000000000000000000e+00 87 | 1.430000000000000000e+02 0.000000000000000000e+00 0.000000000000000000e+00 88 | 1.440999999999999943e+02 0.000000000000000000e+00 0.000000000000000000e+00 89 | 1.449500000000000171e+02 0.000000000000000000e+00 0.000000000000000000e+00 90 | 1.459500000000000171e+02 0.000000000000000000e+00 0.000000000000000000e+00 91 | 1.469500000000000171e+02 0.000000000000000000e+00 0.000000000000000000e+00 92 | -------------------------------------------------------------------------------- /examples/test_pycecream.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | 5 | import pycecream.modules.myfake as mf 6 | import matplotlib.pylab as plt 7 | import numpy as np 8 | 9 | output_directory = 'fit_synthetic_lightcurves' 10 | 11 | ''' 12 | mf.myfake arguments are 13 | 14 | wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), 15 | 16 | snr: set the signal-to-noise relative to light curve rms 17 | 18 | cadence:set the mean cadence 19 | 20 | top hat centroid: set the centroid for the top-hat (I think thats what this does but the line lag 21 | thing is still newish so Im used to just making continuum light curve) 22 | ''' 23 | 24 | 25 | synthetic_data = mf.myfake( 26 | [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0], 27 | [50.0,50.0,10.0,50.0,50,10.], 28 | [1.0,1.0,2.0,1.0,1.0,3.0], 29 | thcent = 20.0 30 | ) 31 | 32 | '''This recovers the synthetic data''' 33 | dat = synthetic_data['echo light curves'] 34 | 35 | 36 | # # Section 2: Settup and run PyceCREAM 37 | # 38 | # 39 | 40 | # In[2]: 41 | 42 | 43 | import pycecream 44 | 45 | #instantiate a pycecream object 46 | a = pycecream.pycecream() 47 | 48 | ''' 49 | If 
you use a fortran compiler other than gfortran please indicate here. 50 | I just re-enter gfortran here for demonstration purposes even though 51 | this is unecassary as gfortran is the default argument. 52 | ''' 53 | a.fortran_caller = 'gfortran' 54 | 55 | 56 | 57 | '''Choose an output directory in which to save the results. 58 | This will be a new directory that you have not previously created (pycecream will make it automatically). 59 | 60 | NOTE: Each new cream simulation must have a new name for "output_directory argument below 61 | otherwise an excpetion is raised. This is to prevent accidentally overwriting previous simulations. 62 | I might change this in a future version 63 | ''' 64 | a.project_folder = output_directory 65 | 66 | #test the merging by adding offset to dat1 67 | d1 = np.array(dat[1]) 68 | d1[:,1] = d1[:,1] - np.mean(d1[:,1]) + 232. 69 | dat[1] = d1 70 | 71 | ''' 72 | Add each of the light curves in the simulation. 73 | In this case we are using the "dat" output from the synthetic data above. 74 | ''' 75 | a.add_lc(dat[0], 76 | kind='continuum', 77 | wavelength=4000., 78 | name = 'continuum 4000') 79 | #background_offset_start=[10.0,0.0], 80 | #vertical_scaling_start=[2.0,0.5]) 81 | a.add_lc(dat[1], 82 | name = 'continuum 5000', 83 | kind='continuum', 84 | wavelength=5000.) 85 | #background_offset_start=[10.0,0.0], 86 | #vertical_scaling_start=[2.0,0.5]) 87 | a.add_lc(dat[2], 88 | name = 'continuum 5000 (b)', 89 | kind='continuum', 90 | wavelength = 5000.) 91 | #background_offset_start=[10.0,0.0], 92 | #vertical_scaling_start=[2.0,0.5]) 93 | 94 | a.add_lc(dat[3], 95 | name = 'continuum 7000', 96 | kind='continuum', 97 | wavelength=7000.) 
98 | #background_offset_start=[10.0,0.0], 99 | #vertical_scaling_start=[2.0,0.5]) 100 | 101 | #If adding a line light curve, must indicate using the "kind" argument 102 | a.add_lc(dat[4],name='test line 1',kind='line', 103 | background_offset_start=[10.0,0.0], 104 | extra_variance_prior = [0.1,1.0], 105 | multiplicative_errorbar_prior = [10.0,0.0000001], 106 | vertical_scaling_start=[2.0,0.5], 107 | vertical_scaling_prior=[0.0,0.1], 108 | background_offset_prior=[5.0,0.0001], 109 | tophat_width_prior=[0.0, -0.1], 110 | tophat_centroid_prior=[12.4, 0.00000001] 111 | ) 112 | 113 | #If we want the same line response function model, set "share_previous_lag"=True 114 | a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True,background_offset_start=[10.0,3.3],vertical_scaling_start=[2.0,0.5]) 115 | 116 | 117 | 118 | ''' 119 | specify the numnber of MCMC iterations. Normally at least several thousand are necessary but shorter numbers 120 | can be used just to check everything is working is done here. 121 | ''' 122 | a.N_iterations=100 123 | 124 | ''' 125 | specify the step sizes for the fit parameters. 126 | Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. 127 | ''' 128 | a.p_accretion_rate_step = 0.1 129 | 130 | ''' 131 | Check the input settings are ok prior to running 132 | ''' 133 | print(a.lightcurve_input_params) 134 | 135 | ''' 136 | RUN! 
137 | ''' 138 | a.run() 139 | op = a.get_flux_flux_analysis(plotfile='fluxflux.pdf',xlim=[-4,4]) 140 | plt.show() 141 | 142 | 143 | ''' 144 | get chains 145 | ''' 146 | chains = a.get_MCMC_chains() 147 | fourier_chains = a.get_MCMC_fourier_chains() 148 | cols = list(chains.columns) 149 | fcols = [c for c in cols if 'noise m ' in c] 150 | fchains = chains[fcols] 151 | 152 | 153 | ''' 154 | clean up output directory DONT DO FOR REAL SIMULATION 155 | AS THIS DELETES ALL RESULTS 156 | ''' 157 | import os 158 | os.system('rm -rf '+output_directory) 159 | -------------------------------------------------------------------------------- /examples/test_pycecream_background_polynomials.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | 5 | import pycecream.modules.myfake as mf 6 | import matplotlib.pylab as plt 7 | import numpy as np 8 | 9 | output_directory = 'fit_synthetic_lightcurves_background_polynomial' 10 | 11 | ''' 12 | same as test_pycecream.py but using a background polynomial light curve 13 | ''' 14 | 15 | 16 | synthetic_data = mf.myfake( 17 | [4000.0,5000.0,5000.0,7000.0], 18 | [50.0,50.0,10.0,50.0], 19 | [1.0,1.0,2.0,1.0], 20 | thcent = 20.0 21 | ) 22 | 23 | '''This recovers the synthetic data''' 24 | dat = synthetic_data['echo light curves'] 25 | 26 | 27 | ''' Now append a simple polynomial model''' 28 | tlo = dat[0][:,0] 29 | thi = dat[0][:,-1] 30 | n = len(dat[0]) 31 | t = np.arange(n)/n*(thi - tlo) + tlo 32 | x = 5.0 + 2.0*t + 0.1*t**2 33 | x = (x - np.mean(x))/np.std(x) 34 | sig = np.ones(n)*0.05 35 | x = x + np.random.randn(n)*sig 36 | background_data = np.zeros((n,3)) 37 | background_data[:,0] = t 38 | background_data[:,1] = x 39 | background_data[:,2] = sig 40 | dat.append(background_data) 41 | 42 | # # Section 2: Settup and run PyceCREAM 43 | # 44 | # 45 | 46 | # In[2]: 47 | 48 | 49 | import pycecream 50 | 51 | #instantiate a pycecream object 52 | a = 
pycecream.pycecream() 53 | 54 | ''' 55 | If you use a fortran compiler other than gfortran please indicate here. 56 | I just re-enter gfortran here for demonstration purposes even though 57 | this is unecassary as gfortran is the default argument. 58 | ''' 59 | a.fortran_caller = 'gfortran' 60 | 61 | 62 | 63 | '''Choose an output directory in which to save the results. 64 | This will be a new directory that you have not previously created (pycecream will make it automatically). 65 | 66 | NOTE: Each new cream simulation must have a new name for "output_directory argument below 67 | otherwise an excpetion is raised. This is to prevent accidentally overwriting previous simulations. 68 | I might change this in a future version 69 | ''' 70 | a.project_folder = output_directory 71 | 72 | #test the merging by adding offset to dat1 73 | d1 = np.array(dat[1]) 74 | d1[:,1] = d1[:,1] - np.mean(d1[:,1]) + 232. 75 | dat[1] = d1 76 | 77 | ''' 78 | Add each of the light curves in the simulation. 79 | In this case we are using the "dat" output from the synthetic data above. 80 | ''' 81 | a.add_lc(dat[0], 82 | kind='continuum', 83 | wavelength=4000., 84 | name = 'continuum 4000') 85 | #background_offset_start=[10.0,0.0], 86 | #vertical_scaling_start=[2.0,0.5]) 87 | a.add_lc(dat[1], 88 | name = 'continuum 5000', 89 | kind='continuum', 90 | wavelength=5000.) 91 | #background_offset_start=[10.0,0.0], 92 | #vertical_scaling_start=[2.0,0.5]) 93 | a.add_lc(dat[2], 94 | name = 'continuum 5000 (b)', 95 | kind='continuum', 96 | wavelength = 5000.) 97 | #background_offset_start=[10.0,0.0], 98 | #vertical_scaling_start=[2.0,0.5]) 99 | 100 | a.add_lc(dat[3], 101 | name = 'continuum 7000', 102 | kind='continuum', 103 | wavelength=7000.) 
104 | #background_offset_start=[10.0,0.0], 105 | #vertical_scaling_start=[2.0,0.5]) 106 | 107 | 108 | a.add_lc(dat[4], 109 | name = 'continuum (background polynomial)', 110 | kind='continuum', 111 | wavelength=8000., 112 | background_polynomials = [0.1,0.1,0.1]) 113 | #background_offset_start=[10.0,0.0], 114 | #vertical_scaling_start=[2.0,0.5]) 115 | 116 | 117 | ''' 118 | specify the numnber of MCMC iterations. Normally at least several thousand are necessary but shorter numbers 119 | can be used just to check everything is working is done here. 120 | ''' 121 | a.N_iterations=100 122 | 123 | ''' 124 | specify the step sizes for the fit parameters. 125 | Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. 126 | ''' 127 | a.p_accretion_rate_step = 0.1 128 | 129 | ''' 130 | Check the input settings are ok prior to running 131 | ''' 132 | print(a.lightcurve_input_params) 133 | 134 | ''' 135 | RUN! 136 | ''' 137 | a.run() 138 | op = a.get_flux_flux_analysis(plotfile='fluxflux.pdf',xlim=[-4,4]) 139 | plt.show() 140 | 141 | 142 | ''' 143 | get chains 144 | ''' 145 | chains = a.get_MCMC_chains() 146 | fourier_chains = a.get_MCMC_fourier_chains() 147 | cols = list(chains.columns) 148 | fcols = [c for c in cols if 'noise m ' in c] 149 | fchains = chains[fcols] 150 | 151 | 152 | ''' 153 | clean up output directory DONT DO FOR REAL SIMULATION 154 | AS THIS DELETES ALL RESULTS 155 | ''' 156 | import os 157 | os.system('rm -rf '+output_directory) 158 | -------------------------------------------------------------------------------- /examples/test_pycecream_files/test_pycecream_12_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_files/test_pycecream_12_0.png -------------------------------------------------------------------------------- /examples/test_pycecream_files/test_pycecream_6_3.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_files/test_pycecream_6_3.png -------------------------------------------------------------------------------- /examples/test_pycecream_files/test_pycecream_6_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_files/test_pycecream_6_4.png -------------------------------------------------------------------------------- /examples/test_pycecream_files/test_pycecream_6_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_files/test_pycecream_6_6.png -------------------------------------------------------------------------------- /examples/test_pycecream_files/test_pycecream_6_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_files/test_pycecream_6_8.png -------------------------------------------------------------------------------- /examples/test_pycecream_tophat_0lag.md: -------------------------------------------------------------------------------- 1 | # PyceCREAM 2 | 3 | Here is a python implementaiton of my accretion disc and emission line lightcurve-fitting code (previously CREAM). This guide briefly covers generating synthetic data and calling a new pycecream object to ingest and fit the accretion disc model (or emission line model) to a set of input light curves. I also demonstrate how to access the output of the pycecream fit. 
The output includes the fitted light curves, any new light curve data points after merging, fitted response functions and parameter MCMC chain histories for the disc and/or tophat response parameters. 4 | 5 | Most of these features are used in some form or another from a previous f90 version of this code (CREAM) in the following literature 6 | 7 | * Grier et al in prep 8 | * Grier et al 2018 https://iopscience.iop.org/article/10.3847/1538-4357/aa98dc/pdf 9 | * Starkey et al 2017 https://ui.adsabs.harvard.edu/#abs/arXiv:1611.06051 10 | * Starkey et al 2016 https://ui.adsabs.harvard.edu/#abs/arXiv:1511.06162 11 | 12 | Please send questions to ds207@st-andrews.ac.uk. Though I am currently taking a break from academia and may take some time to respond, I will try to do so as soon as possible. 13 | 14 | 15 | ## Requirements & Installation 16 | 17 | Please ensure that you have a fortran compiler installed. I use Gfortran. If you have an alternate (e.g ifort), please indicate the standard command used to call the fortran compiler using the ```fortran_caller``` argument (default is ```fortran_caller = gfortran```). 18 | 19 | 20 | command These are fairly easy to install from macports or wget etc. Also a Python version is required (I am using 3.7 but even 2 should be fine). The it's just... 21 | 22 | ``` 23 | pip install pycecream 24 | ``` 25 | 26 | 27 | # Section 1: Generate Synthetic Light Curves 28 | 29 | In this example we generate 4 disk light curves and 2 emission-line light curves modelled as a top-hat with a 20-day lag. The code below generates a list where each index contains an Nx3 numpy array for each light curve. The 3 vertical axis for each light curve are the time, flux and noise respectively (query synthetic_data['echo lightcurves'][0] for an example of the format required when inputting your own light curve data). 
30 | 31 | The example below combines continuum and line light curves and illustrates a case in which you may have two of the same emission line (and so want to fit with the same response function model) but observed from different telescopes that require seperate noise models. 32 | 33 | 34 | ```python 35 | import astropy_stark.myfake as mf 36 | import matplotlib.pylab as plt 37 | 38 | ''' 39 | mf.myfake arguments are 40 | 41 | wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), 42 | 43 | snr: set the signal-to-noise relative to light curve rms 44 | 45 | cadence:set the mean cadence 46 | 47 | top hat centroid: set the centroid for the top-hat (I think thats what this does but the line lag 48 | thing is still newish so Im used to just making continuum light curve) 49 | ''' 50 | 51 | 52 | synthetic_data = mf.myfake( 53 | [4000.0,5000.0,5000.0,7000.0,-1.0,-1.0], 54 | [50.0,50.0,10.0,50.0,50,10.], 55 | [1.0,1.0,2.0,1.0,1.0,3.0], 56 | thcent = 20.0 57 | ) 58 | 59 | '''This recovers the synthetic data''' 60 | dat = synthetic_data['echo light curves'] 61 | ``` 62 | 63 | # Section 2: Settup and run PyceCREAM 64 | 65 | 66 | 67 | 68 | ```python 69 | import pycecream 70 | 71 | #instantiate a pycecream object 72 | a = pycecream.pycecream() 73 | 74 | ''' 75 | If you use a fortran compiler other than gfortran please indicate here. 76 | I just re-enter gfortran here for demonstration purposes even though 77 | this is unecassary as gfortran is the default argument. 78 | ''' 79 | a.fortran_caller = 'gfortran' 80 | 81 | 82 | 83 | '''Choose an output directory in which to save the results. 84 | This will be a new directory that you have not previously created (pycecream will make it automatically). 85 | 86 | NOTE: Each new cream simulation must have a new name for "output_directory argument below 87 | otherwise an excpetion is raised. This is to prevent accidentally overwriting previous simulations. 
88 | I might change this in a future version 89 | ''' 90 | a.project_folder = 'fit_synthetic_lightcurves' 91 | 92 | 93 | 94 | ''' 95 | Add each of the light curves in the simulation. 96 | In this case we are using the "dat" output from the synthetic data above. 97 | ''' 98 | a.add_lc(dat[0], name = 'continuum 4000') 99 | a.add_lc(dat[1], name = 'continuum 5000') 100 | a.add_lc(dat[2], name = 'continuum 5000 (b)') 101 | a.add_lc(dat[3], name = 'continuum 7000') 102 | 103 | #If adding a line light curve, must indicate using the "kind" argument 104 | a.add_lc(dat[4],name='test line 1',kind='line') 105 | 106 | #If we want the same line response function model, set "share_previous_lag"=True 107 | a.add_lc(dat[5],name='test line 1 (shared)',kind='line',share_previous_lag=True) 108 | 109 | 110 | 111 | ''' 112 | specify the numnber of MCMC iterations. Normally at least several thousand are necessary but shorter numbers 113 | can be used just to check everything is working is done here. 114 | ''' 115 | a.N_iterations=40 116 | 117 | ''' 118 | specify the step sizes for the fit parameters. 119 | Here we are setting the accretion rate step size to vary by ~ 0.1 solar masses per year. 120 | ''' 121 | a.p_accretion_rate_step = 0.1 122 | 123 | ''' 124 | Check the input settings are ok prior to running 125 | ''' 126 | print(a.lightcurve_input_params) 127 | 128 | ''' 129 | RUN! 130 | ''' 131 | a.run() 132 | ``` 133 | 134 | pycecream path... /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/pycecream 135 | copying file... 
136 | /Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/pycecream 137 | name type wavelength noise model \ 138 | 0 continuum 4000 line -1.0 [var, multiplicative] 139 | 0 continuum 5000 line -1.0 [var, multiplicative] 140 | 0 continuum 5000 (b) line -1.0 [var, multiplicative] 141 | 0 continuum 7000 line -1.0 [var, multiplicative] 142 | 0 test line 1 line -1.0 [var, multiplicative] 143 | 0 test line 1 (shared) line -1.0 [var, multiplicative] 144 | 145 | share previous lag temporary file name mean standard deviation \ 146 | 0 False line_0.dat 3.800485 0.796293 147 | 0 False line_1.dat 3.590073 0.675132 148 | 0 False line_2.dat 3.593360 0.696788 149 | 0 False line_3.dat 3.277164 0.524617 150 | 0 False line_4.dat -0.000530 1.001517 151 | 0 True line_5.dat 0.003286 1.025419 152 | 153 | tophat centroid tophat centroid step tophat width tophat width step 154 | 0 0.0 5.0 2.0 0.0 155 | 0 0.0 5.1 2.0 0.0 156 | 0 0.0 5.2 2.0 0.0 157 | 0 0.0 5.3 2.0 0.0 158 | 0 0.0 5.4 2.0 0.0 159 | 0 0.0 5.4 2.0 0.0 160 | 161 | 162 | # Examine the output 163 | 164 | There are 2 output dataframes. 165 | 166 | ## 1) output_lightcurves = a.get_light_curve_fits(): 167 | This a dictionary of 3 data frames. 168 | 169 | 1.1) output_lightcurves['model']: standard time, model, error envelope for each file 170 | 171 | 1.2) output_lightcurves['merged model'] AS above but with the error bars, vertical and horrizontal scalings applied relative to the reference model. Not sure but I think the reference model defaults to the first occurence of a particular wavelength in the order that it was added in self.add_lc 172 | 173 | 1.3) output_lightcurves['merged data'] DICTIONARY (since the input data light curves can be different sizes) The same transformations but applied to the input light curve data. 
useful if using cream only to merge the orriginal light curves from different telescopes to a new scale for further study elsewhere 174 | 175 | ## 2) output_chains = a.get_MCMC_chains(): 176 | These are the MCMC chains for each parameter. 177 | 178 | 179 | 180 | ```python 181 | ''' 182 | Get the mcmc chains and output fits. 183 | Each of these arguments come with a "location" argument where you can point to a 184 | previous simulation and recover the outputs. 185 | If this is left blank we default to the current simulation 186 | ''' 187 | output_chains = a.get_MCMC_chains(location = None) 188 | output_lightcurves = a.get_light_curve_fits(location = None) 189 | 190 | 191 | ''' 192 | NEW: 11/12/2019 Now the fourier chains are available as a pandas 193 | dataframe. 194 | Stats on the sine and cosine parameters are also available for each 195 | freuqency accessible in the `fourier_stats` dictionary element of this 196 | `get_MCMC_fourier_chains` function. 197 | ''' 198 | output_fourier_chains = a.get_MCMC_fourier_chains(location=None) 199 | fourier_chains = output_fourier_chains['fourier_chains'] 200 | fourier_stats = output_fourier_chains['fourier_stats'] 201 | 202 | 203 | ''' 204 | make figures of the fit, posterior, light curves etc. file prefix tells the code where you want to save the output. 205 | The figure plotting is somewhat primitive and is a relic of when I still used cream. You may prefer to use your own 206 | output figures with the output of the "get_MCMC_chains" and "get_light_curve_fits" functions above. 207 | ''' 208 | a.plot_results(file_prefix='fit_figures') 209 | 210 | 211 | 212 | 213 | ''' 214 | figures can also be made on an indivdual basis with axes objects returned from python plotting functions 215 | ''' 216 | #plot the fitted light curves. 
217 | a.plot_lightcurves() 218 | plt.show() 219 | 220 | 221 | #plot the driving light curve 222 | a.plot_driver() 223 | plt.show() 224 | 225 | 226 | #plot the parameter trace plots 227 | a.plot_trace() 228 | plt.show() 229 | 230 | 231 | #plot the covariance parameter plot for the disc parameters 232 | a.plot_posterior() 233 | plt.show() 234 | 235 | 236 | 237 | ``` 238 | 239 | cream_lcplot plotting results from... fit_synthetic_lightcurves/simulation_files 240 | -15.825983 [3.80048513 3.80048513 3.80048513 3.80048513 3.80048513] 0 241 | -15.825983 [3.59007335 3.59007335 3.59007335 3.59007335 3.59007335] 1 242 | -15.825983 [3.59336019 3.59336019 3.59336019 3.59336019 3.59336019] 2 243 | -15.825983 [3.27716422 3.27716422 3.27716422 3.27716422 3.27716422] 3 244 | making posterior plot.... posterior_fit_figures__1.pdf 245 | unable to make covariance plot for disc posteriors. Please check at least some of these are set to varyin the fit. 246 | fit_synthetic_lightcurves/simulation_files/output_20190406_001/G_plot.pdf 247 | Nth 6 Ndisk 1 248 | cream_lcplot plotting results from... fit_synthetic_lightcurves/simulation_files/output_20190406_001 249 | -15.825983 [3.80048513 3.80048513 3.80048513 3.80048513 3.80048513] 0 250 | -15.825983 [3.59007335 3.59007335 3.59007335 3.59007335 3.59007335] 1 251 | -15.825983 [3.59336019 3.59336019 3.59336019 3.59336019 3.59336019] 2 252 | -15.825983 [3.27716422 3.27716422 3.27716422 3.27716422 3.27716422] 3 253 | 254 | 255 | 256 | ![png](test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_1.png) 257 | 258 | 259 | 260 | ![png](test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_2.png) 261 | 262 | 263 | cream_lcplot plotting results from... fit_synthetic_lightcurves/simulation_files/output_20190406_001 264 | 265 | 266 | 267 | ![png](test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_4.png) 268 | 269 | 270 | cream_lcplot plotting results from... 
fit_synthetic_lightcurves/simulation_files/output_20190406_001 271 | 272 | 273 | 274 | ![png](test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_6.png) 275 | 276 | 277 | cream_lcplot plotting results from... fit_synthetic_lightcurves/simulation_files/output_20190406_001 278 | 279 | 280 | 281 | ```python 282 | # how to install python 3 environment (skip the netcdf4 line) matplotlib should be ok now 283 | # https://salishsea-meopar-docs.readthedocs.io/en/latest/work_env/python3_conda_environment.html 284 | 285 | ``` 286 | -------------------------------------------------------------------------------- /examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_1.png -------------------------------------------------------------------------------- /examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_2.png -------------------------------------------------------------------------------- /examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_4.png -------------------------------------------------------------------------------- /examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_6.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_6.png -------------------------------------------------------------------------------- /misc/run_lc.py: -------------------------------------------------------------------------------- 1 | import pycecream 2 | import pycecream.modules.myfake as mf 3 | import matplotlib.pylab as plt 4 | import os 5 | 6 | 7 | 8 | 9 | class test_pc: 10 | 11 | def __init__(self): 12 | '''input arguments''' 13 | self.fake_wavelength = [-1.0,4680.0,4686.0,4720.,4720.,7480.0,7760.,7764.0,7764.0] 14 | self.fake_snr = [10.,50.0,30.0,50.,30.,50.0,50.0,50.0,30.0] 15 | self.fake_cadence = [1.0]*len(self.fake_snr) 16 | 17 | def gen_fake(self): 18 | ''' 19 | make fake lightcurves 20 | :return: 21 | mf.myfake arguments are 22 | wavelengths: enter the wavelengths (-1 indicates an emission line light curve modelled with a top-hat response), 23 | snr: set the signal-to-noise relative to light curve rms 24 | cadence:set the mean cadence 25 | top hat centroid: set the centroid for the top-hat (I think thats what this does but the line lag 26 | thing is still newish so Im used to just making continuum light curve) 27 | ''' 28 | 29 | 30 | synthetic_data = mf.myfake( 31 | self.fake_wavelength, 32 | self.fake_snr, 33 | self.fake_cadence, 34 | thcent = 20.0 35 | ) 36 | self.dat = synthetic_data['echo light curves'] 37 | 38 | 39 | def run_pycecream(self,test_project_folder = 'test_pycecream_output'): 40 | ''' 41 | test pycecream using yasamans script 42 | :return: 43 | ''' 44 | cream_lc0, cream_lc1, cream_lc4, cream_lc2, cream_lc3, cream_lc8, cream_lc5, cream_lc6, cream_lc7 = self.dat 45 | 46 | #instantiate and remove previous test if present 47 | os.system('rm -rf '+test_project_folder) 48 | a = pycecream.pycecream() 49 | a.project_folder = test_project_folder 50 | 51 
| #step accretion rate? 52 | a.p_accretion_rate_step = 0.1 53 | a.bh_mass = 6.6e8 54 | 55 | # MgII Line lightcurve 56 | a.add_lc(cream_lc0, name='line 0 (MgII)', kind='line',background_polynomials=[0.1,0.1]) 57 | a.p_linelag_centroids_step = 0.0 58 | # g-band photometric lightcurves 59 | a.add_lc(cream_lc1,name='continuum (Bok)', kind='continuum', wavelength = 4680) 60 | a.add_lc(cream_lc2,name='continuum 4720 (CFHT 1)',kind='continuum', wavelength = 4720, share_previous_lag=True) 61 | a.add_lc(cream_lc3,name='continuum 4720 (CFHT 2)',kind='continuum', wavelength = 4720, share_previous_lag=True) 62 | a.add_lc(cream_lc4,name='continuum 4686 (SynthPhot)',kind='continuum', wavelength = 4686, share_previous_lag=True) 63 | # i-band photometric lightcurves 64 | a.add_lc(cream_lc5,name='continuum (Bok)', kind='continuum', wavelength= 7760, share_previous_lag = False) 65 | a.add_lc(cream_lc6,name='continuum (CFHT 1)',kind='continuum', wavelength = 7764, share_previous_lag=True) 66 | a.add_lc(cream_lc7,name='continuum (CFHT 2)',kind='continuum', wavelength = 7764, share_previous_lag=True) 67 | a.add_lc(cream_lc8,name='continuum (SynthPhot)',kind='continuum', wavelength = 7480,share_previous_lag=True) 68 | a.hi_frequency = 0.5 69 | a.N_iterations = 20 70 | a.run(ncores = 4) 71 | 72 | self.pc = a 73 | 74 | 75 | 76 | 77 | 78 | def post_run(self): 79 | ''' 80 | analyse output 81 | :return: 82 | ''' 83 | self.output_chains = self.pc.get_MCMC_chains(location=None) 84 | self.output_lightcurves = self.pc.get_light_curve_fits(location=None) 85 | ''' 86 | Check the input settings are ok prior to running 87 | ''' 88 | print(self.pc.lightcurve_input_params) 89 | 90 | 91 | if __name__ == '__main__': 92 | x = test_pc() 93 | x.gen_fake() 94 | x.run_pycecream() 95 | x.post_run() 96 | lcop = x.output_lightcurves -------------------------------------------------------------------------------- /misc/test_parallel.py: 
-------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import time 3 | from random import randint 4 | 5 | PROCESSES = 5 6 | WORKER_CALLS = 7 7 | 8 | def worker(num): 9 | """worker function""" 10 | print('Starting worker', num) 11 | time.sleep(randint(2,4)) 12 | print('Exiting worker', num) 13 | return "ok" 14 | 15 | if __name__ == '__main__': 16 | pool = multiprocessing.Pool(processes=PROCESSES) 17 | pool_outputs = pool.map(worker, range(WORKER_CALLS)) 18 | pool.close() 19 | pool.join() 20 | print('Pool:', pool_outputs) -------------------------------------------------------------------------------- /misc/test_pycecream.py: -------------------------------------------------------------------------------- 1 | import pycecream 2 | import pycecream.modules.myfake as mf 3 | import unittest 4 | import os 5 | 6 | 7 | def get_synthetic_data(): 8 | 9 | finished = False 10 | '''input arguments''' 11 | fake_wavelength = [4680.0,4686.0,4720.,4720., 12 | 7480.0,7760.,7764.0,7764.0,-1.0] 13 | fake_snr = [50.0,30.0,50.,30.,50.0,50.0,50.0,30.0,10.] 
14 | fake_cadence = [1.0]*len(fake_snr) 15 | 16 | 17 | 18 | synthetic_data = mf.myfake( 19 | fake_wavelength, 20 | fake_snr, 21 | fake_cadence, 22 | thcent = 20.0 23 | ) 24 | dat = synthetic_data['echo light curves'] 25 | 26 | name = ['continuum 4680', 27 | 'continuum 4686', 28 | 'continuum 4720', 29 | 'continuum 4720 (Telescope 2)', 30 | 'continuum 7480', 31 | 'continuum 7760', 32 | 'continuum 7764', 33 | 'continuum 7764 (Telescope 2)', 34 | 'line 0 (MgII)' 35 | ] 36 | 37 | share_previous_lag = [False,False,False,True, 38 | False,False,False,True, 39 | False] 40 | 41 | kind = ['continuum']*(len(dat)-1) + ['line'] 42 | test_data = {'lightcurve':dat, 43 | 'name':name, 44 | 'kind':kind, 45 | 'wavelength':fake_wavelength, 46 | 'share_previous_lag':share_previous_lag} 47 | 48 | return test_data 49 | 50 | 51 | 52 | 53 | class Test_synthetic_data(unittest.TestCase): 54 | 55 | 56 | def test_synthetic_data(self): 57 | 58 | finished = False 59 | '''input arguments''' 60 | synthetic_data = get_synthetic_data() 61 | 62 | a = pycecream.pycecream() 63 | a.project_folder = 'test_pycecream' 64 | 65 | #step accretion rate? 
66 | a.p_accretion_rate_step = 0.1 67 | a.p_linelag_centroids_step = 0.0 68 | 69 | ndata = len(synthetic_data['name']) 70 | 71 | for i in range(ndata): 72 | a.add_lc(synthetic_data['lightcurve'][i], 73 | name=synthetic_data['name'][i], 74 | kind = synthetic_data['kind'][i], 75 | wavelength=synthetic_data['wavelength'][i], 76 | share_previous_lag=synthetic_data['share_previous_lag'][i]) 77 | a.hi_frequency = 0.5 78 | a.N_iterations = 20 79 | a.run() 80 | 81 | 82 | ''' 83 | post run 84 | ''' 85 | output_chains = a.get_MCMC_chains(location=None) 86 | output_lightcurves = a.get_light_curve_fits(location=None) 87 | ''' 88 | Check the input settings are ok prior to running 89 | ''' 90 | print(a.lightcurve_input_params) 91 | finished = True 92 | self.assertEqual(finished,True) 93 | os.system('rm -rf test_pycecream') 94 | 95 | 96 | if __name__ == '__main__': 97 | unittest.main() -------------------------------------------------------------------------------- /pycecream.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: pycecream 3 | Version: 1.5.5 4 | Summary: python implementation of the cream accretion disc fitting code https://academic.oup.com/mnras/article-abstract/456/2/1960/1066664?redirectedFrom=PDF 5 | Home-page: https://github.com/dstarkey23/pycecream 6 | Author: dstarkey23 7 | Author-email: ds207@st-andrews.ac.uk 8 | License: MIT 9 | License-File: LICENSE 10 | Requires-Dist: pandas==2.2.0 11 | Requires-Dist: numpy==1.26.4 12 | Requires-Dist: matplotlib==3.8.2 13 | Requires-Dist: scipy==1.12.0 14 | Requires-Dist: corner==2.2.2 15 | Requires-Dist: seaborn==0.13.2 16 | Provides-Extra: tests 17 | Requires-Dist: nose2==0.9.1; extra == "tests" 18 | Requires-Dist: pre-commit==1.20.0; extra == "tests" 19 | Requires-Dist: flake8==3.7.9; extra == "tests" 20 | Requires-Dist: pydoc-markdown==2.0.4; extra == "tests" 21 | Requires-Dist: tabulate==0.8.5; extra == "tests" 22 | Requires-Dist: 
six==1.12.0; extra == "tests" 23 | 24 | add pycecream.dream light curve merging feature 25 | -------------------------------------------------------------------------------- /pycecream.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .gitignore 3 | CONTRIBUTING.md 4 | LICENSE 5 | README.md 6 | README_dream.md 7 | requirements.txt 8 | setup.py 9 | .idea/encodings.xml 10 | .idea/misc.xml 11 | .idea/modules.xml 12 | .idea/pycecream.iml 13 | .idea/vcs.xml 14 | .idea/workspace.xml 15 | .idea/libraries/R_User_Library.xml 16 | docs/autogen.py 17 | docs/build_docs.sh 18 | docs/deploy_docs.sh 19 | docs/pydocmd.yml 20 | docs/serve_docs.sh 21 | docs/_build/pydocmd/index.md 22 | docs/_build/pydocmd/pycecream.md 23 | docs/_build/site/404.html 24 | docs/_build/site/index.html 25 | docs/_build/site/search.html 26 | docs/_build/site/sitemap.xml 27 | docs/_build/site/sitemap.xml.gz 28 | docs/_build/site/css/theme.css 29 | docs/_build/site/css/theme_extra.css 30 | docs/_build/site/fonts/fontawesome-webfont.eot 31 | docs/_build/site/fonts/fontawesome-webfont.svg 32 | docs/_build/site/fonts/fontawesome-webfont.ttf 33 | docs/_build/site/fonts/fontawesome-webfont.woff 34 | docs/_build/site/fonts/fontawesome-webfont.woff2 35 | docs/_build/site/fonts/Lato/lato-bold.eot 36 | docs/_build/site/fonts/Lato/lato-bold.ttf 37 | docs/_build/site/fonts/Lato/lato-bold.woff 38 | docs/_build/site/fonts/Lato/lato-bold.woff2 39 | docs/_build/site/fonts/Lato/lato-bolditalic.eot 40 | docs/_build/site/fonts/Lato/lato-bolditalic.ttf 41 | docs/_build/site/fonts/Lato/lato-bolditalic.woff 42 | docs/_build/site/fonts/Lato/lato-bolditalic.woff2 43 | docs/_build/site/fonts/Lato/lato-italic.eot 44 | docs/_build/site/fonts/Lato/lato-italic.ttf 45 | docs/_build/site/fonts/Lato/lato-italic.woff 46 | docs/_build/site/fonts/Lato/lato-italic.woff2 47 | docs/_build/site/fonts/Lato/lato-regular.eot 48 | docs/_build/site/fonts/Lato/lato-regular.ttf 49 
| docs/_build/site/fonts/Lato/lato-regular.woff 50 | docs/_build/site/fonts/Lato/lato-regular.woff2 51 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.eot 52 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.ttf 53 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.woff 54 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 55 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.eot 56 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.ttf 57 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.woff 58 | docs/_build/site/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 59 | docs/_build/site/fonts/RobotoSlab/roboto-slab.eot 60 | docs/_build/site/img/favicon.ico 61 | docs/_build/site/js/jquery-2.1.1.min.js 62 | docs/_build/site/js/modernizr-2.8.3.min.js 63 | docs/_build/site/js/theme.js 64 | docs/_build/site/pycecream/index.html 65 | docs/_build/site/search/lunr.js 66 | docs/_build/site/search/main.js 67 | docs/_build/site/search/search_index.json 68 | docs/_build/site/search/worker.js 69 | docs/examples/pyceream_test.py 70 | examples/fitinfo_fit_figures__1.pdf 71 | examples/fluxflux.pdf 72 | examples/page_0_lcplot_fit_figures__1.pdf 73 | examples/page_1_lcplot_fit_figures__1.pdf 74 | examples/resamp_ 75 | examples/test_pycecream.ipynb 76 | examples/test_pycecream.py 77 | examples/test_pycecream_background_polynomials.py 78 | examples/test_pycecream_tophat_0lag.ipynb 79 | examples/test_pycecream_tophat_0lag.md 80 | examples/.ipynb_checkpoints/test_pycecream-checkpoint.ipynb 81 | examples/.ipynb_checkpoints/test_pycecream_tophat_0lag-checkpoint.ipynb 82 | examples/test_pycecream_files/test_pycecream_12_0.png 83 | examples/test_pycecream_files/test_pycecream_6_3.png 84 | examples/test_pycecream_files/test_pycecream_6_4.png 85 | examples/test_pycecream_files/test_pycecream_6_6.png 86 | examples/test_pycecream_files/test_pycecream_6_8.png 87 | examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_1.png 88 | 
examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_2.png 89 | examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_4.png 90 | examples/test_pycecream_tophat_0lag_files/test_pycecream_tophat_0lag_6_6.png 91 | pycecream/.DS_Store 92 | pycecream/__init__.py 93 | pycecream/__init__.pyc 94 | pycecream/cream_f90.f90 95 | pycecream/creaminpar.par 96 | pycecream.egg-info/PKG-INFO 97 | pycecream.egg-info/SOURCES.txt 98 | pycecream.egg-info/dependency_links.txt 99 | pycecream.egg-info/not-zip-safe 100 | pycecream.egg-info/requires.txt 101 | pycecream.egg-info/top_level.txt -------------------------------------------------------------------------------- /pycecream.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pycecream.egg-info/not-zip-safe: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /pycecream.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | pandas==2.2.0 2 | numpy==1.26.4 3 | matplotlib==3.8.2 4 | scipy==1.12.0 5 | corner==2.2.2 6 | seaborn==0.13.2 7 | 8 | [tests] 9 | nose2==0.9.1 10 | pre-commit==1.20.0 11 | flake8==3.7.9 12 | pydoc-markdown==2.0.4 13 | tabulate==0.8.5 14 | six==1.12.0 15 | -------------------------------------------------------------------------------- /pycecream.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | pycecream 2 | -------------------------------------------------------------------------------- /pycecream/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/pycecream/.DS_Store 
-------------------------------------------------------------------------------- /pycecream/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/drds1/pycecream/1716da359b611ad37f1f645f247e15d59796fb56/pycecream/__init__.pyc -------------------------------------------------------------------------------- /pycecream/creaminpar.par: -------------------------------------------------------------------------------- 1 | ./hermine_lc 2 | !!! path to main folder (dirpath) folder string subfolders of agn with the light curves !! set to ./fake/ to generate fake data 3 | F F !!! save the BOF and pspec plots as separate files and save big parameter file (takes lots of space) 4 | !!! save plot frequency (if -ve, display plots to screen and backs up every -value iterations) 5 | 20 6 | 4 !! how many backups to keep 7 | 8 | -0.01,-0.007,0.0 !!! (flo,df,start0) (0.02, -0.007, 1.0 pre 14/apr/16 values) Enter lowest and frequency spacing in cycles/day (if -ve, code decides for df uses flo by default), start0 = 1 if you want fourier amplitudes to start at 0 9 | -20 !!!(NW) number of Fourier frequencies (if -ve program choses), (if 1 start fourier amplitudes at 0 else code guesses) 10 | 0.5 !!! (whi) Enter highest frequency in cycles/day (if NW -ve above then this determines NW. If both -ve, program chooses) 11 | 100 !!! (nits) Default number of iterations (nits) (code cuts off when converged) 12 | 2 !!! (AT) NUmber of acceptances before doubling stepsizes 13 | 5 !!! (RT) Number of rejections before halving step sizes 14 | -10.0 50.0 !!! (min and maxtau) lag limits 15 | 0.2 !!! (dtaugrid) if -ve, automatically determined, but may want to control manually if having resolution problems 16 | 17 | !!!!!!! Fourier scaling 18 | 0.00001 !! starting fourier scaling (default0.001) if -ve, code uses power law slope to scale terms 19 | 20 | !!!!!!! Error bar expansion 21 | F !! 
sigexpand (Set true to allow error bars to expand) 22 | 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 1.0 !! Default expansion factor sigexpandparm (NLC +1 f xray data) 23 | 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 !! default logarithmic stepsize in error bar expansion (sigexpandsteplog) 24 | 25 | !!!!!!! Delay parameters 26 | 0.1 !!! black hole efficiency (eta) 27 | 0.0 !!! disk albedo (alb) 28 | 0.0 !!! height profile constant of proportionality (konst) 29 | 0.0 !!! power index f height proportionality (alpha) 30 | 3.0 !!! innner radius (rs) 31 | 1.0 !!! lamppost height (rs) 32 | 1.0 !!! black hole accretion rate (umdot) 33 | 1.12e8 !!! black hole mmdot (umbh) 34 | -6.0 -0.1 !!! prior on mmdot (mean and sd of gaussian (in log 10 units, set sd -ve to exclude prior) 35 | 0.01 !!! black hole mdot scaling [log10scaling] 36 | 37 | !! Inclination parameters 38 | 0.0 !!! starting inclination (degrees) 60 degrees 39 | 0.0 !!! scaling cos(theta) 40 | -50.0 20.0 !!! Enter deginc0 and a for inclination prior - takes form (1 / (1+(cosinc/cosinc0)^a)) set deginc0 -ve to exclude 41 | 42 | !!!!!!! PSPEC PRIOR PARMS 43 | F !! implement break in power spectrum (T or F) 44 | -0.001 !!! (p0mean) mean value of p0 45 | 0.0 !!!was -1before 26/10/2014 (siglogp0) uncertainty in logp0 (for prior prior is not yet logarithmic make sigp0=p0mean*siglogp0) (14th may set to 0.001) 46 | 0.00 !!! (p0steplog) logarithmic stepsize of p0 47 | 0.1 !!! (meanf0) mean value of f0 (cycles / day) 48 | 0.0000000 !!! (siglogw0) uncertainty in log w0 ( for prior set -ve to not include prior) 49 | 0.00 !!! (w0steplog) logarithmic step size of w0 50 | 2.0 !!! (meanalpha) mean (-ve) value of alpha (meanalpha) 51 | 0.0 !!! (sigalpha) uncertainty in alpha (sigalph) for the prior (set -ve to not include prior) 52 | 0.0 !!! (alphascale) scaling of alpha parameter 53 | 2.0 !!! 
(meanbeta) mean (-ve) value of beta (meanbeta) 54 | 0.0 !!! (sigbeta) uncertainty in beta (sigbeta) for the prior (set -ve to not include prior) 55 | 0.0 !!! (betascale) scaling of beta parameter 56 | 57 | !!!!!!! Stretch and offset parms (starting values inside code just the sclaings here) 58 | 0.0 !!! (stretchscale) 59 | 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 10.0 !!! (gal flux real array containing the steady state host component 60 | 0.1 !!! (galscale) !! log10 61 | 62 | !!!!!! luminosity distance and scaling (version 2 only) 63 | 0.0, 0.3, 0.7 !!! redshift of target, omega_m, omega_l 64 | 500.0 !!! Luminosity distance (MPC) 65 | 0.01 !!! dlscale (in log10 units) 66 | 0.0168 !!! MW extinction parameter in ra and dec of target 67 | 1.0 !!! Default AGN extinction parameter E(B-V) 68 | 1.0 !!! default logarithmic stepping ebmvagnsteplog 69 | 70 | !!!!!! temperature radius parms, scaling (log10) (version 3 only) 71 | 0.75 !!!!! viscous tr parameter (P(NPtridx)) 72 | 0.75 !!!!! irradiation tr parameter 73 | 0.0 !!!!! logarithmic scaling of tv 74 | 0.0 !!!!! logarithmic scaling of ta parms 75 | -5.0 !!!!! UR0 (rs) if negative then in light days 76 | 0.1 !!!!! logarithmic scaling of ur0 77 | 0.1 !!!!! offset scale (mjy) 78 | 79 | F !!!!! 
import numpy as np

# 1-D convolution helpers used to build echo light curves from a driving
# light curve and a response (transfer) function.


def myconvolve(xd, tf):
    """Convolve the driving series ``xd`` with the response ``tf``.

    Assumes both series are sampled on the same uniform time grid.

    :param xd: 1-D array, driving light-curve values.
    :param tf: 1-D array, response function on the same grid spacing.
    :return: 1-D array, same length as ``xd``, the convolved model.
    """
    nmodtf = np.shape(tf)[0]
    nd = np.shape(xd)[0]
    modf = []
    for i in range(nd):
        # only lags up to the response length (and not before t=0) contribute
        idxlo = max(0, i - nmodtf)
        idxhi = i
        idxd = np.arange(idxlo, idxhi, 1)[::-1]
        idxtf = np.arange(idxhi - idxlo)
        try:
            modf.append(np.sum(xd[idxd] * tf[idxtf]))
        except (IndexError, ValueError):
            # mismatched index ranges at the array edges contribute nothing
            # (was a bare except that hid every other error)
            modf.append(0)
    return np.array(modf)


def mc3(td, xd, tau, tf):
    """Convolve ``xd(td)`` with a response ``tf(tau)`` on a possibly
    different lag grid.

    If the lag-grid spacing differs from the driver spacing, the response
    is first linearly interpolated onto the driver spacing.

    :param td: 1-D array, driver time grid (assumed uniform).
    :param xd: 1-D array, driver values.
    :param tau: 1-D array, lag grid of the response function.
    :param tf: 1-D array, response values on ``tau``.
    :return: 1-D array, same length as ``xd``, the convolved model.
    """
    nd = np.shape(xd)[0]
    modf = []

    taulo = np.min(tau)
    tauhi = np.max(tau)
    dtau = np.mean(tau[1:] - tau[:-1])
    dtgrid = np.mean(td[1:] - td[:-1])

    # put the response on the driver's time spacing if the grids differ
    if dtau != dtgrid:
        tauint = np.arange(taulo, tauhi + dtgrid, dtgrid)
        tfint = np.interp(tauint, tau, tf)
    else:
        tauint = tau
        tfint = tf

    # response support expressed in driver-grid steps
    itflo = int(np.floor(taulo / dtgrid))
    itfhi = int(np.floor(tauhi / dtgrid))

    for i in range(nd):
        idxlo = max(0, i - itfhi)
        idxhi = min(nd - 1, i - itflo)
        idxd = np.arange(idxlo, idxhi, 1)[::-1]
        idxtf = np.arange(idxhi - idxlo)
        try:
            modf.append(np.sum(xd[idxd] * tfint[idxtf]))
        except (IndexError, ValueError):
            # edge effects: treat out-of-range contributions as zero
            modf.append(0)
    return np.array(modf)
# Black-body spectrum of an accretion disc, by default at 70 Mpc, in mJy
# (mjy=1) or erg/s/ang/cm^2 (mjy=0).
#
# INPUT  wav[...nwav]  wavelengths at which to evaluate the spectrum
# ...... embh, emdot   black-hole mass and accretion rate (Msun, Msun/yr)
# ...... degi          disc inclination in degrees (0 = face-on)
# ...... dl            luminosity distance in Mpc (default 70)
# ...... radlosch, radhisch, ngrid  disc-grid resolution controls
# OUTPUT fnu[...nwav]  spectrum

import numpy as np
import pycecream.modules.mytemp0 as mt0
import pycecream.modules.myplank as mp

twopi = 2 * np.pi
deg2rad = twopi / 360.0


def mds(
    wav, embh, emdot, degi, dl=70.0, radlosch=3.0, radhisch=10000.0, ngrid=1000, mjy=1
):
    """Compute the black-body spectrum of an accretion disc.

    :param wav: iterable of wavelengths (Angstroms); zeros give zero flux.
    :param embh: black-hole mass (solar masses).
    :param emdot: accretion rate (solar masses / year).
    :param degi: inclination in degrees (0 = face-on).
    :param dl: luminosity distance (Mpc).
    :param radlosch: inner disc radius (Schwarzschild radii).
    :param radhisch: outer disc radius (Schwarzschild radii).
    :param ngrid: number of radial grid points.
    :param mjy: 1 for mJy output, else erg/s/ang/cm^2.
    :return: list of fluxes, one per input wavelength.
    """
    # physical constants (SI unless noted); sb and secyr are retained from
    # the original module but unused below
    ref_rad_ld = 1.0
    sb = 5.670367e-8
    gnewt = 6.67408e-11
    msun = 1.98855e30
    secyr = 31557600.0
    ld = 2.59020683712e13
    c = 299792458.0

    # inner radius in light days; enlarge the reference radius when the
    # black hole is too massive for a 1-light-day reference
    rinld = radlosch * 2 * gnewt * msun * embh / c / c / ld
    if rinld > ref_rad_ld:
        ref_rad_ld = np.ceil(rinld / ref_rad_ld)

    cosi = np.cos(deg2rad * degi)
    ldMpc = 1191286169.529  # light days per Mpc -- TODO confirm units
    ldMpc2 = ldMpc * ldMpc

    # log-spaced radial grid and annulus widths, in light days
    rsch = 1.15821e-10 * embh  # Schwarzschild radius in light days
    rgrid = np.logspace(
        np.log10(radlosch * rsch), np.log10(radhisch * rsch), ngrid
    )
    drgrid = rgrid[1:] - rgrid[:-1]
    drgrid = np.append(drgrid, drgrid[-1])

    # temperature-radius profile from viscous + irradiation heating
    t0v = mt0.tv0(embh, emdot, r0=ref_rad_ld, rinsch=radlosch)
    t0i = mt0.ti0(embh, emdot, r0=ref_rad_ld, hxsch=3, eta=0.1)
    tr = mt0.tr(
        rgrid,
        t0v,
        t0i,
        embh,
        r0=ref_rad_ld,
        alpha_visc=-0.75,
        alpha_irad=-0.75,
        rinsch=radlosch,
    )

    # integrate the Planck function over the disc annuli per wavelength
    fnu = []
    for wavnow in wav:
        if wavnow == 0:
            fnu.append(0)
        else:
            planck = mp.bnuvec(wavnow, tr, mjy=mjy)
            weight = rgrid * drgrid * planck
            fnu.append(np.sum(weight) * twopi / dl / dl / ldMpc2 * cosi)

    return fnu
## Eddington-luminosity utilities for an AGN with a given accretion rate.

import numpy as np

pi = np.pi
G = 6.67384e-11
msun = 1.9891e30
mp = 1.67262e-27
c = 2.9979e8
thomson_e = 6.6524e-29
year = 24.0 * 3600 * 365
watt2erg = 1.0e7

## input umdot: accretion rate (M0 /yr)
##       um   : BH mass (M0)
## output ledd (Eddington luminosity in ergs s_1)
##        eddrat (Eddington ratio l/ledd)


def edd(um, umdot, eta):
    """Return (accretion luminosity, Eddington luminosity, Eddington ratio).

    :param um: black-hole mass in solar masses.
    :param umdot: accretion rate in solar masses per year.
    :param eta: radiative efficiency.
    :return: tuple (l, ledd, l/ledd), luminosities in erg/s.
    """
    # L_Edd = 1.26e31 W per solar mass, converted to erg/s
    eddington = 1.26e31 * um * watt2erg
    # L = eta * Mdot * c^2, converted from (Msun/yr) to erg/s
    accretion = eta * umdot / year * msun * c**2 * watt2erg
    return (accretion, eddington, accretion / eddington)


## input (IN LOG10) ummdot: mass times accretion rate (M0**2 /yr) with error
##       um: BH mass (M0) with error; negative umlog/umsiglog means the
##       input is in linear (not log) units.
## ummdotversion: 1 -> ummdotlog/ummdotsiglog in log units; -1 -> linear;
##       2 -> they are actually umdotlog/umdotsiglog; -2 -> linear umdot.
## output: Eddington luminosity, ratio, uncertainties, and umdot.


def edd2(umlog, umsiglog, ummdotlog, ummdotsiglog, eta, ummdotversion=1):
    """Eddington ratio with uncertainties from (mostly) log-space inputs.

    :return: (l, ledd2, eddrat, sigeddrat, umdot, sigumdot, umdotlog,
        umdotsig_log)
    """
    ln10 = np.log(10.0)

    # mass: log input carries a log-normal mean correction; negative input
    # is treated as a linear value
    if umlog > 0:
        um = 10**umlog * (1.0 + 0.5 * ln10**2 * umsiglog * umsiglog)
    else:
        um = np.abs(umlog)
    if umsiglog > 0:
        um_err = um * ln10 * umsiglog
    else:
        um_err = 1.0 * umsiglog

    # decode the accretion rate according to ummdotversion
    if ummdotversion == 1:
        mmdot = 10**ummdotlog * (
            1.0 + 0.5 * ln10**2 * ummdotsiglog * ummdotsiglog
        )
        mmdot_err = mmdot * ln10 * ummdotsiglog
        umdot = mmdot / um
        sigumdot = umdot * np.sqrt((mmdot_err / mmdot) ** 2 + (um_err / um) ** 2)
    elif ummdotversion == -1:
        mmdot = 1.0 * ummdotlog
        mmdot_err = 1.0 * ummdotsiglog
        umdot = mmdot / um
        sigumdot = umdot * np.sqrt((mmdot_err / mmdot) ** 2 + (um_err / um) ** 2)
    elif ummdotversion == 2:
        umdot = 10**ummdotlog * (
            1.0 + 0.5 * ln10**2 * ummdotsiglog * ummdotsiglog
        )
        sigumdot = umdot * ln10 * ummdotsiglog
    elif ummdotversion == -2:
        umdot = ummdotlog
        sigumdot = ummdotsiglog

    ledd2 = 1.26e31 * um * watt2erg
    l = eta * umdot / year * msun * c**2 * watt2erg

    sigl = l / umdot * sigumdot
    sigledd2 = ledd2 / um * um_err

    eddrat = l / ledd2
    sigeddrat = eddrat * np.sqrt((sigl / l) ** 2 + (sigledd2 / ledd2) ** 2)

    # accretion rate in log10 units (meaningful for the log-input cases)
    umdotlog = ummdotlog - umlog
    umdotsig_log = np.sqrt(umsiglog * umsiglog + ummdotsiglog * ummdotsiglog)
    return (l, ledd2, eddrat, sigeddrat, umdot, sigumdot, umdotlog, umdotsig_log)
import numpy as np


# ---- Eddington-rate helpers (myedlum tail) ----

# input Mass, efficiency; output Eddington accretion rate


def eddrate(um, eta):
    """Eddington accretion rate (Msun/yr) for black-hole mass ``um`` (Msun)
    and radiative efficiency ``eta``."""
    # constants duplicated locally (same values as the module head) so the
    # function is self-contained
    c = 2.9979e8                # speed of light (m/s)
    year = 24.0 * 3600 * 365    # seconds per year
    msun = 1.9891e30            # solar mass (kg)
    eddmdot = 1.26e31 * um / eta / (c * c)  # Eddington Mdot in kg/s (Watts / c^2)
    eddumdot = eddmdot * year / msun
    return eddumdot


def ermin_mdotout(um, er, eta=0.1):
    """Accretion rate (Msun/yr) for a given mass and Eddington ratio.

    :param um: black-hole mass (solar masses).
    :param er: Eddington ratio (0-1).
    :param eta: accretion efficiency (typically 0.1).
    """
    umdot = er / 460.7e6 / eta * um
    return umdot


# ---- fake light-curve amplitude helpers (myfake_amp) ----

# module-level defaults retained for compatibility (unused below)
fluxmean = 2.0
wav = 5000.0


def abmag(fmjy):
    """Apparent AB magnitude for a flux in mJy."""
    return -2.5 * np.log10(fmjy / 1000.0) + 8.9


def absmag(abm, dMpc):
    """Absolute magnitude from apparent magnitude and distance (Mpc)."""
    return abm - 5 * np.log10(dMpc) - 25.0


def drwtau_mc(embh, wav, absmag):
    """Damped-random-walk timescale (days) from the empirical relations of
    MacLeod, Ivezic et al. 2010.

    NOTE(review): the third parameter shadows the module-level ``absmag``
    function; name kept for backward compatibility with keyword callers.
    """
    A = 2.4
    B = 0.17
    C = 0.03
    D = 0.21
    taulog = (
        A
        + B * np.log10(wav / 4000.0)
        + C * (absmag + 23.0)
        + D * np.log10(embh / 1.0e9)
    )
    return 10**taulog


def drwsfinf_mc(embh, wav, absM, z=0.0):
    """DRW asymptotic structure function SF_inf (mag), MacLeod, Ivezic et
    al. 2010 (different coefficients from ``drwtau_mc``)."""
    A = -0.56
    B = -0.479
    C = 0.111
    D = 0.11
    E = 0.07
    sfinflog = (
        A
        + B * np.log10(wav / 4000.0)
        + C * (absM + 23.0)
        + D * np.log10(embh / 1.0e9)
        + E * np.log10(1.0 + z)
    )
    return 10**sfinflog


def mfamp(embh, wav, fmjy, tlen, dMpc, z=0.0):
    """RMS variability amplitude (mJy) of a light curve of length ``tlen``
    days, using the MacLeod et al. DRW relations."""
    abm = abmag(fmjy)
    absM = absmag(abm, dMpc)
    taumc = drwtau_mc(embh, wav, absM)
    sfinf = drwsfinf_mc(embh, wav, absM, z=z)
    # guard against a zero DRW timescale (the exp term would be undefined)
    if taumc == 0:
        sf_inf = sfinf
    else:
        sf_inf = sfinf * np.sqrt((1.0 - np.exp(-tlen / taumc)))
    sdmag = np.abs(sf_inf) / np.sqrt(2)
    # convert the AB-magnitude rms into a flux rms (mJy)
    a = 1.0 / 1000 / 3631
    sdmjy = np.abs(-0.4 * 10 ** (-abm * 0.4) / a * np.log(10.0) * sdmag)
    return sdmjy


# ---- random-walk light-curve generator (mylcgen) ----


def mylcgen(
    datfile="",
    p0=1.0,
    f0=0.1,
    a=-2,
    b=-2,
    tlo=0,
    thi=100,
    dt=0.125,
    ploton=0,
    iseed=-1,
    meannorm=-1.0,
    sdnorm=-1.0,
):
    """Generate a random-walk light curve with a broken power-law power
    spectrum following the prescription of Timmer & Koenig 1995.

    :param datfile: if non-empty, save the light curve to this file and the
        Fourier coefficients to ``fft_<datfile>``.
    :param p0: power-spectrum normalisation.
    :param f0: break frequency (cycles/day).
    :param a: low-frequency power-law slope.
    :param b: high-frequency power-law slope.
    :param tlo: start time.
    :param thi: end time.
    :param dt: time step.
    :param ploton: if 1, display diagnostic plots.
    :param iseed: if > 0, seed for reproducible output; else clock-seeded.
    :param meannorm: if >= 0, shift the output to this mean.
    :param sdnorm: if > 0, rescale the output to this standard deviation.
    :return: array of shape (nt, 2): time, flux.
    """
    # reproducible output when a positive seed is supplied
    if iseed > 0:
        np.random.seed(int(iseed))
    else:
        np.random.seed()

    flo = 0.5 / (thi - tlo)
    fhi = 1.0 / dt
    df = 1.0 * flo

    time = np.arange(tlo, thi + dt, dt)
    nt = np.shape(time)[0]
    x = np.zeros(nt)
    dat = np.zeros((nt, 2))

    freq = np.arange(flo, fhi + df, df)
    # bug fix: derive nf from the actual grid rather than a separately
    # rounded ceil() count, which could disagree with arange() and break
    # the element-wise products below
    nf = freq.shape[0]
    w = 2 * np.pi * freq

    # per-mode amplitudes (renamed from 'a', which shadowed the slope
    # parameter of the same name)
    amp = np.sqrt(p0 * (freq / f0) ** a / (1 + (freq / f0)) ** (a - b))
    sk = np.random.randn(nf) * amp
    ck = np.random.randn(nf) * amp

    dps = np.zeros((nf, 3))
    dps[:, 0] = freq
    dps[:, 1] = sk
    dps[:, 2] = ck

    # sum the Fourier series at each time
    for i in range(nt):
        tnow = time[i]
        x[i] = np.sum(sk * np.sin(w * tnow) + ck * np.cos(w * tnow))
    dat[:, 0] = time
    dat[:, 1] = x

    if len(datfile) > 0:
        np.savetxt(datfile, dat)
        np.savetxt("fft_" + datfile, dps)

    # optional renormalisation of spread then mean
    if sdnorm > 0:
        datsd = np.std(dat[:, 1])
        datmean = np.mean(dat[:, 1])
        dat[:, 1] = (dat[:, 1] - datmean) * sdnorm / datsd + datmean

    if meannorm >= 0:
        dat[:, 1] = dat[:, 1] - dat[:, 1].mean() + meannorm

    if ploton == 1:
        # imported lazily so the module does not require matplotlib unless
        # plotting is actually requested
        import matplotlib.pylab as plt

        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax1.plot(time, x)

        ax2 = fig.add_subplot(212)
        ax2.plot(freq, sk**2 + ck**2, ls="", marker="o")
        ax2.set_xscale("log")
        ax2.set_yscale("log")
        plt.show()

    return dat


# ---- Planck function (myplank) ----
# output blackbody intensity (erg/cm2/s/Hz/ster); mjy=1 scales by 1e26,
# ergcmang=1 converts to erg/cm^2/s/A (conversions per
# http://www.stsci.edu/~strolger/docs/UNITS.txt)


def bnu(wave, temp, mjy=1, ergcmang=0):
    """Planck function B_nu for wavelength ``wave`` (Angstroms) and
    temperature ``temp`` (K).

    :param mjy: if 1, scale the output by 1e26.
    :param ergcmang: if 1 (and mjy != 1), convert to erg/cm^2/s/A.
    """
    c1 = 1.43883e8  # hc/k in cgs * 1e8 (wavelength given in Angstroms)
    c2 = 1.95722e5  # (c1 / (2hc))**(1/3)

    # bug fix: the original initialised only the never-read 'BNU', so an X
    # exactly on a branch boundary (85 or 1e-4) left 'bnu' unbound
    bnu = 0.0
    if temp > 0.0:
        X = c1 / (wave * temp)
        if X > 85:
            # Wien limit: evaluate in logs to avoid underflow
            bnuln = 3.0 * np.log((c1 / wave) / c2) - X
            bnu = np.e ** (bnuln)
        elif X < 1e-4:
            # Rayleigh-Jeans limit: series expansion of 1/(e^X - 1)
            factor = 2.0 / (X * (X + 2.0))
            X = X * temp / c2
            bnu = factor * X**3
        else:
            factor = 1.0 / (np.e ** (X) - 1.0)
            X = X * temp / c2
            bnu = factor * X**3
    else:
        bnu = 0.0

    if mjy == 1:
        bnu = bnu * 1.0e26
    elif ergcmang == 1:
        bnu = bnu / 3.33564095e-19 / wave / wave
    return bnu
# if you have an array of EITHER wav or temp then use the vectorized version
def bnuvec(wav, temp, mjy=1, ergcmang=0):
    """Vectorised wrapper around ``bnu`` (array-valued ``wav`` OR ``temp``).

    :return: array of Planck intensities, units selected by mjy/ergcmang.
    """
    vec = np.vectorize(bnu)
    # bug fix: ergcmang was previously hard-coded to 0, silently ignoring
    # a caller's request for erg/cm^2/s/A output
    return vec(wav, temp, mjy=mjy, ergcmang=ergcmang)


# ---- random-number helpers (myrandom) ----
import numpy as np

pi = np.pi
e = np.e


def unirand(no, a, b):
    """Return ``no`` random numbers distributed uniformly in [a, b)."""
    dat = 1.0 * (b - a) * np.random.rand(no) + 1.0 * a
    return dat


def unirand_n(no, sigma, mu, a, b):
    """Gaussian density N(mu, sigma) evaluated at ``no`` uniform random
    samples drawn from [a, b) (identical sampling to ``unirand``)."""
    dat_n = (
        1.0
        / (sigma**2 * 2 * pi) ** 0.5
        * e ** (-((unirand(no, a, b) - mu) ** 2) / (2 * sigma**2))
    )
    return dat_n


def unirand_e(no, tau, a, b):
    """Exponential density (scale ``tau``) evaluated at ``no`` uniform
    random samples drawn from [a, b)."""
    dat_e = 1.0 / tau * e ** (-unirand(no, a, b) / tau)
    return dat_e


def normdis(n, mu, sigma):
    """Draw ``n`` samples from a Gaussian with mean ``mu`` and standard
    deviation ``sigma``."""
    op = np.random.randn(n) * sigma + mu
    return op


def normdisoldandbroken(n, mu, sigma):
    """Deprecated rejection-sampling Gaussian generator.

    Kept only for reference; superseded by ``normdis``. NOTE(review): the
    ``np.append(..., axis=1)`` call below raises on 1-D arrays, so this
    function is broken as its name says -- do not call it.
    """
    view_out = []
    count = 0
    while count < n:
        # draw candidates over +/- 10 sigma and accept with Gaussian prob.
        randval = unirand(n, mu - 10 * sigma, mu + 10 * sigma)
        randval.sort()

        randprob = np.zeros((n, 2))
        randprob[:, 0] = (
            1.0
            / (sigma**2 * 2 * pi) ** 0.5
            * e ** (-((randval - mu) ** 2) / (2 * sigma**2))
        )
        # probability that each candidate is kept
        randprob[:, 0] = randprob[:, 0] / np.max(randprob[:, 0])
        randprob[:, 1] = unirand(n, 0, 1)

        # reject candidates whose uniform draw exceeds the Gaussian prob.
        view = np.delete(randval, [np.where(randprob[:, 0] < randprob[:, 1])])

        view_out = np.append(view_out, view, axis=1)
        count = len(view_out)
    # trim to exactly n accepted samples
    view_out = view_out[:n]
    return view_out
#### Randomly resample a set of input light curves.
# datin is an array [nt, 3] of (time, flux, error); a resampled array of
# the same column layout is returned.
#
# sampcode 1: keep a random subset of parent points (fraction between
#             sampmin and 1).
# sampcode 2: step through time with spacing dtave (+ noise), linearly
#             interpolating flux and error at each new time.
# sampcode 3: step with spacing ~ N(dtave, dtsd) and take the closest
#             parent point, enforcing minimum spacing sampmin.
# sampcode 4: pick parent points (no interpolation), skipping until the
#             minimum spacing dtave is achieved.
#
# dtsdin: sd of the spacing between points; if negative, |dtsdin| is the
# fraction of the mean spacing (e.g. -0.2 -> sd = 0.2 * dtave).

import numpy as np
import os  # retained from the original module (unused here)


def myresample(datin, dtave, dtsdin=-0.2, sampmin=0.8, sampcode=3):
    """Randomly resample the light curve ``datin``.

    :param datin: array [nt, 3] of (time, flux, error).
    :param dtave: target mean spacing between resampled points.
    :param dtsdin: spacing sd (negative = fraction of dtave, see above).
    :param sampmin: minimum spacing between samples (sampcodes 1 and 3).
    :param sampcode: resampling strategy, 1-4 (see module comments).
    :return: resampled array [nsamp, 3].
    :raises ValueError: if sampcode is not 1-4.
    """
    if dtsdin < 0:
        dtsd = np.abs(dtsdin) * dtave
    else:
        dtsd = dtsdin

    dat = datin
    t = dat[:, 0]
    x = dat[:, 1]
    sig = dat[:, 2]
    Ndat = t.shape[0]
    dt = (t[-1] - t[0]) / (Ndat - 1)

    if sampcode == 1:
        # bug fix: this branch previously called the non-existent
        # np.random.ranom_sample and passed randint-style keywords (and a
        # float size) to np.random.rand, so it always raised. The draw is
        # interpreted as the FRACTION of parent points to keep, between
        # sampmin and 1. NOTE(review): fraction-of-Ndat interpretation
        # inferred from the variable usage - confirm against callers.
        frac = (1.0 - sampmin) * np.random.random_sample(1)[0] + sampmin
        nidx = max(1, int(frac * Ndat))
        idxsamp = np.random.randint(low=0, high=Ndat, size=nidx)
        datsamp = np.zeros((nidx, 3))
        datsamp[:, 0] = t[idxsamp]
        datsamp[:, 1] = x[idxsamp]
        datsamp[:, 2] = sig[idxsamp]

    elif sampcode == 2:
        idxcount = 0
        tthen = t[0]
        xn = []
        sign = []
        tn = []
        while (idxcount < Ndat) & (tthen < t[-1]):
            # jitter each step around dtave
            a = np.random.randn(1) * dt * 2
            tnow = tthen + dtave + a
            tn.append(tnow)
            xn.append(np.interp([tnow], t, x)[0])
            sign.append(np.interp([tnow], t, sig)[0])
            tthen = tnow
            idxcount = idxcount + 1

        tn = np.array(tn)
        xn = np.array(xn)
        sign = np.array(sign)
        nn = xn.shape[0]
        datsamp = np.zeros((nn, 3))
        datsamp[:, 0] = tn[:, 0]
        datsamp[:, 1] = xn[:, 0]
        datsamp[:, 2] = sign[:, 0]

    elif sampcode == 3:
        idxcount = 0
        tthen = t[0]
        idxsamp = []
        tlast = t[-1]
        while (idxcount < Ndat - 1) & (tthen < tlast - 4 * sampmin):
            a = np.random.normal(dtave, dtsd, 1)[0]
            tnow = tthen + np.abs(a)
            idxtemp = np.abs(t - tnow).argmin()  # closest parent time

            # accept only unused indices that respect the minimum spacing
            # (or fall within the end-of-series window)
            if (idxtemp not in idxsamp) and (
                (tnow - tthen > sampmin) or (tnow > tlast - sampmin)
            ):
                idxsamp.append(idxtemp)
                idxcount = idxcount + 1
                tthen = tnow

        idxsamp = np.array(idxsamp)
        datsamp = np.zeros((idxsamp.shape[0], 3))
        datsamp[:, 0] = t[idxsamp]
        datsamp[:, 1] = x[idxsamp]
        datsamp[:, 2] = sig[idxsamp]

    elif sampcode == 4:
        idxcount = 0
        tthen = t[0]
        idxsamp = []
        while (idxcount < Ndat) & (tthen < t[-1]):
            tnow = tthen + dtave

            # indices of parent points strictly later than tnow; note the
            # enumerate starts at 1 (behavior preserved from the original)
            b = t > tnow
            idxtemp = [i for i, elem in enumerate(b, 1) if elem]
            if len(idxtemp) == 0:
                break

            idxtemp = idxtemp[0]
            if idxtemp >= t.shape[0]:
                break

            if idxtemp not in idxsamp:
                idxsamp.append(idxtemp)
                idxcount = idxcount + 1

            tthen = t[idxtemp]

        idxsamp = np.array(idxsamp)
        datsamp = np.zeros((idxsamp.shape[0], 3))
        datsamp[:, 0] = t[idxsamp]
        datsamp[:, 1] = x[idxsamp]
        datsamp[:, 2] = sig[idxsamp]

    else:
        # previously fell through to an unbound-variable NameError
        raise ValueError("myresample: sampcode must be 1, 2, 3 or 4")

    return datsamp
8 | #!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!! 9 | def temp0( 10 | dotmmlog, 11 | emlog, 12 | sigdotmmlog=0, 13 | sigemlog=0.0, 14 | alpha_in=0.75, 15 | sig_alphain=0.0, 16 | eta=0.1, 17 | alb=0, 18 | hxrs=3, 19 | r0ld=1, 20 | ): 21 | 22 | gnewt = 6.673e-11 23 | c = 2.9979e8 24 | sig = 5.67e-8 25 | emsun = 2.0e30 26 | ld = 2.59e13 27 | em = 10**emlog 28 | sigem = em * np.log(10) * sigemlog 29 | 30 | rs = 2 * gnewt * em * emsun / c**2 31 | 32 | hx = hxrs * rs 33 | 34 | if r0ld < 0: 35 | r0 = np.abs(r0ld * rs) 36 | else: 37 | r0 = r0ld * ld 38 | 39 | emsunyr = emsun / (3600.0 * 24 * 365) 40 | 41 | dotmm = 10**dotmmlog 42 | sigdotmm = dotmm * np.log(10) * sigdotmmlog 43 | 44 | emdot = dotmm / em 45 | sigemdot = emdot * np.sqrt((sigdotmm / dotmm) ** 2 + (sigem / em) ** 2) 46 | 47 | a = 3 * gnewt * emsun * emsunyr / (8 * np.pi * sig) 48 | aextra = r0 ** (4 * alpha_in) 49 | sigaextra = aextra * np.log(r0) * 4 * sig_alphain 50 | anew = a / aextra 51 | siganew = anew / aextra * sigaextra 52 | 53 | b = anew * dotmm 54 | sigb = b * np.sqrt((siganew / anew) ** 2 + (sigdotmm / dotmm) ** 2) 55 | 56 | # for i in range(np.shape(b)[0]): 57 | # print i,a,b[i],sigb[i] 58 | 59 | # print 'diagnostics' 60 | # print hx,(1.-a),eta,emsunyr,c**2,sig,r0 61 | # raw_input() 62 | 63 | c = hx * (1.0 - alb) * eta * emsunyr * c**2 / (4 * np.pi * sig) 64 | cextra = 1.0 * aextra 65 | sigcextra = 1.0 * sigaextra 66 | cnew = c / cextra 67 | sigcnew = cnew / cextra * sigcextra 68 | 69 | d = cnew * emdot 70 | sigd = d * np.sqrt((sigcnew / cnew) ** 2 + (sigemdot / emdot) ** 2) 71 | 72 | # for i in range(np.shape(b)[0]): 73 | # print i,c,d[i],sigd[i] 74 | # raw_input() 75 | 76 | baddc = b + d 77 | sig_baddc = np.sqrt(sigb * sigb + sigd * sigd) 78 | 79 | temp0 = baddc**0.25 80 | sigtemp0 = temp0 / baddc * 0.25 * sig_baddc 81 | # print sigtemp0,temp0, sig_baddc, baddc,'dsfdsfs', sigb,b, sigd,d, 'dfs', sigcnew,cnew, sigemdot,emdot 82 
| 83 | return (temp0, sigtemp0, b**0.25, d**0.25) 84 | 85 | 86 | #!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!! 87 | #!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!! 88 | #!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!!#!!!!!!!!!!!!!!!!!!!!!!!!!!!! 89 | 90 | 91 | # function to return the radius for a specific temperature r = (T1/T)^[1/alpha] r1 92 | # can include errors on T1 and alpha 93 | def rsub(Tsub, r1, T1, alpha, sigT1=0.0, sigalpha=0.0): 94 | 95 | alpha_1 = 1.0 / alpha 96 | alpha_1_sig = alpha_1 * sigalpha / alpha 97 | 98 | T1_T = T1 / Tsub 99 | 100 | T1_T_alpha_1 = T1_T**alpha_1 101 | 102 | T1_T_alpha_1_sig = T1_T_alpha_1 * np.sqrt( 103 | (alpha_1 / T1_T * sigT1 / Tsub) ** 2 + (np.log(T1_T) * alpha_1_sig) ** 2 104 | ) 105 | 106 | a = T1_T_alpha_1 * r1 107 | siga = T1_T_alpha_1_sig * r1 108 | 109 | return (a, siga) 110 | 111 | 112 | def tv0(em, emdot, r0=1.0, rinsch=3.0): 113 | sb = 5.670367e-8 114 | gnewt = 6.67408e-11 115 | msun = 1.98855e30 116 | secyr = 31557600.0 117 | ld = 2.59020683712e13 118 | c = 299792458.0 119 | rinld = rinsch * 2 * gnewt * msun * em / c / c / ld 120 | 121 | if rinld > r0: 122 | r0in = np.ceil(rinld / r0) * r0 123 | print( 124 | "mytemp0.py inner radius bigger than reference radius (Mbh too big) changing reference radius" 125 | ) 126 | else: 127 | r0in = r0 128 | 129 | tv0out4 = ( 130 | em 131 | * emdot 132 | * 3 133 | * gnewt 134 | * msun 135 | * msun 136 | / secyr 137 | / 8 138 | / np.pi 139 | / sb 140 | / (r0 * ld) ** 3 141 | * (1.0 - np.sqrt(rinld / r0in)) 142 | ) 143 | 144 | tv0out2 = np.sqrt(tv0out4) 145 | tv0out = np.sqrt(tv0out2) 146 | return tv0out 147 | 148 | 149 | def ti0(em, emdot, r0=1, hxsch=3, eta=0.1): 150 | sb = 5.670367e-8 151 | gnewt = 6.67408e-11 152 | msun = 1.98855e30 153 | secyr = 31557600.0 154 | ld = 2.59020683712e13 155 | c = 299792458.0 156 | hxld = hxsch * 2 * gnewt * msun * em / c / c / ld 157 | 158 | d2_ld = 
hxld * hxld + r0 * r0 159 | d1ld = np.sqrt(d2_ld) 160 | d3ld = d2_ld * d1ld 161 | ti0out4 = ( 162 | eta * msun / secyr * emdot * c * c / 8 / np.pi / sb * hxld / d3ld / ld / ld 163 | ) 164 | ti0out2 = np.sqrt(ti0out4) 165 | ti0out = np.sqrt(ti0out2) 166 | return ti0out 167 | 168 | 169 | # input grid r[....nr]] in light days, output t[r] 170 | def tr(r, t0v, t0i, embh, r0=1, alpha_visc=-0.75, alpha_irad=-0.75, rinsch=3): 171 | 172 | t0i2 = t0i * t0i 173 | t0i4 = t0i2 * t0i2 174 | 175 | t0v2 = t0v * t0v 176 | t0v4 = t0v2 * t0v2 177 | 178 | av4 = alpha_visc * 4 179 | 180 | ai4 = alpha_irad * 4 181 | 182 | rsch = 1.15821e-10 * embh 183 | rinld = rinsch * rsch 184 | 185 | if rinld > r0: 186 | r0in = np.ceil(rinld / r0) * r0 187 | print( 188 | "mytemp0.py inner radius bigger than reference radius (Mbh too big) changing reference radius" 189 | ) 190 | else: 191 | r0in = r0 192 | 193 | # print r.shape 194 | # print np.mean(r) 195 | # print t0v4*(r)**av4 196 | 197 | tv4 = ( 198 | t0v4 * (r / r0) ** av4 * (1 - np.sqrt(rinld / r)) / (1 - np.sqrt(rinld / r0in)) 199 | ) 200 | ti4 = t0i4 * (r / r0) ** ai4 201 | 202 | tout2 = np.sqrt(tv4 + ti4) 203 | tout = np.sqrt(tout2) 204 | return tout 205 | -------------------------------------------------------------------------------- /pycecream/modules/mytfb_quick.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pycecream.modules.mytemp0 as mt0 3 | 4 | # from scipy import signal 5 | 6 | twopi = np.pi * 2 7 | deg2rad = np.pi / 180 8 | planck = 6.626307004e-34 9 | c = 2.99792458e8 10 | boltz = 1.38064852e-23 11 | 12 | 13 | def pytfb_sub( 14 | taus, 15 | embh, 16 | emdot, 17 | wavang, 18 | deginc, 19 | t0vin=-1, 20 | t0iin=-1, 21 | alpha_visc=-0.75, 22 | hxsch=3.0, 23 | alpha_irad=-0.75, 24 | eta=0.1, 25 | rlosch=3.0, 26 | norm=1, 27 | quick=1, 28 | xstop=15, 29 | udlnr=0.01, 30 | thcent=1.0, 31 | thfwhm=0.2, 32 | oldsmooth=0, 33 | newsmooth=1, 34 | diagnose=0, 35 | ): 36 | 
37 | taulo = taus[0] 38 | ntaus = np.shape(taus)[0] 39 | psis = np.zeros(ntaus) 40 | 41 | # if you want a top hat response then its easy else do what you had before 42 | if wavang < 0.0: 43 | idxinc = np.where((taus > thcent - thfwhm / 2) & (taus < thcent + thfwhm / 2))[ 44 | 0 45 | ] 46 | psis[idxinc] = 1 47 | 48 | else: 49 | # either input desired reference temperature t0 at 1 light day, or calculate the 50 | # value appropriate for black body accretion disc given m and mdot 51 | if t0vin < 0: 52 | t0v = mt0.tv0(embh, emdot) 53 | else: 54 | t0v = t0vin 55 | 56 | if t0iin < 0: 57 | t0i = mt0.ti0(embh, emdot, eta=eta) 58 | else: 59 | t0i = t0iin 60 | 61 | # need to calculate 1/T^3 (r/r0)^alpha_irad x^2/wav^2 / (cosh(x) - 1) delta (tau - tau(r,theta)) dtau 62 | 63 | t0v2 = t0v * t0v 64 | t0v4 = t0v2 * t0v2 65 | 66 | t0i2 = t0i * t0i 67 | t0i4 = t0i2 * t0i2 68 | 69 | rsch = 1.15821e-10 * embh # scwarzchild radius in light days 70 | hx = hxsch * rsch 71 | hx2 = hx * hx 72 | cosi = np.cos(deginc * deg2rad) 73 | sini = np.sin(deginc * deg2rad) 74 | hxci = hx * cosi 75 | hc_kwav = planck * c / wavang / 1.0e-10 / boltz 76 | wavang2 = wavang * wavang 77 | rlold = rlosch * rsch 78 | 79 | # this estimate for the cutoff radius is based on the max radius of the 80 | # highest lag (re-arrange equation 3 in Starkey et al 2017) 81 | # should be ok but might exclude some significant low lags for a VERY hot, edge on disk 82 | 83 | av4 = alpha_visc * 4 84 | ai4 = alpha_irad * 4 85 | # now define radius grid be smart here. 
86 | 87 | # use a cutoff x_stop to determine when to stop the radius grid 88 | dtau = taus[1] - taus[0] 89 | rhilog = dtau 90 | rtemp = np.array([rhilog, 10 * rhilog]) 91 | rtl = np.log(rtemp) 92 | ttemp4 = t0v4 * (rtemp) ** av4 + t0i4 * (rtemp) ** ai4 93 | ttemp = np.sqrt(np.sqrt(ttemp4)) 94 | ttl = np.log(ttemp) 95 | tstop = hc_kwav / xstop 96 | grad = (rtl[1] - rtl[0]) / (ttl[1] - ttl[0]) 97 | rhil = rtl[0] + grad * (np.log(tstop) - ttl[0]) 98 | rhild = np.exp(rhil) 99 | 100 | rgridlog = np.exp( 101 | np.arange(np.log(rlold), np.log(rhilog), udlnr)[:-1] 102 | ) # np.logspace(np.log(rlold),np.log(rhilog)) 103 | rgridlin = np.arange(rhilog, rhild, rhilog) 104 | rgrid = np.concatenate((rgridlog, rgridlin)) 105 | nrad = np.shape(rgrid)[0] 106 | dr = rgrid[1:] - rgrid[:-1] 107 | 108 | # calculate temperature at each radius grid 109 | tv4 = t0v4 * (rgrid) ** av4 110 | ti4 = t0i4 * (rgrid) ** ai4 111 | rir0b = ti4 / t0i4 # this is just (r/r0)^alpha_irad 112 | ttot4 = tv4 + ti4 113 | ttot2 = np.sqrt(ttot4) 114 | ttot = np.sqrt(ttot2) 115 | ttot3 = ttot2 * ttot 116 | 117 | # each point will have a time delay and a weighting append these to a list 118 | # for each point in the disc 119 | 120 | # loop of azimuths 121 | delsave = [] 122 | wsave = [] 123 | if diagnose == 1: 124 | azsave = [] 125 | rsave = [] 126 | 127 | if quick == 1: 128 | for i in range(nrad - 1): 129 | # quick way 130 | ttotnow = ttot[i] 131 | ttot3now = ttot3[i] 132 | radlo = rgrid[i] 133 | radhi = rgrid[i + 1] 134 | drad = dr[i] # radhi - radlo 135 | 136 | # now azimuth grid 137 | azwidth = drad / radlo 138 | azgrid = np.arange(0.0, twopi, azwidth) 139 | naz = int(twopi / azwidth) + 1 140 | # naz1 = np.shape(azgrid)[0] 141 | nazsub1 = naz - 1 142 | 143 | raz = np.random.uniform(radlo, radhi, nazsub1) 144 | daz = np.sqrt(raz * raz + hx2) 145 | az = np.random.uniform( 146 | low=azgrid[:-1], high=azgrid[1:], size=nazsub1 147 | ) # np.random.uniform(low=0,high=1,size=nazsub1)*azgrid_s + 
azgrid[:-1]#np.random.uniform(azgrid[:-1],azgrid[1:],1) 148 | caz = np.cos(az) 149 | tdl = hxci - raz * caz * sini + daz 150 | x = hc_kwav / ttotnow # hc_kwav/ttot#hc_kwav/ttotnow 151 | 152 | # print radlo, x 153 | x2 = x * x 154 | # the radlo * drad *azwidth is the solid angle element 155 | # azwidth can be left off here as it is always the same 156 | weight = ( 157 | rir0b[i] 158 | / ttot3now 159 | * x2 160 | / wavang2 161 | / (np.cosh(x) - 1) 162 | * radlo 163 | * drad 164 | * azwidth 165 | ) # rir0b/ttot3 * x2/wavang2/(np.cosh(x) - 1) * radlo*drad*azwidth # 166 | wsave.append([weight] * nazsub1) 167 | delsave.append(tdl) 168 | if diagnose == 1: 169 | azsave.append(az) 170 | rsave.append(raz) 171 | 172 | else: 173 | for i in range(nrad - 1): 174 | radlo = rgrid[i] 175 | radhi = rgrid[i + 1] 176 | drad = radhi - radlo 177 | 178 | # now azimuth grid 179 | azwidth = drad / radhi 180 | azgrid = np.arange(0.0, twopi, azwidth) 181 | naz = np.shape(azgrid)[0] 182 | nazsub1 = naz - 1 183 | 184 | raz = np.random.uniform(radlo, radhi, nazsub1) 185 | daz = np.sqrt(raz * raz + hx2) 186 | az = np.random.uniform( 187 | low=azgrid[:-1], high=azgrid[1:], size=nazsub1 188 | ) # np.random.uniform(azgrid[:-1],azgrid[1:],1) 189 | caz = np.cos(az) 190 | tdl = hxci - raz * caz * sini + daz 191 | tv4 = ( 192 | t0v4 * (raz) ** av4 193 | ) # modification for inner radius to the right (negligible)* (1 - np.sqrt(rlold/raz)) / (1 - np.sqrt(rlold/1)) 194 | ti4 = t0i4 * (raz) ** ai4 195 | rir0b = ti4 / t0i4 # this is just (r/r0)^alpha_irad 196 | ttot4 = tv4 + ti4 197 | ttot2 = np.sqrt(ttot4) 198 | ttot = np.sqrt(ttot2) 199 | ttot3 = ttot2 * ttot 200 | x = hc_kwav / ttot 201 | x2 = x * x 202 | weight = ( 203 | rir0b 204 | / ttot3 205 | * x2 206 | / wavang2 207 | / (np.cosh(x) - 1) 208 | * radlo 209 | * drad 210 | * azwidth 211 | ) 212 | wsave.append(weight) 213 | delsave.append(tdl) 214 | if diagnose == 1: 215 | azsave.append(az) 216 | rsave.append(raz) 217 | 218 | # introduce a lower 
threshold weight to abort the iterations (to save time) 219 | # delsave = [item for sublist in delsave for item in sublist] 220 | # wsave = [item for sublist in wsave for item in sublist] 221 | # delsave = np.array(delsave) 222 | # wsave = np.array(wsave) 223 | delsave = np.concatenate(delsave) 224 | wsave = np.concatenate(wsave) 225 | 226 | if diagnose == 1: 227 | rsave = [item for sublist in rsave for item in sublist] 228 | azsave = [item for sublist in azsave for item in sublist] 229 | 230 | if diagnose == 1: 231 | rsave = np.array(rsave) 232 | azsave = np.array(azsave) 233 | 234 | nds = np.shape(delsave)[0] 235 | 236 | # add smoothing function to approximate delta function 237 | # ntaus = np.shape(taus)[0] 238 | sigtau = 5 * dtau / 2 239 | sigtaulim = 3 * sigtau 240 | if oldsmooth == 1: 241 | sigtau2 = sigtau * sigtau 242 | 243 | psis = np.zeros(ntaus) 244 | for id in range(nds): 245 | delnow = delsave[id] 246 | idlo = int(max(1, np.floor((delnow - sigtaulim - taulo) / dtau))) 247 | # print 'dfsdf',ntaus, np.ceil((delnow + sigtaulim)/dtau), np.max(ntaus,np.ceil((delnow + sigtaulim)/dtau)) 248 | idhi = int(min(ntaus, np.ceil((delnow + sigtaulim - taulo) / dtau))) 249 | idinc = np.arange(idlo, idhi, 1) 250 | # print id,delnow,idlo,idhi,taus[idlo],taus[idhi] 251 | tdelsubtau = delnow - taus[idinc] 252 | gtemp = np.exp(-0.5 * tdelsubtau * tdelsubtau / sigtau2) 253 | gsum = np.sum(gtemp) 254 | wnow = wsave[id] 255 | psis[idinc] = psis[idinc] + wnow * gtemp / gsum 256 | 257 | elif newsmooth == 1: 258 | for i in range(ntaus): 259 | taunow = taus[i] 260 | idxlo = taunow - sigtaulim 261 | idxhi = taunow + sigtaulim 262 | idnow = np.where((delsave > idxlo) & (delsave < idxhi))[0] 263 | 264 | a = (taunow - delsave[idnow]) / sigtau 265 | a2 = -a * a / 2 266 | ea2 = np.exp(a2) 267 | ea2out = np.sum(wsave[idnow] * ea2) 268 | psis[i] = ea2out # /ea2sum 269 | 270 | # if no smoothing just sort the uneven array into ascending time order and release it (do not use psis as output 
in this case. BEST NOT TO USE THIS SETTING) 271 | else: 272 | ida = np.argsort(delsave) 273 | delsave = delsave[ida] 274 | wsave = wsave[ida] 275 | if diagnose == 1: 276 | azsave = azsave[ida] 277 | rsave = rsave[ida] 278 | psis = np.nan_to_num(psis, 0) 279 | 280 | if norm == 1: 281 | pt = psis - np.min(psis) 282 | psis = pt / np.max(pt) 283 | 284 | if diagnose == 1: 285 | return (psis, delsave, wsave, rsave, azsave) 286 | else: 287 | return psis 288 | 289 | 290 | if __name__ == "__main__": 291 | 292 | taugrid = np.arange(0, 30.1, 0.1) 293 | embh = 1.0e7 294 | emdot = 1.0 295 | wavnow = 5000 296 | deginc = 0.0 297 | 298 | psi = pytfb_sub( 299 | taugrid, 300 | embh, 301 | emdot, 302 | wavnow, 303 | deginc, 304 | t0vin=-1, 305 | t0iin=-1, 306 | alpha_visc=-0.75, 307 | hxsch=3.0, 308 | alpha_irad=-0.75, 309 | eta=0.1, 310 | rlosch=3.0, 311 | norm=1, 312 | quick=1, 313 | xstop=15, 314 | udlnr=0.01, 315 | thcent=1.0, 316 | thfwhm=0.2, 317 | oldsmooth=0, 318 | newsmooth=1, 319 | diagnose=0, 320 | ) 321 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | . 2 | pandas==2.2.0 3 | numpy==1.26.4 4 | matplotlib==3.8.2 5 | scipy==1.12.0 6 | #mkdocs 7 | #pydoc-markdown 8 | corner==2.2.2 9 | seaborn==0.13.2 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | #upload to pip 4 | #pip install . 
5 | #python setup.py sdist 6 | #twine upload dist/pycecream-1.5.5.tar.gz 7 | 8 | setup(name='pycecream', 9 | version='1.5.5', 10 | description='python implementation of the cream accretion disc fitting code ' 11 | 'https://academic.oup.com/mnras/article-abstract/456/2/1960/1066664?redirectedFrom=PDF' 12 | , 13 | long_description= 'add pycecream.dream light curve merging feature' 14 | ,url='https://github.com/dstarkey23/pycecream', 15 | author='dstarkey23', 16 | author_email='ds207@st-andrews.ac.uk', 17 | license='MIT', 18 | packages=['pycecream'], 19 | package_data={'': ['creaminpar.par','cream_f90.f90']}, 20 | install_requires=[ 21 | 'pandas==2.2.0', 22 | 'numpy==1.26.4', 23 | 'matplotlib==3.8.2', 24 | 'scipy==1.12.0', 25 | 'corner==2.2.2', 26 | 'seaborn==0.13.2' 27 | ], 28 | extras_require={ 29 | 'tests': [ 30 | 'nose2==0.9.1', 31 | 'pre-commit==1.20.0', 32 | 'flake8==3.7.9', 33 | 'pydoc-markdown==2.0.4', 34 | 'tabulate==0.8.5', 35 | 'six==1.12.0' 36 | ] 37 | }, 38 | zip_safe=False) 39 | 40 | #jupyter nbconvert --to Markdown ./pycecream_test/test_pycecream.ipynb --------------------------------------------------------------------------------