├── .github └── workflows │ ├── python-package.yml │ └── python-publish.yml ├── .gitignore ├── .pre-commit-config.yaml ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── RMtools_1D ├── __init__.py ├── calculate_RMSF.py ├── cats │ ├── catComplex.csv │ ├── cat_simple_doQUfit.bat │ ├── catalogue.csv │ ├── catalogue1.csv │ └── testCat.dat ├── clean_model.py ├── do_QUfit_1D_mnest.py ├── do_RMclean_1D.py ├── do_RMsynth_1D.py ├── do_RMsynth_1D_fromFITS.py ├── mk_test_ascii_data.py ├── models_ns │ ├── __init__.py │ ├── m1.py │ ├── m11.py │ ├── m111.py │ ├── m12.py │ ├── m2.py │ ├── m3.py │ ├── m4.py │ ├── m5.py │ ├── m6.py │ └── m7.py ├── rmtools_bwdepol.py └── rmtools_bwpredict.py ├── RMtools_3D ├── RMpeakfit_3D.py ├── __init__.py ├── assemble_chunks.py ├── catalogue.csv ├── create_chunks.py ├── do_RMclean_3D.py ├── do_RMsynth_3D.py ├── do_fitIcube.py ├── extract_region.py ├── make_freq_file.py ├── mk_test_cube_data.py └── rescale_I_model_3D.py ├── RMutils ├── __init__.py ├── mpfit.py ├── normalize.py ├── util_FITS.py ├── util_RM.py ├── util_misc.py ├── util_plotFITS.py ├── util_plotTk.py ├── util_rec.py └── util_testing.py ├── VERSION_HISTORY.txt ├── external └── finufft_LICENSE.md ├── setup.py └── tests ├── QA_test.py ├── RMclean1D_referencevalues.json ├── RMsynth1D_referencevalues.json ├── RMsynth1D_testdata.dat ├── __init__.py ├── cli_test.py ├── helper_test.py ├── import_test.py ├── nufft_test.py └── test_stokesIfit.py /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Tests 5 | 6 | on: 7 | push: 8 | branches: [ master, fix_workflow, dev ] 9 | pull_request: 10 | branches: [ master ] 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | python-version: [3.7, 3.8, 3.9, "3.10", 3.11, 3.12] 20 | 21 | steps: 22 | - uses: actions/checkout@v2 23 | - name: Set up conda / Python 24 | uses: conda-incubator/setup-miniconda@v2 25 | with: 26 | miniconda-version: "latest" 27 | activate-environment: test 28 | python-version: ${{ matrix.python-version }} 29 | auto-activate-base: false 30 | - name: Install dependencies 31 | shell: bash -l {0} 32 | run: | 33 | conda install -c conda-forge pymultinest 34 | pip install -U .[QUfitting,parallel] nestle pytest numpy 35 | - name: Test with pytest 36 | shell: bash -l {0} 37 | run: | 38 | pytest 39 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v3 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | 141 | # Tests 142 | simdata 143 | tests/*/ 144 | tests/*.pdf 145 | *.pdf 146 | .vscode/settings.json 147 | dist/* 148 | build/* 149 | RM.egg-info/* 150 | RM_tools.egg-info/* 151 | tests/simdata 152 | tests/models_ns 153 | *egg-info 154 | .DS_Store 155 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | - id: end-of-file-fixer 7 | - id: check-yaml 8 | - id: check-added-large-files 9 | - repo: https://github.com/psf/black 10 | rev: 25.1.0 11 | hooks: 12 | - id: black 13 | - repo: https://github.com/PyCQA/isort 14 | rev: 6.0.1 15 | hooks: 16 | - id: isort 17 | args: ["--profile=black"] 18 | ci: 19 | autofix_commit_msg: | 20 | [pre-commit.ci] auto fixes from pre-commit.com hooks 21 | 22 | for more information, see https://pre-commit.ci 23 | autofix_prs: true 24 | autoupdate_branch: '' 25 | autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' 26 | autoupdate_schedule: weekly 27 | skip: [] 28 | submodules: false 29 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | Copyright (c) 2020 Canadian Initiative for Radio Astronomy Data Analysis 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | The above copyright notice and this permission notice shall be included in all 10 | copies or substantial portions of the Software. 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 13 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 14 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 15 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 16 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 17 | SOFTWARE. 
18 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include RMtools_1D/models_ns/* 3 | include RMtools_1D/cats/* 4 | include RMtools_1D/data/* 5 | include RMtools_1D/README.txt 6 | include RMtools_3D/README.txt 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Tests](https://github.com/CIRADA-Tools/RM-tools/actions/workflows/python-package.yml/badge.svg) [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/CIRADA-Tools/RM-Tools/master.svg)](https://results.pre-commit.ci/latest/github/CIRADA-Tools/RM-Tools/master) 2 | 3 | # RM-Tools 4 | 5 | RM-synthesis, RM-clean and QU-fitting on polarised radio spectra 6 | 7 | Python scripts to perform RM-synthesis, RM-clean and QU-fitting on 8 | polarised radio spectra. 9 | 10 | 11 | Initial version by Cormac R. Purcell 12 | Currently hosted by CIRADA and maintained by Cameron Van Eck 13 | 14 | Version 1.2 is now released! Check the wiki for new functionality. 15 | 16 | ## Installation / Usage 17 | Installation, usage instructions and detailed algorithm information can be found in the [wiki](https://github.com/CIRADA-Tools/RM-Tools/wiki). 18 | 19 | ## Structure: 20 | - RMtools_1D ... Toolkit to produce Faraday spectra of single pixels. 21 | - RMtools_3D ... Toolkit to produce Faraday depth cubes. 22 | - RMutils ... Utilities for interacting with polarized data and Faraday depth 23 | 24 | ![RM-Tools component diagram](https://github.com/CIRADA-Tools/RM-Tools/wiki/diagram.png) 25 | 26 | Five terminal commands are added to invoke the main tools: 27 | - `rmsynth1d` 28 | - `rmclean1d` 29 | - `rmsynth3d` 30 | - `rmclean3d` 31 | - `qufit` 32 | 33 | Use these commands with a -h flag to get information on the usage of each. Full documentation is on the [wiki](https://github.com/CIRADA-Tools/RM-Tools/wiki). 34 | 35 | The following terminal commands are available to access the [additional tools](https://github.com/CIRADA-Tools/RM-Tools/wiki/Tools): 36 | - `rmtools_freqfile` 37 | - `rmtools_calcRMSF` 38 | - `rmtools_testdata1D` 39 | - `rmtools_createchunks` 40 | - `rmtools_assemblechunks` 41 | - `rmtools_fitIcube` 42 | - `rmtools_peakfitcube` 43 | - `rmtools_testdata3D` 44 | - `rmtools_extractregion` 45 | 46 | 47 | ## Citing 48 | If you use this package in a publication, please cite the [ASCL entry](https://ui.adsabs.harvard.edu/abs/2020ascl.soft05003P/abstract) for the time being. A paper with a full description of the package is being prepared but is not available yet. 49 | 50 | More information on the Canadian Initiative for Radio Astronomy Data Analysis (CIRADA) can be found at cirada.ca. 51 | 52 | RM-Tools is open source under an MIT License. 53 | 54 | ## Contributing 55 | Contributions are welcome. Questions, bug reports, and feature requests can be posted to the GitHub issues page or sent to Cameron Van Eck, cameron.vaneck (at) anu.edu.au. 56 | 57 | The development dependencies can be installed via `pip` from PyPI: 58 | ```bash 59 | pip install "RM-Tools[dev]" 60 | ``` 61 | or for a local clone: 62 | ```bash 63 | cd RM-Tools 64 | pip install ".[dev]" 65 | ``` 66 | 67 | Code formatting and style is handled by `black` and `isort`, with tests run by `pytest`. A `pre-commit` hook is available to handle the autoformatting. 
After installing the `dev` dependencies, you can install the hooks by running: 68 | ```bash 69 | cd RM-Tools 70 | pre-commit install 71 | ``` 72 | -------------------------------------------------------------------------------- /RMtools_1D/__init__.py: -------------------------------------------------------------------------------- 1 | import pkg_resources 2 | 3 | __version__ = pkg_resources.get_distribution("RM-Tools").version 4 | -------------------------------------------------------------------------------- /RMtools_1D/calculate_RMSF.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Wed Mar 27 11:01:48 2019 5 | 6 | @author: cvaneck 7 | 8 | This routine will determine the RMSF and related parameters, 9 | giving the following input information. One of: 10 | a file with channel frequencies and weights 11 | OR 12 | a file with channel frequencies (assumes equal weights) 13 | OR 14 | Input values for mininum frequency, maximum frequency, and channel width. 15 | (assumes equal weights and all channels present) 16 | 17 | The outputs are a list of relavant RMSF properties, and a plot of the RMSF 18 | shape. 19 | """ 20 | 21 | # import sys 22 | import argparse 23 | 24 | import numpy as np 25 | from astropy.constants import c as speed_of_light 26 | from matplotlib import pyplot as plt 27 | 28 | from RMutils.util_RM import get_rmsf_planes 29 | 30 | 31 | def main(): 32 | """ 33 | Determines what set of input parameters were defined, reads in file or 34 | generates frequency array as appropriate, and passes frequency and weight 35 | arrays to the function that works out the RMSF properties. 36 | """ 37 | 38 | descStr = """ 39 | Calculate and plot RMSF and report main properties, given a supplied 40 | frequency coverage and optional weights (either as second column of 41 | frequency file, or as separate file).""" 42 | 43 | parser = argparse.ArgumentParser( 44 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 45 | ) 46 | parser.add_argument( 47 | "freqFile", 48 | metavar="freqFile.dat", 49 | nargs="?", 50 | default=None, 51 | help="ASCII file containing frequencies and optionally weights.", 52 | ) 53 | parser.add_argument( 54 | "weightFile", 55 | metavar="weightFile.dat", 56 | nargs="?", 57 | help="Optional ASCII file containing weights.", 58 | ) 59 | parser.add_argument( 60 | "-f", 61 | dest=("freq_parms"), 62 | nargs=3, 63 | default=None, 64 | help="Generate frequencies (in Hz): minfreq, maxfreq, channel_width", 65 | ) 66 | parser.add_argument( 67 | "-m", 68 | dest="phiMax_radm2", 69 | type=float, 70 | default=None, 71 | help="absolute max Faraday depth sampled [Auto, ~10xFWHM].", 72 | ) 73 | parser.add_argument( 74 | "-d", 75 | dest="dphi_radm2", 76 | type=float, 77 | default=None, 78 | help="Delta phi [Auto, ~10/FWHM].", 79 | ) 80 | parser.add_argument( 81 | "-s", 82 | dest="plotfile", 83 | default=None, 84 | help="Filename to save plot to. [do not save]", 85 | ) 86 | parser.add_argument( 87 | "-n", dest="plotname", default=None, help='Name of plot ["Simulated RMSF"]' 88 | ) 89 | parser.add_argument( 90 | "-r", 91 | "--super-resolution", 92 | action="store_true", 93 | help="Optimise the resolution of the RMSF (as per Rudnick & Cotton). 
", 94 | ) 95 | args = parser.parse_args() 96 | 97 | # Check that at least one frequency input has been given: 98 | if args.freqFile == None and args.freq_parms == None: 99 | print("Please supply either a file with frequency values or use the -f flag.") 100 | raise (Exception("No frequency input! Use -h flag for help on inputs.")) 101 | 102 | # if args.phiMax_radm2 != None: 103 | # if args.phiMax_radm2 104 | 105 | # Order of priority: frequency file takes precedence over -i flag. 106 | # weight file takes precedence over 2nd column of frequency file. 107 | if args.freqFile != None: 108 | data = np.genfromtxt(args.freqFile, encoding=None, dtype=None) 109 | if len(data.shape) == 2: 110 | freq_array = data[:, 0] 111 | weights_array = data[:, 1] 112 | else: 113 | freq_array = data 114 | weights_array = np.ones_like(freq_array) 115 | else: 116 | # Generate frequency and weight arrays from intput values. 117 | freq_array = np.arange( 118 | float(args.freq_parms[0]), 119 | float(args.freq_parms[1]), 120 | float(args.freq_parms[2]), 121 | ) 122 | weights_array = np.ones_like(freq_array) 123 | 124 | if args.weightFile != None: 125 | weights_array = np.genfromtxt(args.weightFile, encoding=None, dtype=None) 126 | if len(weights_array) != len(freq_array): 127 | raise Exception( 128 | "Weights file does not have same number of channels as frequency source" 129 | ) 130 | 131 | determine_RMSF_parameters( 132 | freq_array, 133 | weights_array, 134 | args.phiMax_radm2, 135 | args.dphi_radm2, 136 | args.plotfile, 137 | args.plotname, 138 | args.super_resolution, 139 | ) 140 | 141 | 142 | def determine_RMSF_parameters( 143 | freq_array, 144 | weights_array, 145 | phi_max, 146 | dphi, 147 | plotfile=None, 148 | plotname=None, 149 | super_resolution=False, 150 | ): 151 | """ 152 | Characterizes an RMSF given the supplied frequency and weight arrays. 153 | Prints the results to terminal and produces a plot. 154 | Inputs: 155 | freq_array: array of frequency values (in Hz) 156 | weights_array: array of channel weights (arbitrary units) 157 | phi_max (float): maximum Faraday depth to compute RMSF out to. 158 | dphi (float): step size in Faraday depth 159 | plotfile (str): file name and path to save RMSF plot. 160 | plotname (str): title of plot 161 | """ 162 | lambda2_array = speed_of_light.value**2 / freq_array**2 163 | l2_min = np.min(lambda2_array) 164 | l2_max = np.max(lambda2_array) 165 | dl2 = np.median(np.abs(np.diff(lambda2_array))) 166 | 167 | if phi_max == None: 168 | phi_max = 10 * 2 * np.sqrt(3.0) / (l2_max - l2_min) # ~10*FWHM 169 | if dphi == None: 170 | dphi = 0.1 * 2 * np.sqrt(3.0) / (l2_max - l2_min) # ~10*FWHM 171 | 172 | phi_array = np.arange( 173 | -1 * phi_max / 2, phi_max / 2 + 1e-6, dphi 174 | ) # division by two accounts for how RMSF is always twice as wide as FDF. 
175 | 176 | rmsf_results = get_rmsf_planes( 177 | lambda2_array, 178 | phi_array, 179 | weightArr=weights_array, 180 | fitRMSF=True, 181 | fitRMSFreal=super_resolution, 182 | lam0Sq_m2=0 if super_resolution else None, 183 | ) 184 | 185 | # Output key results to terminal: 186 | print("RMSF PROPERTIES:") 187 | print( 188 | "Theoretical (unweighted) FWHM: {:.4g} rad m^-2".format( 189 | 3.8 / (l2_max - l2_min) 190 | ) 191 | ) 192 | print( 193 | "Measured FWHM: {:.4g} rad m^-2".format( 194 | rmsf_results.fwhmRMSFArr 195 | ) 196 | ) 197 | print("Theoretical largest FD scale probed: {:.4g} rad m^-2".format(np.pi / l2_min)) 198 | print( 199 | "Theoretical maximum FD*: {:.4g} rad m^-2".format( 200 | np.sqrt(3.0) / dl2 201 | ) 202 | ) 203 | print( 204 | "*50% bandwdith depolarization threshold, for median channel width in Delta-lambda^2" 205 | ) 206 | print( 207 | "* may not be reliable over very large fractional bandwidths or in data with " 208 | ) 209 | print("differing channel widths or many frequency gaps.") 210 | # Explanation for below: This code find the local maxima in the positive half of the RMSF, 211 | # finds the highest amplitude one, and calls that the first sidelobe. 212 | try: 213 | x = np.diff( 214 | np.sign( 215 | np.diff( 216 | np.abs(rmsf_results.RMSFcube[rmsf_results.RMSFcube.size // 2 :]) 217 | ) 218 | ) 219 | ) # -2=local max, +2=local min 220 | y = ( 221 | 1 + np.where(x == -2)[0] 222 | ) # indices of peaks, +1 is because of offset from double differencing 223 | peaks = np.abs(rmsf_results.RMSFcube[rmsf_results.RMSFcube.size // 2 :])[y] 224 | print( 225 | "First sidelobe FD and amplitude: {:.4g} rad m^-2".format( 226 | rmsf_results.phi2Arr[rmsf_results.phi2Arr.size // 2 :][ 227 | y[np.argmax(peaks)] 228 | ] 229 | ) 230 | ) 231 | print( 232 | " {:.4g} % of peak".format( 233 | np.max(peaks) * 100 234 | ) 235 | ) 236 | except: 237 | pass 238 | 239 | # Plotting: 240 | plt.figure(figsize=(7, 7)) 241 | plt.subplot(211) 242 | plt.axhline(0, color="k") 243 | if plotname == None: 244 | plt.title("Simulated RMSF") 245 | else: 246 | plt.title(plotname) 247 | plt.plot( 248 | rmsf_results.phi2Arr, np.real(rmsf_results.RMSFcube), "b-", label="Stokes Q" 249 | ) 250 | plt.plot( 251 | rmsf_results.phi2Arr, np.imag(rmsf_results.RMSFcube), "r--", label="Stokes U" 252 | ) 253 | plt.plot( 254 | rmsf_results.phi2Arr, np.abs(rmsf_results.RMSFcube), "k-", label="Amplitude" 255 | ) 256 | plt.legend() 257 | plt.xlabel(r"Faraday depth (rad m$^{-2}$)") 258 | plt.ylabel("RMSF (unitless)") 259 | plt.subplot(212) 260 | ax = plt.gca() 261 | ax.axis([0, 1, 0, 1]) 262 | ax.axis("off") 263 | ax.text( 264 | 0.1, 265 | 0.8, 266 | ( 267 | "Theoretical (unweighted) FWHM: {:.4g} rad m^-2\n" 268 | + "Measured FWHM: {:.4g} rad m^-2\n" 269 | + "Theoretical largest FD scale probed: {:.4g} rad m^-2\n" 270 | + "Theoretical maximum FD: {:.4g} rad m^-2\n" 271 | + "\n\n\n" 272 | + "Lowest frequency/wavelength [GHz/cm]: {:>7.4g}/{:.4g}\n" 273 | + "Highest frequency/wavelength [GHz/cm]: {:>7.4g}/{:.4g}\n" 274 | + "# of channels: {:.4g}\n" 275 | ).format( 276 | 3.8 / (l2_max - l2_min), 277 | rmsf_results.fwhmRMSFArr, 278 | np.pi / l2_min, 279 | np.sqrt(3.0) / dl2, 280 | np.min(freq_array) / 1e9, 281 | speed_of_light.value / np.min(freq_array) * 100.0, 282 | np.max(freq_array) / 1e9, 283 | speed_of_light.value / np.max(freq_array) * 100.0, 284 | freq_array.size, 285 | ), 286 | family="monospace", 287 | horizontalalignment="left", 288 | verticalalignment="top", 289 | ) 290 | 291 | try: 292 | ax.text( 293 | 0.1, 294 | 0.8, 295 | 
( 296 | "\n\n\n\n" 297 | + "First sidelobe FD and amplitude: {:.4g} rad m^-2\n" 298 | + " {:.4g} % of peak" 299 | ).format( 300 | rmsf_results.phi2Arr[rmsf_results.phi2Arr.size // 2 :][ 301 | y[np.argmax(peaks)] 302 | ], 303 | np.max(peaks) * 100, 304 | ), 305 | family="monospace", 306 | horizontalalignment="left", 307 | verticalalignment="top", 308 | ) 309 | except: 310 | pass 311 | 312 | # ax.text(0.,0.7,('Theoretical (unweighted) FWHM: {:.4g} rad m^-2'.format(2*np.sqrt(3.0) / (l2_max-l2_min))) 313 | # ax.text(0.,0.58,'Measured FWHM: {:.4g} rad m^-2'.format(fwhmRMSFArr)) 314 | # ax.text(0.,0.46,'Theoretical largest FD scale probed: {:.4g} rad m^-2'.format(np.pi/l2_min)) 315 | # ax.text(0.,0.34,'Theoretical maximum FD: {:.4g} rad m^-2'.format(np.sqrt(3.0)/dl2)) 316 | # ax.text(0.,0.22,'First sidelobe FD and amplitude: {:.4g} rad m^-2'.format(phi2Arr[phi2Arr.size//2:][y[np.argmax(peaks)]])) 317 | # ax.text(0.,0.1,' {:.4g} % of peak'.format(np.max(peaks)*100)) 318 | 319 | if plotfile is not None: 320 | plt.savefig(plotfile, bbox_inches="tight") 321 | else: 322 | plt.show() 323 | 324 | 325 | if __name__ == "__main__": 326 | main() 327 | -------------------------------------------------------------------------------- /RMtools_1D/cats/catComplex.csv: -------------------------------------------------------------------------------- 1 | #-----------------------------------------------------------------------------# 2 | # # 3 | # Input catalogue file for use with the POSSUM pipeline. # 4 | # Used to generate artificial data for testing purposes. # 5 | # # 6 | # Note: spatial information (x,y,maj,min,pa) is ignored when generating 1D # 7 | # ASCII data. If generating image data these are the world-coordinates # 8 | # and parameters of the injected Gaussians. # 9 | # # 10 | # C. Purcell 24-Mar-2016 # 11 | # # 12 | #-----------------------------------------------------------------------------# 13 | 14 | # MODEL TYPE 1: External depolarised components within same beam (not LOS). 15 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2, faradayDispersion_radm2]xN 16 | # 17 | # MODEL TYPE 2: Emitting & rotating regions stacked along the line-of-sight. 
18 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2]xN 19 | # 20 | 21 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0 # 1 22 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 31.0, 0.0 # 2 23 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 32.0, 0.0 # 3 24 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 33.0, 0.0 # 4 25 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 34.0, 0.0 # 5 26 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 35.0, 0.0 # 6 27 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 36.0, 0.0 # 7 28 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 37.0, 0.0 # 8 29 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 38.0, 0.0 # 9 30 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 39.0, 0.0 # 10 31 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 1500.0, 0.0, 0.4, 30.0, 1000.0, 0.0 # 11 32 | -------------------------------------------------------------------------------- /RMtools_1D/cats/cat_simple_doQUfit.bat: -------------------------------------------------------------------------------- 1 | ./do_QUfit_1D_auto.py data_simple/Source1.dat -t 3 -m 1 2 | ./do_QUfit_1D_auto.py data_simple/Source2.dat -t 3 -m 1 3 | ./do_QUfit_1D_auto.py data_simple/Source3.dat -t 3 -m 1 4 | ./do_QUfit_1D_auto.py data_simple/Source4.dat -t 3 -m 1 5 | ./do_QUfit_1D_auto.py data_simple/Source5.dat -t 3 -m 1 6 | ./do_QUfit_1D_auto.py data_simple/Source6.dat -t 3 -m 1 7 | ./do_QUfit_1D_auto.py data_simple/Source7.dat -t 3 -m 1 8 | ./do_QUfit_1D_auto.py data_simple/Source8.dat -t 3 -m 1 9 | ./do_QUfit_1D_auto.py data_simple/Source9.dat -t 3 -m 1 10 | ./do_QUfit_1D_auto.py data_simple/Source10.dat -t 3 -m 1 11 | ./do_QUfit_1D_auto.py data_simple/Source11.dat -t 3 -m 1 12 | ./do_QUfit_1D_auto.py data_simple/Source12.dat -t 3 -m 1 13 | ./do_QUfit_1D_auto.py data_simple/Source13.dat -t 3 -m 1 14 | ./do_QUfit_1D_auto.py data_simple/Source14.dat -t 3 -m 1 15 | ./do_QUfit_1D_auto.py data_simple/Source15.dat -t 3 -m 1 16 | ./do_QUfit_1D_auto.py data_simple/Source16.dat -t 3 -m 1 17 | ./do_QUfit_1D_auto.py data_simple/Source17.dat -t 3 -m 1 18 | ./do_QUfit_1D_auto.py data_simple/Source18.dat -t 3 -m 1 19 | ./do_QUfit_1D_auto.py data_simple/Source19.dat -t 3 -m 1 20 | ./do_QUfit_1D_auto.py data_simple/Source20.dat -t 3 -m 1 21 | ./do_QUfit_1D_auto.py data_simple/Source21.dat -t 3 -m 1 22 | ./do_QUfit_1D_auto.py data_simple/Source22.dat -t 3 -m 1 23 | ./do_QUfit_1D_auto.py data_simple/Source23.dat -t 3 -m 1 24 | ./do_QUfit_1D_auto.py data_simple/Source24.dat -t 3 -m 1 25 | ./do_QUfit_1D_auto.py data_simple/Source25.dat -t 3 -m 1 26 | ./do_QUfit_1D_auto.py data_simple/Source26.dat -t 3 -m 1 27 | ./do_QUfit_1D_auto.py data_simple/Source27.dat -t 3 -m 1 28 | ./do_QUfit_1D_auto.py data_simple/Source28.dat -t 3 -m 1 29 | ./do_QUfit_1D_auto.py data_simple/Source29.dat -t 3 -m 1 30 | ./do_QUfit_1D_auto.py data_simple/Source30.dat -t 3 -m 1 31 | ./do_QUfit_1D_auto.py data_simple/Source31.dat -t 3 -m 1 32 | ./do_QUfit_1D_auto.py data_simple/Source32.dat -t 3 -m 1 33 | ./do_QUfit_1D_auto.py data_simple/Source33.dat -t 3 -m 1 34 | ./do_QUfit_1D_auto.py data_simple/Source34.dat -t 3 -m 1 35 | ./do_QUfit_1D_auto.py data_simple/Source35.dat -t 3 -m 
1 36 | ./do_QUfit_1D_auto.py data_simple/Source36.dat -t 3 -m 1 37 | ./do_QUfit_1D_auto.py data_simple/Source37.dat -t 3 -m 1 38 | ./do_QUfit_1D_auto.py data_simple/Source38.dat -t 3 -m 1 39 | ./do_QUfit_1D_auto.py data_simple/Source39.dat -t 3 -m 1 40 | ./do_QUfit_1D_auto.py data_simple/Source40.dat -t 3 -m 1 41 | ./do_QUfit_1D_auto.py data_simple/Source41.dat -t 3 -m 1 42 | ./do_QUfit_1D_auto.py data_simple/Source42.dat -t 3 -m 1 43 | ./do_QUfit_1D_auto.py data_simple/Source43.dat -t 3 -m 1 44 | ./do_QUfit_1D_auto.py data_simple/Source44.dat -t 3 -m 1 45 | ./do_QUfit_1D_auto.py data_simple/Source45.dat -t 3 -m 1 46 | ./do_QUfit_1D_auto.py data_simple/Source46.dat -t 3 -m 1 47 | ./do_QUfit_1D_auto.py data_simple/Source47.dat -t 3 -m 1 48 | ./do_QUfit_1D_auto.py data_simple/Source48.dat -t 3 -m 1 49 | ./do_QUfit_1D_auto.py data_simple/Source49.dat -t 3 -m 1 50 | ./do_QUfit_1D_auto.py data_simple/Source50.dat -t 3 -m 1 51 | ./do_QUfit_1D_auto.py data_simple/Source51.dat -t 3 -m 1 52 | ./do_QUfit_1D_auto.py data_simple/Source52.dat -t 3 -m 1 53 | ./do_QUfit_1D_auto.py data_simple/Source53.dat -t 3 -m 1 54 | ./do_QUfit_1D_auto.py data_simple/Source54.dat -t 3 -m 1 55 | ./do_QUfit_1D_auto.py data_simple/Source55.dat -t 3 -m 1 56 | ./do_QUfit_1D_auto.py data_simple/Source56.dat -t 3 -m 1 57 | ./do_QUfit_1D_auto.py data_simple/Source57.dat -t 3 -m 1 58 | ./do_QUfit_1D_auto.py data_simple/Source58.dat -t 3 -m 1 59 | ./do_QUfit_1D_auto.py data_simple/Source59.dat -t 3 -m 1 60 | ./do_QUfit_1D_auto.py data_simple/Source60.dat -t 3 -m 1 61 | ./do_QUfit_1D_auto.py data_simple/Source61.dat -t 3 -m 1 62 | ./do_QUfit_1D_auto.py data_simple/Source62.dat -t 3 -m 1 63 | ./do_QUfit_1D_auto.py data_simple/Source63.dat -t 3 -m 1 64 | ./do_QUfit_1D_auto.py data_simple/Source64.dat -t 3 -m 1 65 | ./do_QUfit_1D_auto.py data_simple/Source65.dat -t 3 -m 1 66 | ./do_QUfit_1D_auto.py data_simple/Source66.dat -t 3 -m 1 67 | ./do_QUfit_1D_auto.py data_simple/Source67.dat -t 3 -m 1 68 | ./do_QUfit_1D_auto.py data_simple/Source68.dat -t 3 -m 1 69 | ./do_QUfit_1D_auto.py data_simple/Source69.dat -t 3 -m 1 70 | ./do_QUfit_1D_auto.py data_simple/Source70.dat -t 3 -m 1 71 | ./do_QUfit_1D_auto.py data_simple/Source71.dat -t 3 -m 1 72 | ./do_QUfit_1D_auto.py data_simple/Source72.dat -t 3 -m 1 73 | ./do_QUfit_1D_auto.py data_simple/Source73.dat -t 3 -m 1 74 | ./do_QUfit_1D_auto.py data_simple/Source74.dat -t 3 -m 1 75 | ./do_QUfit_1D_auto.py data_simple/Source75.dat -t 3 -m 1 76 | ./do_QUfit_1D_auto.py data_simple/Source76.dat -t 3 -m 1 77 | ./do_QUfit_1D_auto.py data_simple/Source77.dat -t 3 -m 1 78 | ./do_QUfit_1D_auto.py data_simple/Source78.dat -t 3 -m 1 79 | ./do_QUfit_1D_auto.py data_simple/Source79.dat -t 3 -m 1 80 | ./do_QUfit_1D_auto.py data_simple/Source80.dat -t 3 -m 1 81 | ./do_QUfit_1D_auto.py data_simple/Source81.dat -t 3 -m 1 82 | ./do_QUfit_1D_auto.py data_simple/Source82.dat -t 3 -m 1 83 | ./do_QUfit_1D_auto.py data_simple/Source83.dat -t 3 -m 1 84 | ./do_QUfit_1D_auto.py data_simple/Source84.dat -t 3 -m 1 85 | ./do_QUfit_1D_auto.py data_simple/Source85.dat -t 3 -m 1 86 | ./do_QUfit_1D_auto.py data_simple/Source86.dat -t 3 -m 1 87 | ./do_QUfit_1D_auto.py data_simple/Source87.dat -t 3 -m 1 88 | ./do_QUfit_1D_auto.py data_simple/Source88.dat -t 3 -m 1 89 | ./do_QUfit_1D_auto.py data_simple/Source89.dat -t 3 -m 1 90 | ./do_QUfit_1D_auto.py data_simple/Source90.dat -t 3 -m 1 91 | ./do_QUfit_1D_auto.py data_simple/Source91.dat -t 3 -m 1 92 | ./do_QUfit_1D_auto.py data_simple/Source92.dat -t 3 -m 1 93 | 
./do_QUfit_1D_auto.py data_simple/Source93.dat -t 3 -m 1 94 | ./do_QUfit_1D_auto.py data_simple/Source94.dat -t 3 -m 1 95 | ./do_QUfit_1D_auto.py data_simple/Source95.dat -t 3 -m 1 96 | ./do_QUfit_1D_auto.py data_simple/Source96.dat -t 3 -m 1 97 | ./do_QUfit_1D_auto.py data_simple/Source97.dat -t 3 -m 1 98 | ./do_QUfit_1D_auto.py data_simple/Source98.dat -t 3 -m 1 99 | ./do_QUfit_1D_auto.py data_simple/Source99.dat -t 3 -m 1 100 | ./do_QUfit_1D_auto.py data_simple/Source100.dat -t 3 -m 1 101 | -------------------------------------------------------------------------------- /RMtools_1D/cats/catalogue.csv: -------------------------------------------------------------------------------- 1 | #-----------------------------------------------------------------------------# 2 | # # 3 | # Input catalogue file for use with the POSSUM pipeline. # 4 | # Used to generate artificial data for testing purposes. # 5 | # # 6 | # Note: spatial information (x,y,maj,min,pa) is ignored when generating 1D # 7 | # ASCII data. If generating image data these are the world-coordinates # 8 | # and parameters of the injected Gaussians. # 9 | # # 10 | # C. Purcell 14-Feb-2017 # 11 | # # 12 | #-----------------------------------------------------------------------------# 13 | 14 | # MODEL TYPE 1: External depolarised components within same beam (not LOS). 15 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2, faradayDispersion_radm2]xN 16 | # 17 | # MODEL TYPE 2: Emitting & rotating regions stacked along the line-of-sight. 18 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2]xN 19 | # 20 | 21 | 1, 89.9981, +0.0001, 0.0, 0.0, 0.0, 1.0, -0.0, 0.6, 180.0, 30.0, 0.0 22 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 0.5, -0.7, 0.4, 30.0, 100.0, 0.0, 0.3, 70.0, 80.0, 0.0 23 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 0.5, -0.7, 0.4, 30.0, 100.0, 30.0, 0.3, 70.0, 80.0, 30.0 24 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 0.5, -0.7, 0.3, 42.0, 200.0, 30.0, 0.4, 70.0, 120.0, 60.0 25 | 2, 90.0026, +0.000, 0.0, 0.0, 0.0, 2.0, 0.0, 0.5, 45.0, -20.0 26 | 2, 90.0, +0.000, 0.0, 0.0, 0.0, 2.0, 0.0, 0.5, 45.0, -20.0, 0.25, 10, 50 27 | 2, 90.0, +0.000, 0.0, 0.0, 0.0, 3.0, 0.0, 0.4, 45.0, -30.0, 0.25, 10, 50, 0.25, 10, 90 28 | 1, 90.0, +0.000, 0.0, 0.0, 0.0, 3.0, 0.0, 0.4, 45.0, -30.0, 0.0, 0.25, 10, 50, 0.0, 0.25, 10, 90, 0.0 29 | -------------------------------------------------------------------------------- /RMtools_1D/cats/catalogue1.csv: -------------------------------------------------------------------------------- 1 | #-----------------------------------------------------------------------------# 2 | # # 3 | # Input catalogue file for use with the POSSUM pipeline. # 4 | # Used to generate artificial data for testing purposes. # 5 | # # 6 | # Note: spatial information (x,y,maj,min,pa) is ignored when generating 1D # 7 | # ASCII data. If generating image data these are the world-coordinates # 8 | # and parameters of the injected Gaussians. # 9 | # # 10 | # C. Purcell 06-April-2016 # 11 | # # 12 | #-----------------------------------------------------------------------------# 13 | 14 | # MODEL TYPE 1: External depolarised components within same beam (not LOS). 15 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2, faradayDispersion_radm2]xN 16 | # 17 | # MODEL TYPE 2: Emitting & rotating regions stacked along the line-of-sight. 
18 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2]xN 19 | # 20 | 21 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 100.0, 0.0 # 1 22 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 90.0, 0.0 # 2 23 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 80.0, 0.0 # 3 24 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 70.0, 0.0 # 4 25 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 60.0, 0.0 # 5 26 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 50.0, 0.0 # 6 27 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 40.0, 0.0 # 7 28 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 39.0, 0.0 # 8 29 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 38.0, 0.0 # 9 30 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.4, 30.0, 37.0, 0.0 # 10 31 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0 # 11 32 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 1.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.05, 30.0, 50.0, 0.0 # 12 33 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 100.0, -0.7, 0.4, 30.0, 30.0, 0.0 # 13 34 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 100.0, -0.7, 0.4, 30.0, 30.0, 0.0, 0.05, 30.0, 50.0, 0.0 # 14 35 | -------------------------------------------------------------------------------- /RMtools_1D/cats/testCat.dat: -------------------------------------------------------------------------------- 1 | 1, 0, 0, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -75.000000, 0.0, 10.000000, 10.000000, 75.000000, 0.0 2 | 1, 0, 1, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -66.666667, 0.0, 10.000000, 10.000000, 66.666667, 0.0 3 | 1, 0, 2, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -58.333333, 0.0, 10.000000, 10.000000, 58.333333, 0.0 4 | 1, 0, 3, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -50.000000, 0.0, 10.000000, 10.000000, 50.000000, 0.0 5 | 1, 0, 4, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -41.666667, 0.0, 10.000000, 10.000000, 41.666667, 0.0 6 | 1, 0, 5, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -33.333333, 0.0, 10.000000, 10.000000, 33.333333, 0.0 7 | 1, 0, 6, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -25.000000, 0.0, 10.000000, 10.000000, 25.000000, 0.0 8 | 1, 0, 7, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -16.666667, 0.0, 10.000000, 10.000000, 16.666667, 0.0 9 | 1, 0, 8, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, -8.333333, 0.0, 10.000000, 10.000000, 8.333333, 0.0 10 | 1, 0, 9, 0.0, 0.0, 0.0, 1, -0.7, 10.000000, 10.000000, 0.000000, 0.0, 10.000000, 10.000000, 0.000000, 0.0 11 | 1, 1, 0, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -75.000000, 0.0, 8.900000, 10.000000, 75.000000, 0.0 12 | 1, 1, 1, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -66.666667, 0.0, 8.900000, 10.000000, 66.666667, 0.0 13 | 1, 1, 2, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -58.333333, 0.0, 8.900000, 10.000000, 58.333333, 0.0 14 | 1, 1, 3, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -50.000000, 0.0, 8.900000, 10.000000, 50.000000, 0.0 15 | 1, 1, 4, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -41.666667, 0.0, 8.900000, 10.000000, 41.666667, 0.0 16 | 1, 1, 5, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -33.333333, 0.0, 8.900000, 10.000000, 33.333333, 0.0 17 | 1, 1, 6, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -25.000000, 0.0, 
8.900000, 10.000000, 25.000000, 0.0 18 | 1, 1, 7, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -16.666667, 0.0, 8.900000, 10.000000, 16.666667, 0.0 19 | 1, 1, 8, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, -8.333333, 0.0, 8.900000, 10.000000, 8.333333, 0.0 20 | 1, 1, 9, 0.0, 0.0, 0.0, 1, -0.7, 8.900000, 10.000000, 0.000000, 0.0, 8.900000, 10.000000, 0.000000, 0.0 21 | 1, 2, 0, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -75.000000, 0.0, 7.800000, 10.000000, 75.000000, 0.0 22 | 1, 2, 1, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -66.666667, 0.0, 7.800000, 10.000000, 66.666667, 0.0 23 | 1, 2, 2, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -58.333333, 0.0, 7.800000, 10.000000, 58.333333, 0.0 24 | 1, 2, 3, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -50.000000, 0.0, 7.800000, 10.000000, 50.000000, 0.0 25 | 1, 2, 4, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -41.666667, 0.0, 7.800000, 10.000000, 41.666667, 0.0 26 | 1, 2, 5, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -33.333333, 0.0, 7.800000, 10.000000, 33.333333, 0.0 27 | 1, 2, 6, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -25.000000, 0.0, 7.800000, 10.000000, 25.000000, 0.0 28 | 1, 2, 7, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -16.666667, 0.0, 7.800000, 10.000000, 16.666667, 0.0 29 | 1, 2, 8, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, -8.333333, 0.0, 7.800000, 10.000000, 8.333333, 0.0 30 | 1, 2, 9, 0.0, 0.0, 0.0, 1, -0.7, 7.800000, 10.000000, 0.000000, 0.0, 7.800000, 10.000000, 0.000000, 0.0 31 | 1, 3, 0, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -75.000000, 0.0, 6.700000, 10.000000, 75.000000, 0.0 32 | 1, 3, 1, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -66.666667, 0.0, 6.700000, 10.000000, 66.666667, 0.0 33 | 1, 3, 2, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -58.333333, 0.0, 6.700000, 10.000000, 58.333333, 0.0 34 | 1, 3, 3, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -50.000000, 0.0, 6.700000, 10.000000, 50.000000, 0.0 35 | 1, 3, 4, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -41.666667, 0.0, 6.700000, 10.000000, 41.666667, 0.0 36 | 1, 3, 5, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -33.333333, 0.0, 6.700000, 10.000000, 33.333333, 0.0 37 | 1, 3, 6, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -25.000000, 0.0, 6.700000, 10.000000, 25.000000, 0.0 38 | 1, 3, 7, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -16.666667, 0.0, 6.700000, 10.000000, 16.666667, 0.0 39 | 1, 3, 8, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, -8.333333, 0.0, 6.700000, 10.000000, 8.333333, 0.0 40 | 1, 3, 9, 0.0, 0.0, 0.0, 1, -0.7, 6.700000, 10.000000, 0.000000, 0.0, 6.700000, 10.000000, 0.000000, 0.0 41 | 1, 4, 0, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -75.000000, 0.0, 5.600000, 10.000000, 75.000000, 0.0 42 | 1, 4, 1, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -66.666667, 0.0, 5.600000, 10.000000, 66.666667, 0.0 43 | 1, 4, 2, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -58.333333, 0.0, 5.600000, 10.000000, 58.333333, 0.0 44 | 1, 4, 3, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -50.000000, 0.0, 5.600000, 10.000000, 50.000000, 0.0 45 | 1, 4, 4, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -41.666667, 0.0, 5.600000, 10.000000, 41.666667, 0.0 46 | 1, 4, 5, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -33.333333, 0.0, 5.600000, 10.000000, 33.333333, 0.0 47 | 1, 4, 6, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -25.000000, 0.0, 5.600000, 10.000000, 25.000000, 0.0 48 | 1, 4, 7, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, -16.666667, 0.0, 5.600000, 10.000000, 16.666667, 0.0 49 | 1, 4, 8, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, 
-8.333333, 0.0, 5.600000, 10.000000, 8.333333, 0.0 50 | 1, 4, 9, 0.0, 0.0, 0.0, 1, -0.7, 5.600000, 10.000000, 0.000000, 0.0, 5.600000, 10.000000, 0.000000, 0.0 51 | 1, 5, 0, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -75.000000, 0.0, 4.500000, 10.000000, 75.000000, 0.0 52 | 1, 5, 1, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -66.666667, 0.0, 4.500000, 10.000000, 66.666667, 0.0 53 | 1, 5, 2, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -58.333333, 0.0, 4.500000, 10.000000, 58.333333, 0.0 54 | 1, 5, 3, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -50.000000, 0.0, 4.500000, 10.000000, 50.000000, 0.0 55 | 1, 5, 4, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -41.666667, 0.0, 4.500000, 10.000000, 41.666667, 0.0 56 | 1, 5, 5, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -33.333333, 0.0, 4.500000, 10.000000, 33.333333, 0.0 57 | 1, 5, 6, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -25.000000, 0.0, 4.500000, 10.000000, 25.000000, 0.0 58 | 1, 5, 7, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -16.666667, 0.0, 4.500000, 10.000000, 16.666667, 0.0 59 | 1, 5, 8, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, -8.333333, 0.0, 4.500000, 10.000000, 8.333333, 0.0 60 | 1, 5, 9, 0.0, 0.0, 0.0, 1, -0.7, 4.500000, 10.000000, 0.000000, 0.0, 4.500000, 10.000000, 0.000000, 0.0 61 | 1, 6, 0, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -75.000000, 0.0, 3.400000, 10.000000, 75.000000, 0.0 62 | 1, 6, 1, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -66.666667, 0.0, 3.400000, 10.000000, 66.666667, 0.0 63 | 1, 6, 2, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -58.333333, 0.0, 3.400000, 10.000000, 58.333333, 0.0 64 | 1, 6, 3, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -50.000000, 0.0, 3.400000, 10.000000, 50.000000, 0.0 65 | 1, 6, 4, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -41.666667, 0.0, 3.400000, 10.000000, 41.666667, 0.0 66 | 1, 6, 5, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -33.333333, 0.0, 3.400000, 10.000000, 33.333333, 0.0 67 | 1, 6, 6, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -25.000000, 0.0, 3.400000, 10.000000, 25.000000, 0.0 68 | 1, 6, 7, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -16.666667, 0.0, 3.400000, 10.000000, 16.666667, 0.0 69 | 1, 6, 8, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, -8.333333, 0.0, 3.400000, 10.000000, 8.333333, 0.0 70 | 1, 6, 9, 0.0, 0.0, 0.0, 1, -0.7, 3.400000, 10.000000, 0.000000, 0.0, 3.400000, 10.000000, 0.000000, 0.0 71 | 1, 7, 0, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -75.000000, 0.0, 2.300000, 10.000000, 75.000000, 0.0 72 | 1, 7, 1, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -66.666667, 0.0, 2.300000, 10.000000, 66.666667, 0.0 73 | 1, 7, 2, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -58.333333, 0.0, 2.300000, 10.000000, 58.333333, 0.0 74 | 1, 7, 3, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -50.000000, 0.0, 2.300000, 10.000000, 50.000000, 0.0 75 | 1, 7, 4, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -41.666667, 0.0, 2.300000, 10.000000, 41.666667, 0.0 76 | 1, 7, 5, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -33.333333, 0.0, 2.300000, 10.000000, 33.333333, 0.0 77 | 1, 7, 6, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -25.000000, 0.0, 2.300000, 10.000000, 25.000000, 0.0 78 | 1, 7, 7, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -16.666667, 0.0, 2.300000, 10.000000, 16.666667, 0.0 79 | 1, 7, 8, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, -8.333333, 0.0, 2.300000, 10.000000, 8.333333, 0.0 80 | 1, 7, 9, 0.0, 0.0, 0.0, 1, -0.7, 2.300000, 10.000000, 0.000000, 0.0, 2.300000, 10.000000, 0.000000, 0.0 81 | 1, 8, 0, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 
10.000000, -75.000000, 0.0, 1.200000, 10.000000, 75.000000, 0.0 82 | 1, 8, 1, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -66.666667, 0.0, 1.200000, 10.000000, 66.666667, 0.0 83 | 1, 8, 2, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -58.333333, 0.0, 1.200000, 10.000000, 58.333333, 0.0 84 | 1, 8, 3, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -50.000000, 0.0, 1.200000, 10.000000, 50.000000, 0.0 85 | 1, 8, 4, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -41.666667, 0.0, 1.200000, 10.000000, 41.666667, 0.0 86 | 1, 8, 5, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -33.333333, 0.0, 1.200000, 10.000000, 33.333333, 0.0 87 | 1, 8, 6, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -25.000000, 0.0, 1.200000, 10.000000, 25.000000, 0.0 88 | 1, 8, 7, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -16.666667, 0.0, 1.200000, 10.000000, 16.666667, 0.0 89 | 1, 8, 8, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, -8.333333, 0.0, 1.200000, 10.000000, 8.333333, 0.0 90 | 1, 8, 9, 0.0, 0.0, 0.0, 1, -0.7, 1.200000, 10.000000, 0.000000, 0.0, 1.200000, 10.000000, 0.000000, 0.0 91 | 1, 9, 0, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -75.000000, 0.0, 0.100000, 10.000000, 75.000000, 0.0 92 | 1, 9, 1, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -66.666667, 0.0, 0.100000, 10.000000, 66.666667, 0.0 93 | 1, 9, 2, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -58.333333, 0.0, 0.100000, 10.000000, 58.333333, 0.0 94 | 1, 9, 3, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -50.000000, 0.0, 0.100000, 10.000000, 50.000000, 0.0 95 | 1, 9, 4, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -41.666667, 0.0, 0.100000, 10.000000, 41.666667, 0.0 96 | 1, 9, 5, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -33.333333, 0.0, 0.100000, 10.000000, 33.333333, 0.0 97 | 1, 9, 6, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -25.000000, 0.0, 0.100000, 10.000000, 25.000000, 0.0 98 | 1, 9, 7, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -16.666667, 0.0, 0.100000, 10.000000, 16.666667, 0.0 99 | 1, 9, 8, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, -8.333333, 0.0, 0.100000, 10.000000, 8.333333, 0.0 100 | 1, 9, 9, 0.0, 0.0, 0.0, 1, -0.7, 0.100000, 10.000000, 0.000000, 0.0, 0.100000, 10.000000, 0.000000, 0.0 101 | -------------------------------------------------------------------------------- /RMtools_1D/clean_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | This is an experimental tool to generate Stokes Q and U models from 5 | clean components produced by RMclean1D. 6 | 7 | 8 | Author: cvaneck, Aug 2021 9 | """ 10 | 11 | import json 12 | 13 | import numpy as np 14 | from astropy.constants import c as speed_of_light 15 | 16 | from RMtools_1D.do_RMsynth_1D import readFile as read_freqFile 17 | from RMutils.util_misc import FitResult, calculate_StokesI_model 18 | 19 | 20 | def calculate_QU_model(freqArr, phiArr, CCArr, lambdaSq_0, Iparms=None): 21 | """Compute the predicted Stokes Q and U values for each channel from a 22 | set of clean components (CCs), with optional accounting for Stokes I model. 23 | Inputs: freqArr: array of channel frequencies, in Hz 24 | phiArr: array of Faraday depth values for the clean component array 25 | CCarr: array of (complex) clean components. 26 | lambdaSq_0: scalar value of the reference wavelength squared to 27 | which all the polarization angles are referenced. 28 | Iparms: list of Stokes I polynomial values. If None, all Stokes I 29 | values will be set to 1. 
30 | Returns: 31 | model: array of complex values, one per channel, of Stokes Q and U 32 | predictions based on the clean component model. 33 | 34 | CURRENTLY ASSUMES THAT STOKES I MODEL IS LOG MODEL. SHOULD BE FIXED! 35 | """ 36 | 37 | lambdaSqArr_m2 = np.power(speed_of_light.value / freqArr, 2.0) 38 | 39 | a = lambdaSqArr_m2 - lambdaSq_0 40 | quarr = np.sum(CCArr[:, np.newaxis] * np.exp(2.0j * np.outer(phiArr, a)), axis=0) 41 | 42 | # TODO: Pass in fit function, which is currently not output by rmsynth1d 43 | fit_result = FitResult( 44 | params=Iparms if Iparms is not None else [0, 0, 0, 0, 0, 1], 45 | fitStatus=0, 46 | chiSq=0.0, 47 | chiSqRed=0.0, 48 | AIC=0, 49 | polyOrd=0, 50 | nIter=0, 51 | reference_frequency_Hz=speed_of_light.value / np.sqrt(lambdaSq_0), 52 | dof=0, 53 | pcov=None, 54 | perror=None, 55 | fit_function="log", 56 | ) 57 | StokesI_model = calculate_StokesI_model(fit_result, freqArr) 58 | 59 | QUarr = StokesI_model * quarr 60 | 61 | return QUarr, StokesI_model 62 | 63 | 64 | def save_model(filename, freqArr, Imodel, QUarr): 65 | np.savetxt(filename, list(zip(freqArr, Imodel, QUarr.real, QUarr.imag))) 66 | 67 | 68 | def read_files(freqfile, rmSynthfile, CCfile): 69 | """Get necessary data from the RMsynth and RMclean files. These data are: 70 | * The array of channel frequencies, from the RMsynth input file. 71 | * The phi array and clean components, from the RMclean1D _FDFmodel.dat file 72 | * The Stokes I model and lambda^2_0 value, from the RMsynth1D _RMsynth.json file. 73 | 74 | Inputs: freqfile (str): filename containing frequencies 75 | rmSynthfile (str): filename of RMsynth JSON output. 76 | CCfile (str): filename of clean component model file (_FDFmodel.dat)/ 77 | 78 | Returns: phiArr: array of Faraday depth values for CC spectrum 79 | CCarr: array of (complex) clean components 80 | Iparms: list of Stokes I model parameters. 81 | lambdaSq_0: scalar value of lambda^2_0, in m^2. 82 | """ 83 | 84 | phiArr, CCreal, CCimag = np.loadtxt(CCfile, unpack=True, dtype="float") 85 | CCArr = CCreal + 1j * CCimag 86 | 87 | # TODO: change filename to JSON if needed? 88 | synth_mDict = json.load(open(rmSynthfile, "r")) 89 | Iparms = [float(x) for x in synth_mDict["polyCoeffs"].split(",")] 90 | lambdaSq_0 = synth_mDict["lam0Sq_m2"] 91 | 92 | data = read_freqFile(freqfile, 64, verbose=False, debug=False) 93 | freqArr = data[0] 94 | 95 | return phiArr, CCArr, Iparms, lambdaSq_0, freqArr 96 | 97 | 98 | def main(): 99 | """Generate Stokes QU model based on clean components and (optional) 100 | Stokes I model. Requires inputs to rmsynth1D and outputs of rmsynth1d and 101 | rmclean1d. 102 | """ 103 | import argparse 104 | 105 | descStr = """ 106 | Generate Stokes QU model based on clean components and (optional) 107 | Stokes I model. Requires inputs to rmsynth1D and outputs of rmsynth1d and 108 | rmclean1d. Saves ASCII file containing arrays of IQU for each channel. 
109 | """ 110 | 111 | # Parse the command line options 112 | parser = argparse.ArgumentParser( 113 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 114 | ) 115 | parser.add_argument( 116 | "freqfile", 117 | metavar="input.dat", 118 | nargs=1, 119 | help="ASCII file containing original frequency spectra.", 120 | ) 121 | parser.add_argument( 122 | "rmSynthfile", 123 | metavar="_RMsynth.json", 124 | nargs=1, 125 | help="RMsynth1d output JSON file.", 126 | ) 127 | parser.add_argument( 128 | "CCfile", 129 | metavar="_FDFmodel.dat", 130 | nargs=1, 131 | help="Clean component model file (_FDFmodel.dat)", 132 | ) 133 | parser.add_argument( 134 | "outfile", 135 | metavar="QUmodel.dat", 136 | nargs=1, 137 | help="Filename to save output model to.", 138 | ) 139 | args = parser.parse_args() 140 | 141 | phiArr, CCArr, Iparms, lambdaSq_0, freqArr = read_files( 142 | args.freqfile, args.rmSynthfile, args.CCfile 143 | ) 144 | QUarr, Imodel = calculate_QU_model(freqArr, phiArr, CCArr, lambdaSq_0, Iparms) 145 | 146 | save_model(args.outfile, freqArr, Imodel, QUarr) 147 | 148 | 149 | if __name__ == "__main__": 150 | main() 151 | -------------------------------------------------------------------------------- /RMtools_1D/do_RMsynth_1D_fromFITS.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # =============================================================================# 3 | # # 4 | # NAME: do_RMsynth_1D_fromFITS.py # 5 | # # 6 | # PURPOSE: Run RM-synthesis on an ASCII Stokes I, Q & U spectrum. # 7 | # # 8 | # MODIFIED: Summer 2019, by Boris Gbeasor # 9 | # # 10 | # =============================================================================# 11 | # # 12 | # The MIT License (MIT) # 13 | # # 14 | # Copyright (c) 2015 - 2018 Cormac R. Purcell # 15 | # # 16 | # Permission is hereby granted, free of charge, to any person obtaining a # 17 | # copy of this software and associated documentation files (the "Software"), # 18 | # to deal in the Software without restriction, including without limitation # 19 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, # 20 | # and/or sell copies of the Software, and to permit persons to whom the # 21 | # Software is furnished to do so, subject to the following conditions: # 22 | # # 23 | # The above copyright notice and this permission notice shall be included in # 24 | # all copies or substantial portions of the Software. # 25 | # # 26 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # 27 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # 28 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # 29 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # 30 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # 31 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # 32 | # DEALINGS IN THE SOFTWARE. # 33 | # # 34 | # =============================================================================# 35 | 36 | # import time 37 | import argparse 38 | import os 39 | import sys 40 | 41 | # import pdb 42 | 43 | if sys.version_info.major == 2: 44 | print("RM-tools will no longer run with Python 2! 
Please use Python 3.") 45 | exit() 46 | 47 | 48 | import numpy as np 49 | from astropy import wcs 50 | from astropy.io import fits 51 | 52 | from RMtools_1D.do_RMsynth_1D import run_rmsynth, saveOutput 53 | from RMtools_3D.make_freq_file import get_freq_array 54 | 55 | 56 | # -----------------------------------------------------------------------------# 57 | def main(): 58 | """ 59 | Start the function to perform RM-synthesis if called from the command line. 60 | """ 61 | 62 | # Help string to be shown using the -h option 63 | descStr = """ 64 | *** PROTOTYPE! FUNCTIONALITY NOT GUARANTEED! PLEASE TEST AND SUBMIT BUG REPORTS!*** 65 | 66 | Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in a FITS 67 | file. Does not currently account for errors. 68 | If these features are needed, please use the standard 1D function. 69 | """ 70 | 71 | epilog_text = """ 72 | Outputs with -S flag: 73 | _FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U] 74 | _RMSF.dat: Computed RMSF [Phi, Q, U] 75 | _RMsynth.dat: list of derived parameters for RM spectrum 76 | (approximately equivalent to -v flag output) 77 | _RMsynth.json: dictionary of derived parameters for RM spectrum 78 | _weight.dat: Calculated channel weights [freq_Hz, weight] 79 | """ 80 | 81 | # Parse the command line options 82 | parser = argparse.ArgumentParser( 83 | description=descStr, 84 | epilog=epilog_text, 85 | formatter_class=argparse.RawTextHelpFormatter, 86 | ) 87 | parser.add_argument( 88 | "dataFile", 89 | metavar="StokesQ.fits", 90 | nargs=1, 91 | help="FITS cube with Stokes Q data.", 92 | ) 93 | parser.add_argument( 94 | "UFile", metavar="StokesU.fits", nargs=1, help="FITS cube with Stokes U data." 95 | ) 96 | parser.add_argument( 97 | "xcoords", 98 | metavar="xcoords", 99 | nargs="?", 100 | type=float, 101 | default="1", 102 | help="X pixel location (FITS 1-indexed convention)", 103 | ) 104 | parser.add_argument( 105 | "ycoords", 106 | metavar="ycoords", 107 | nargs="?", 108 | type=float, 109 | default="1", 110 | help="Y pixel location (FITS 1-indexed convention)", 111 | ) 112 | parser.add_argument( 113 | "-c", 114 | dest="sky_coords", 115 | action="store_true", 116 | help="transform from sky coordinates (assumes X and Y are sky coordinates in degrees).", 117 | ) 118 | parser.add_argument( 119 | "-I", 120 | dest="StokesI_fits", 121 | default=None, 122 | help="extract Stokes I from given file. 
[None]", 123 | ) 124 | parser.add_argument( 125 | "-t", 126 | dest="fitRMSF", 127 | action="store_true", 128 | help="fit a Gaussian to the RMSF [False]", 129 | ) 130 | parser.add_argument( 131 | "-l", 132 | dest="phiMax_radm2", 133 | type=float, 134 | default=None, 135 | help="absolute max Faraday depth sampled [Auto].", 136 | ) 137 | parser.add_argument( 138 | "-d", 139 | dest="dPhi_radm2", 140 | type=float, 141 | default=None, 142 | help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)", 143 | ) 144 | parser.add_argument( 145 | "-s", 146 | dest="nSamples", 147 | type=float, 148 | default=10, 149 | help="number of samples across the RMSF lobe [10].", 150 | ) 151 | parser.add_argument( 152 | "-w", 153 | dest="weightType", 154 | default="variance", 155 | help="weighting [inverse variance] or 'uniform' (all 1s).", 156 | ) 157 | parser.add_argument( 158 | "-o", 159 | dest="polyOrd", 160 | type=int, 161 | default=2, 162 | help="polynomial order to fit to I spectrum [2].", 163 | ) 164 | parser.add_argument( 165 | "-i", 166 | dest="noStokesI", 167 | action="store_true", 168 | help="ignore the Stokes I spectrum [False].", 169 | ) 170 | parser.add_argument( 171 | "-b", 172 | dest="bit64", 173 | action="store_true", 174 | help="use 64-bit floating point precision [False (uses 32-bit)]", 175 | ) 176 | parser.add_argument( 177 | "-p", dest="showPlots", action="store_true", help="show the plots [False]." 178 | ) 179 | parser.add_argument( 180 | "-v", dest="verbose", action="store_true", help="verbose output [False]." 181 | ) 182 | parser.add_argument( 183 | "-S", dest="saveOutput", action="store_true", help="save the arrays [False]." 184 | ) 185 | parser.add_argument( 186 | "-D", 187 | dest="debug", 188 | action="store_true", 189 | help="turn on debugging messages & plots [False].", 190 | ) 191 | parser.add_argument( 192 | "-U", 193 | dest="units", 194 | type=str, 195 | default=None, 196 | help="Intensity units of the data. [from FITS header]", 197 | ) 198 | parser.add_argument( 199 | "-r", 200 | "--super-resolution", 201 | action="store_true", 202 | help="Optimise the resolution of the RMSF (as per Rudnick & Cotton). ", 203 | ) 204 | args = parser.parse_args() 205 | 206 | # Sanity checks 207 | if not os.path.exists(args.dataFile[0]): 208 | print("File does not exist: '%s'." 
% args.dataFile[0]) 209 | sys.exit() 210 | prefixOut, ext = os.path.splitext(args.dataFile[0]) 211 | dataDir, dummy = os.path.split(args.dataFile[0]) 212 | # Set the floating point precision 213 | nBits = 32 214 | if args.bit64: 215 | nBits = 64 216 | verbose = args.verbose 217 | # data = clRM.readFile(args.dataFile[0],nBits, verbose) 218 | 219 | hduList = fits.open(args.dataFile[0]) 220 | if args.sky_coords: 221 | imgwcs = wcs.WCS(hduList[0].header, naxis=(1, 2)) 222 | xcoords, ycoords = np.round( 223 | imgwcs.all_world2pix(args.xcoords, args.ycoords, 0) 224 | ).astype(int) 225 | print("Extracting pixel {} x {} (1-indexed)".format(xcoords + 1, ycoords + 1)) 226 | # print(ycoords+1) 227 | else: 228 | xcoords = int(args.xcoords) - 1 229 | ycoords = int(args.ycoords) - 1 230 | freq_array = get_freq_array(args.dataFile[0]) 231 | Q_array = get_data_Q_U(args.dataFile[0], ycoords, xcoords) 232 | U_array = get_data_Q_U(args.UFile[0], ycoords, xcoords) 233 | dQ_array = np.full(freq_array.shape, 1 * 10 ** (-3)) 234 | dU_array = np.full(freq_array.shape, 1 * 10 ** (-3)) 235 | 236 | Q_array[~np.isfinite(Q_array)] = np.nan 237 | U_array[~np.isfinite(U_array)] = np.nan 238 | data = [freq_array, Q_array, U_array, dQ_array, dU_array] 239 | 240 | if args.units is None: 241 | if "BUNIT" in hduList[0].header: 242 | args.units = hduList[0].header["BUNIT"] 243 | else: 244 | args.units = "Jy/beam" 245 | 246 | if (Q_array != 0).sum() == 0 and (U_array != 0).sum() == 0: 247 | raise Exception("All QU values zero! Maybe invalid pixel?") 248 | 249 | if args.StokesI_fits is not None: 250 | I_array = get_data_Q_U(args.StokesI_fits, ycoords, xcoords) 251 | dI_array = np.full(freq_array.shape, 1 * 10 ** (-3)) 252 | data = [freq_array, I_array, Q_array, U_array, dI_array, dQ_array, dU_array] 253 | # Run RM-synthesis on the spectra 254 | mDict, aDict = run_rmsynth( 255 | data=data, 256 | polyOrd=args.polyOrd, 257 | phiMax_radm2=args.phiMax_radm2, 258 | dPhi_radm2=args.dPhi_radm2, 259 | nSamples=args.nSamples, 260 | weightType=args.weightType, 261 | fitRMSF=args.fitRMSF, 262 | noStokesI=args.noStokesI, 263 | nBits=nBits, 264 | showPlots=args.showPlots, 265 | debug=args.debug, 266 | verbose=verbose, 267 | units=args.units, 268 | saveFigures=args.saveOutput, 269 | super_resolution=args.super_resolution, 270 | ) 271 | if args.saveOutput: 272 | saveOutput(mDict, aDict, prefixOut, verbose) 273 | 274 | 275 | def get_data_Q_U(filename, ycoords, xcoords): 276 | hduList = fits.open(filename) 277 | return hduList[0].data[:, ycoords, xcoords] 278 | 279 | 280 | # -----------------------------------------------------------------------------# 281 | if __name__ == "__main__": 282 | main() 283 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | __all__ = [""] 3 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m1.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | 7 | 8 | # -----------------------------------------------------------------------------# 9 | # Function defining the model. 
# 10 | # # 11 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 12 | # lamSqArr_m2 = Array of lambda-squared values # 13 | # quArr = Complex array containing the Re and Im spectra. # 14 | # -----------------------------------------------------------------------------# 15 | def model(pDict, lamSqArr_m2): 16 | """ 17 | 18 | Simple Faraday thin source 19 | 20 | Ref: 21 | Sokoloff et al. (1998) Eq 2 22 | O'Sullivan et al. (2012) Eq 8 23 | Ma et al. (2019a) Eq 10 24 | 25 | """ 26 | 27 | # Calculate the complex fractional q and u spectra 28 | # fmt: off 29 | pArr = pDict["fracPol"] * np.ones_like(lamSqArr_m2) 30 | quArr = pArr * np.exp( 31 | 2j * (np.radians(pDict["psi0_deg"]) + pDict["RM_radm2"] * lamSqArr_m2) 32 | ) 33 | # fmt: on 34 | 35 | return quArr 36 | 37 | 38 | # -----------------------------------------------------------------------------# 39 | # Priors for the above model. # 40 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 41 | # # 42 | # -----------------------------------------------------------------------------# 43 | priors = { 44 | "fracPol": bilby.prior.Uniform( 45 | minimum=0.0, maximum=1.0, name="fracPol", latex_label=r"$p$" 46 | ), 47 | "psi0_deg": bilby.prior.Uniform( 48 | minimum=0, 49 | maximum=180.0, 50 | name="psi0_deg", 51 | latex_label=r"$\psi_0$ (deg)", 52 | boundary="periodic", 53 | ), 54 | "RM_radm2": bilby.prior.Uniform( 55 | minimum=-1100.0, 56 | maximum=1100.0, 57 | name="RM_radm2", 58 | latex_label=r"RM (rad m$^{-2}$)", 59 | ), 60 | } 61 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m11.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | from bilby.core.prior import Constraint, PriorDict 7 | 8 | 9 | # -----------------------------------------------------------------------------# 10 | # Function defining the model. # 11 | # # 12 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 13 | # lamSqArr_m2 = Array of lambda-squared values # 14 | # quArr = Complex array containing the Re and Im spectra. # 15 | # -----------------------------------------------------------------------------# 16 | def model(pDict, lamSqArr_m2): 17 | """ 18 | 19 | Two separate Faraday thin sources 20 | Averaged within the same telescope beam (i.e., unresolved) 21 | 22 | Ref (for individual source component): 23 | Sokoloff et al. (1998) Eq 2 24 | O'Sullivan et al. (2012) Eq 8 25 | Ma et al. (2019a) Eq 10 26 | 27 | """ 28 | 29 | # Calculate the complex fractional q and u spectra 30 | pArr1 = pDict["fracPol1"] * np.ones_like(lamSqArr_m2) 31 | pArr2 = pDict["fracPol2"] * np.ones_like(lamSqArr_m2) 32 | # fmt: off 33 | quArr1 = pArr1 * np.exp( 34 | 2j * (np.radians(pDict["psi01_deg"]) + pDict["RM1_radm2"] * lamSqArr_m2) 35 | ) 36 | quArr2 = pArr2 * np.exp( 37 | 2j * (np.radians(pDict["psi02_deg"]) + pDict["RM2_radm2"] * lamSqArr_m2) 38 | ) 39 | quArr = quArr1 + quArr2 40 | # fmt: on 41 | 42 | return quArr 43 | 44 | 45 | # -----------------------------------------------------------------------------# 46 | # Priors for the above model. # 47 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. 
# 48 | # # 49 | # -----------------------------------------------------------------------------# 50 | def converter(parameters): 51 | """ 52 | Function to convert between sampled parameters and constraint parameter. 53 | 54 | Parameters 55 | ---------- 56 | parameters: dict 57 | Dictionary containing sampled parameter values, 'RM1_radm2', 'RM1_radm2', 58 | 'fracPol1', 'fracPol2' 59 | 60 | Returns 61 | ------- 62 | dict: Dictionary with constraint parameter 'delta_RM1_RM2_radm2' and 'sum_p1_p2' added. 63 | """ 64 | converted_parameters = parameters.copy() 65 | converted_parameters["delta_RM1_RM2_radm2"] = ( 66 | parameters["RM1_radm2"] - parameters["RM2_radm2"] 67 | ) 68 | converted_parameters["sum_p1_p2"] = parameters["fracPol1"] + parameters["fracPol2"] 69 | return converted_parameters 70 | 71 | 72 | priors = PriorDict(conversion_function=converter) 73 | 74 | priors["fracPol1"] = bilby.prior.Uniform( 75 | minimum=0.0, 76 | maximum=1.0, 77 | name="fracPol1", 78 | latex_label=r"$p_1$", 79 | ) 80 | 81 | priors["fracPol2"] = bilby.prior.Uniform( 82 | minimum=0.0, 83 | maximum=1.0, 84 | name="fracPol2", 85 | latex_label=r"$p_2$", 86 | ) 87 | 88 | priors["psi01_deg"] = bilby.prior.Uniform( 89 | minimum=0, 90 | maximum=180.0, 91 | name="psi01_deg", 92 | latex_label=r"$\psi_{0,1}$ (deg)", 93 | boundary="periodic", 94 | ) 95 | priors["psi02_deg"] = bilby.prior.Uniform( 96 | minimum=0, 97 | maximum=180.0, 98 | name="psi02_deg", 99 | latex_label=r"$\psi_{0,2}$ (deg)", 100 | boundary="periodic", 101 | ) 102 | 103 | priors["RM1_radm2"] = bilby.prior.Uniform( 104 | minimum=-1100.0, 105 | maximum=1100.0, 106 | name="RM1_radm2", 107 | latex_label=r"$\phi_1$ (rad m$^{-2}$)", 108 | ) 109 | priors["RM2_radm2"] = bilby.prior.Uniform( 110 | minimum=-1100.0, 111 | maximum=1100.0, 112 | name="RM2_radm2", 113 | latex_label=r"$\phi_2$ (rad m$^{-2}$)", 114 | ) 115 | priors["delta_RM1_RM2_radm2"] = Constraint( 116 | minimum=0, 117 | maximum=2200.0, 118 | name="delta_RM1_RM2_radm2", 119 | latex_label=r"$\Delta\phi_{1,2}$ (rad m$^{-2}$)", 120 | ) 121 | priors["sum_p1_p2"] = Constraint( 122 | minimum=0.0, 123 | maximum=1, 124 | name="sum_p1_p2", 125 | latex_label=r"$p_1+p_2$)", 126 | ) 127 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m111.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | from bilby.core.prior import Constraint, PriorDict 7 | 8 | 9 | # -----------------------------------------------------------------------------# 10 | # Function defining the model. # 11 | # # 12 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 13 | # lamSqArr_m2 = Array of lambda-squared values # 14 | # quArr = Complex array containing the Re and Im spectra. # 15 | # -----------------------------------------------------------------------------# 16 | def model(pDict, lamSqArr_m2): 17 | """ 18 | 19 | Three separate Faraday thin sources 20 | Averaged within the same telescope beam (i.e., unresolved) 21 | 22 | Ref (for individual source component): 23 | Sokoloff et al. (1998) Eq 2 24 | O'Sullivan et al. (2012) Eq 8 25 | Ma et al. 
(2019a) Eq 10 26 | 27 | """ 28 | 29 | # Calculate the complex fractional q and u spectra 30 | # fmt: off 31 | pArr1 = pDict["fracPol1"] * np.ones_like(lamSqArr_m2) 32 | pArr2 = pDict["fracPol2"] * np.ones_like(lamSqArr_m2) 33 | pArr3 = pDict["fracPol3"] * np.ones_like(lamSqArr_m2) 34 | quArr1 = pArr1 * np.exp( 2j * (np.radians(pDict["psi01_deg"]) + 35 | pDict["RM1_radm2"] * lamSqArr_m2)) 36 | quArr2 = pArr2 * np.exp( 2j * (np.radians(pDict["psi02_deg"]) + 37 | pDict["RM2_radm2"] * lamSqArr_m2)) 38 | quArr3 = pArr3 * np.exp( 2j * (np.radians(pDict["psi03_deg"]) + 39 | pDict["RM3_radm2"] * lamSqArr_m2)) 40 | quArr = (quArr1 + quArr2 + quArr3) 41 | # fmt: on 42 | 43 | return quArr 44 | 45 | 46 | # -----------------------------------------------------------------------------# 47 | # Priors for the above model. # 48 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 49 | # # 50 | # -----------------------------------------------------------------------------# 51 | def converter(parameters): 52 | """ 53 | Function to convert between sampled parameters and constraint parameter. 54 | 55 | Parameters 56 | ---------- 57 | parameters: dict 58 | Dictionary containing sampled parameter values, 'RM1_radm2', 'RM2_radm2', 59 | 'RM3_radm2', 'fracPol1', 'fracPol2', 'fracPol3' 60 | 61 | Returns 62 | ------- 63 | dict: Dictionary with constraint parameter 'delta_RM1_RM2_radm2' and 'sum_p1_p2_p3' added. 64 | """ 65 | converted_parameters = parameters.copy() 66 | converted_parameters["delta_RM1_RM2_radm2"] = ( 67 | parameters["RM1_radm2"] - parameters["RM2_radm2"] 68 | ) 69 | converted_parameters["delta_RM2_RM3_radm2"] = ( 70 | parameters["RM2_radm2"] - parameters["RM3_radm2"] 71 | ) 72 | converted_parameters["sum_p1_p2_p3"] = ( 73 | parameters["fracPol1"] + parameters["fracPol2"] + parameters["fracPol3"] 74 | ) 75 | return converted_parameters 76 | 77 | 78 | priors = PriorDict(conversion_function=converter) 79 | 80 | priors["fracPol1"] = bilby.prior.Uniform( 81 | minimum=0.0, 82 | maximum=1.0, 83 | name="fracPol1", 84 | latex_label="$p_1$", 85 | ) 86 | priors["fracPol2"] = bilby.prior.Uniform( 87 | minimum=0.0, 88 | maximum=1.0, 89 | name="fracPol2", 90 | latex_label="$p_2$", 91 | ) 92 | priors["fracPol3"] = bilby.prior.Uniform( 93 | minimum=0.0, 94 | maximum=1.0, 95 | name="fracPol3", 96 | latex_label="$p_3$", 97 | ) 98 | 99 | priors["psi01_deg"] = bilby.prior.Uniform( 100 | minimum=0, 101 | maximum=180.0, 102 | name="psi01_deg", 103 | latex_label="$\psi_{0,1}$ (deg)", 104 | boundary="periodic", 105 | ) 106 | priors["psi02_deg"] = bilby.prior.Uniform( 107 | minimum=0, 108 | maximum=180.0, 109 | name="psi02_deg", 110 | latex_label="$\psi_{0,2}$ (deg)", 111 | boundary="periodic", 112 | ) 113 | priors["psi03_deg"] = bilby.prior.Uniform( 114 | minimum=0, 115 | maximum=180.0, 116 | name="psi03_deg", 117 | latex_label="$\psi_{0,3}$ (deg)", 118 | boundary="periodic", 119 | ) 120 | 121 | priors["RM1_radm2"] = bilby.prior.Uniform( 122 | minimum=-1100.0, 123 | maximum=1100.0, 124 | name="RM1_radm2", 125 | latex_label="$\phi_1$ (rad m$^{-2}$)", 126 | ) 127 | priors["RM2_radm2"] = bilby.prior.Uniform( 128 | minimum=-1100.0, 129 | maximum=1100.0, 130 | name="RM2_radm2", 131 | latex_label="$\phi_2$ (rad m$^{-2}$)", 132 | ) 133 | priors["RM3_radm2"] = bilby.prior.Uniform( 134 | minimum=-1100.0, 135 | maximum=1100.0, 136 | name="RM3_radm2", 137 | latex_label="$\phi_3$ (rad m$^{-2}$)", 138 | ) 139 | priors["delta_RM1_RM2_radm2"] = Constraint( 140 | minimum=0, 141 | maximum=2200.0, 142 | 
name="delta_RM1_RM2_radm2", 143 | latex_label="$\Delta\phi_{1,2}$ (rad m$^{-2}$)", 144 | ) 145 | priors["delta_RM2_RM3_radm2"] = Constraint( 146 | minimum=0, 147 | maximum=2200.0, 148 | name="delta_RM2_RM3_radm2", 149 | latex_label="$\Delta\phi_{1,2}$ (rad m$^{-2}$)", 150 | ) 151 | priors["sum_p1_p2_p3"] = Constraint( 152 | minimum=0.0, 153 | maximum=1, 154 | name="sum_p1_p2_p3", 155 | latex_label="$p_1+p_2+p_3$)", 156 | ) 157 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m12.py: -------------------------------------------------------------------------------- 1 | # 2 | # =============================================================================# 3 | import bilby 4 | import numpy as np 5 | 6 | 7 | # -----------------------------------------------------------------------------# 8 | # Function defining the model. # 9 | # # 10 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 11 | # lamSqArr_m2 = Array of lambda-squared values # 12 | # quArr = Complex array containing the Re and Im spectra. # 13 | # -----------------------------------------------------------------------------# 14 | def model(pDict, lamSqArr_m2): 15 | """ 16 | Single Faraday component with internal Faraday dispersion and 17 | an foreground external dispersion screen 18 | 19 | Ref: 20 | Sokoloff et al. (1998) Eq 34 21 | O'Sullivan et al. (2012) Eq 10 22 | Oberhelman et al. (in prep) Eq 6 23 | """ 24 | 25 | # Calculate the complex fractional q and u spectra 26 | pArr = pDict["fracPol"] * np.ones_like(lamSqArr_m2) 27 | para_S = ( 28 | 2.0 * lamSqArr_m2**2 * pDict["sigmaRM_radm2"] ** 2 29 | - 2j * lamSqArr_m2 * pDict["deltaRM_radm2"] 30 | ) 31 | 32 | quArr = ( 33 | pArr 34 | * np.exp( 35 | 2j * (np.radians(pDict["psi0_deg"]) + pDict["RM_screen"] * lamSqArr_m2) 36 | ) 37 | * ((1 - np.exp(-1.0 * para_S)) / para_S) 38 | * np.exp(-2.0 * pDict["sigmaRM_radm2_FG"] ** 2.0 * lamSqArr_m2**2.0) 39 | ) 40 | 41 | return quArr 42 | 43 | 44 | # -----------------------------------------------------------------------------# 45 | # Priors for the above model. # 46 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. 
# 47 | # # 48 | # -----------------------------------------------------------------------------# 49 | priors = { 50 | "fracPol": bilby.prior.Uniform( 51 | minimum=0.0, maximum=0.6, name="fracPol", latex_label="$p$" 52 | ), 53 | "psi0_deg": bilby.prior.Uniform( 54 | minimum=0, 55 | maximum=180.0, 56 | name="psi0_deg", 57 | latex_label="$\psi_0$ (deg)", 58 | boundary="periodic", 59 | ), 60 | "deltaRM_radm2": bilby.prior.Uniform( 61 | minimum=0.0, 62 | maximum=60.0, 63 | name="deltaRM_radm2", 64 | latex_label="$\Delta{RM}$ (rad m$^{-2}$)", 65 | ), 66 | "sigmaRM_radm2": bilby.prior.Uniform( 67 | minimum=0.0, 68 | maximum=50.0, 69 | name="sigmaRM_radm2", 70 | latex_label="$\sigma_{RM}$ (rad m$^{-2}$)", 71 | ), 72 | "sigmaRM_radm2_FG": bilby.prior.Uniform( 73 | minimum=0.0, 74 | maximum=50.0, 75 | name="sigmaRM_radm2_FG", 76 | latex_label="$\sigma_{RM,FG}$ (rad m$^{-2}$)", 77 | ), 78 | "RM_screen": bilby.prior.Uniform( 79 | minimum=-1100.0, 80 | maximum=1100.0, 81 | name="RM_screen_radm2_FG", 82 | latex_label="$RM_{screen,FG}$ (rad m$^{-2}$)", 83 | ), 84 | } 85 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m2.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | 7 | 8 | # -----------------------------------------------------------------------------# 9 | # Function defining the model. # 10 | # # 11 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 12 | # lamSqArr_m2 = Array of lambda-squared values # 13 | # quArr = Complex array containing the Re and Im spectra. # 14 | # -----------------------------------------------------------------------------# 15 | def model(pDict, lamSqArr_m2): 16 | """ 17 | 18 | Single Faraday component with external Faraday dispersion 19 | 20 | Ref: 21 | Burn (1966) Eq 21 22 | Sokoloff et al. (1998) Eq B3 23 | O'Sullivan et al. (2012) Eq 11 24 | Ma et al. (2019a) Eq 13 25 | 26 | """ 27 | 28 | # Calculate the complex fractional q and u spectra 29 | # fmt: off 30 | pArr = pDict["fracPol"] * np.ones_like(lamSqArr_m2) 31 | quArr = ( 32 | pArr 33 | * np.exp(2j * (np.radians(pDict["psi0_deg"]) + pDict["RM_radm2"] * lamSqArr_m2)) 34 | * np.exp(-2.0 * pDict["sigmaRM_radm2"] ** 2.0 * lamSqArr_m2**2.0) 35 | ) 36 | # fmt: on 37 | 38 | return quArr 39 | 40 | 41 | # -----------------------------------------------------------------------------# 42 | # Priors for the above model. # 43 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. 
# 44 | # # 45 | # -----------------------------------------------------------------------------# 46 | priors = { 47 | "fracPol": bilby.prior.Uniform( 48 | minimum=0.0, maximum=1.0, name="fracPol", latex_label=r"$p$" 49 | ), 50 | "psi0_deg": bilby.prior.Uniform( 51 | minimum=0, 52 | maximum=180.0, 53 | name="psi0_deg", 54 | latex_label=r"$\psi_0$ (deg)", 55 | boundary="periodic", 56 | ), 57 | "RM_radm2": bilby.prior.Uniform( 58 | minimum=-1100.0, 59 | maximum=1100.0, 60 | name="RM_radm2", 61 | latex_label=r"RM (rad m$^{-2}$)", 62 | ), 63 | "sigmaRM_radm2": bilby.prior.Uniform( 64 | minimum=0.0, 65 | maximum=100.0, 66 | name="sigmaRM_radm2", 67 | latex_label=r"$\sigma_{RM}$ (rad m$^{-2}$)", 68 | ), 69 | } 70 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m3.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | from bilby.core.prior import Constraint, PriorDict 7 | 8 | 9 | # -----------------------------------------------------------------------------# 10 | # Function defining the model. # 11 | # # 12 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 13 | # lamSqArr_m2 = Array of lambda-squared values # 14 | # quArr = Complex array containing the Re and Im spectra. # 15 | # -----------------------------------------------------------------------------# 16 | def model(pDict, lamSqArr_m2): 17 | """ 18 | 19 | Two separate Faraday components with external Faraday dispersion 20 | With a common depolarisation term 21 | Averaged within the same telescope beam (i.e., unresolved) 22 | 23 | Ref (for individual source component): 24 | Burn (1966) Eq 21 25 | Sokoloff et al. (1998) Eq B3 26 | O'Sullivan et al. (2012) Eq 11 27 | Ma et al. (2019a) Eq 13 28 | 29 | """ 30 | 31 | # Calculate the complex fractional q and u spectra 32 | pArr1 = pDict["fracPol1"] * np.ones_like(lamSqArr_m2) 33 | pArr2 = pDict["fracPol2"] * np.ones_like(lamSqArr_m2) 34 | # fmt: off 35 | quArr1 = pArr1 * np.exp( 36 | 2j * (np.radians(pDict["psi01_deg"]) + pDict["RM1_radm2"] * lamSqArr_m2) 37 | ) 38 | quArr2 = pArr2 * np.exp( 39 | 2j * (np.radians(pDict["psi02_deg"]) + pDict["RM2_radm2"] * lamSqArr_m2) 40 | ) 41 | quArr = (quArr1 + quArr2) * np.exp( 42 | -2.0 * pDict["sigmaRM_radm2"] ** 2.0 * lamSqArr_m2**2.0 43 | ) 44 | # fmt: on 45 | return quArr 46 | 47 | 48 | # -----------------------------------------------------------------------------# 49 | # Priors for the above model. # 50 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 51 | # # 52 | # -----------------------------------------------------------------------------# 53 | def converter(parameters): 54 | """ 55 | Function to convert between sampled parameters and constraint parameter. 56 | 57 | Parameters 58 | ---------- 59 | parameters: dict 60 | Dictionary containing sampled parameter values, 'RM1_radm2', 'RM1_radm2'. 61 | 62 | Returns 63 | ------- 64 | dict: Dictionary with constraint parameter 'delta_RM1_RM2_radm2' added. 
65 | """ 66 | converted_parameters = parameters.copy() 67 | converted_parameters["delta_RM1_RM2_radm2"] = ( 68 | parameters["RM1_radm2"] - parameters["RM2_radm2"] 69 | ) 70 | converted_parameters["sum_p1_p2"] = parameters["fracPol1"] + parameters["fracPol2"] 71 | return converted_parameters 72 | 73 | 74 | priors = PriorDict(conversion_function=converter) 75 | 76 | priors["fracPol1"] = bilby.prior.Uniform( 77 | minimum=0.0, 78 | maximum=1.0, 79 | name="fracPol1", 80 | latex_label=r"$p_1$", 81 | ) 82 | 83 | priors["fracPol2"] = bilby.prior.Uniform( 84 | minimum=0.0, 85 | maximum=1.0, 86 | name="fracPol2", 87 | latex_label=r"$p_2$", 88 | ) 89 | 90 | priors["psi01_deg"] = bilby.prior.Uniform( 91 | minimum=0, 92 | maximum=180.0, 93 | name="psi01_deg", 94 | latex_label=r"$\psi_{0,1}$ (deg)", 95 | boundary="periodic", 96 | ) 97 | priors["psi02_deg"] = bilby.prior.Uniform( 98 | minimum=0, 99 | maximum=180.0, 100 | name="psi02_deg", 101 | latex_label=r"$\psi_{0,2}$ (deg)", 102 | boundary="periodic", 103 | ) 104 | 105 | priors["RM1_radm2"] = bilby.prior.Uniform( 106 | minimum=-1100.0, 107 | maximum=1100.0, 108 | name="RM1_radm2", 109 | latex_label=r"$\phi_1$ (rad m$^{-2}$)", 110 | ) 111 | priors["RM2_radm2"] = bilby.prior.Uniform( 112 | minimum=-1100.0, 113 | maximum=1100.0, 114 | name="RM2_radm2", 115 | latex_label=r"$\phi_2$ (rad m$^{-2}$)", 116 | ) 117 | priors["delta_RM1_RM2_radm2"] = Constraint( 118 | minimum=0, 119 | maximum=2200.0, 120 | name="delta_RM1_RM2_radm2", 121 | latex_label=r"$\Delta\phi_{1,2}$ (rad m$^{-2}$)", 122 | ) 123 | 124 | priors["sigmaRM_radm2"] = bilby.prior.Uniform( 125 | minimum=0, 126 | maximum=100.0, 127 | name="sigmaRM_radm2", 128 | latex_label=r"$\sigma_{RM}$ (rad m$^{-2}$)", 129 | ) 130 | 131 | priors["sum_p1_p2"] = Constraint( 132 | minimum=0.0, 133 | maximum=1, 134 | name="sum_p1_p2", 135 | latex_label=r"$p_1+p_2$)", 136 | ) 137 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m4.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | from bilby.core.prior import Constraint, PriorDict 7 | 8 | 9 | # -----------------------------------------------------------------------------# 10 | # Function defining the model. # 11 | # # 12 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 13 | # lamSqArr_m2 = Array of lambda-squared values # 14 | # quArr = Complex array containing the Re and Im spectra. # 15 | # -----------------------------------------------------------------------------# 16 | def model(pDict, lamSqArr_m2): 17 | """ 18 | 19 | Two separate Faraday components with external Faraday dispersion 20 | With individual depolarisation terms 21 | Averaged within the same telescope beam (i.e., unresolved) 22 | 23 | Ref (for individual source component): 24 | Burn (1966) Eq 21 25 | Sokoloff et al. (1998) Eq B3 26 | O'Sullivan et al. (2012) Eq 11 27 | Ma et al. 
(2019a) Eq 13 28 | 29 | """ 30 | 31 | # Calculate the complex fractional q and u spectra 32 | pArr1 = pDict["fracPol1"] * np.ones_like(lamSqArr_m2) 33 | pArr2 = pDict["fracPol2"] * np.ones_like(lamSqArr_m2) 34 | # fmt: off 35 | quArr1 = pArr1 * np.exp( 36 | 2j * (np.radians(pDict["psi01_deg"]) + pDict["RM1_radm2"] * lamSqArr_m2) 37 | ) 38 | quArr2 = pArr2 * np.exp( 39 | 2j * (np.radians(pDict["psi02_deg"]) + pDict["RM2_radm2"] * lamSqArr_m2) 40 | ) 41 | quArr = quArr1 * np.exp( 42 | -2.0 * pDict["sigmaRM1_radm2"] ** 2.0 * lamSqArr_m2**2.0 43 | ) + quArr2 * np.exp(-2.0 * pDict["sigmaRM2_radm2"] ** 2.0 * lamSqArr_m2**2.0) 44 | # fmt: on 45 | return quArr 46 | 47 | 48 | # -----------------------------------------------------------------------------# 49 | # Priors for the above model. # 50 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 51 | # # 52 | # -----------------------------------------------------------------------------# 53 | def converter(parameters): 54 | """ 55 | Function to convert between sampled parameters and constraint parameter. 56 | 57 | Parameters 58 | ---------- 59 | parameters: dict 60 | Dictionary containing sampled parameter values, 'RM1_radm2', 'RM1_radm2', 61 | 'fracPol1', 'fracPol2' 62 | 63 | Returns 64 | ------- 65 | dict: Dictionary with constraint parameter 'delta_RM1_RM2_radm2' and 'sum_p1_p2' added. 66 | """ 67 | converted_parameters = parameters.copy() 68 | converted_parameters["delta_RM1_RM2_radm2"] = ( 69 | parameters["RM1_radm2"] - parameters["RM2_radm2"] 70 | ) 71 | converted_parameters["sum_p1_p2"] = parameters["fracPol1"] + parameters["fracPol2"] 72 | return converted_parameters 73 | 74 | 75 | priors = PriorDict(conversion_function=converter) 76 | 77 | priors["fracPol1"] = bilby.prior.Uniform( 78 | minimum=0.0, 79 | maximum=1.0, 80 | name="fracPol1", 81 | latex_label=r"$p_1$", 82 | ) 83 | 84 | priors["fracPol2"] = bilby.prior.Uniform( 85 | minimum=0.0, 86 | maximum=1.0, 87 | name="fracPol2", 88 | latex_label=r"$p_2$", 89 | ) 90 | 91 | priors["psi01_deg"] = bilby.prior.Uniform( 92 | minimum=0, 93 | maximum=180.0, 94 | name="psi01_deg", 95 | latex_label=r"$\psi_{0,1}$ (deg)", 96 | boundary="periodic", 97 | ) 98 | priors["psi02_deg"] = bilby.prior.Uniform( 99 | minimum=0, 100 | maximum=180.0, 101 | name="psi02_deg", 102 | latex_label=r"$\psi_{0,2}$ (deg)", 103 | boundary="periodic", 104 | ) 105 | 106 | priors["RM1_radm2"] = bilby.prior.Uniform( 107 | minimum=-1100.0, 108 | maximum=1100.0, 109 | name="RM1_radm2", 110 | latex_label=r"$\phi_1$ (rad m$^{-2}$)", 111 | ) 112 | priors["RM2_radm2"] = bilby.prior.Uniform( 113 | minimum=-1100.0, 114 | maximum=1100.0, 115 | name="RM2_radm2", 116 | latex_label=r"$\phi_2$ (rad m$^{-2}$)", 117 | ) 118 | 119 | priors["sigmaRM1_radm2"] = bilby.prior.Uniform( 120 | minimum=0.0, 121 | maximum=100.0, 122 | name="sigmaRM1_radm2", 123 | latex_label=r"$\sigma_{RM,1}$ (rad m$^{-2}$))", 124 | ) 125 | 126 | priors["sigmaRM2_radm2"] = bilby.prior.Uniform( 127 | minimum=0.0, 128 | maximum=100.0, 129 | name="sigmaRM2_radm2", 130 | latex_label=r"$\sigma_{RM,2}$ (rad m$^{-2}$)", 131 | ) 132 | 133 | priors["delta_RM1_RM2_radm2"] = Constraint( 134 | minimum=0, 135 | maximum=2200.0, 136 | name="delta_RM1_RM2_radm2", 137 | latex_label=r"$\Delta\phi_{1,2}$ (rad m$^{-2}$)", 138 | ) 139 | 140 | priors["sum_p1_p2"] = Constraint( 141 | minimum=0.0, 142 | maximum=1, 143 | name="sum_p1_p2", 144 | latex_label=r"$p_1+p_2$)", 145 | ) 146 | -------------------------------------------------------------------------------- 
/RMtools_1D/models_ns/m5.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | 7 | 8 | # -----------------------------------------------------------------------------# 9 | # Function defining the model. # 10 | # # 11 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 12 | # lamSqArr_m2 = Array of lambda-squared values # 13 | # quArr = Complex array containing the Re and Im spectra. # 14 | # -----------------------------------------------------------------------------# 15 | def model(pDict, lamSqArr_m2): 16 | """ 17 | 18 | Single Faraday component with differential Faraday rotation 19 | "Burn slab" 20 | 21 | Ref: 22 | Burn (1966) Eq 18; with N >> (H_r/2H_z^0)^2 23 | Sokoloff et al. (1998) Eq 3 24 | O'Sullivan et al. (2012) Eq 9 25 | Ma et al. (2019a) Eq 12 26 | 27 | """ 28 | 29 | # Calculate the complex fractional q and u spectra 30 | # fmt: off 31 | pArr = pDict["fracPol"] * np.ones_like(lamSqArr_m2) 32 | quArr = (pArr * np.exp( 2j * (np.radians(pDict["psi0_deg"]) + 33 | (0.5*pDict["deltaRM_radm2"] + 34 | pDict["RM_radm2"]) * lamSqArr_m2)) 35 | * np.sin(pDict["deltaRM_radm2"] * lamSqArr_m2) / 36 | (pDict["deltaRM_radm2"] * lamSqArr_m2)) 37 | # fmt: on 38 | 39 | return quArr 40 | 41 | 42 | # -----------------------------------------------------------------------------# 43 | # Priors for the above model. # 44 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 45 | # # 46 | # -----------------------------------------------------------------------------# 47 | priors = { 48 | "fracPol": bilby.prior.Uniform( 49 | minimum=0.0, maximum=1.0, name="fracPol", latex_label=r"$p$" 50 | ), 51 | "psi0_deg": bilby.prior.Uniform( 52 | minimum=0, 53 | maximum=180.0, 54 | name="psi0_deg", 55 | latex_label=r"$\psi_0$ (deg)", 56 | boundary="periodic", 57 | ), 58 | "RM_radm2": bilby.prior.Uniform( 59 | minimum=-1100.0, 60 | maximum=1100.0, 61 | name="RM_radm2", 62 | latex_label=r"RM (rad m$^{-2}$)", 63 | ), 64 | "deltaRM_radm2": bilby.prior.Uniform( 65 | minimum=0.0, 66 | maximum=100.0, 67 | name="deltaRM_radm2", 68 | latex_label=r"$\Delta{RM}$ (rad m$^{-2}$)", 69 | ), 70 | } 71 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m6.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | from bilby.core.prior import Constraint, PriorDict 7 | 8 | 9 | # -----------------------------------------------------------------------------# 10 | # Function defining the model. # 11 | # # 12 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 13 | # lamSqArr_m2 = Array of lambda-squared values # 14 | # quArr = Complex array containing the Re and Im spectra. 
# 15 | # -----------------------------------------------------------------------------# 16 | def model(pDict, lamSqArr_m2): 17 | """ 18 | 19 | Two separate Faraday components with differential Faraday rotation 20 | Double "Burn slab" 21 | Averaged within the same telescope beam (i.e., unresolved) 22 | 23 | Ref (for individual source component): 24 | Burn (1966) Eq 18; with N >> (H_r/2H_z^0)^2 25 | Sokoloff et al. (1998) Eq 3 26 | O'Sullivan et al. (2012) Eq 9 27 | Ma et al. (2019a) Eq 12 28 | 29 | """ 30 | 31 | # Calculate the complex fractional q and u spectra 32 | pArr1 = pDict["fracPol1"] * np.ones_like(lamSqArr_m2) 33 | pArr2 = pDict["fracPol2"] * np.ones_like(lamSqArr_m2) 34 | 35 | # fmt: off 36 | quArr1 = pArr1 * np.exp( 2j * (np.radians(pDict["psi01_deg"]) + 37 | (0.5*pDict["deltaRM1_radm2"] + 38 | pDict["RM1_radm2"]) * lamSqArr_m2)) 39 | quArr2 = pArr2 * np.exp( 2j * (np.radians(pDict["psi02_deg"]) + 40 | (0.5*pDict["deltaRM2_radm2"] + 41 | pDict["RM2_radm2"]) * lamSqArr_m2)) 42 | quArr = (quArr1 * np.sin(pDict["deltaRM1_radm2"] * lamSqArr_m2) / 43 | (pDict["deltaRM1_radm2"] * lamSqArr_m2) + 44 | quArr2 * np.sin(pDict["deltaRM2_radm2"] * lamSqArr_m2) / 45 | (pDict["deltaRM2_radm2"] * lamSqArr_m2)) 46 | # fmt: on 47 | 48 | return quArr 49 | 50 | 51 | # -----------------------------------------------------------------------------# 52 | # Priors for the above model. # 53 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 54 | # # 55 | # -----------------------------------------------------------------------------# 56 | def converter(parameters): 57 | """ 58 | Function to convert between sampled parameters and constraint parameter. 59 | 60 | Parameters 61 | ---------- 62 | parameters: dict 63 | Dictionary containing sampled parameter values, 'RM1_radm2', 'RM1_radm2', 64 | 'fracPol1', 'fracPol2' 65 | 66 | Returns 67 | ------- 68 | dict: Dictionary with constraint parameter 'delta_RM1_RM2_radm2' and 'sum_p1_p2' added. 
69 | """ 70 | converted_parameters = parameters.copy() 71 | converted_parameters["delta_RM1_RM2_radm2"] = ( 72 | parameters["RM1_radm2"] - parameters["RM2_radm2"] 73 | ) 74 | converted_parameters["sum_p1_p2"] = parameters["fracPol1"] + parameters["fracPol2"] 75 | return converted_parameters 76 | 77 | 78 | priors = PriorDict(conversion_function=converter) 79 | 80 | priors["fracPol1"] = bilby.prior.Uniform( 81 | minimum=0.0, 82 | maximum=1.0, 83 | name="fracPol1", 84 | latex_label=r"$p_1$", 85 | ) 86 | priors["fracPol2"] = bilby.prior.Uniform( 87 | minimum=0.0, 88 | maximum=1.0, 89 | name="fracPol2", 90 | latex_label=r"$p_2$", 91 | ) 92 | 93 | priors["psi01_deg"] = bilby.prior.Uniform( 94 | minimum=0, 95 | maximum=180.0, 96 | name="psi01_deg", 97 | latex_label=r"$\psi_{0,1}$ (deg)", 98 | boundary="periodic", 99 | ) 100 | priors["psi02_deg"] = bilby.prior.Uniform( 101 | minimum=0, 102 | maximum=180.0, 103 | name="psi02_deg", 104 | latex_label=r"$\psi_{0,2}$ (deg)", 105 | boundary="periodic", 106 | ) 107 | 108 | priors["RM1_radm2"] = bilby.prior.Uniform( 109 | minimum=-1100.0, 110 | maximum=1100.0, 111 | name="RM1_radm2", 112 | latex_label="$\phi_1$ (rad m$^{-2}$)", 113 | ) 114 | priors["RM2_radm2"] = bilby.prior.Uniform( 115 | minimum=-1100.0, 116 | maximum=1100.0, 117 | name="RM2_radm2", 118 | latex_label=r"$\phi_2$ (rad m$^{-2}$)", 119 | ) 120 | priors["deltaRM1_radm2"] = bilby.prior.Uniform( 121 | minimum=0.0, 122 | maximum=100.0, 123 | name="deltaRM1_radm2", 124 | latex_label=r"$\Delta{RM,1}$ (rad m$^{-2}$))", 125 | ) 126 | priors["deltaRM2_radm2"] = bilby.prior.Uniform( 127 | minimum=0.0, 128 | maximum=100.0, 129 | name="deltaRM2_radm2", 130 | latex_label=r"$\Delta{RM,2}$ (rad m$^{-2}$)", 131 | ) 132 | priors["delta_RM1_RM2_radm2"] = Constraint( 133 | minimum=0, 134 | maximum=2200.0, 135 | name="delta_RM1_RM2_radm2", 136 | latex_label=r"$\Delta\phi_{1,2}$ (rad m$^{-2}$)", 137 | ) 138 | priors["sum_p1_p2"] = Constraint( 139 | minimum=0.0, 140 | maximum=1, 141 | name="sum_p1_p2", 142 | latex_label=r"$p_1+p_2$)", 143 | ) 144 | -------------------------------------------------------------------------------- /RMtools_1D/models_ns/m7.py: -------------------------------------------------------------------------------- 1 | # =============================================================================# 2 | # MODEL DEFINITION FILE # 3 | # =============================================================================# 4 | import bilby 5 | import numpy as np 6 | 7 | 8 | # -----------------------------------------------------------------------------# 9 | # Function defining the model. # 10 | # # 11 | # pDict = Dictionary of parameters, created by parsing inParms, below. # 12 | # lamSqArr_m2 = Array of lambda-squared values # 13 | # quArr = Complex array containing the Re and Im spectra. # 14 | # -----------------------------------------------------------------------------# 15 | def model(pDict, lamSqArr_m2): 16 | """ 17 | 18 | Single Faraday component with internal Faraday dispersion 19 | 20 | Ref: 21 | Burn (1966) Eq 18 22 | Sokoloff et al. (1998) Eq 34 23 | O'Sullivan et al. (2012) Eq 10 24 | Ma et al. (2019a) Eq 15 25 | 26 | """ 27 | 28 | # fmt: off 29 | 30 | # Calculate the complex fractional q and u spectra 31 | pArr = pDict["fracPol"] * np.ones_like(lamSqArr_m2) 32 | para_S = (2. 
* lamSqArr_m2**2 * pDict["sigmaRM_radm2"]**2 - 33 | 2j * lamSqArr_m2 * pDict["deltaRM_radm2"]) 34 | quArr = (pArr * np.exp( 2j * (np.radians(pDict["psi0_deg"]) + 35 | pDict["RM_radm2"] * lamSqArr_m2)) * 36 | (1 - np.exp(-1.*para_S)) / para_S) 37 | # fmt: on 38 | 39 | return quArr 40 | 41 | 42 | # -----------------------------------------------------------------------------# 43 | # Priors for the above model. # 44 | # See https://lscsoft.docs.ligo.org/bilby/prior.html for details. # 45 | # # 46 | # -----------------------------------------------------------------------------# 47 | priors = { 48 | "fracPol": bilby.prior.Uniform( 49 | minimum=0.0, maximum=1.0, name="fracPol", latex_label=r"$p$" 50 | ), 51 | "psi0_deg": bilby.prior.Uniform( 52 | minimum=0, 53 | maximum=180.0, 54 | name="psi0_deg", 55 | latex_label=r"$\psi_0$ (deg)", 56 | boundary="periodic", 57 | ), 58 | "RM_radm2": bilby.prior.Uniform( 59 | minimum=-1100.0, 60 | maximum=1100.0, 61 | name="RM_radm2", 62 | latex_label=r"RM (rad m$^{-2}$)", 63 | ), 64 | "deltaRM_radm2": bilby.prior.Uniform( 65 | minimum=0.0, 66 | maximum=100.0, 67 | name="deltaRM_radm2", 68 | latex_label=r"$\Delta{RM}$ (rad m$^{-2}$)", 69 | ), 70 | "sigmaRM_radm2": bilby.prior.Uniform( 71 | minimum=0.0, 72 | maximum=100.0, 73 | name="sigmaRM_radm2", 74 | latex_label=r"$\sigma_{RM}$ (rad m$^{-2}$)", 75 | ), 76 | } 77 | -------------------------------------------------------------------------------- /RMtools_1D/rmtools_bwpredict.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # =============================================================================# 4 | # # 5 | # NAME: rmtools_bwpredict.py # 6 | # # 7 | # PURPOSE: Algorithm for finding polarized sources while accounting for # 8 | # bandwidth depolarization. # 9 | # # 10 | # =============================================================================# 11 | # # 12 | # The MIT License (MIT) # 13 | # # 14 | # Copyright (c) 2020 Canadian Initiative for Radio Astronomy Data Analysis # # 15 | # Permission is hereby granted, free of charge, to any person obtaining a # 16 | # copy of this software and associated documentation files (the "Software"), # 17 | # to deal in the Software without restriction, including without limitation # 18 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, # 19 | # and/or sell copies of the Software, and to permit persons to whom the # 20 | # Software is furnished to do so, subject to the following conditions: # 21 | # # 22 | # The above copyright notice and this permission notice shall be included in # 23 | # all copies or substantial portions of the Software. # 24 | # # 25 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # 26 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # 27 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # 28 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # 29 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # 30 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # 31 | # DEALINGS IN THE SOFTWARE. 
# 32 | # # 33 | # =============================================================================# 34 | 35 | import argparse 36 | import math as m 37 | import sys 38 | 39 | import matplotlib.pyplot as plt 40 | import numpy as np 41 | from astropy.constants import c as speed_of_light 42 | 43 | from RMtools_1D.rmtools_bwdepol import ( 44 | adjoint_theory, 45 | estimate_channel_bandwidth, 46 | plot_adjoint_info, 47 | ) 48 | 49 | if sys.version_info.major == 2: 50 | print("RM-tools will no longer run with Python 2! Please use Python 3.") 51 | exit() 52 | 53 | 54 | # -----------------------------------------------------------------------------# 55 | def bwdepol_compute_predictions( 56 | freqArr_Hz, widths_Hz=None, phiMax_radm2=None, dPhi_radm2=None 57 | ): 58 | """Computes theoretical sensitivity and noise curves for given 59 | channelization. 60 | 61 | Parameters 62 | ---------- 63 | freqArr_Hz: array, float 64 | array of the centers of the frequency channels 65 | 66 | Kwargs 67 | ------ 68 | phiMax_radm2: float 69 | Maximum absolute Faraday depth (rad/m^2) 70 | 71 | dPhi_radm2: float 72 | Faraday depth channel size (rad/m^2) 73 | 74 | nSamples: float 75 | Number of samples across the RMSF 76 | 77 | Returns 78 | ------- 79 | adjoint_info: list 80 | Faraday depth array, sensitivity array, noise array 81 | 82 | """ 83 | # Calculate some wavelength parameters 84 | lambdaSqArr_m2 = np.power(speed_of_light.value / freqArr_Hz, 2.0) 85 | # dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz))) 86 | lambdaSqRange_m2 = np.nanmax(lambdaSqArr_m2) - np.nanmin(lambdaSqArr_m2) 87 | dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2))) 88 | dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2))) 89 | 90 | # Set the Faraday depth range 91 | fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2 92 | if dPhi_radm2 is None: 93 | dPhi_radm2 = fwhmRMSF_radm2 94 | if phiMax_radm2 is None: 95 | phiMax_radm2 = 2 * m.sqrt(3.0) / dLambdaSqMin_m2 96 | 97 | # Faraday depth sampling. 98 | phiArr_radm2 = np.arange(0, phiMax_radm2 + 1e-6, dPhi_radm2) 99 | phiArr_radm2 = phiArr_radm2.astype("float64") 100 | 101 | print( 102 | "Computing out to a Faraday depth of {:g} rad/m^2 in steps of {:g} rad/m^2".format( 103 | phiMax_radm2, dPhi_radm2 104 | ) 105 | ) 106 | 107 | # Uniform weights only for prediction purposes 108 | weightArr = np.ones(freqArr_Hz.shape, dtype="float64") 109 | 110 | # Get channel widths if not given by user. 111 | K = 1.0 / np.sum(weightArr) 112 | if widths_Hz is None: 113 | widths_Hz = estimate_channel_bandwidth(freqArr_Hz) 114 | 115 | adjoint_varbs = [widths_Hz, freqArr_Hz, phiArr_radm2, K, weightArr] 116 | adjoint_info = adjoint_theory(adjoint_varbs, weightArr, show_progress=False) 117 | phiArr_radm2, adjoint_sens, adjoint_noise = adjoint_info 118 | 119 | adjoint_info[2] = adjoint_noise / np.max(adjoint_noise) # Renormalize to unity. 120 | 121 | return adjoint_info 122 | 123 | 124 | def main(): 125 | """ 126 | Start the function to generate the figures if called from the command line. 127 | """ 128 | # Help string to be shown using the -h option 129 | descStr = """ 130 | Calculate the theoretical sensitivity and noise curves for the bandwidth- 131 | depolarization-corrected RM synthesis method described in Fine et al. (2022). 132 | 133 | Takes in a ASCII file containing either 1 column (channel frequencies, in Hz) 134 | or two columns (channel frequencies and channel bandwidths in Hz, space separated). 135 | 136 | Generates interactive plots of the two curves. 
These are intended to guide 137 | users in deciding in what RM range traditional RM synthesis is sufficiently 138 | accurate, and over what RM range they may want to use the modified method. 139 | """ 140 | 141 | # Parse the command line options 142 | parser = argparse.ArgumentParser( 143 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 144 | ) 145 | parser.add_argument( 146 | "dataFile", 147 | metavar="dataFile.dat", 148 | nargs=1, 149 | help="ASCII file containing channel frequencies.", 150 | ) 151 | parser.add_argument( 152 | "-l", 153 | dest="phiMax_radm2", 154 | type=float, 155 | default=None, 156 | help="absolute max Faraday depth sampled [Auto].", 157 | ) 158 | parser.add_argument( 159 | "-d", 160 | dest="dPhi_radm2", 161 | type=float, 162 | default=None, 163 | help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)", 164 | ) 165 | parser.add_argument( 166 | "-s", 167 | dest="nSamples", 168 | type=float, 169 | default=10, 170 | help="number of samples across the RMSF lobe [10].", 171 | ) 172 | parser.add_argument( 173 | "-f", 174 | dest="plotfile", 175 | default=None, 176 | help="Filename to save plot to. [do not save]", 177 | ) 178 | 179 | args = parser.parse_args() 180 | 181 | # Get data: 182 | try: 183 | data = np.loadtxt(args.dataFile[0], unpack=True, dtype="float64") 184 | if data.ndim == 1: # If single column file, data is only channel freqs 185 | freqArr_Hz = data 186 | widths_Hz = None 187 | else: # file has multiple columns 188 | freqArr_Hz = data[0] # assume the first column is channel freqs 189 | widths_Hz = data[1] # Assume widths are 2nd column if present. 190 | except: 191 | print( 192 | "Unable to read file. Please ensure file is readable and contains 1 or 2 columns." 193 | ) 194 | exit() 195 | 196 | adjoint_info = bwdepol_compute_predictions( 197 | freqArr_Hz=freqArr_Hz, 198 | widths_Hz=widths_Hz, 199 | phiMax_radm2=args.phiMax_radm2, 200 | dPhi_radm2=args.dPhi_radm2, 201 | ) 202 | 203 | # plot adjoint info 204 | plot_adjoint_info(adjoint_info, units="arb. units") 205 | if args.plotfile is not None: 206 | plt.savefig(args.plotfile, bbox_inches="tight") 207 | else: 208 | plt.show() 209 | 210 | 211 | # -----------------------------------------------------------------------------# 212 | if __name__ == "__main__": 213 | main() 214 | -------------------------------------------------------------------------------- /RMtools_3D/__init__.py: -------------------------------------------------------------------------------- 1 | import pkg_resources 2 | 3 | __version__ = pkg_resources.get_distribution("RM-Tools").version 4 | -------------------------------------------------------------------------------- /RMtools_3D/assemble_chunks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Wed May 29 13:10:26 2019 5 | 6 | This code reassembles chunks into larger files. This is useful for assembling 7 | output files from 3D RM synthesis back into larger cubes. 8 | 9 | @author: cvaneck 10 | """ 11 | 12 | import argparse 13 | import re 14 | from glob import glob 15 | from math import ceil 16 | 17 | import astropy.io.fits as pf 18 | import numpy as np 19 | from tqdm.auto import trange 20 | 21 | 22 | def main(): 23 | """This function will assemble a large FITS file or cube from smaller chunks.""" 24 | 25 | descStr = """ 26 | Assemble a FITS image/cube from small pieces. The new image will be created 27 | in the running directory. 
28 | Supply one of the chunk files (other files will be identified by name pattern). 29 | Output name will follow the name of the input chunk, minus the '.C??.' 30 | """ 31 | 32 | parser = argparse.ArgumentParser( 33 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 34 | ) 35 | parser.add_argument( 36 | "chunkname", metavar="chunk.fits", help="One of the chunks to be assembled" 37 | ) 38 | parser.add_argument( 39 | "-f", 40 | dest="output", 41 | default=None, 42 | help="Specify output file name [basename of chunk]", 43 | ) 44 | parser.add_argument( 45 | "-o", 46 | dest="overwrite", 47 | action="store_true", 48 | help="Overwrite existing file? [False].", 49 | ) 50 | 51 | args = parser.parse_args() 52 | 53 | if args.output == None: 54 | output_filename = ".".join( 55 | [ 56 | x 57 | for x in args.chunkname.split(".") 58 | if not (x.startswith("C") and x[1:].isnumeric()) 59 | ] 60 | ) 61 | else: 62 | output_filename = args.output 63 | 64 | # Get all the chunk filenames. Missing chunks will break things! 65 | filename = re.search("\.C\d+\.", args.chunkname) 66 | chunkfiles = glob( 67 | args.chunkname[0 : filename.start()] + ".C*." + args.chunkname[filename.end() :] 68 | ) 69 | chunkfiles.sort() 70 | 71 | old_header = pf.getheader(chunkfiles[0]) 72 | x_dim = old_header["OLDXDIM"] 73 | y_dim = old_header["OLDYDIM"] 74 | Nperchunk = old_header["NAXIS1"] 75 | Npix_image = x_dim * y_dim 76 | num_chunks = ceil(Npix_image / Nperchunk) 77 | Ndim = old_header["NAXIS"] 78 | 79 | if (Ndim != 4) and (Ndim != 3) and (Ndim != 2): 80 | raise Exception( 81 | "Right now this code only supports FITS files with 2-4 dimensions!" 82 | ) 83 | 84 | new_header = old_header.copy() 85 | del new_header["OLDXDIM"] 86 | del new_header["OLDYDIM"] 87 | new_header["NAXIS1"] = x_dim 88 | new_header["NAXIS2"] = y_dim 89 | 90 | # Create blank file: 91 | new_header.tofile(output_filename, overwrite=args.overwrite) 92 | 93 | # According to astropy, this is how to create a large file without needing it in memory: 94 | shape = tuple( 95 | new_header["NAXIS{0}".format(ii)] for ii in range(1, new_header["NAXIS"] + 1) 96 | ) 97 | with open(output_filename, "rb+") as fobj: 98 | fobj.seek( 99 | len(new_header.tostring()) 100 | + (np.product(shape) * np.abs(new_header["BITPIX"] // 8)) 101 | - 1 102 | ) 103 | fobj.write(b"\0") 104 | 105 | if len(chunkfiles) != num_chunks: 106 | raise Exception("Number of chunk files found does not match expectations!") 107 | 108 | base_idx_arr = np.array(range(Nperchunk)) 109 | 110 | large = pf.open(output_filename, mode="update", memmap=True) 111 | 112 | for i in trange(num_chunks - 1, desc="Assembling chunks"): 113 | file = chunkfiles[i] 114 | idx = base_idx_arr + i * Nperchunk 115 | xarr = idx // y_dim 116 | yarr = idx % y_dim 117 | 118 | chunk = pf.open(file, memmap=True) 119 | if Ndim == 4: 120 | large[0].data[:, :, yarr, xarr] = chunk[0].data[:, :, 0, :] 121 | elif Ndim == 3: 122 | large[0].data[:, yarr, xarr] = chunk[0].data[:, 0, :] 123 | elif Ndim == 2: 124 | large[0].data[yarr, xarr] = chunk[0].data 125 | 126 | # large.flush() 127 | chunk.close() 128 | 129 | i += 1 130 | file = chunkfiles[i] 131 | idx = base_idx_arr + i * Nperchunk 132 | idx = idx[idx < Npix_image] 133 | xarr = idx // y_dim 134 | yarr = idx % y_dim 135 | chunk = pf.open(file, memmap=True) 136 | if Ndim == 4: 137 | large[0].data[:, :, yarr, xarr] = chunk[0].data[:, :, 0, :] 138 | elif Ndim == 3: 139 | large[0].data[:, yarr, xarr] = chunk[0].data[:, 0, :] 140 | elif Ndim == 2: 141 | large[0].data[yarr, xarr] = 
chunk[0].data 142 | large.flush() 143 | chunk.close() 144 | 145 | large.close() 146 | 147 | 148 | if __name__ == "__main__": 149 | main() 150 | -------------------------------------------------------------------------------- /RMtools_3D/catalogue.csv: -------------------------------------------------------------------------------- 1 | #-----------------------------------------------------------------------------# 2 | # # 3 | # Input catalogue file for use with the POSSUM pipeline. # 4 | # Used to generate artificial data for testing purposes. # 5 | # # 6 | # Note: spatial information (x,y,maj,min,pa) is ignored when generating 1D # 7 | # ASCII data. If generating image data these are the world-coordinates # 8 | # and parameters of the injected Gaussians. # 9 | # # 10 | # C. Purcell 05-April-2016 # 11 | # # 12 | #-----------------------------------------------------------------------------# 13 | 14 | # MODEL TYPE 1: External depolarised components within same beam (not LOS). 15 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2, faradayDispersion_radm2]xN 16 | # 17 | # MODEL TYPE 2: Emitting & rotating regions stacked along the line-of-sight. 18 | # Type, x_deg, y_deg, maj_asec, min_asec, pa_deg, fluxI_mJy, SI, [polFrac, evpa_deg, faradayDepth_radm2]xN 19 | # 20 | 21 | 1, 89.9981, +0.0001, 0.0, 0.0, 0.0, 10.0, -0.1, 0.6, 80.0, 30.0, 0.0 22 | 1, 90.0010, -0.0020, 0.0, 0.0, 0.0, 9.0, 0.0, 0.1, -120.0, 60.0, 0.0 23 | 1, 90.0016, +0.0023, 0.0, 0.0, 0.0, 5.0, -0.7, 0.4, 30.0, 100.0, 50.0, 0.5, 80.0, -100.0, 0.0 24 | 1, 89.9979, +0.0025, 0.0, 0.0, 0.0, 10.0, -0.1, 0.6, 80.0, 200.0, 100.0, 0.3, 80.0, 10.0, 50.0 25 | 1, 89.9981, -0.0020, 0.0, 0.0, 0.0, 5.0, -0.2, 0.5, 45.0, -32.0, 0.0, 0.5, 45.0, -20.0, 20.0 26 | 2, 89.9970, -0.0027, 0.0, 0.0, 0.0, 5.0, -0.2, 0.5, 45.0, -32.0, 0.5, 50.0, 20.0, 0.1, 20.0, -110.0 27 | 2, 90.0026, +0.0000, 0.0, 0.0, 0.0, 10.0, 0.0, 0.5, 45.0, -20.0 28 | 2, 90.0000, +0.0000, 0.0, 0.0, 0.0, 10.0, 0.0, 0.5, 45.0, -20.0, 0.25, 10, 50.0 29 | -------------------------------------------------------------------------------- /RMtools_3D/create_chunks.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Tue May 28 13:25:30 2019 5 | 6 | This code will divide a FITS cube into individual chunks. 7 | To minimize problems with how to divide the cube, it will convert the image 8 | plane into a 1D list of spectra. 9 | Then the file will divided into smaller files, with fewer pixels, in order to 10 | be run through RM synthesis and CLEAN. 11 | A separate routine will re-assemble the individual chunks into a combined file again. 12 | This code attempts to minimize the memory profile: in principle it should never 13 | need more memory than the size of a single chunk, and perhaps not even that much. 14 | 15 | @author: cvaneck 16 | May 2019 17 | """ 18 | 19 | import argparse 20 | import os.path as path 21 | from math import ceil, floor, log10 22 | 23 | import astropy.io.fits as pf 24 | import numpy as np 25 | from tqdm.auto import trange 26 | 27 | 28 | def main(): 29 | """This function will divide a large FITS file or cube into smaller chunks. 30 | It does so in a memory efficient way that requires only a small RAM overhead 31 | (approximately 1 chunk worth?). 32 | """ 33 | descStr = """ 34 | Divide a FITS cube into small pieces for memory-efficient RM synthesis. 35 | Files will be created in running directory. 
36 | WARNING: ONLY WORKS ON FIRST HDU, OTHERS WILL BE LOST.""" 37 | 38 | parser = argparse.ArgumentParser( 39 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 40 | ) 41 | parser.add_argument( 42 | "infile", metavar="filename.fits", help="FITS cube containing data" 43 | ) 44 | parser.add_argument( 45 | "Nperchunk", metavar="N_pixels", help="Number of pixels per chunk" 46 | ) 47 | parser.add_argument( 48 | "-v", dest="verbose", action="store_true", help="Verbose [False]." 49 | ) 50 | parser.add_argument( 51 | "-p", dest="prefix", default=None, help="Prefix of output files [filename]" 52 | ) 53 | 54 | args = parser.parse_args() 55 | 56 | Nperchunk = int(args.Nperchunk) 57 | 58 | if not path.exists(args.infile): 59 | raise Exception("Input file not found!") 60 | 61 | if args.prefix == None: 62 | prefix = path.splitext(args.infile)[0] 63 | else: 64 | prefix = args.prefix 65 | 66 | hdu = pf.open(args.infile, memmap=True) 67 | header = hdu[0].header 68 | data = np.transpose(hdu[0].data) 69 | 70 | x_image = header["NAXIS1"] 71 | y_image = header["NAXIS2"] 72 | Npix_image = x_image * y_image 73 | 74 | num_chunks = ceil(Npix_image / Nperchunk) 75 | digits = floor(log10(num_chunks)) + 1 76 | prntcode = ":0" + str(digits) + "d" 77 | 78 | if args.verbose: 79 | print(('Chunk name set to "{}.C{' + prntcode + '}.fits"').format(prefix, 0)) 80 | print("File will be divided into {} chunks".format(num_chunks)) 81 | 82 | base_idx_arr = np.array(range(Nperchunk)) 83 | 84 | new_header = header.copy() 85 | new_header["NAXIS2"] = 1 86 | new_header["NAXIS1"] = Nperchunk 87 | new_header["OLDXDIM"] = x_image 88 | new_header["OLDYDIM"] = y_image 89 | 90 | # Run for all but last. Last chunk requires some finessing. 91 | for i in trange(num_chunks - 1, desc="Creating chunks"): 92 | idx = base_idx_arr + i * Nperchunk 93 | xarr = idx // y_image 94 | yarr = idx % y_image 95 | newdata = np.expand_dims(data[xarr, yarr], 1) 96 | filename = ("{}.C{" + prntcode + "}.fits").format(prefix, i) 97 | pf.writeto(filename, np.transpose(newdata), new_header, overwrite=True) 98 | 99 | i += 1 100 | idx = base_idx_arr + i * Nperchunk 101 | idx = idx[idx < Npix_image] 102 | xarr = idx // y_image 103 | yarr = idx % y_image 104 | newdata = np.expand_dims(data[xarr, yarr], 1) 105 | filename = ("{}.C{" + prntcode + "}.fits").format(prefix, i) 106 | pf.writeto(filename, np.transpose(newdata), new_header, overwrite=True) 107 | 108 | hdu.close() 109 | 110 | 111 | if __name__ == "__main__": 112 | main() 113 | -------------------------------------------------------------------------------- /RMtools_3D/extract_region.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Thu May 30 10:44:28 2019 5 | 6 | Extract subregion of a FITS file, with option to extract a plane. 7 | 8 | There are many cutout tools like it, but this one is mine. 9 | 10 | 11 | @author: cvaneck 12 | May 2019 13 | """ 14 | 15 | import argparse 16 | import os 17 | 18 | import astropy.io.fits as pf 19 | from astropy.wcs import WCS 20 | 21 | 22 | def main(): 23 | """This function will extract a region ('cutout) from an FITS file and save 24 | it to a new file. Command line options will allow the user to select the 25 | region in either pixel or sky coordinates. 26 | """ 27 | 28 | descStr = """ 29 | Cut out a region in a fits file, writing it to a new file. 
30 | Selecting -1 for any coordinate parameter will cause it be set as the 31 | maximum/minimum value allowed. 32 | Default is for box to be defined in pixel coordinates, in the form 33 | xmin xmax ymin ymax. 34 | Pixel selection is inclusive: all corner pixels will be present in output. 35 | Pixel counting starts at 1 (FITS convention). 36 | Sky coordinates not guaranteed to give correct size box in if projection is 37 | highly nonlinear. 38 | If a third non-generate axis is present (as either axis 3 or 4), the 39 | -z flag will allow selection of subranges along this axis.""" 40 | 41 | parser = argparse.ArgumentParser( 42 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 43 | ) 44 | parser.add_argument( 45 | "infile", metavar="infile.fits", help="FITS file containing data." 46 | ) 47 | parser.add_argument("outfile", metavar="outfile.fits", help="Output fits file") 48 | parser.add_argument( 49 | "box", 50 | metavar="xmin xmax ymin ymax", 51 | nargs=4, 52 | type=float, 53 | help="Box dimensions (in pixels unless -s set)", 54 | ) 55 | parser.add_argument( 56 | "-s", 57 | dest="sky", 58 | action="store_true", 59 | help="Box defined in sky coordinates (in decimal degrees if set, otherwise pixels).", 60 | ) 61 | parser.add_argument( 62 | "-c", 63 | dest="center", 64 | action="store_true", 65 | help="If true, define box as x_center x_halfwidth y_center y_halfwidth", 66 | ) 67 | parser.add_argument( 68 | "-z", 69 | dest="zlim", 70 | metavar="axis3", 71 | nargs=2, 72 | default=None, 73 | type=int, 74 | help="3rd axis limits (only pixel coords supported)", 75 | ) 76 | parser.add_argument( 77 | "-o", 78 | dest="overwrite", 79 | action="store_true", 80 | help="Overwrite existing file if present?", 81 | ) 82 | 83 | args = parser.parse_args() 84 | 85 | if not os.path.exists(args.infile): 86 | raise Exception("Input file not found!") 87 | 88 | if os.path.exists(args.outfile) and not args.overwrite: 89 | raise Exception("Outfile file already exists! Add -o flag to overwrite.") 90 | 91 | if not args.center: 92 | box = args.box 93 | else: 94 | box = [ 95 | args.box[0] - args.box[1], 96 | args.box[0] + args.box[1], 97 | args.box[2] - args.box[3], 98 | args.box[2] + args.box[3], 99 | ] 100 | 101 | if box[0] > box[1]: 102 | raise Exception("Box dimensions incorrect! x_max < x_min!") 103 | 104 | if box[2] > box[3]: 105 | raise Exception("Box dimensions incorrect! y_max < y_min!") 106 | 107 | hdu = pf.open(args.infile, memmap=True) 108 | header = hdu[0].header 109 | 110 | if args.sky: 111 | raise Exception("Not yet implemented. 
Soon!") 112 | csys = WCS(header, naxis=2) 113 | pixbox = [-1, -1, -1, -1] 114 | pix = csys.all_world2pix(box[0], (box[2] + box[3]) / 2, 1) 115 | pixbox[0] = float(pix[0]) 116 | pix = csys.all_world2pix(box[1], (box[2] + box[3]) / 2, 1) 117 | pixbox[1] = float(pix[0]) 118 | if pixbox[1] < pixbox[0]: 119 | a = pixbox[0] 120 | pixbox[0] = pixbox[1] 121 | pixbox[1] = a 122 | pix = csys.all_world2pix((box[0] + box[1]) / 2, box[2], 1) 123 | pixbox[2] = float(pix[1]) 124 | pix = csys.all_world2pix((box[0] + box[1]) / 2, box[3], 1) 125 | pixbox[3] = float(pix[1]) 126 | box = [round(x) for x in pixbox] 127 | else: 128 | box = [int(x) for x in box] 129 | 130 | if header["NAXIS"] == 3: 131 | cube_axis = 3 132 | if header["NAXIS"] == 4: 133 | if header["NAXIS3"] != 1: 134 | cube_axis = 3 135 | else: 136 | cube_axis = 4 137 | if header["NAXIS"] == 2: 138 | cube_axis = 2 139 | 140 | if args.zlim != None: 141 | zlim = args.zlim 142 | elif cube_axis > 2: 143 | zlim = [1, header["NAXIS" + str(cube_axis)]] 144 | 145 | if box[0] < 1: 146 | box[0] = 1 147 | if (box[1] == -1) or (box[1] > header["NAXIS1"]): 148 | box[1] = header["NAXIS1"] 149 | if box[2] < 1: 150 | box[2] = 1 151 | if (box[3] == -1) or (box[3] > header["NAXIS2"]): 152 | box[3] = header["NAXIS2"] 153 | 154 | # Extract sub-region: 155 | if header["NAXIS"] == 4: 156 | if cube_axis == 3: 157 | data = hdu[0].data[ 158 | :, zlim[0] - 1 : zlim[1], box[2] - 1 : box[3], box[0] - 1 : box[1] 159 | ] 160 | if cube_axis == 4: 161 | data = hdu[0].data[ 162 | zlim[0] - 1 : zlim[1], :, box[2] - 1 : box[3], box[0] - 1 : box[1] 163 | ] 164 | elif header["NAXIS"] == 3: 165 | data = hdu[0].data[ 166 | zlim[0] - 1 : zlim[1], box[2] - 1 : box[3], box[0] - 1 : box[1] 167 | ] 168 | elif header["NAXIS"] == 2: 169 | data = hdu[0].data[box[2] - 1 : box[3] - 1, box[0] - 1 : box[1] - 1] 170 | else: 171 | raise Exception("Number of dimensions is some nonsupported value!") 172 | 173 | # Change header information: 174 | new_header = header.copy() 175 | new_header["NAXIS1"] = box[1] - box[0] + 1 176 | new_header["NAXIS2"] = box[3] - box[2] + 1 177 | if args.zlim != None: 178 | new_header["NAXIS" + str(cube_axis)] = zlim[1] - zlim[0] + 1 179 | new_header["CRPIX" + str(cube_axis)] = ( 180 | header["CRPIX" + str(cube_axis)] - zlim[0] + 1 181 | ) 182 | new_header["CRPIX1"] = header["CRPIX1"] - box[0] + 1 183 | new_header["CRPIX2"] = header["CRPIX2"] - box[2] + 1 184 | 185 | pf.writeto(args.outfile, data, new_header, overwrite=args.overwrite) 186 | 187 | 188 | if __name__ == "__main__": 189 | main() 190 | -------------------------------------------------------------------------------- /RMtools_3D/make_freq_file.py: -------------------------------------------------------------------------------- 1 | # This script creates a frequency file from a FITS header. This is a helper 2 | # script to make it easier to run RMsynth 1D or 3D. Run this first to generate 3 | # the required frequency file. If you create a spectrum or cube from multiple 4 | # FITS files, run it on the individual input files. 5 | # This script assumes the FITS header has a FREQ axis, and that this axis 6 | # accurately describes the frequency channels. 7 | 8 | # version 1 by Boris Gbeasor, summer 2019 9 | # ver2: modified by Cameron Van Eck 10 | 11 | 12 | import argparse 13 | 14 | import numpy as np 15 | from astropy.io import fits 16 | from astropy.wcs import WCS 17 | 18 | 19 | def save_freq_file(): 20 | """ 21 | Parses command line arguments, extracts FITS header, and saves it to a file. 
22 | """ 23 | # Parse the command line options 24 | parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) 25 | parser.add_argument( 26 | "fits_file", 27 | metavar="Infile.fits", 28 | nargs=1, 29 | help="FITS cube with frequency axis.", 30 | ) 31 | parser.add_argument( 32 | "freq_file", metavar="outfile.dat", help="Name of the freq file to write." 33 | ) 34 | 35 | args = parser.parse_args() 36 | 37 | freq_array = get_freq_array(args.fits_file[0]) 38 | 39 | np.savetxt(args.freq_file, freq_array, delimiter="") 40 | print("Saving the frequencies list to {}".format(args.freq_file)) 41 | 42 | 43 | def get_fits_header(filename): 44 | hduList = fits.open(filename) 45 | header = hduList[0].header 46 | hduList.close() 47 | return header 48 | 49 | 50 | def get_freq_array(filename): 51 | header = fits.getheader(filename) 52 | wcs = WCS(header) 53 | spec_wcs = wcs.spectral 54 | nchan = spec_wcs.array_shape[0] 55 | freqs = spec_wcs.pixel_to_world_values(np.arange(nchan)) 56 | 57 | return freqs 58 | 59 | 60 | if __name__ == "__main__": 61 | save_freq_file() 62 | -------------------------------------------------------------------------------- /RMtools_3D/rescale_I_model_3D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | # # 5 | # NAME: rescale_I_model_3D.py # 6 | # # 7 | # PURPOSE: Convert a 3D Stokes I model to a new reference frequency # 8 | # # 9 | # CREATED: 19-Mar-2024 by Cameron Van Eck 10 | # # 11 | # =============================================================================# 12 | # # 13 | # The MIT License (MIT) # 14 | # # 15 | # Copyright (c) 2024 Cameron Van Eck # 16 | # # 17 | # Permission is hereby granted, free of charge, to any person obtaining a # 18 | # copy of this software and associated documentation files (the "Software"), # 19 | # to deal in the Software without restriction, including without limitation # 20 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, # 21 | # and/or sell copies of the Software, and to permit persons to whom the # 22 | # Software is furnished to do so, subject to the following conditions: # 23 | # # 24 | # The above copyright notice and this permission notice shall be included in # 25 | # all copies or substantial portions of the Software. # 26 | # # 27 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # 28 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # 29 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # 30 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # 31 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # 32 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # 33 | # DEALINGS IN THE SOFTWARE. 
# 34 | # # 35 | # =============================================================================# 36 | 37 | 38 | # Steps: 1) Read in covariance matrix, coefficient maps, I ref.freq., lambda^2_0 39 | # 2a) Create fitDict, pixel-wise 40 | # 2b) Invoke rescaling function pixel-wise 41 | # 3) Drizzle output fitDict value into arrays 42 | # 4) Write new coefficient maps 43 | 44 | # Optional variations: user-specified reference frequency, unspecified 45 | # (make uniform across all pixels) 46 | 47 | import argparse 48 | import multiprocessing as mp 49 | import os 50 | from functools import partial 51 | 52 | import astropy.io.fits as pf 53 | import numpy as np 54 | 55 | from RMutils.util_misc import FitResult, renormalize_StokesI_model 56 | 57 | 58 | def command_line(): 59 | """Handle invocation from the command line, parsing inputs and running 60 | everything.""" 61 | 62 | # Help string to be shown using the -h option 63 | descStr = """ 64 | Convert a Stokes I model to a new reference frequency. This changes the 65 | model coefficients and their errors, but does not change (and does not 66 | recalculate) the actual model spectrum. 67 | The conversion of the coefficient uncertainties uses a first-order Taylor 68 | approximation, so the uncertainties are only valid for relatively small 69 | variations in reference frequency (depends a lot on the model, but 10% 70 | seems to be a good rule of thumb). 71 | The new reference frequency can either be from lambda^2_0 from an FDF cube, 72 | which will create Stokes I maps that match the coresponding frequency, 73 | a constant value given by the user, or unspecified (in which case the tool 74 | will make all pixels have a common reference frequency at the mean input reference frequency). 75 | The input files are assumed to be the products of do_fitIcube, with the usual filenames. 76 | Outputs are new coefficient (and error) maps and reference frequency map. 77 | """ 78 | 79 | # Parse the command line options 80 | parser = argparse.ArgumentParser( 81 | description=descStr, formatter_class=argparse.RawTextHelpFormatter 82 | ) 83 | 84 | parser.add_argument( 85 | "covar_file", 86 | metavar="covariance.fits", 87 | help="FITS cube Stokes I fit covariance matrices.", 88 | ) 89 | 90 | parser.add_argument( 91 | "-l", 92 | dest="lambda2_0", 93 | type=str, 94 | default=None, 95 | help="FDF cube from rmsynth3d. If given, will convert model to frequency matching the polarization products.", 96 | ) 97 | 98 | parser.add_argument( 99 | "-f", 100 | dest="new_reffreq", 101 | type=float, 102 | default=None, 103 | help="New reference frequency (in Hz). If given, forces all pixels to this frequency. Incompatible with -l", 104 | ) 105 | 106 | parser.add_argument( 107 | "-o", 108 | dest="outname", 109 | type=str, 110 | default=None, 111 | help="Output path+filename. If not given, defaults to input path+name (minus the covariance.fits).", 112 | ) 113 | parser.add_argument( 114 | "-n", 115 | dest="num_cores", 116 | type=int, 117 | default=1, 118 | help="Number of cores to use for multiprocessing. Default is 1.", 119 | ) 120 | 121 | parser.add_argument( 122 | "-w", 123 | dest="overwrite", 124 | action="store_true", 125 | help="Overwrite existing files? [False].", 126 | ) 127 | 128 | args = parser.parse_args() 129 | 130 | if (args.new_reffreq is not None) and (args.lambda2_0 is not None): 131 | raise Exception( 132 | "Please do not set both -f and -l flags -- chose one or neither." 
133 | ) 134 | 135 | if not os.path.exists(args.covar_file): 136 | raise Exception( 137 | f"Cannot find covarance file at {args.covar_file}, please check filename/path." 138 | ) 139 | 140 | if args.covar_file[-15:] != "covariance.fits": 141 | raise Exception( 142 | "Input covariance file name doesn't end in covariance.fits; this is required." 143 | ) 144 | 145 | basename = args.covar_file[:-15] 146 | 147 | if args.outname is None: 148 | args.outname = basename 149 | 150 | # Get data: 151 | covar_map, old_reffreq_map, coeffs, header = read_data(basename) 152 | 153 | # Create new frequency map: 154 | if args.lambda2_0 is not None: 155 | FDF_header = pf.getheader(args.lambda2_0) 156 | lam0Sq_m2 = FDF_header["LAMSQ0"] 157 | freq0 = 2.997924538e8 / np.sqrt(lam0Sq_m2) 158 | new_freq_map = np.ones_like(old_reffreq_map) * freq0 159 | elif args.new_reffreq is not None: 160 | new_freq_map = np.ones_like(old_reffreq_map) * args.new_reffreq 161 | else: 162 | freq0 = np.nanmean(old_reffreq_map) 163 | new_freq_map = np.ones_like(old_reffreq_map) * freq0 164 | 165 | # Get fit function from header: 166 | x = ["Fit model is" in card for card in header["HISTORY"]] 167 | line = header["HISTORY"][np.where(x)[0][-1]] 168 | fit_function = [x for x in line.split() if "polynomial" in x][0].split("-")[0] 169 | 170 | new_freq_map, new_coeffs, new_errors = rescale_I_model_3D( 171 | covar_map, 172 | old_reffreq_map, 173 | new_freq_map, 174 | coeffs, 175 | fit_function, 176 | num_cores=args.num_cores, 177 | ) 178 | 179 | write_new_parameters( 180 | new_freq_map, 181 | new_coeffs, 182 | new_errors, 183 | args.outname, 184 | header, 185 | overwrite=args.overwrite, 186 | ) 187 | 188 | 189 | def read_data(basename): 190 | """Reads the covariance matrix map, (current) reference frequency map, and 191 | (current) coefficient+error maps. 192 | Input: 193 | basename (str): file path and name up to 'covariance.fits'/'reffreq.fits', etc. 194 | """ 195 | 196 | if not ( 197 | os.path.exists(basename + "coeff0.fits") 198 | and os.path.exists(basename + "coeff0err.fits") 199 | ): 200 | raise Exception("Cannot find coeff0 map. At least coeff 0 map must exist.") 201 | 202 | covar_file = basename + "covariance.fits" 203 | covar_map = pf.getdata(covar_file) 204 | 205 | freq_file = basename + "reffreq.fits" 206 | old_reffreq_map, header = pf.getdata(freq_file, header=True) 207 | # Grabs header from frequency map -- needed for fit function, and to use for 208 | # writing out products. Better to have 2D map header to avoid fussing with 209 | # extra axes. 
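# --- Illustrative aside (not part of rescale_I_model_3D.py) -------------------
# The Stokes I fit function is recovered from this header back in command_line()
# by scanning its HISTORY cards for a phrase of the form "Fit model is ...
# <fit>-polynomial". A minimal, hypothetical sketch of that lookup on a
# hand-made header; the card text below is an assumed example and may not match
# the exact wording written by do_fitIcube:
def _fit_function_lookup_demo():
    import astropy.io.fits as pf

    hdr = pf.Header()
    hdr["HISTORY"] = "Fit model is 2-order log-polynomial"  # assumed example card
    line = [card for card in hdr["HISTORY"] if "Fit model is" in card][-1]
    # Same parsing idea as command_line(): take the word containing "polynomial"
    # and keep the part before the hyphen, e.g. "log-polynomial" -> "log".
    return [word for word in line.split() if "polynomial" in word][0].split("-")[0]
# ------------------------------------------------------------------------------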
210 | 211 | # Get coefficient maps (without knowing how many there are) 212 | # Reverse index order to match RM-Tools internal ordering (highest to lowest polynomial order) 213 | coeffs = np.zeros((6, *old_reffreq_map.shape), dtype=covar_map.dtype) 214 | for i in range(6): 215 | try: # Keep trying higher orders 216 | data = pf.getdata(basename + f"coeff{i}.fits") 217 | coeffs[5 - i] = data 218 | except FileNotFoundError: 219 | break # Once it runs out of valid coefficient maps, move on 220 | 221 | return covar_map, old_reffreq_map, coeffs, header 222 | 223 | 224 | def rescale_I_pixel(data, fit_function): 225 | covar, coeff, old_freq, new_freq = data 226 | oldDict = {} # Initialize a fitDict, which contains the relevant fit information 227 | oldDict["reference_frequency_Hz"] = old_freq 228 | oldDict["p"] = coeff 229 | oldDict["pcov"] = covar 230 | oldDict["fit_function"] = fit_function 231 | 232 | old_result = FitResult( 233 | params=coeff, 234 | fitStatus=np.nan, # Placeholder 235 | chiSq=np.nan, # Placeholder 236 | chiSqRed=np.nan, # Placeholder 237 | AIC=np.nan, # Placeholder 238 | polyOrd=len(coeff) - 1, 239 | nIter=0, # Placeholder 240 | reference_frequency_Hz=old_freq, 241 | dof=np.nan, # Placeholder 242 | pcov=covar, 243 | perror=np.zeros_like(coeff), # Placeholder 244 | fit_function=fit_function, 245 | ) 246 | 247 | new_fit_result = renormalize_StokesI_model(old_result, new_freq) 248 | return new_fit_result.params, new_fit_result.perror 249 | 250 | 251 | def rescale_I_model_3D( 252 | covar_map, old_reffreq_map, new_freq_map, coeffs, fit_function="log", num_cores=1 253 | ): 254 | """Rescale the Stokes I model parameters to a new reference frequency, for 255 | an entire image (i.e., 3D pipeline products). 256 | 257 | Inputs: 258 | covar_map (4D array): covariance matrix map, such as produced by do_fitIcube. 259 | old_reffreq_map (2D array): map of current reference frequency, such as produced by do_fitIcube. 260 | new_freq_map (2D array): map of new reference frequencies. 261 | coeffs (3D array): model parameter map (going from highest to lowest order) 262 | coeff_errors (3D array): model parameter uncertainties map (highest to lowest order) 263 | 264 | Returns: 265 | new_freq_map (unchanged from input) 266 | new_coeffs (3D array): maps of new model parameters (highest to lowest order) 267 | new_errors (3D array): maps of new parameter uncertainties (highest to lowest) 268 | """ 269 | 270 | # Initialize output arrays, keep dtype consistent 271 | new_coeffs = np.zeros_like(coeffs, dtype=coeffs.dtype) 272 | new_errors = np.zeros_like(coeffs, dtype=coeffs.dtype) 273 | rs = old_reffreq_map.shape[ 274 | 1 275 | ] # Get the length of a row, for array indexing later on. 
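# --- Illustrative aside (not part of rescale_I_model_3D.py) -------------------
# rs is the row length of the 2D reference-frequency map. Later in this function
# the pooled per-pixel results are scattered back into the coefficient maps as
# new_coeffs[:, i // rs, i % rs], i.e. the flat pixel index i is unpacked as
# (row, col) = (i // rs, i % rs). That matches NumPy's default C (row-major)
# order used by .flat and reshape when the parallel inputs are built. A minimal,
# self-contained sketch of the round trip on a toy 2x3 map:
def _flat_index_demo():
    import numpy as np

    toy_map = np.arange(6).reshape(2, 3)  # toy 2x3 "image"; row length rs = 3
    rs = toy_map.shape[1]
    for i, value in enumerate(toy_map.flat):
        assert toy_map[i // rs, i % rs] == value  # flat index -> (row, col)
# ------------------------------------------------------------------------------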
276 | 277 | # Set up inputs for parallelization: 278 | # Input order is: covariance matrix, coefficient vector, old frequency, new frequency 279 | inputs = list( 280 | zip( 281 | np.reshape( 282 | np.moveaxis(covar_map, (0, 1), (2, 3)), (old_reffreq_map.size, 6, 6) 283 | ), 284 | np.reshape(np.moveaxis(coeffs, 0, 2), (old_reffreq_map.size, 6)), 285 | old_reffreq_map.flat, 286 | new_freq_map.flat, 287 | ) 288 | ) 289 | with mp.Pool(num_cores) as pool_: 290 | results = pool_.map( 291 | partial(rescale_I_pixel, fit_function=fit_function), inputs, chunksize=100 292 | ) 293 | 294 | for i, (p, perror) in enumerate(results): 295 | new_coeffs[:, i // rs, i % rs] = p 296 | new_errors[:, i // rs, i % rs] = perror 297 | 298 | return new_freq_map, new_coeffs, new_errors 299 | 300 | 301 | def write_new_parameters( 302 | new_freq_map, new_coeffs, new_errors, out_basename, header, overwrite=False 303 | ): 304 | """Write out new parameter/uncertainty maps to FITS files. 305 | Inputs: 306 | new_freq_map (unchanged from input) 307 | new_coeffs (3D array): maps of new model parameters (highest to lowest order) 308 | new_errors (3D array): maps of new parameter uncertainties (highest to lowest) 309 | out_basename (str): base path+name of the files to be written out 310 | (will be postpended with 'newcoeff0.fits', etc.) 311 | 312 | Returns: (nothing) 313 | Writes out coefficient maps (newcoeff0.fits, etc.) and 314 | coefficient errors (newcoeff0err.fits, etc.) 315 | """ 316 | 317 | out_header = header.copy() 318 | out_header["HISTORY"] = "Stokes I model rescaled to new reference frequency." 319 | out_header["REFFREQ"] = (new_freq_map[0, 0], "Hz") 320 | if "BUNIT" in out_header: 321 | del out_header["BUNIT"] 322 | 323 | # Work out highest order of polynomial: 324 | # if any of the 6 possible coeff planes contain non-zero and non-nan values, it's a 'good' plane. 325 | max_order = ( 326 | np.sum(np.any((new_coeffs != 0.0) & (~np.isnan(new_coeffs)), axis=(1, 2))) - 1 327 | ) 328 | 329 | for i in range(max_order + 1): 330 | pf.writeto( 331 | out_basename + f"newcoeff{i}.fits", 332 | new_coeffs[5 - i], 333 | header=out_header, 334 | overwrite=overwrite, 335 | ) 336 | pf.writeto( 337 | out_basename + f"newcoeff{i}err.fits", 338 | new_errors[5 - i], 339 | header=out_header, 340 | overwrite=overwrite, 341 | ) 342 | 343 | 344 | # -----------------------------------------------------------------------------# 345 | if __name__ == "__main__": 346 | command_line() 347 | -------------------------------------------------------------------------------- /RMutils/__init__.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | """Dependencies for RM utilities""" 3 | import pkg_resources 4 | 5 | __all__ = [ 6 | "mpfit", 7 | "normalize", 8 | "util_FITS", 9 | "util_misc", 10 | "util_plotFITS", 11 | "util_plotTk", 12 | "util_rec", 13 | "util_RM", 14 | ] 15 | 16 | __version__ = pkg_resources.get_distribution("RM-Tools").version 17 | -------------------------------------------------------------------------------- /RMutils/normalize.py: -------------------------------------------------------------------------------- 1 | # The APLpyNormalize class is largely based on code provided by Sarah Graves. 
2 | 3 | import matplotlib.cbook as cbook 4 | import numpy as np 5 | import numpy.ma as ma 6 | from matplotlib.colors import Normalize 7 | 8 | 9 | class APLpyNormalize(Normalize): 10 | """ 11 | A Normalize class for imshow that allows different stretching functions 12 | for astronomical images. 13 | """ 14 | 15 | def __init__( 16 | self, stretch="linear", exponent=5, vmid=None, vmin=None, vmax=None, clip=False 17 | ): 18 | """ 19 | Initalize an APLpyNormalize instance. 20 | 21 | Optional Keyword Arguments: 22 | 23 | *vmin*: [ None | float ] 24 | Minimum pixel value to use for the scaling. 25 | 26 | *vmax*: [ None | float ] 27 | Maximum pixel value to use for the scaling. 28 | 29 | *stretch*: [ 'linear' | 'log' | 'sqrt' | 'arcsinh' | 'power' ] 30 | The stretch function to use (default is 'linear'). 31 | 32 | *vmid*: [ None | float ] 33 | Mid-pixel value used for the log and arcsinh stretches. If 34 | set to None, a default value is picked. 35 | 36 | *exponent*: [ float ] 37 | if self.stretch is set to 'power', this is the exponent to use. 38 | 39 | *clip*: [ True | False ] 40 | If clip is True and the given value falls outside the range, 41 | the returned value will be 0 or 1, whichever is closer. 42 | """ 43 | 44 | # Call original initalization routine 45 | Normalize.__init__(self, vmin=vmin, vmax=vmax, clip=clip) 46 | 47 | # Save parameters 48 | self.stretch = stretch 49 | self.exponent = exponent 50 | 51 | if stretch == "power" and np.equal(self.exponent, None): 52 | raise Exception("For stretch=='power', an exponent should be specified") 53 | 54 | if np.equal(vmid, None): 55 | if stretch == "log": 56 | self.midpoint = 0.05 57 | elif stretch == "arcsinh": 58 | self.midpoint = -0.033 59 | else: 60 | self.midpoint = None 61 | else: 62 | self.midpoint = (vmid - vmin) / (vmax - vmin) 63 | 64 | def __call__(self, value, clip=None): 65 | # read in parameters 66 | method = self.stretch 67 | exponent = self.exponent 68 | midpoint = self.midpoint 69 | 70 | # ORIGINAL MATPLOTLIB CODE 71 | 72 | if clip is None: 73 | clip = self.clip 74 | 75 | if cbook.iterable(value): 76 | vtype = "array" 77 | val = ma.asarray(value).astype(np.float) 78 | else: 79 | vtype = "scalar" 80 | val = ma.array([value]).astype(np.float) 81 | 82 | self.autoscale_None(val) 83 | vmin, vmax = self.vmin, self.vmax 84 | if vmin > vmax: 85 | raise ValueError("minvalue must be less than or equal to maxvalue") 86 | elif vmin == vmax: 87 | return 0.0 * val 88 | else: 89 | if clip: 90 | mask = ma.getmask(val) 91 | val = ma.array(np.clip(val.filled(vmax), vmin, vmax), mask=mask) 92 | result = (val - vmin) * (1.0 / (vmax - vmin)) 93 | 94 | # CUSTOM APLPY CODE 95 | 96 | if self.stretch == "linear": 97 | pass 98 | 99 | elif self.stretch == "log": 100 | result = ma.log10((result / self.midpoint) + 1.0) / ma.log10( 101 | (1.0 / self.midpoint) + 1.0 102 | ) 103 | 104 | elif self.stretch == "sqrt": 105 | result = ma.sqrt(result) 106 | 107 | elif self.stretch == "arcsinh": 108 | result = ma.arcsinh(result / self.midpoint) / ma.arcsinh( 109 | 1.0 / self.midpoint 110 | ) 111 | 112 | elif self.stretch == "power": 113 | result = ma.power(result, exponent) 114 | 115 | else: 116 | raise Exception("Unknown stretch in APLpyNormalize: %s" % self.stretch) 117 | 118 | if vtype == "scalar": 119 | result = result[0] 120 | 121 | return result 122 | 123 | def inverse(self, value): 124 | # ORIGINAL MATPLOTLIB CODE 125 | 126 | if not self.scaled(): 127 | raise ValueError("Not invertible until scaled") 128 | 129 | vmin, vmax = self.vmin, self.vmax 130 | 131 | # 
CUSTOM APLPY CODE 132 | 133 | if cbook.iterable(value): 134 | val = ma.asarray(value) 135 | else: 136 | val = value 137 | 138 | if self.stretch == "linear": 139 | pass 140 | 141 | elif self.stretch == "log": 142 | val = self.midpoint * ( 143 | ma.power(10.0, (val * ma.log10(1.0 / self.midpoint + 1.0))) - 1.0 144 | ) 145 | 146 | elif self.stretch == "sqrt": 147 | val = val * val 148 | 149 | elif self.stretch == "arcsinh": 150 | val = self.midpoint * ma.sinh(val * ma.arcsinh(1.0 / self.midpoint)) 151 | 152 | elif self.stretch == "power": 153 | val = ma.power(val, (1.0 / self.exponent)) 154 | 155 | else: 156 | raise Exception("Unknown stretch in APLpyNormalize: %s" % self.stretch) 157 | 158 | return vmin + val * (vmax - vmin) 159 | -------------------------------------------------------------------------------- /RMutils/util_plotFITS.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # =============================================================================# 3 | # # 4 | # NAME: util_plotFITS.py # 5 | # # 6 | # PURPOSE: Common function for plotting fits images. # 7 | # # 8 | # MODIFIED: 15-May-2016 by C. Purcell # 9 | # # 10 | # CONTENTS: # 11 | # # 12 | # label_format_dms # 13 | # label_format_hms # 14 | # label_format_deg # 15 | # plot_fits_map # 16 | # # 17 | # =============================================================================# 18 | 19 | import math as m 20 | 21 | import astropy.wcs.wcs as pw 22 | import matplotlib.pyplot as plt 23 | import numpy as np 24 | from matplotlib.patches import Ellipse, Polygon 25 | from matplotlib.ticker import FuncFormatter, MaxNLocator 26 | 27 | from .normalize import APLpyNormalize 28 | from .util_FITS import mkWCSDict, strip_fits_dims 29 | from .util_misc import calc_stats 30 | 31 | 32 | # -----------------------------------------------------------------------------# 33 | def label_format_dms(deg, pos): 34 | """ 35 | Format decimal->DD:MM:SS. Called by the label formatter. 36 | """ 37 | 38 | angle = abs(deg) 39 | sign = 1 40 | if angle != 0: 41 | sign = angle / deg 42 | 43 | # Calcuate the degrees, min and sec 44 | dd = int(angle) 45 | rmndr = 60.0 * (angle - dd) 46 | mm = int(rmndr) 47 | ss = 60.0 * (rmndr - mm) 48 | 49 | # If rounding up to 60, carry to the next term 50 | if float("%05.2f" % ss) >= 60.0: 51 | ss = 0.0 52 | mm += 1.0 53 | if float("%02d" % mm) >= 60.0: 54 | mm = 0.0 55 | dd += 1.0 56 | 57 | if sign > 0: 58 | return "%02dd%02dm" % (sign * dd, mm) 59 | else: 60 | return "%03dd%02dm" % (sign * dd, mm) 61 | 62 | 63 | # -----------------------------------------------------------------------------# 64 | def label_format_hms(deg, pos): 65 | """ 66 | Format decimal->HH:MM:SS. Called by the label formatter. 
67 | """ 68 | 69 | hrs = deg / 15.0 70 | 71 | angle = abs(hrs) 72 | sign = 1 73 | if angle != 0: 74 | sign = angle / hrs 75 | 76 | # Calcuate the hrsrees, min and sec 77 | dd = int(angle) 78 | rmndr = 60.0 * (angle - dd) 79 | mm = int(rmndr) 80 | ss = 60.0 * (rmndr - mm) 81 | 82 | # If rounding up to 60, carry to the next term 83 | if float("%05.2f" % ss) >= 60.0: 84 | ss = 0.0 85 | mm += 1.0 86 | if float("%02d" % mm) >= 60.0: 87 | mm = 0.0 88 | dd += 1.0 89 | 90 | if sign > 0: 91 | return "%02dh%02dm%02.0fs" % (sign * dd, mm, ss) 92 | else: 93 | return "%03dh%02dm%02.0fs" % (sign * dd, mm, ss) 94 | 95 | 96 | # -----------------------------------------------------------------------------# 97 | def label_format_deg(deg, pos): 98 | return "%.3f" % deg 99 | 100 | 101 | # -----------------------------------------------------------------------------# 102 | def plot_fits_map( 103 | data, 104 | header, 105 | stretch="auto", 106 | exponent=2, 107 | scaleFrac=0.9, 108 | cmapName="gist_heat", 109 | zMin=None, 110 | zMax=None, 111 | annEllipseLst=[], 112 | annPolyLst=[], 113 | bunit=None, 114 | lw=1.0, 115 | interpolation="Nearest", 116 | fig=None, 117 | dpi=100, 118 | doColbar=True, 119 | ): 120 | """ 121 | Plot a colourscale image of a FITS map. 122 | 123 | annEllipseLst is a list of lists: 124 | annEllipseLst[0][i] = x_deg 125 | annEllipseLst[1][i] = y_deg 126 | annEllipseLst[2][i] = minor_deg 127 | annEllipseLst[3][i] = major_deg 128 | annEllipseLst[4][i] = pa_deg 129 | annEllipseLst[5][i] = colour ... optional, default to 'g' 130 | 131 | annPolyLst is also a list of lists: 132 | annPolyLst[0][i] = list of polygon coords = [[x1,y1], [x2, y2] ...] 133 | annPolyLst[1][i] = colour of polygon e.g., 'w' 134 | """ 135 | 136 | # Strip unused dimensions from the array 137 | data, header = strip_fits_dims(data, header, 2, 5) 138 | 139 | # Parse the WCS information 140 | w = mkWCSDict(header) 141 | wcs = pw.WCS(w["header2D"]) 142 | 143 | # Calculate the image vmin and vmax by measuring the range in the inner 144 | # 'scale_frac' of the image 145 | s = data.shape 146 | boxMaxX = int(s[-1] / 2.0 + s[-1] * scaleFrac / 2.0 + 1.0) 147 | boxMinX = int(s[-1] / 2.0 - s[-1] * scaleFrac / 2.0 + 1.0) 148 | boxMaxY = int(s[-2] / 2.0 + s[-2] * scaleFrac / 2.0 + 1.0) 149 | boxMinY = int(s[-2] / 2.0 - s[-2] * scaleFrac / 2.0 + 1.0) 150 | dataSample = data[boxMinY:boxMaxY, boxMinX:boxMaxX] 151 | measures = calc_stats(dataSample) 152 | sigma = abs(measures["max"] / measures["madfm"]) 153 | if stretch == "auto": 154 | if sigma <= 20: 155 | vMin = measures["madfm"] * (-1.5) 156 | vMax = measures["madfm"] * 10.0 157 | stretch = "linear" 158 | elif sigma > 20: 159 | vMin = measures["madfm"] * (-3.0) 160 | vMax = measures["madfm"] * 40.0 161 | stretch = "linear" 162 | elif sigma > 500: 163 | vMin = measures["madfm"] * (-7.0) 164 | vMax = measures["madfm"] * 200.0 165 | stretch = "sqrt" 166 | if zMax is not None: 167 | vMax = max(zMax, measures["max"]) 168 | if zMax is not None: 169 | vMin = zMin 170 | 171 | # Set the colourscale using an normalizer object 172 | normalizer = APLpyNormalize( 173 | stretch=stretch, exponent=exponent, vmin=vMin, vmax=vMax 174 | ) 175 | 176 | # Setup the figure 177 | if fig is None: 178 | fig = plt.figure(facecolor="w", figsize=(9.5, 8)) 179 | ax = fig.add_axes([0.1, 0.08, 0.9, 0.87]) 180 | if w["coord_type"] == "EQU": 181 | ax.set_xlabel("Right Ascension") 182 | ax.set_ylabel("Declination") 183 | elif w["coord_type"] == "GAL": 184 | ax.set_xlabel("Galactic Longitude (deg)") 185 | 
ax.set_ylabel("Galactic Latitude (deg)") 186 | else: 187 | ax.set_xlabel("Unknown") 188 | ax.set_ylabel("Unknown") 189 | cosY = m.cos(m.radians(w["ycent"])) 190 | aspect = abs(w["ydelt"] / (w["xdelt"] * cosY)) 191 | 192 | # Set the format of the major tick mark and labels 193 | if w["coord_type"] == "EQU": 194 | f = 15.0 195 | majorFormatterX = FuncFormatter(label_format_hms) 196 | minorFormatterX = None 197 | majorFormatterY = FuncFormatter(label_format_dms) 198 | minorFormattery = None 199 | else: 200 | f = 1.0 201 | majorFormatterX = FuncFormatter(label_format_deg) 202 | minorFormatterX = None 203 | majorFormatterY = FuncFormatter(label_format_deg) 204 | minorFormattery = None 205 | ax.xaxis.set_major_formatter(majorFormatterX) 206 | ax.yaxis.set_major_formatter(majorFormatterY) 207 | 208 | # Set the location of the the major tick marks 209 | # xrangeArcmin = abs(w['xmax']-w['xmin'])*(60.0*f) 210 | # xmultiple = m.ceil(xrangeArcmin/3.0)/(60.0*f) 211 | # yrangeArcmin = abs(w['ymax']-w['ymin'])*60.0 212 | # ymultiple = m.ceil(yrangeArcmin/3.0)/60.0 213 | # majorLocatorX = MultipleLocator(xmultiple) 214 | # ax.xaxis.set_major_locator(majorLocatorX) 215 | # majorLocatorY = MultipleLocator(ymultiple) 216 | # ax.yaxis.set_major_locator(majorLocatorY) 217 | 218 | ax.xaxis.set_major_locator(MaxNLocator(5)) 219 | ax.yaxis.set_major_locator(MaxNLocator(5)) 220 | 221 | # Print the image to the axis 222 | im = ax.imshow( 223 | data, 224 | interpolation=interpolation, 225 | origin="lower", 226 | aspect=aspect, 227 | extent=[w["xmax"], w["xmin"], w["ymin"], w["ymax"]], 228 | cmap=plt.get_cmap(cmapName), 229 | norm=normalizer, 230 | ) 231 | 232 | # Add the colorbar 233 | if doColbar: 234 | cbar = fig.colorbar(im, pad=0.0) 235 | if "BUNIT" in header: 236 | cbar.set_label(header["BUNIT"]) 237 | else: 238 | cbar.set_label("Unknown") 239 | if bunit is not None: 240 | cbar.set_label(bunit) 241 | 242 | # Format the colourbar labels - TODO 243 | 244 | # Set white ticks 245 | ax.tick_params(pad=5) 246 | for line in ax.xaxis.get_ticklines() + ax.get_yticklines(): 247 | line.set_markeredgewidth(1) 248 | line.set_color("w") 249 | 250 | # Create the ellipse source annotations 251 | if len(annEllipseLst) > 0: 252 | if len(annEllipseLst) >= 5: 253 | srcXLst = np.array(annEllipseLst[0]) 254 | srcYLst = np.array(annEllipseLst[1]) 255 | srcMinLst = np.array(annEllipseLst[2]) 256 | srcMajLst = np.array(annEllipseLst[3]) 257 | srcPALst = np.array(annEllipseLst[4]) 258 | if len(annEllipseLst) >= 6: 259 | if type(annEllipseLst[5]) is str: 260 | srcEColLst = [annEllipseLst[5]] * len(srcXLst) 261 | elif type(annEllipseLst[5]) is list: 262 | srcEColLst = annEllipseLst[5] 263 | else: 264 | rcEColLst = ["g"] * len(srcXLst) 265 | else: 266 | srcEColLst = ["g"] * len(srcXLst) 267 | for i in range(len(srcXLst)): 268 | try: 269 | el = Ellipse( 270 | (srcXLst[i], srcYLst[i]), 271 | srcMinLst[i], 272 | srcMajLst[i], 273 | angle=180.0 - srcPALst[i], 274 | edgecolor=srcEColLst[i], 275 | linewidth=lw, 276 | facecolor="none", 277 | ) 278 | ax.add_artist(el) 279 | except Exception: 280 | pass 281 | 282 | # Create the polygon source annotations 283 | if len(annPolyLst) > 0: 284 | annPolyCoordLst = annPolyLst[0] 285 | if len(annPolyLst) > 1: 286 | if type(annPolyLst[1]) is str: 287 | annPolyColorLst = [annPolyLst[1]] * len(annPolyCoordLst) 288 | elif type(annPolyLst[1]) is list: 289 | annPolyColorLst = annPolyLst[1] 290 | else: 291 | annPolyColorLst = ["g"] * len(annPolyCoordLst) 292 | else: 293 | annPolyColorLst = ["g"] * 
len(annPolyCoordLst) 294 | for i in range(len(annPolyCoordLst)): 295 | cpoly = Polygon(annPolyCoordLst[i], animated=False, linewidth=lw) 296 | cpoly.set_edgecolor(annPolyColorLst[i]) 297 | cpoly.set_facecolor("none") 298 | ax.add_patch(cpoly) 299 | 300 | return fig 301 | -------------------------------------------------------------------------------- /RMutils/util_rec.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # =============================================================================# 3 | # # 4 | # NAME: util_rec.py # 5 | # # 6 | # PURPOSE: Functions for operating on python record arrays. # 7 | # # 8 | # MODIFIED: 19-November-2015 by C. Purcell # 9 | # # 10 | # CONTENTS: # 11 | # # 12 | # pyify ... return type converters given type strings # 13 | # irecarray_to_py ... convert a recarray into a list # 14 | # fields-view ... return a view of chosen fields in a recarray # 15 | # # 16 | # =============================================================================# 17 | # # 18 | # The MIT License (MIT) # 19 | # # 20 | # Copyright (c) 2015 Cormac R. Purcell # 21 | # # 22 | # Permission is hereby granted, free of charge, to any person obtaining a # 23 | # copy of this software and associated documentation files (the "Software"), # 24 | # to deal in the Software without restriction, including without limitation # 25 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, # 26 | # and/or sell copies of the Software, and to permit persons to whom the # 27 | # Software is furnished to do so, subject to the following conditions: # 28 | # # 29 | # The above copyright notice and this permission notice shall be included in # 30 | # all copies or substantial portions of the Software. # 31 | # # 32 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # 33 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # 34 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # 35 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # 36 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # 37 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # 38 | # DEALINGS IN THE SOFTWARE. # 39 | # # 40 | # =============================================================================# 41 | 42 | 43 | import numpy as np 44 | 45 | 46 | # -----------------------------------------------------------------------------# 47 | def pyify(typestr): 48 | """ 49 | Return a Python :class:'type' that most closely represents the 50 | type encoded by *typestr* 51 | """ 52 | if typestr[1] in "iu": 53 | return int 54 | elif typestr[1] == "f": 55 | return float 56 | elif typestr[1] == "S": 57 | return str 58 | return lambda x: x 59 | 60 | 61 | # -----------------------------------------------------------------------------# 62 | def irecarray_to_py(a): 63 | """ 64 | Slow conversion of a recarray into a list of records with python types. 65 | Get the field names from :attr:'a.dtype.names'. 
66 | :Returns: iterator so that one can handle big input arrays 67 | """ 68 | pytypes = [pyify(typestr) for name, typestr in a.dtype.descr] 69 | 70 | def convert_record(r): 71 | return tuple([converter(value) for converter, value in zip(pytypes, r)]) 72 | 73 | return (convert_record(r) for r in a) 74 | 75 | 76 | # -----------------------------------------------------------------------------# 77 | def fields_view(arr, fieldNameLst=None): 78 | """ 79 | Return a view of a numpy record array containing only the fields names in 80 | the fields argument. 'fields' should be a list of column names. 81 | """ 82 | 83 | # Default to all fields 84 | if not fieldNameLst: 85 | fieldNameLst = arr.dtype.names 86 | dtype2 = np.dtype({name: arr.dtype.fields[name] for name in fieldNameLst}) 87 | 88 | return np.ndarray(arr.shape, dtype2, arr, 0, arr.strides) 89 | -------------------------------------------------------------------------------- /RMutils/util_testing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Utility functions for unit tests. 5 | 6 | Created on Fri Sep 6 15:03:12 2024 7 | @author: cvaneck 8 | """ 9 | 10 | import shutil 11 | 12 | import numpy as np 13 | from astropy.constants import c as speed_of_light 14 | from astropy.io import fits as pf 15 | from scipy.ndimage import gaussian_filter 16 | 17 | 18 | def Faraday_thin_complex_polarization(freq_array, RM, Polint, initial_angle): 19 | """freq_array = channel frequencies in Hz 20 | RM = source RM in rad m^-2 21 | Polint = polarized intensity in whatever units 22 | initial angle = pre-rotation polarization angle (in degrees)""" 23 | l2_array = (speed_of_light.value / freq_array) ** 2 24 | Q = Polint * np.cos(2 * (np.outer(l2_array, RM) + np.deg2rad(initial_angle))) 25 | U = Polint * np.sin(2 * (np.outer(l2_array, RM) + np.deg2rad(initial_angle))) 26 | return np.squeeze(np.transpose(Q + 1j * U)) 27 | 28 | 29 | def create_1D_data(freq_arr, TEST_PATH, ONED_PATH): 30 | RM = 200 31 | pol_angle_deg = 50 32 | StokesI_midband = 1 33 | fracpol = 0.7 34 | noise_amplitude = 0.1 35 | spectral_index = -0.7 36 | error_estimate = 1 # Size of assumed errors as multiple of actual error. 
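# --- Illustrative aside (not part of util_testing.py) -------------------------
# Faraday_thin_complex_polarization() above implements the Faraday-thin model
# P(lambda^2) = Polint * exp(2i * (chi_0 + RM * lambda^2)), with chi_0 the
# initial_angle converted to radians, returned as Q + iU. A minimal usage sketch
# with made-up channel frequencies (note that create_1D_data itself copies a
# pre-made spectrum, RMsynth1D_testdata.dat, rather than generating one with
# this function):
def _faraday_thin_demo():
    import numpy as np

    freq_hz = np.linspace(800e6, 1088e6, 288)  # assumed example band, in Hz
    complex_P = Faraday_thin_complex_polarization(
        freq_array=freq_hz, RM=200, Polint=0.7, initial_angle=50
    )
    return complex_P.real, complex_P.imag  # per-channel Stokes Q and U
# ------------------------------------------------------------------------------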
37 | if not ONED_PATH.exists(): 38 | ONED_PATH.mkdir(parents=True) 39 | shutil.copy(TEST_PATH / "RMsynth1D_testdata.dat", ONED_PATH / "simsource.dat") 40 | with open(ONED_PATH / "sim_truth.txt", "w") as f: 41 | f.write("RM = {} rad/m^2\n".format(RM)) 42 | f.write("Intrsinsic polarization angle = {} deg\n".format(pol_angle_deg)) 43 | f.write("Fractional polarization = {} %\n".format(fracpol * 100.0)) 44 | f.write("Stokes I = {} Jy/beam\n".format(StokesI_midband)) 45 | f.write( 46 | "Reference frequency for I = {} GHz\n".format(np.median(freq_arr) / 1e9) 47 | ) 48 | f.write("Spectral index = {}\n".format(spectral_index)) 49 | f.write("Actual error per channel = {} Jy/beam\n".format(noise_amplitude)) 50 | f.write( 51 | "Input assumed error = {} Jy/beam\n".format( 52 | noise_amplitude * error_estimate 53 | ) 54 | ) 55 | 56 | 57 | def create_3D_data(freq_arr, THREED_PATH, N_side=100): 58 | src_RM = 200 59 | src_pol_angle_deg = 50 60 | src_flux = 2 61 | src_x = N_side // 4 62 | src_y = N_side // 4 63 | 64 | diffuse_RM = 50 65 | diffuse_pol_angle_deg = -10 66 | diffuse_flux = 1 67 | 68 | noise_amplitude = 0.1 69 | beam_size_pix = 20 70 | 71 | src_pol_spectrum = Faraday_thin_complex_polarization( 72 | freq_arr, src_RM, src_flux, src_pol_angle_deg 73 | ) 74 | diffuse_pol_spectrum = Faraday_thin_complex_polarization( 75 | freq_arr, diffuse_RM, diffuse_flux, diffuse_pol_angle_deg 76 | ) 77 | 78 | src_Q_cube = np.zeros((N_side, N_side, freq_arr.size)) 79 | src_U_cube = np.zeros((N_side, N_side, freq_arr.size)) 80 | src_Q_cube[src_x, src_y, :] = src_pol_spectrum.real 81 | src_U_cube[src_x, src_y, :] = src_pol_spectrum.imag 82 | 83 | src_Q_cube = gaussian_filter( 84 | src_Q_cube, (beam_size_pix / 2.35, beam_size_pix / 2.35, 0), mode="wrap" 85 | ) 86 | src_U_cube = gaussian_filter( 87 | src_U_cube, (beam_size_pix / 2.35, beam_size_pix / 2.35, 0), mode="wrap" 88 | ) 89 | scale_factor = ( 90 | np.max(np.sqrt(src_Q_cube**2 + src_U_cube**2)) / src_flux 91 | ) # Renormalizing flux after convolution 92 | src_Q_cube = src_Q_cube / scale_factor 93 | src_U_cube = src_U_cube / scale_factor 94 | 95 | diffuse_Q_cube = np.tile( 96 | diffuse_pol_spectrum.real[np.newaxis, np.newaxis, :], (N_side, N_side, 1) 97 | ) 98 | diffuse_U_cube = np.tile( 99 | diffuse_pol_spectrum.imag[np.newaxis, np.newaxis, :], (N_side, N_side, 1) 100 | ) 101 | 102 | rng = np.random.default_rng(20200422) 103 | noise_Q_cube = rng.normal(scale=noise_amplitude, size=src_Q_cube.shape) 104 | noise_U_cube = rng.normal(scale=noise_amplitude, size=src_Q_cube.shape) 105 | noise_Q_cube = gaussian_filter( 106 | noise_Q_cube, (beam_size_pix / 2.35, beam_size_pix / 2.35, 0), mode="wrap" 107 | ) 108 | noise_U_cube = gaussian_filter( 109 | noise_U_cube, (beam_size_pix / 2.35, beam_size_pix / 2.35, 0), mode="wrap" 110 | ) 111 | scale_factor = ( 112 | np.std(noise_Q_cube) / noise_amplitude 113 | ) # Renormalizing flux after convolution 114 | noise_Q_cube = noise_Q_cube / scale_factor 115 | noise_U_cube = noise_U_cube / scale_factor 116 | 117 | Q_cube = src_Q_cube + noise_Q_cube + diffuse_Q_cube 118 | U_cube = src_U_cube + noise_U_cube + diffuse_U_cube 119 | 120 | header = pf.Header() 121 | header["BITPIX"] = -32 122 | header["NAXIS"] = 3 123 | header["NAXIS1"] = N_side 124 | header["NAXIS2"] = N_side 125 | header["NAXIS3"] = freq_arr.size 126 | header["CTYPE1"] = "RA---SIN" 127 | header["CRVAL1"] = 90 128 | header["CDELT1"] = -1.0 / 3600.0 129 | header["CRPIX1"] = 1 130 | header["CUNIT1"] = "deg" 131 | 132 | header["CTYPE2"] = "DEC--SIN" 133 | header["CRVAL2"] 
= 0 134 | header["CDELT2"] = 1.0 / 3600.0 135 | header["CRPIX2"] = 1 136 | header["CUNIT2"] = "deg" 137 | 138 | header["CTYPE3"] = "FREQ" 139 | header["CRVAL3"] = freq_arr[0] 140 | header["CDELT3"] = freq_arr[1] - freq_arr[0] 141 | header["CRPIX3"] = 1 142 | header["CUNIT3"] = "Hz" 143 | 144 | header["BUNIT"] = "Jy/beam" 145 | 146 | if not THREED_PATH.exists(): 147 | THREED_PATH.mkdir(parents=True) 148 | 149 | pf.writeto( 150 | THREED_PATH / "Q_cube.fits", np.transpose(Q_cube), header=header, overwrite=True 151 | ) 152 | pf.writeto( 153 | THREED_PATH / "U_cube.fits", np.transpose(U_cube), header=header, overwrite=True 154 | ) 155 | with open(THREED_PATH / "freqHz.txt", "w") as f: 156 | for freq in freq_arr: 157 | f.write("{:}\n".format(freq)) 158 | 159 | with open(THREED_PATH / "sim_truth.txt", "w") as f: 160 | f.write("Point source:\n") 161 | f.write("RM = {} rad/m^2\n".format(src_RM)) 162 | f.write("Intrsinsic polarization angle = {} deg\n".format(src_pol_angle_deg)) 163 | f.write("Polarized Flux = {} Jy/beam\n".format(src_flux)) 164 | f.write("x position = {} pix\n".format(src_x)) 165 | f.write("y position = {} pix\n".format(src_y)) 166 | f.write("\n") 167 | f.write("Diffuse emission:\n") 168 | f.write("RM = {} rad/m^2\n".format(diffuse_RM)) 169 | f.write( 170 | "Intrsinsic polarization angle = {} deg\n".format(diffuse_pol_angle_deg) 171 | ) 172 | f.write("Polarized Flux = {} Jy/beam\n".format(diffuse_flux)) 173 | f.write("\n") 174 | f.write("Other:\n") 175 | f.write("Actual error per channel = {} Jy/beam\n".format(noise_amplitude)) 176 | f.write("Beam FWHM = {} pix\n".format(beam_size_pix)) 177 | -------------------------------------------------------------------------------- /VERSION_HISTORY.txt: -------------------------------------------------------------------------------- 1 | 2 | 1.4.8: 3 | -Added m12 model (mixed slab with turbulent foreground). 4 | -forced rmsynth1d outputs to the bit depth specified by the user (previously many outputs were 64bit regardless of user specification). 5 | -Added combined Q/U sigma_add calculation and output. Now produces sigma_add for Q and U separately, and for both together. 6 | -Fixed bug in rmclean3d where multiprocessing pool was not used properly. 7 | 8 | 1.4.7: 9 | -fixed crash when using Stokes I model cube in rmsynth3d. 10 | -changed all references to speed of light to use astropy constant. 11 | -fixed bug from strange interaction between Prefect, Docker, and multiprocessing. 12 | -added basic tests for helper tools. 13 | 14 | 1.4.6: 15 | -fixed bug in rmsynth3d when computing RMSF that is not spatially uniform. 16 | 17 | 1.4.5: 18 | -Fixed 3DIrescale tool to work with new NamedTuple for Stokes I fitting outputs. 19 | 20 | 1.4.4: 21 | -Fixed calcRMSF tool to work with new NamedTuple system for RMSF calculation/fitting outputs. 22 | 23 | 1.4.3: 24 | -Fixed bug in RMpeakfit3D which wasn't using the correct channel weights to calculate uncertainties. 25 | 26 | 1.4.2: 27 | -minor fix to uncertainties generated by RMpeakfit3D 28 | 29 | 1.4.1: 30 | -changed internal handling of Stokes I models; fixed problem when using user-defined model. 31 | 32 | 1.4.0: 33 | -updates to do_fitIcube tool: removed deprecated cutoff keywords, fixed tqdm 34 | progress bars, some minor speed and efficiency improvements. 35 | -relaxed numpy version restriction, since the numpy-astropy version conflict 36 | has been resolved. 37 | -added tqdm as install dependency. 
38 | -removed internal copies of external packages (corner, emcee, nestle) used by QU-fitting 39 | -added pre-commit auto-formatting system 40 | -escaped all LaTeX characters 41 | -implemented auto-formatting, using black, and updated all existing code to that standard. 42 | -removed RMS noise estimate for FDF -- it was inacurrate and misleading 43 | -fixed corMAD noise estimate for FDF -- it was estimating MAD of PI rather than Q and U, 44 | and thus was biased low. Now uses Q and U of FDF, and reproduces correct value in 45 | signal-free and RM-cleaned simulations. Still biased high in dirty spectra; no fixing that. 46 | -old versions of core rmsynth and RMSF calculation functions are now officially deprecated 47 | -fixed crashing when trying to generate RMsynth1d plots for source with negative Stokes I model 48 | -improved FITS headers of 3D tools output 49 | -1D: removed RMS noise estimate (for being misleading); removed _chan outputs (for being low value and confusing) 50 | -fixed estimation of empirical FDF noise. (1D RMclean) 51 | -QU-fitting: fixed problems with de-rotated angle wrapping 52 | -Integrated nufft tool for the Fourier transform math (makes rmsynth faster) 53 | -added options to rmsynth3d to decrease RAM use 54 | -new QU-fitting models: m5 (single Burn slab), m6 (double Burn slab), m7 (single mixed emission slab) 55 | 56 | 57 | 1.3.1: 58 | -progress bars updated to use tdqm, which should make them faster. 59 | -Integrated super-resolution method of Cotton & Rudnick 2023, which creates 60 | narrower RMSF shapes by setting lambda^2_0 = 0. 61 | -Added options to dofitIcube tool to save more outputs (coefficient maps, 62 | errors, etc). 63 | -Made RMSF theoretical width calculations consistent: Dickey et al. 2019 64 | now used throughout. 65 | 66 | 1.3.0: 67 | -merged in bwdepol branch. This does not change any core functionality in the 68 | existing RM-Tools programs, but adds two new command-line tools: rmtools_bwpredict 69 | and rmtools_bwdepol, which predict and correct for bandwidth depolarization 70 | respectively. 71 | -fixed a bug with JSON outputs not handling numpy variables. This was partially 72 | due to a problem with asscalar() in the utils using a depcreciated numpy function, 73 | but partially from other causes. 74 | -an improved version of the Stokes I cube fitting routine has been developed 75 | by Lerato Sebokolody, which includes parallelization. 76 | 77 | 1.2.0: 78 | -changed Stokes I fitting to always use 64 bit values (even if data is 32 bit). 79 | This is because the fitter was found to be vulnerable to numerical issues 80 | when fitting log-polynomials in 32-bits (thanks to Shannon Vanderwoude for 81 | discovering the problem). 82 | -added parameter errors of Stokes I fit as output to Stokes I fitDict, to 83 | RMsynth1D outputs, and QU-fitting outputs. These are the 1-sigma errors 84 | computed by the mpfit routine. 85 | -the renormalize_StokesI_model function is now depreciated, since it cannot 86 | transform the errors. It is left in for the time being in case it is needed 87 | for any legacy reasons. 88 | -the make_freq_file.py/rmtools_freqfile tool was found to sometime produce 89 | truncated output (not all channels included). This was fixed and changed 90 | to a more rigourous calculation (using WCS, as suggested by Alec Thomson). 91 | -modified do_fitIcube.py/rmtools_fitIcube to output maps of the fit coefficients 92 | and their errors. Thanks to Biny Sebastian for providing this code. 
93 | -merged pull request from Alec Thomson that changes the fractional polarization 94 | reported by rmsynth1d to use the bias-corrected polarized intensity. 95 | !!!Previous versions used the non-corrected polarized intensity, from this 96 | version onwards the corrected polarized intensity will be used!!! 97 | -merged pull request from Alec Thomson that implements windowed cleaning 98 | and adds a command line flag to set the windowed clean threshold. 99 | -merged pull request from Alec Thomson that changed the QU-fitting routine 100 | to use the bilby package for the nested sampling. This package allows the 101 | user to specify which sampler they wish to use -- the default is dynesty, 102 | but users can choose to use pymultinest or other supported samplers. 103 | -removed support for Python 3.6, as a consequence of the change to bilby. 104 | From this point forward, Python 3.7 is the minimum version supported 105 | (at least for the QU-fitting portion of the package). 106 | 107 | 1.1.2: 108 | -modified the generation of maxPI and peakRM maps in RMsynth3D so that 109 | it occurs in its own function rather than as part of the file writing 110 | function; this allows it to be run inside a notebook. 111 | -modified RMpeakfit_3D to be more stable when FDF cubes have NaNs present. 112 | -Stokes I fit coefficients no longer become NaNs when negative Stokes I 113 | models are renormalized to a new reference frequency. 114 | -added test for unreasonably large peak RM (compared to computed range) 115 | as this can happen when peak is on edge of spectrum. Changes RM to NaN in 116 | these cases. 117 | -Fixed crash when sigmaAdd calculations received NaNs in inputs. 118 | -added experimental tool to compute QU models from clean components, 119 | requested by Luke Pratley. This currently has a normalization error of some 120 | kind, which I've asked Luke to chase down. 121 | -dropping support for Python3.5, since the QUfitting code has problems with it 122 | and it's already end-of-life. 123 | -improved the calculate_RMSF script to not crash when given width smaller than 124 | first sidelobes. Also corrected it to produce the correct width: previously 125 | when width was specified by user it would be 2x wider than specified. 126 | -fixed problem with do_fitIcube.py which would crash when fed 4D Stokes I cube. 127 | -fixed undesired behaviour in rmsynth1d and rmclean1d where spectra plots 128 | wouldn't be saved when plotting to screen was turned on. 129 | 130 | 1.1.1: 131 | -updated calculate_RMSF to use same theoretical width as rmsynth1D 132 | (the formula from Dickey et al. 2019) 133 | -fixed problem where very small Stokes I model values (~1E-60) would 134 | cause numeric overflow in Q/I, U/I, creating infinities that were 135 | not handled gracefully. Now converting infinities to NaNs. 136 | -fixed peakfitcube not being callable from the command line. 137 | -fixed typos in rmtools_calcRMSF. 138 | -fixed pip installation so that QUfitting models are correctly installed. 139 | 140 | 1.1.0: New feature update 141 | -QU-fitting updates from Alec Thomson: sigma-clipping of posteriors, 142 | saving evidence to output file, option to not restart from scratch. 143 | -Fixed bug in chunk reassembly script (assemble_chunks.py) where it would 144 | not figure out the cube size correctly when fed the last chunk. 145 | -Updated RMclean1D to use new derotated angle equation for Observed noise 146 | (previously was using old flawed equation fixed in v1.0.2). 
147 | -Fixed problem with handling of degenerate FITS axes in RMclean3D 148 | -Created new tool: RMpeakfit_3D.py, which applies the 1D peak fitting to 149 | each pixel in a 3D FDF cube. Outputs 2D maps for the resulting 150 | parameters. 151 | -New Stokes I fitting. Two changes: 152 | 1) Logarithmic polynomial fitting function added. This is essentially 153 | a 5th-order polynomial in log-log space, modified into the 154 | standard power-law with curvature formulation. Added flags to 155 | RMsynth1D, do_QUfitting, and do_fitIcube to let user select fitting 156 | function. 157 | 2) Dynamic order selection. If user enters a negative polynomial order, 158 | it will now try fitting successively higher-order polynomials 159 | (starting with 0) until the AIC no longer improves. 160 | -default Stokes I fitting is now a 2nd-order polynomial 161 | -Plotting code has been made more consistent across all routines, no longer 162 | requiring the user to press 'Enter'. 163 | -QUfitting now has .dat output like that of RMsynth1D. 164 | -All supporting tools now have command-line hooks, which all begin with 165 | 'rmtools_'. 166 | 167 | 1.0.5: RMSF FWHM calculation fix (13 Aug 2020) 168 | -Theoretical estimate for FWHM now uses formula from Dickey et al. 2019 169 | instead of Brentjens & de Bruyn 2005, as this is more accurate. 170 | -RMSF fitting improved to be more accurate (previously would overestimate) 171 | -fitReal option for RMSF fitting now works as intended (previously crashed) 172 | 173 | 1.0.4: QUfitting tweaks + RMsynth3D crash fixes (10 Aug 2020) 174 | -Minor QUfitting fixes by Alec Thomson. 175 | -Fixed crash when writing 3D cubes where all pixels are NaNs. 176 | -changed output names for the QUfitting multinest files. 177 | -Fixed crashes in RMsynth3D when input cubes are all NaNs. 178 | -Fixed crash when RMsynth3D normalizes output cubes by Stokes I model. 179 | 180 | 1.0.3: Noise propagation improvements (23 Jul 2020) 181 | -Minor fixes to new plot-saving code. 182 | -Changed theoretical noise estimation equation: Shinsuke Ideguchi pointed 183 | out that it wasn't correctly computing the error in the fractional 184 | polarization. Modified to normalize the channel errors by the model 185 | Stokes I (errors in the model are not accounted for). This will alter 186 | the noise behaviour for sources with steep Stokes I spectra. 187 | 188 | 1.0.2: Fixing derotated angle errors + Multimodel QUfitting (6 Jul 2020) 189 | -RMsynth1D: added option to save plots (pull request by Lenart Heino) 190 | -Error formula for the derotated polarization angle (dPolAngle0Fit/Chan) 191 | was wrong (too small by a factor of sqrt(Nchan)). Corrected formula 192 | incorporated. 193 | -Multi-modal posterior handling added to QU-fitting (by Alec Thomson). 194 | 195 | 1.0.1: Minor bug fixes (12 May 2020) 196 | -For RMsynth1D, the default Phimax value was previously fixed at 600, 197 | which was not ideal for high-frequency data. Modified to now be 198 | dynamic: 10x RMSF FWHM. 199 | -RMclean1D was not using the noise properly when calculating derived 200 | properties. RMclean1D error bars prior to this version likely have 201 | issues. 202 | 203 | 1.0: Official release (1 May 2020) 204 | -------------------------------------------------------------------------------- /external/finufft_LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (C) 2017-2023 The Simons Foundation, Inc. - All Rights Reserved. 2 | 3 | Lead developer: Alex H. 
Barnett; see docs/ackn.rst for other contributors. 4 | 5 | ------ 6 | 7 | FINUFFT is licensed under the Apache License, Version 2.0 (the 8 | "License"); you may not use this file except in compliance with the 9 | License. You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | 19 | ------ 20 | 21 | Certain parts of this repository are contributed by others. 22 | For their license info, see: 23 | 24 | contrib/legendre_rule_fast.license 25 | fortran/cmcl_license.txt 26 | tutorial/utils/lgwt.m 27 | 28 | ------ 29 | 30 | If you find this library useful, or it helps you in creating software 31 | or publications, please let us know, and acknowledge that fact by citing our 32 | repository: 33 | 34 | https://github.com/flatironinstitute/finufft 35 | 36 | and the corresponding journal articles (particularly the first): 37 | 38 | A parallel non-uniform fast Fourier transform library based on an 39 | ``exponential of semicircle'' kernel. A. H. Barnett, J. F. Magland, 40 | and L. af Klinteberg. SIAM J. Sci. Comput. 41(5), C479-C504 (2019). 41 | 42 | Aliasing error of the exp$(\beta \sqrt{1-z^2})$ kernel in the 43 | nonuniform fast Fourier transform. A. H. Barnett, 44 | Appl. Comput. Harmon. Anal. 51, 1-16 (2021). 45 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import io 5 | import os 6 | 7 | from setuptools import setup 8 | 9 | NAME = "RM-Tools" 10 | DESCRIPTION = "RM-synthesis, RM-clean and QU-fitting on polarised radio spectra" 11 | URL = "https://github.com/CIRADA-Tools/RM-Tools" 12 | REQUIRES_PYTHON = ">=3.7.0" 13 | VERSION = "1.4.8" 14 | DOWNLOAD_URL = ( 15 | "https://github.com/CIRADA-Tools/RM-Tools/archive/v" + VERSION + ".tar.gz" 16 | ) 17 | 18 | REQUIRED = [ 19 | "numpy<2", 20 | "numpy>1.22;python_version=='3.8'", 21 | "scipy", 22 | "matplotlib>=3.4.0", 23 | "astropy", 24 | "tdqm", 25 | "deprecation", 26 | "finufft", 27 | ] 28 | 29 | # Using AT's fork for now - includes tiny bug fix for bilby 30 | extras_require = { 31 | "QUfitting": ["bilby>=1.1.5", "emcee", "nestle", "corner"], 32 | "parallel": ["schwimmbad"], 33 | "dev": ["pre-commit", "black", "isort", "pytest"], 34 | } 35 | 36 | here = os.path.abspath(os.path.dirname(__file__)) 37 | 38 | try: 39 | with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f: 40 | long_description = "\n" + f.read() 41 | except FileNotFoundError: 42 | long_description = DESCRIPTION 43 | 44 | setup( 45 | name=NAME, 46 | version=VERSION, 47 | description=DESCRIPTION, 48 | long_description=long_description, 49 | long_description_content_type="text/markdown", 50 | python_requires=REQUIRES_PYTHON, 51 | url=URL, 52 | download_url=DOWNLOAD_URL, 53 | packages=["RMtools_1D", "RMtools_3D", "RMutils"], 54 | entry_points={ 55 | "console_scripts": [ 56 | "rmsynth3d=RMtools_3D.do_RMsynth_3D:main", 57 | "rmclean3d=RMtools_3D.do_RMclean_3D:main", 58 | "rmsynth1d=RMtools_1D.do_RMsynth_1D:main", 59 | "rmclean1d=RMtools_1D.do_RMclean_1D:main", 60 | "rmsynth1dFITS=RMtools_1D.do_RMsynth_1D_fromFITS:main", 61 | 
"qufit=RMtools_1D.do_QUfit_1D_mnest:main", 62 | "rmtools_freqfile=RMtools_3D.make_freq_file:save_freq_file", 63 | "rmtools_calcRMSF=RMtools_1D.calculate_RMSF:main", 64 | "rmtools_testdata1D=RMtools_1D.mk_test_ascii_data:main", 65 | "rmtools_createchunks=RMtools_3D.create_chunks:main", 66 | "rmtools_assemblechunks=RMtools_3D.assemble_chunks:main", 67 | "rmtools_fitIcube=RMtools_3D.do_fitIcube:main", 68 | "rmtools_peakfitcube=RMtools_3D.RMpeakfit_3D:main", 69 | "rmtools_testdata3D=RMtools_3D.mk_test_cube_data:main", 70 | "rmtools_extractregion=RMtools_3D.extract_region:main", 71 | "rmtools_bwdepol=RMtools_1D.rmtools_bwdepol:main", 72 | "rmtools_bwpredict=RMtools_1D.rmtools_bwpredict:main", 73 | "rmtools_3DIrescale=RMtools_3D.rescale_I_model_3D:command_line", 74 | ], 75 | }, 76 | install_requires=REQUIRED, 77 | extras_require=extras_require, 78 | include_package_data=True, 79 | license="MIT", 80 | classifiers=[ 81 | "Development Status :: 5 - Production/Stable", 82 | "License :: OSI Approved :: MIT License", 83 | "Programming Language :: Python", 84 | "Programming Language :: Python :: 3", 85 | "Programming Language :: Python :: 3.7", 86 | "Intended Audience :: Science/Research", 87 | "Topic :: Scientific/Engineering :: Astronomy", 88 | ], 89 | maintainer="Cameron Van Eck", 90 | maintainer_email="cameron.vaneck@anu.edu.au", 91 | test_suite="tests", 92 | ) 93 | -------------------------------------------------------------------------------- /tests/QA_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | QA testing tools for RM-tools. 5 | These tools are intended to produce test simulated data sets, and run them 6 | through RM-tools. Automated tools will only be able to confirm that things ran, 7 | but user inspection of the results will be needed to confirm that the expected 8 | values are produced. 9 | 10 | Random values are necessary to simulate noise, which is expected for different 11 | parts of the code. I have forced the random seed to be the same each run, 12 | in order to make the tests deterministic. This works for everything except 13 | QU fitting, which uses random numbers internally that can't be controlled. 14 | 15 | Created on Fri Oct 25 10:00:24 2019 16 | @author: Cameron Van Eck 17 | """ 18 | import json 19 | import os 20 | import shutil 21 | import subprocess 22 | import unittest 23 | from pathlib import Path 24 | 25 | import numpy as np 26 | from astropy.constants import c as speed_of_light 27 | from astropy.io import fits as pf 28 | from scipy.ndimage import gaussian_filter 29 | 30 | from RMutils.util_testing import create_1D_data, create_3D_data 31 | 32 | TEST_PATH = Path(__file__).parent.absolute() 33 | ONED_PATH = TEST_PATH / "simdata" / "1D" 34 | THREED_PATH = TEST_PATH / "simdata" / "3D" 35 | 36 | 37 | class test_RMtools(unittest.TestCase): 38 | def setUp(self): 39 | # Clean up old simulations to prevent interference with new runs. 
40 | N_chan = 288 41 | self.freq_arr = np.linspace(800e6, 1088e6, num=N_chan) 42 | self.models = (1, 2, 3, 4, 5, 7, 11) 43 | self.sampler = "nestle" 44 | 45 | def test_a1_1D_synth_runs(self): 46 | create_1D_data(self.freq_arr, TEST_PATH, ONED_PATH) 47 | returncode = subprocess.call( 48 | f"rmsynth1d '{(ONED_PATH/'simsource.dat').as_posix()}' -l 600 -d 3 -S -i", 49 | shell=True, 50 | ) 51 | self.assertEqual(returncode, 0, "RMsynth1D failed to run.") 52 | 53 | def test_a2_1D_synth_values(self): 54 | mDict = json.load(open(ONED_PATH / "simsource_RMsynth.json", "r")) 55 | refDict = json.load(open(TEST_PATH / "RMsynth1D_referencevalues.json", "r")) 56 | for key in mDict.keys(): 57 | if (key == "polyCoefferr") or key == "polyCoeffs": 58 | ref_values = refDict[key].split(",") 59 | test_values = mDict[key].split(",") 60 | for ref, test in zip(ref_values, test_values): 61 | self.assertAlmostEqual( 62 | float(test), 63 | float(ref), 64 | places=3, 65 | msg=f"Key {key} differs from expectation", 66 | ) 67 | elif type(mDict[key]) == str or refDict[key] == 0: 68 | self.assertEqual( 69 | mDict[key], refDict[key], "{} differs from expectation.".format(key) 70 | ) 71 | else: 72 | self.assertTrue( 73 | np.abs((mDict[key] - refDict[key]) / refDict[key]) < 1e-3, 74 | "{} differs from expectation.".format(key), 75 | ) 76 | 77 | def test_c_3D_synth(self): 78 | create_3D_data(self.freq_arr, THREED_PATH) 79 | returncode = subprocess.call( 80 | f"rmsynth3d '{(THREED_PATH/'Q_cube.fits').as_posix()}' '{(THREED_PATH/'U_cube.fits').as_posix()}' '{(THREED_PATH/'freqHz.txt').as_posix()}' -l 300 -d 10", 81 | shell=True, 82 | ) 83 | self.assertEqual(returncode, 0, "RMsynth3D failed to run.") 84 | header = pf.getheader(THREED_PATH / "FDF_tot_dirty.fits") 85 | self.assertEqual(header["NAXIS"], 3, "Wrong number of axes in output?") 86 | self.assertEqual( 87 | (header["NAXIS1"], header["NAXIS2"]), 88 | (100, 100), 89 | "Image plane has wrong dimensions!", 90 | ) 91 | self.assertEqual( 92 | header["NAXIS3"], 61, "Number of output FD planes has changed." 93 | ) 94 | 95 | def test_b1_1D_clean(self): 96 | if not (ONED_PATH / "simsource_RMsynth.dat").exists(): 97 | self.skipTest("Could not test 1D clean; 1D synth failed first.") 98 | returncode = subprocess.call( 99 | f"rmclean1d '{(ONED_PATH/'simsource.dat').as_posix()}' -n 11 -S", shell=True 100 | ) 101 | self.assertEqual(returncode, 0, "RMclean1D failed to run.") 102 | 103 | def test_b2_1D_clean_values(self): 104 | mDict = json.load(open(ONED_PATH / "simsource_RMclean.json", "r")) 105 | refDict = json.load(open(TEST_PATH / "RMclean1D_referencevalues.json", "r")) 106 | for key in mDict.keys(): 107 | self.assertTrue( 108 | np.abs((mDict[key] - refDict[key]) / refDict[key]) < 1e-3, 109 | "{} differs from expectation.".format(key), 110 | ) 111 | 112 | def test_d_3D_clean(self): 113 | if not (THREED_PATH / "FDF_tot_dirty.fits").exists(): 114 | self.skipTest("Could not test 3D clean; 3D synth failed first.") 115 | returncode = subprocess.call( 116 | f"rmclean3d '{(THREED_PATH/'FDF_tot_dirty.fits').as_posix()}' '{(THREED_PATH/'RMSF_tot.fits').as_posix()}' -n 10", 117 | shell=True, 118 | ) 119 | self.assertEqual(returncode, 0, "RMclean3D failed to run.") 120 | # what else? 
121 | 122 | def test_e_1Dsynth_fromFITS(self): 123 | if not (THREED_PATH / "Q_cube.fits").exists(): 124 | create_3D_data(self.freq_arr, THREED_PATH) 125 | returncode = subprocess.call( 126 | f"rmsynth1dFITS '{(THREED_PATH/'Q_cube.fits').as_posix()}' '{(THREED_PATH/'U_cube.fits').as_posix()}' 25 25 -l 600 -d 3 -S", 127 | shell=True, 128 | ) 129 | self.assertEqual(returncode, 0, "RMsynth1D_fromFITS failed to run.") 130 | 131 | def test_f1_QUfitting(self): 132 | if not (ONED_PATH / "simsource.dat").exists(): 133 | create_1D_data(self.freq_arr, TEST_PATH, ONED_PATH) 134 | 135 | local_models = Path("models_ns") 136 | if not local_models.exists(): 137 | shutil.copytree(TEST_PATH / ".." / "RMtools_1D" / "models_ns", local_models) 138 | 139 | for model in self.models: 140 | returncode = subprocess.call( 141 | f"qufit simdata/1D/simsource.dat --sampler {self.sampler} -m {model}", 142 | shell=True, 143 | ) 144 | 145 | self.assertEqual(returncode, 0, "QU fitting failed to run.") 146 | shutil.rmtree(local_models) 147 | 148 | def _test_f2_QUfit_values(self): 149 | # I have temporarily disabled this test because it causes a lot of problems 150 | # with values not being consistant across different runs. 151 | err_limit = 0.05 # 5% 152 | 153 | for model in self.models: 154 | mDict = json.load( 155 | open(ONED_PATH / f"simsource_m{model}_{self.sampler}.json", "r") 156 | ) 157 | 158 | refDict = json.load( 159 | open( 160 | TEST_PATH 161 | / f"QUfit_referencevalues/simsource_m{model}_{self.sampler}.json", 162 | "r", 163 | ) 164 | ) 165 | 166 | # The QU-fitting code has internal randomness that I can't control. So every run 167 | # will produce slightly different results. I want to assert that these differences 168 | # are below some limit. 169 | for key, val in refDict.items(): 170 | if isinstance(val, str): 171 | continue 172 | if isinstance(val, list): 173 | for i, v in enumerate(val): 174 | if isinstance(v, str): 175 | continue 176 | self.assertTrue( 177 | abs(mDict[key][i] - v) / abs(v) < err_limit, 178 | f"values[{i}] of {key} of model {model} differs from expectation.", 179 | ) 180 | else: 181 | self.assertTrue( 182 | abs(mDict[key] - val) / abs(val) < err_limit, 183 | f"{key} of model {model} differs from expectation.", 184 | ) 185 | 186 | 187 | if __name__ == "__main__": 188 | os.chdir(os.path.dirname(os.path.realpath(__file__))) 189 | if (TEST_PATH / "simdata").exists(): 190 | shutil.rmtree("simdata") 191 | 192 | print("\nUnit tests running.") 193 | print("Test data inputs and outputs can be found in {}\n\n".format(os.getcwd())) 194 | 195 | unittest.TestLoader.sortTestMethodsUsing = None 196 | suite = unittest.TestLoader().loadTestsFromTestCase(test_RMtools) 197 | unittest.TextTestRunner(verbosity=2).run(suite) 198 | -------------------------------------------------------------------------------- /tests/RMclean1D_referencevalues.json: -------------------------------------------------------------------------------- 1 | {"dFDFcorMAD": 0.006610545329749584, "phiPeakPIfit_rm2": 200.32874264404734, "dPhiPeakPIfit_rm2": 0.24808332381938963, "ampPeakPIfit": 0.6996190479469143, "ampPeakPIfitEff": 0.6995619709029499, "dAmpPeakPIfit": 0.005892556709695188, "snrPIfit": 118.72928550620676, "indxPeakPIfit": 266.77624754801576, "peakFDFimagFit": -0.5592812449927833, "peakFDFrealFit": 0.4189165597767205, "polAngleFit_deg": 153.41709453229126, "dPolAngleFit_deg": 0.24128747709042306, "polAngle0Fit_deg": 48.02926667512406, "dPolAngle0Fit_deg": 1.3728252062919697, "cleanCutoff": 0.017677670129085565, "nIter": 11, 
"mom2CCFDF": 0.9521095156669617, "dPhiObserved_rm2": 0.27831143571313965, "dAmpObserved": 0.006610545329749584, "dPolAngleFitObserved_deg": 0.27068753810121443, "dPolAngleFit0Observed_deg": 1.5400993031859858} 2 | -------------------------------------------------------------------------------- /tests/RMsynth1D_referencevalues.json: -------------------------------------------------------------------------------- 1 | {"dFDFcorMAD": 0.01995876058936119, "phiPeakPIfit_rm2": 200.29003676576465, "dPhiPeakPIfit_rm2": 0.24806386441280062, "ampPeakPIfit": 0.6996739296667636, "ampPeakPIfitEff": 0.6996168571002304, "dAmpPeakPIfit": 0.005892556709695188, "snrPIfit": 118.73859924259541, "indxPeakPIfit": 266.7633455885882, "peakFDFimagFit": -0.5593258116763724, "peakFDFrealFit": 0.41904044934377743, "polAngleFit_deg": 153.42006391662085, "dPolAngleFit_deg": 0.2412685507432214, "polAngle0Fit_deg": 48.26126698321059, "dPolAngle0Fit_deg": 1.37271752326252, "Ifreq0": 1.0, "polyCoeffs": "0.0,0.0,0.0,0.0,0.0,1.0", "polyCoefferr": "0.0,0.0,0.0,19.157488,0.66518927,0.088204324", "polyOrd": 2, "IfitStat": 4, "IfitChiSqRed": 0.0, "fit_function": "log", "lam0Sq_m2": 0.10327484831236765, "freq0_Hz": 932874912.6426204, "fwhmRMSF": 58.90951156616211, "dQU": 0.10000000149011612, "dFDFth": 0.005892556709695188, "units": "Jy/beam", "min_freq": 800000000.0, "max_freq": 1088000000.0, "N_channels": 288, "median_channel_width": 1003456.0, "fracPol": 0.6996168571002304, "sigmaAddQ": 0.3932188246076685, "dSigmaAddMinusQ": 0.2858783770995449, "dSigmaAddPlusQ": 0.13642421257417375, "sigmaAddU": 0.22665457086880808, "dSigmaAddMinusU": 0.20935913039582774, "dSigmaAddPlusU": 0.20587796377894235, "sigmaAddC": 0.41018026613255093, "dSigmaAddMinusC": 0.1133475584572689, "dSigmaAddPlusC": 0.08594851594895281} 2 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CIRADA-Tools/RM-Tools/9ee9a375d719cb623477f658696ef3d30ebe86df/tests/__init__.py -------------------------------------------------------------------------------- /tests/cli_test.py: -------------------------------------------------------------------------------- 1 | """Tests for CLI.""" 2 | 3 | import subprocess 4 | import unittest 5 | 6 | 7 | class test_cli(unittest.TestCase): 8 | def test_cli_rmsynth1d(self): 9 | """Tests that the CLI `rmsynth1d` runs.""" 10 | res = subprocess.run(["rmsynth1d", "--help"]) 11 | self.assertEqual(res.returncode, 0) 12 | 13 | def test_cli_rmsynth3d(self): 14 | """Tests that the CLI `rmsynth3d` runs.""" 15 | res = subprocess.run(["rmsynth3d", "--help"]) 16 | self.assertEqual(res.returncode, 0) 17 | 18 | def test_cli_rmclean1d(self): 19 | """Tests that the CLI `rmclean1d` runs.""" 20 | res = subprocess.run(["rmclean1d", "--help"]) 21 | self.assertEqual(res.returncode, 0) 22 | 23 | def test_cli_rmclean3d(self): 24 | """Tests that the CLI `rmclean3d` runs.""" 25 | res = subprocess.run(["rmclean3d", "--help"]) 26 | self.assertEqual(res.returncode, 0) 27 | 28 | 29 | if __name__ == "__main__": 30 | unittest.main() 31 | -------------------------------------------------------------------------------- /tests/helper_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Wed May 8 10:10:36 2024 5 | 6 | Testing tools for confirming the basic functions of the various helper tools. 
7 | In the past, changes to RM-Tools has broken some of these, because only the 8 | core functionality has had tests to confirm function after code changes. 9 | 10 | The initial version of these tests only really tests that they run, but not 11 | that the outputs are correct or that the functions within have performed as 12 | expected. 13 | 14 | List of helper functions: 15 | rmtools_testdata1D 16 | rmtools_testdata3D 17 | rmtools_calcRMSF 18 | rmtools_freqfile 19 | rmtools_extractregion 20 | rmtools_createchunks 21 | rmtools_assemblechunks 22 | rmtools_fitIcube 23 | rmtools_3DIrescale 24 | rmtools_peakfitcube 25 | rmtools_bwpredict 26 | rmtools_bwdepol 27 | 28 | 29 | @author: cvaneck 30 | """ 31 | 32 | import os 33 | import shlex 34 | import shutil 35 | import subprocess 36 | import unittest 37 | from pathlib import Path 38 | 39 | import astropy.io.fits as pf 40 | import numpy as np 41 | 42 | from RMutils.util_testing import create_1D_data, create_3D_data 43 | 44 | TEST_PATH = Path(__file__).parent.absolute() 45 | ONED_PATH = TEST_PATH / "simdata" / "1D" 46 | THREED_PATH = TEST_PATH / "simdata" / "3D" 47 | 48 | 49 | def create_3D_stokesI(freq_arr, N_side=100): 50 | from scipy.ndimage import gaussian_filter 51 | 52 | from RMutils.util_misc import powerlaw_poly5 53 | 54 | if not (TEST_PATH / "simdata/3D").exists(): 55 | (TEST_PATH / "simdata/3D").mkdir(parents=True) 56 | 57 | I_cube = np.zeros((N_side, 2 * N_side, freq_arr.size)) 58 | model_parms = [0, 0, 0, 0, -1, 10000] 59 | source_I = powerlaw_poly5(model_parms)(freq_arr / 940e6) 60 | I_cube[N_side // 4, N_side // 2, :] += source_I 61 | 62 | beam_sigma = 5 63 | new_I_cube = gaussian_filter(I_cube, (beam_sigma, beam_sigma, 0), mode="wrap") 64 | 65 | # Output for 3D RM-tools: 66 | header = pf.Header() 67 | header["BITPIX"] = -32 68 | header["NAXIS"] = 3 69 | header["NAXIS1"] = N_side 70 | header["NAXIS2"] = N_side 71 | header["NAXIS3"] = freq_arr.size 72 | header["CTYPE1"] = "RA---SIN" 73 | header["CRVAL1"] = 90 74 | header["CDELT1"] = -1.0 / 3600.0 75 | header["CRPIX1"] = 1 76 | header["CUNIT1"] = "deg" 77 | 78 | header["CTYPE2"] = "DEC--SIN" 79 | header["CRVAL2"] = 0 80 | header["CDELT2"] = 1.0 / 3600.0 81 | header["CRPIX2"] = 1 82 | header["CUNIT2"] = "deg" 83 | 84 | header["CTYPE3"] = "FREQ" 85 | header["CRVAL3"] = freq_arr[0] 86 | header["CDELT3"] = freq_arr[1] - freq_arr[0] 87 | header["CRPIX3"] = 1 88 | header["CUNIT3"] = "Hz" 89 | 90 | header["BUNIT"] = "mJy/beam" 91 | pf.writeto( 92 | TEST_PATH / "simdata/3D/I_cube.fits", 93 | np.transpose(new_I_cube), 94 | header=header, 95 | overwrite=True, 96 | ) 97 | 98 | 99 | class test_RMtools(unittest.TestCase): 100 | def setUp(self): 101 | 102 | # Clean up directory if it exists (to keep test clean). 
103 | if (TEST_PATH / "simdata").exists(): 104 | shutil.rmtree(TEST_PATH / "simdata") 105 | 106 | (TEST_PATH / "simdata").mkdir(parents=True) 107 | (TEST_PATH / "simdata/1D").mkdir(parents=True) 108 | (TEST_PATH / "simdata/3D").mkdir(parents=True) 109 | 110 | N_chan = 288 111 | self.freq_arr = np.linspace(800e6, 1088e6, num=N_chan) 112 | 113 | def test_testdata3D(self): 114 | """Test that rmtools_testdata3D runs and outputs.""" 115 | 116 | res = subprocess.run( 117 | [ 118 | "rmtools_testdata3D", 119 | f"{TEST_PATH}/../RMtools_3D/catalogue.csv", 120 | f"{THREED_PATH}", 121 | ] 122 | ) 123 | self.assertEqual(res.returncode, 0, "testdata3D fails to run to completion.") 124 | 125 | self.assertTrue( 126 | (TEST_PATH / "simdata/3D/StokesU.fits").exists(), 127 | "testdata3D not outputting files as expected.", 128 | ) 129 | 130 | def test_testdata1D(self): 131 | """Test that rmtools_testdata1D runs and outputs.""" 132 | 133 | res = subprocess.run( 134 | [ 135 | "rmtools_testdata1D", 136 | f"{TEST_PATH}/../RMtools_3D/catalogue.csv", 137 | f"{ONED_PATH}", 138 | ] 139 | ) 140 | self.assertEqual(res.returncode, 0, "testdata1D fails to run to completion.") 141 | 142 | self.assertTrue( 143 | (ONED_PATH / "Source8.dat").exists(), 144 | "testdata1D not outputting files as expected.", 145 | ) 146 | 147 | def test_calcRMSF(self): 148 | """Test that rmtools_calcRMSF runs as expected.""" 149 | 150 | if not ONED_PATH.exists(): 151 | ONED_PATH.mkdir(parents=True) 152 | 153 | res = subprocess.run( 154 | shlex.split( 155 | f"rmtools_calcRMSF -f 800e6 1088e6 1e6 -s {TEST_PATH}/rmsf.png" 156 | ), 157 | capture_output=True, 158 | text=True, 159 | ) 160 | self.assertEqual(res.returncode, 0, "calcRMSF fails to run to completion.") 161 | 162 | output = res.stdout.splitlines() 163 | # Testing some output values; hardcoding expected values. 
164 | self.assertEqual( 165 | output[1].split()[3], 166 | "59.04", 167 | "calcRMSF values have changed from expectations", 168 | ) 169 | self.assertEqual( 170 | output[4].split()[3], 171 | "8093", 172 | "calcRMSF values have changed from expectations", 173 | ) 174 | 175 | def test_freqfile(self): 176 | """Test that rmtools_freqfile runs as expected.""" 177 | if not (TEST_PATH / "simdata/3D/Q_cube.fits").exists(): 178 | create_3D_data(self.freq_arr, THREED_PATH) 179 | 180 | res = subprocess.run( 181 | shlex.split( 182 | f"rmtools_freqfile {TEST_PATH}/simdata/3D/Q_cube.fits {TEST_PATH}/simdata/freqfile.dat" 183 | ) 184 | ) 185 | self.assertEqual(res.returncode, 0, "freqfile fails to run to completion.") 186 | 187 | expected_array = np.loadtxt(f"{TEST_PATH}/simdata/3D/freqHz.txt") 188 | output_array = np.loadtxt(f"{TEST_PATH}/simdata/freqfile.dat") 189 | 190 | self.assertIsNone( 191 | np.testing.assert_array_almost_equal_nulp( 192 | expected_array.astype("float32"), output_array.astype("float32"), nulp=1 193 | ) 194 | ) 195 | 196 | def test_extractregion(self): 197 | """Test that rmtools_extractregion runs as expected.""" 198 | if not (TEST_PATH / "simdata/3D/Q_cube.fits").exists(): 199 | create_3D_data(self.freq_arr, THREED_PATH) 200 | 201 | res = subprocess.run( 202 | shlex.split( 203 | f"rmtools_extractregion {TEST_PATH}/simdata/3D/Q_cube.fits {TEST_PATH}/simdata/3D/Q_cutout.fits 30 50 40 60 -z 12 24" 204 | ) 205 | ) 206 | self.assertEqual(res.returncode, 0, "extractregion fails to run to completion.") 207 | 208 | def test_createchunks(self): 209 | """Test that rmtools_createchunks runs as expected.""" 210 | if not (TEST_PATH / "simdata/3D/Q_cube.fits").exists(): 211 | create_3D_data(self.freq_arr, THREED_PATH) 212 | 213 | res = subprocess.run( 214 | shlex.split(f"rmtools_createchunks {TEST_PATH}/simdata/3D/Q_cube.fits 2099") 215 | ) 216 | self.assertEqual(res.returncode, 0, "createchunks fails to run to completion.") 217 | 218 | self.assertTrue( 219 | (TEST_PATH / "simdata/3D/Q_cube.C0.fits").exists(), 220 | "createchunks does not create first chunk.", 221 | ) 222 | self.assertTrue( 223 | (TEST_PATH / "simdata/3D/Q_cube.C4.fits").exists(), 224 | "createchunks does not create final chunk.", 225 | ) 226 | 227 | def test_assemblechunks(self): 228 | """Test that rmtools_assemblechunks runs as expected.""" 229 | if not (TEST_PATH / "simdata/3D/Q_cube.fits").exists(): 230 | create_3D_data(self.freq_arr, THREED_PATH) 231 | 232 | if not (TEST_PATH / "simdata/3D/Q_cube.C4.fits").exists(): 233 | res = subprocess.run( 234 | shlex.split( 235 | f"rmtools_createchunks {TEST_PATH}/simdata/3D/Q_cube.fits 2099" 236 | ) 237 | ) 238 | 239 | res = subprocess.run( 240 | shlex.split( 241 | f"rmtools_assemblechunks {TEST_PATH}/simdata/3D/Q_cube.C0.fits -f {TEST_PATH}/simdata/3D/Q_assembled.fits" 242 | ) 243 | ) 244 | self.assertEqual( 245 | res.returncode, 0, "assemblechunks fails to run to completion." 
246 | ) 247 | 248 | assembled_data = pf.open(f"{TEST_PATH}/simdata/3D/Q_assembled.fits") 249 | original_data = pf.open(f"{TEST_PATH}/simdata/3D/Q_cube.fits") 250 | 251 | self.assertIsNone( 252 | np.testing.assert_array_almost_equal_nulp( 253 | assembled_data[0].data, original_data[0].data, nulp=1 254 | ), 255 | "Pixel values not roundtripped identically.", 256 | ) 257 | 258 | self.assertEqual( 259 | assembled_data[0].header.tostring(), 260 | original_data[0].header.tostring(), 261 | "Headers don't round trip identically.", 262 | ) 263 | 264 | def test_fitIcube(self): 265 | """Test that rmtools_fitIcube runs as expected.""" 266 | if not (TEST_PATH / "simdata/3D/I_cube.fits").exists(): 267 | create_3D_stokesI(self.freq_arr) 268 | 269 | res = subprocess.run( 270 | shlex.split(f"rmtools_fitIcube {TEST_PATH}/simdata/3D/I_cube.fits") 271 | ) 272 | self.assertEqual(res.returncode, 0, "fitIcube fails to run to completion.") 273 | 274 | self.assertTrue( 275 | (TEST_PATH / "simdata/3D/coeff0.fits").exists(), "fitIcube outputs missing." 276 | ) 277 | self.assertTrue( 278 | (TEST_PATH / "simdata/3D/coeff0err.fits").exists(), 279 | "fitIcube outputs missing.", 280 | ) 281 | self.assertTrue( 282 | (TEST_PATH / "simdata/3D/noise.dat").exists(), "fitIcube outputs missing." 283 | ) 284 | self.assertTrue( 285 | (TEST_PATH / "simdata/3D/covariance.fits").exists(), 286 | "fitIcube outputs missing.", 287 | ) 288 | 289 | def test_3DIrescale(self): 290 | """Test that rmtools_fitIcube runs as expected.""" 291 | if not (TEST_PATH / "simdata/3D/I_cube.fits").exists(): 292 | create_3D_stokesI(self.freq_arr) 293 | 294 | if not (TEST_PATH / "simdata/3D/covariance.fits").exists(): 295 | res = subprocess.run( 296 | shlex.split(f"rmtools_fitIcube {TEST_PATH}/simdata/3D/I_cube.fits") 297 | ) 298 | 299 | res = subprocess.run( 300 | shlex.split( 301 | f"rmtools_3DIrescale {TEST_PATH}/simdata/3D/covariance.fits -f 950e6 -o {TEST_PATH}/simdata/3D/rescale" 302 | ) 303 | ) 304 | self.assertEqual(res.returncode, 0, "3DIrescale fails to run to completion.") 305 | 306 | def test_peakfitcube(self): 307 | """Test that rmtools_peakfitcube runs as expected.""" 308 | if not (TEST_PATH / "simdata/3D/Q_cube.fits").exists(): 309 | create_3D_data(self.freq_arr, THREED_PATH) 310 | res = subprocess.run( 311 | shlex.split( 312 | f"rmsynth3d {TEST_PATH}/simdata/3D/Q_cube.fits {TEST_PATH}/simdata/3D/Q_cube.fits {TEST_PATH}/simdata/3D/freqHz.txt -s 2" 313 | ) 314 | ) 315 | 316 | res = subprocess.run( 317 | shlex.split( 318 | f"rmtools_peakfitcube {TEST_PATH}/simdata/3D/FDF_real_dirty.fits {TEST_PATH}/simdata/3D/freqHz.txt {TEST_PATH}/simdata/3D/peakfit -p" 319 | ) 320 | ) 321 | self.assertEqual(res.returncode, 0, "peakfitcube fails to run to completion.") 322 | 323 | self.assertTrue( 324 | (TEST_PATH / "simdata/3D/peakfitphiPeakPIfit_rm2.fits").exists(), 325 | "peakfitcube outputs missing.", 326 | ) 327 | 328 | def test_bwpredict(self): 329 | """Test that rmtools_bwpredict runs as expected.""" 330 | if not (TEST_PATH / "simdata/3D/Q_cube.fits").exists(): 331 | create_3D_data(self.freq_arr, THREED_PATH) 332 | 333 | res = subprocess.run( 334 | shlex.split( 335 | f"rmtools_bwpredict {TEST_PATH}/simdata/3D/freqHz.txt -f {TEST_PATH}/simdata/test.png" 336 | ) 337 | ) 338 | self.assertEqual(res.returncode, 0, "bwpredict fails to run to completion.") 339 | 340 | def test_bwdepol(self): 341 | """Test that rmtools_bwdepol runs as expected.""" 342 | if not (TEST_PATH / "simdata/1D/simsource.dat").exists(): 343 | create_1D_data(self.freq_arr, TEST_PATH, 
ONED_PATH) 344 | 345 | res = subprocess.run( 346 | shlex.split(f"rmtools_bwdepol {TEST_PATH}/simdata/1D/simsource.dat -s 3 -S") 347 | ) 348 | self.assertEqual(res.returncode, 0, "bwdepol fails to run to completion.") 349 | self.assertTrue( 350 | (TEST_PATH / "simdata/1D/simsource_RMsynth.dat").exists(), 351 | "bwdepol outputs missing.", 352 | ) 353 | 354 | 355 | if __name__ == "__main__": 356 | # os.chdir(os.path.dirname(os.path.realpath(__file__))) 357 | 358 | print("\nUnit tests running.") 359 | print("Test data inputs and outputs can be found in {}\n\n".format(os.getcwd())) 360 | 361 | unittest.TestLoader.sortTestMethodsUsing = None 362 | suite = unittest.TestLoader().loadTestsFromTestCase(test_RMtools) 363 | unittest.TextTestRunner(verbosity=2).run(suite) 364 | -------------------------------------------------------------------------------- /tests/import_test.py: -------------------------------------------------------------------------------- 1 | """Tests for importing modules.""" 2 | 3 | import unittest 4 | 5 | 6 | class test_imports(unittest.TestCase): 7 | def test_imports(self): 8 | """Tests that package imports are working correctly.""" 9 | # This is a bit of a weird test, but package imports 10 | # have not worked before. 11 | modules = [ 12 | "RMtools_1D.calculate_RMSF", 13 | "RMtools_1D.do_QUfit_1D_mnest", 14 | "RMtools_1D.do_RMclean_1D", 15 | "RMtools_1D.do_RMsynth_1D_fromFITS", 16 | "RMtools_1D.do_RMsynth_1D", 17 | "RMtools_3D.make_freq_file", 18 | "RMtools_1D.mk_test_ascii_data", 19 | "RMtools_3D.assemble_chunks", 20 | "RMtools_3D.create_chunks", 21 | "RMtools_3D.do_fitIcube", 22 | "RMtools_3D.do_RMclean_3D", 23 | "RMtools_3D.do_RMsynth_3D", 24 | "RMtools_3D.extract_region", 25 | "RMtools_3D.mk_test_cube_data", 26 | "RMutils.mpfit", 27 | "RMutils.normalize", 28 | "RMutils.util_FITS", 29 | "RMutils.util_misc", 30 | "RMutils.util_plotFITS", 31 | "RMutils.util_plotTk", 32 | "RMutils.util_rec", 33 | "RMutils.util_RM", 34 | ] 35 | for module in modules: 36 | __import__(module) 37 | 38 | 39 | if __name__ == "__main__": 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /tests/test_stokesIfit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | """Tests for the Stokes I fitting in RMtools_3D""" 4 | 5 | import logging 6 | import os 7 | 8 | import numpy as np 9 | from astropy.io import fits 10 | 11 | # rmtools_fitIcube 12 | from RMtools_3D.do_fitIcube import make_model_I, open_datacube 13 | from RMtools_3D.make_freq_file import get_freq_array 14 | 15 | # import RMtools_3D 16 | # print(f"Using version {RMtools_3D.__file__}") 17 | 18 | 19 | logger = logging.getLogger(__name__) 20 | logger.setLevel(logging.INFO) 21 | 22 | 23 | def make_fake_StokesIcube(filename="random_Icube.fits"): 24 | # Create random data cube, 144 channels 102,102 pixels 25 | data = np.random.rand(144, 1, 102, 102).astype(np.float32) 26 | 27 | # Create FITS header 28 | header = fits.Header() 29 | header["SIMPLE"] = True 30 | header["BITPIX"] = -32 31 | header["NAXIS"] = 4 32 | header["NAXIS1"] = 102 33 | header["NAXIS2"] = 102 34 | header["NAXIS3"] = 1 35 | header["NAXIS4"] = 144 36 | header["WCSAXES"] = 4 37 | header["CRPIX1"] = -139869.0021857 38 | header["CRPIX2"] = -94562.00147332 39 | header["CRPIX3"] = 1.0 40 | header["CRPIX4"] = 1.0 41 | header["PC1_1"] = 0.7071067811865 42 | header["PC1_2"] = 0.7071067811865 43 | header["PC2_1"] = -0.7071067811865 44 | 
header["PC2_2"] = 0.7071067811865 45 | header["CDELT1"] = -0.0009710633743375 46 | header["CDELT2"] = 0.0009710633743375 47 | header["CDELT3"] = 1.0 48 | header["CDELT4"] = 1000000.0 49 | header["CUNIT1"] = "deg" 50 | header["CUNIT2"] = "deg" 51 | header["CUNIT4"] = "Hz" 52 | header["CTYPE1"] = "RA---HPX" 53 | header["CTYPE2"] = "DEC--HPX" 54 | header["CTYPE3"] = "STOKES" 55 | header["CTYPE4"] = "FREQ" 56 | header["CRVAL1"] = 0.0 57 | header["CRVAL2"] = 0.0 58 | header["CRVAL3"] = 1.0 59 | header["CRVAL4"] = 1295990740.741 60 | header["PV2_1"] = 4.0 61 | header["PV2_2"] = 3.0 62 | header["LONPOLE"] = 0.0 63 | header["LATPOLE"] = 90.0 64 | header["RESTFRQ"] = 1420405751.786 65 | header["RADESYS"] = "FK5" 66 | header["EQUINOX"] = 2000.0 67 | header["SPECSYS"] = "TOPOCENT" 68 | header["BMAJ"] = 0.005555555555556 69 | header["BMIN"] = 0.005555555555556 70 | header["BPA"] = 0.0 71 | header["BUNIT"] = "JY/BEAM" 72 | header["HISTORY"] = "RANDOM FITS FILE FOR TESTING" 73 | 74 | # Create PrimaryHDU object 75 | hdu = fits.PrimaryHDU(data=data, header=header) 76 | # Write the FITS file 77 | hdu.writeto(filename, overwrite=True) 78 | print(f"Random FITS cube created: {filename}") 79 | 80 | return filename 81 | 82 | 83 | def cleanup(outDir, prefixOut, polyOrd): 84 | """ 85 | Cleanup files that are made by make_model_I 86 | """ 87 | os.system("rm random_Icube.fits") 88 | 89 | for i in range(np.abs(polyOrd) + 1): 90 | outname = os.path.join(outDir, prefixOut + "coeff" + str(i) + ".fits") 91 | os.system(f"rm {outname}") 92 | 93 | outname = os.path.join(outDir, prefixOut + "coeff" + str(i) + "err.fits") 94 | os.system(f"rm {outname}") 95 | 96 | MaskfitsFile = os.path.join(outDir, prefixOut + "mask.fits") 97 | os.system(f"rm {MaskfitsFile}") 98 | 99 | fitsModelFile = os.path.join(outDir, prefixOut + "model.i.fits") 100 | os.system(f"rm {fitsModelFile}") 101 | 102 | noisefile = os.path.join(outDir, prefixOut + "noise.dat") 103 | os.system(f"rm {noisefile}") 104 | 105 | outname = os.path.join(outDir, prefixOut + "reffreq.fits") 106 | os.system(f"rm {outname}") 107 | 108 | outname = os.path.join(outDir, prefixOut + "covariance.fits") 109 | os.system(f"rm {outname}") 110 | 111 | 112 | def test_stokesIfit_with_without_verbose(): 113 | """ 114 | Testing RMtools_3D/do_fitIcube.py with and without verbose 115 | """ 116 | I_filename = make_fake_StokesIcube() 117 | 118 | datacube, headI = open_datacube(fitsI=I_filename, verbose=False) 119 | # Deriving frequencies from the fits header.") 120 | freqArr_Hz = get_freq_array(I_filename) 121 | 122 | prefixOut = "" 123 | outDir = "./" 124 | polyOrd = 2 125 | 126 | logger.info("Running make_RMtools_3D.do_fitIcube.make_model_I with verbose=True") 127 | # Run polynomial fitting on the spectra with verbose=T 128 | make_model_I( 129 | datacube=datacube, 130 | header=headI, 131 | freqArr_Hz=freqArr_Hz, 132 | polyOrd=polyOrd, 133 | prefixOut=prefixOut, 134 | outDir=outDir, 135 | nBits=32, 136 | threshold=-3, 137 | apply_mask=False, 138 | num_cores=2, 139 | chunk_size=1, 140 | verbose=True, 141 | fit_function="log", 142 | ) 143 | logger.info("Finished succesfully") 144 | 145 | logger.info("Running make_RMtools_3D.do_fitIcube.make_model_I with verbose=False") 146 | # Run polynomial fitting on the spectra with verbose=F 147 | make_model_I( 148 | datacube=datacube, 149 | header=headI, 150 | freqArr_Hz=freqArr_Hz, 151 | polyOrd=polyOrd, 152 | prefixOut=prefixOut, 153 | outDir=outDir, 154 | nBits=32, 155 | threshold=-3, 156 | apply_mask=False, 157 | num_cores=2, 158 | chunk_size=1, 159 
| verbose=False, 160 | fit_function="log", 161 | ) 162 | logger.info("Fitting finished successfully") 163 | logger.info("Removing output files...") 164 | cleanup(outDir, prefixOut, polyOrd) 165 | 166 | logger.info("Stokes I fitting test finished successfully") 167 | 168 | 169 | if __name__ == "__main__": 170 | test_stokesIfit_with_without_verbose() 171 | --------------------------------------------------------------------------------
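
A note on the random cube used in tests/test_stokesIfit.py above: because the input Stokes I cube is filled with np.random.rand, the fitted outputs differ from run to run. Below is a minimal, illustrative sketch (not part of the repository) of how the cube generation could be made deterministic, in the same spirit as the fixed random seeds used elsewhere in the test suite; the seed value 12345 is arbitrary.

    import numpy as np

    # Seeding NumPy's generator makes the fake Stokes I cube reproducible,
    # so repeated test runs fit identical data.
    rng = np.random.default_rng(12345)
    data = rng.random((144, 1, 102, 102)).astype(np.float32)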