├── docs ├── caldetector1 │ ├── index.rst │ ├── rscd.rst │ ├── lastframe.rst │ ├── refpix.rst │ ├── ramp_fit.rst │ ├── jump.rst │ ├── superbias.rst │ ├── dq_init.rst │ ├── saturation.rst │ ├── linearity.rst │ └── dark_current.rst ├── _templates │ └── autosummary │ │ ├── base.rst │ │ ├── class.rst │ │ └── module.rst ├── calibration-pipeline-testing-tool │ ├── index.rst │ └── developer.rst ├── index.rst ├── Makefile ├── make.bat └── conf.py ├── caltest ├── test_caldetector1 │ ├── __init__.py │ ├── test_rscd.py │ ├── test_lastframe.py │ ├── test_dq_init.py │ ├── test_saturation.py │ ├── test_ramp_fit.py │ ├── test_superbias.py │ ├── test_dark_current.py │ ├── test_linearity.py │ ├── test_persistence.py │ ├── test_refpix.py │ ├── test_jump.py │ └── test_assign_wcs.py ├── data │ └── README.rst ├── __init__.py ├── runner.py ├── utils.py ├── conftest.py └── _astropy_init.py ├── readthedocs.yml ├── .gitmodules ├── licenses ├── README.rst └── LICENSE.rst ├── .rtd-environment.yml ├── README.rst ├── .gitignore ├── MANIFEST.in ├── setup.cfg ├── setup.py ├── .travis.yml ├── ez_setup.py └── ah_bootstrap.py /docs/caldetector1/index.rst: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /readthedocs.yml: -------------------------------------------------------------------------------- 1 | conda: 2 | file: .rtd-environment.yml 3 | 4 | python: 5 | setup_py_install: true 6 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "astropy_helpers"] 2 | url = https://github.com/astropy/astropy-helpers.git 3 | path = astropy_helpers 4 | branch = refs/heads/v2.0 5 | -------------------------------------------------------------------------------- /licenses/README.rst: -------------------------------------------------------------------------------- 1 | Licenses 2 | ======== 3 | 4 | This directory holds license and credit information for the affiliated package, 5 | works the affiliated package is derived from, and/or datasets. 6 | -------------------------------------------------------------------------------- /docs/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/base.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /docs/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/class.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. 
#} -------------------------------------------------------------------------------- /docs/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {% extends "autosummary_core/module.rst" %} 2 | {# The template this is inherited from is in astropy/sphinx/ext/templates/autosummary_core. If you want to modify this template, it is strongly recommended that you still inherit from the astropy template. #} -------------------------------------------------------------------------------- /caltest/data/README.rst: -------------------------------------------------------------------------------- 1 | Data directory 2 | ============== 3 | 4 | This directory contains data files included with the affiliated package source 5 | code distribution. Note that this is intended only for relatively small files; 6 | large files should be externally hosted and downloaded as needed. 7 | 8 | -------------------------------------------------------------------------------- /.rtd-environment.yml: -------------------------------------------------------------------------------- 1 | name: caltest 2 | 3 | channels: 4 | - astropy 5 | - http://ssb.stsci.edu/astroconda 6 | - http://ssb.stsci.edu/conda-dev 7 | 8 | dependencies: 9 | - astropy 10 | - Cython 11 | - matplotlib 12 | - numpy 13 | - stsci_sphinx_theme 14 | - jwst=0.7.8rc2 15 | # - pip: 16 | # - stsci 17 | -------------------------------------------------------------------------------- /docs/calibration-pipeline-testing-tool/index.rst: -------------------------------------------------------------------------------- 1 | *********************************************** 2 | Calibration Pipeline Testing Tool Documentation 3 | *********************************************** 4 | 5 | This is the documentation for calibration-pipeline-testing-tool. 6 | 7 | Reference/API 8 | ============= 9 | 10 | .. automodapi:: caltest 11 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | calibration pipeline testing tool 2 | --------------------------------- 3 | 4 | .. image:: http://img.shields.io/badge/powered%20by-AstroPy-orange.svg?style=flat 5 | :target: http://www.astropy.org 6 | :alt: Powered by Astropy Badge 7 | 8 | 9 | 10 | 11 | License 12 | ------- 13 | 14 | This project is Copyright (c) Matthew Hill and licensed under the terms of the BSD 3-Clause license. See the licenses folder for more information. 15 | -------------------------------------------------------------------------------- /caltest/__init__.py: -------------------------------------------------------------------------------- 1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 | 3 | """ 4 | This is an Astropy affiliated package. 5 | """ 6 | 7 | # Affiliated packages may add whatever they like to this file, but 8 | # should keep this content at the top. 9 | # ---------------------------------------------------------------------------- 10 | from ._astropy_init import * 11 | # ---------------------------------------------------------------------------- 12 | 13 | # if not _ASTROPY_SETUP_: 14 | # # For egg_info test builds to pass, put package imports here. 
15 | # 16 | # from .example_mod import * 17 | 18 | -------------------------------------------------------------------------------- /caltest/runner.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import argparse 3 | import os 4 | 5 | def run_test(): 6 | parser = argparse.ArgumentParser(description="run test") 7 | parser.add_argument('--config', help='configuration file') 8 | # parser.add_argument('--output-dir') 9 | # parser.add_argument('--save_pipeline_output', help='') 10 | args = parser.parse_args() 11 | config = os.path.abspath(args.config) 12 | # try: 13 | # os.mkdir(args.output_dir) 14 | # old_dir = os.path.abspath(os.curdir) 15 | # os.chdir(args.output_dir) 16 | # except FileExistsError: 17 | # print("'{}' already exists pick a different output directory".format(args.output_dir)) 18 | # return 19 | 20 | pytest_args = ['-v'] 21 | pytest_args += [os.path.dirname(__file__)] 22 | pytest_args += ['--config', config] 23 | pytest_args += ['--html', 'summary.html', '--self-contained-html'] 24 | pytest.main(pytest_args) 25 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_rscd.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from jwst.rscd import RSCD_Step 3 | from jwst import datamodels 4 | import pytest 5 | from astropy.io import fits 6 | import os 7 | 8 | @pytest.fixture(scope='module') 9 | def fits_output(fits_input): 10 | fname = fits_input[0].header['filename'].replace('.fits', '_rscdstep.fits') 11 | yield fits.open(fname) 12 | # delete the output FITS file after this module is finished 13 | os.remove(fname) 14 | 15 | def test_rscd_step(fits_input): 16 | """Make sure the RSCD_Step runs without error.""" 17 | fname = fits_input[0].header['filename'].replace('.fits', '_rscdstep.fits') 18 | RSCD_Step.call(datamodels.open(fits_input), output_file=fname, 19 | save_results=True) 20 | 21 | def test_first_integration(fits_input, fits_output): 22 | """check that nothing changes in the first integration""" 23 | 24 | assert np.all(fits_input['SCI'].data[0] == fits_output['SCI'].data[0]) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.py[cod] 3 | *.a 4 | *.o 5 | *.so 6 | __pycache__ 7 | 8 | # Ignore .c files by default to avoid including generated code. If you want to 9 | # add a non-generated .c extension, use `git add -f filename.c`. 
10 | *.c 11 | 12 | # Other generated files 13 | */version.py 14 | */cython_version.py 15 | htmlcov 16 | .coverage 17 | MANIFEST 18 | .ipynb_checkpoints 19 | 20 | # Sphinx 21 | docs/api 22 | docs/_build 23 | 24 | # Eclipse editor project files 25 | .project 26 | .pydevproject 27 | .settings 28 | 29 | # Pycharm editor project files 30 | .idea 31 | 32 | # Floobits project files 33 | .floo 34 | .flooignore 35 | 36 | # Packages/installer info 37 | *.egg 38 | *.egg-info 39 | dist 40 | build 41 | eggs 42 | parts 43 | bin 44 | var 45 | sdist 46 | develop-eggs 47 | .installed.cfg 48 | distribute-*.tar.gz 49 | 50 | # Other 51 | .cache 52 | .tox 53 | .*.sw[op] 54 | *~ 55 | .project 56 | .pydevproject 57 | .settings 58 | 59 | # Mac OSX 60 | .DS_Store 61 | -------------------------------------------------------------------------------- /docs/caldetector1/rscd.rst: -------------------------------------------------------------------------------- 1 | ******************************************* 2 | Reset Switch Charge Decay (RSCD) Correction 3 | ******************************************* 4 | 5 | This step corrects for the slow adjustment of the reset FET to the asymptotic level after reset.   6 | The effect appears as a hook over the first ~5 frames and employs a double exponential fit. 7 | For more details on this step refer to the JWST Science Pipelines Documentation at 8 | http://jwst-pipeline.readthedocs.io/en/latest/jwst/rscd 9 | 10 | Test Requirements 11 | ================= 12 | ====================================================== =============================================================== 13 | Requirement Fulfilled by 14 | ====================================================== =============================================================== 15 | Make sure the RSCD_Step runs without error. `~caltest.test_caldetector1.test_rscd.test_rscd_step` 16 | Check that nothing changes in the first integration. 
`~caltest.test_caldetector1.test_rscd.test_first_integration` 17 | ====================================================== =============================================================== 18 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_lastframe.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from jwst.lastframe import LastFrameStep 3 | from jwst import datamodels 4 | import pytest 5 | from astropy.io import fits 6 | import os 7 | 8 | @pytest.fixture(scope='module') 9 | def fits_output(fits_input): 10 | fname = fits_input[0].header['filename'].replace('.fits', '_lastframestep.fits') 11 | yield fits.open(fname) 12 | # delete the output FITS file after this module is finished 13 | os.remove(fname) 14 | 15 | def test_lastframe_step(fits_input): 16 | """Make sure the LastFrameStep runs without error.""" 17 | fname = fits_input[0].header['filename'].replace('.fits', '_lastframestep.fits') 18 | LastFrameStep.call(datamodels.open(fits_input), output_file=fname, 19 | save_results=True) 20 | 21 | def test_lastframe_flagged(fits_input, fits_output): 22 | """ 23 | Check that the last group in GROUPDQ is flagged as DO_NOT_USE, 24 | unless there is only 1 group. 25 | """ 26 | 27 | if fits_output['SCI'].data.shape[1] > 1: 28 | assert np.all(fits_output['GROUPDQ'].data[:, -1, :, :] & (1 << 0)) 29 | else: 30 | assert np.all(fits_input['GROUPDQ'].data[:, -1, :, :] 31 | == fits_output['GROUPDQ'].data[:, -1, :, :]) -------------------------------------------------------------------------------- /docs/caldetector1/lastframe.rst: -------------------------------------------------------------------------------- 1 | ******************** 2 | Lastframe Correction 3 | ******************** 4 | 5 | This is a MIRI-specific correction that removes, from the last frame of an integration, an anomalous offset produced by 6 | signal coupling through the reset of the adjacent row pair. This effect manifests in the bias level and in any collected 7 | signal in the array when the last frame is read out. For more details on this step refer to the JWST Science Pipelines 8 | Documentation at http://jwst-pipeline.readthedocs.io/en/latest/jwst/lastframe/ 9 | 10 | The last frame correction may be subarray-dependent. There should be matching subarray last frame reference files in CRDS. 
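The flagging behaviour exercised by the tests below reduces to a single bitwise check on the last group. A minimal sketch of that expectation (not the pipeline implementation; it assumes a standard 4-D GROUPDQ array with the DO_NOT_USE flag in bit 0, as in ``test_lastframe_flagged``, and ``lastframe_output.fits`` is a placeholder name for a ``LastFrameStep`` product):

.. code-block:: python

    import numpy as np
    from astropy.io import fits

    with fits.open('lastframe_output.fits') as hdul:
        groupdq = hdul['GROUPDQ'].data  # shape: (nints, ngroups, ny, nx)
        if groupdq.shape[1] > 1:
            # every pixel of the last group should carry DO_NOT_USE (bit 0)
            assert np.all(groupdq[:, -1, :, :] & (1 << 0))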
11 | 12 | Test Requirements 13 | ================= 14 | ====================================================== ================================================================== 15 | Requirement Fulfilled by 16 | ====================================================== ================================================================== 17 | Check that the LastFrameStep runs without error `~caltest.test_caldetector1.test_lastframe.test_lastframe_step` 18 | Check that GROUPDQ lastframe is flagged as DO_NOT_USE `~caltest.test_caldetector1.test_lastframe.test_lastframe_flagged` 19 | 20 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include CHANGES.rst 3 | 4 | include ez_setup.py 5 | include ah_bootstrap.py 6 | include setup.cfg 7 | include calibration-pipeline-testing-tool/tests/coveragerc 8 | 9 | recursive-include calibration-pipeline-testing-tool *.pyx *.c *.pxd 10 | recursive-include docs * 11 | recursive-include licenses * 12 | recursive-include cextern * 13 | recursive-include scripts * 14 | 15 | prune build 16 | prune docs/_build 17 | prune docs/api 18 | 19 | 20 | # the next few stanzas are for astropy_helpers. It's derived from the 21 | # astropy_helpers/MANIFEST.in, but requires additional includes for the actual 22 | # package directory and egg-info. 23 | 24 | include astropy_helpers/README.rst 25 | include astropy_helpers/CHANGES.rst 26 | include astropy_helpers/LICENSE.rst 27 | recursive-include astropy_helpers/licenses * 28 | 29 | include astropy_helpers/ez_setup.py 30 | include astropy_helpers/ah_bootstrap.py 31 | 32 | recursive-include astropy_helpers/astropy_helpers *.py *.pyx *.c *.h *.rst 33 | recursive-include astropy_helpers/astropy_helpers.egg-info * 34 | # include the sphinx stuff with "*" because there are css/html/rst/etc. 35 | recursive-include astropy_helpers/astropy_helpers/sphinx * 36 | 37 | prune astropy_helpers/build 38 | prune astropy_helpers/astropy_helpers/tests 39 | 40 | 41 | global-exclude *.pyc *.o 42 | -------------------------------------------------------------------------------- /docs/caldetector1/refpix.rst: -------------------------------------------------------------------------------- 1 | ************************** 2 | Reference Pixel Correction 3 | ************************** 4 | 5 | This step corrects for drifts in the counts due to the readout electronics using the reference pixels. 6 | The correction is done for each pixel from amplifier-to-amplifier in a given group and from group-to-group. 7 | The drift per row or column (depending on the instrument) seems to be the same for all amplifiers and it seems 8 | to also depend on the row/column being odd or even. The algorithm is different for NIR and MIR detectors.  
9 | For more details on this step refer to the CalWG documentation and the JWST Science Pipelines Documentation 10 | at http://jwst-pipeline.readthedocs.io/en/latest/jwst/refpix/ 11 | 12 | Test Requirements 13 | ================= 14 | ====================================================== =================================================================== 15 | Requirement Fulfilled by 16 | ====================================================== =================================================================== 17 | Check that the RefPixStep runs without error `~caltest.test_caldetector1.test_refpix.test_refpix_step` 18 | Determine if the correction has been correctly applied `~caltest.test_caldetector1.test_refpix.test_linearity_correction` 19 | ====================================================== =================================================================== -------------------------------------------------------------------------------- /docs/caldetector1/ramp_fit.rst: -------------------------------------------------------------------------------- 1 | ************ 2 | Ramp Fitting 3 | ************ 4 | 5 | This step determines the mean count rate for each pixel by performing a linear fit to the jump-free ramp intervals for 6 | each pixel. This is done via a weighted ordinary least-squares method. For more information about this step refer to the 7 | JWST Science Calibration Pipeline documentation at http://jwst-pipeline.readthedocs.io/en/latest/jwst/ramp_fitting 8 | 9 | Jump-free intervals are determined from the GROUPDQ array of the input data set, under the assumption that the jump step 10 | has already flagged cosmic rays. Ramp intervals flagged as saturated are ignored. 11 | 12 | Test Requirements 13 | ================= 14 | =========================================================== ================================================================= 15 | Requirement Fulfilled by 16 | =========================================================== ================================================================= 17 | Make sure the RampFitStep runs without error. `~caltest.test_caldetector1.test_ramp_fit.test_ramp_fit_step` 18 | Check that the output slope is close to the input slope. `~caltest.test_caldetector1.test_ramp_fit.test_ramp_fit_slopes` 19 | Check that ERR combines the Poisson and read-noise variances. 
`~caltest.test_caldetector1.test_ramp_fit.test_err_combination` 20 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs 3 | build-dir = docs/_build 4 | all_files = 1 5 | 6 | [build_docs] 7 | source-dir = docs 8 | build-dir = docs/_build 9 | all_files = 1 10 | 11 | [upload_docs] 12 | upload-dir = docs/_build/html 13 | show-response = 1 14 | 15 | [tool:pytest] 16 | minversion = 3.0 17 | norecursedirs = build docs/_build 18 | doctest_plus = enabled 19 | addopts = -p no:warnings 20 | 21 | [ah_bootstrap] 22 | auto_use = True 23 | 24 | [pycodestyle] 25 | # E101 - mix of tabs and spaces 26 | # W191 - use of tabs 27 | # W291 - trailing whitespace 28 | # W292 - no newline at end of file 29 | # W293 - trailing whitespace 30 | # W391 - blank line at end of file 31 | # E111 - 4 spaces per indentation level 32 | # E112 - 4 spaces per indentation level 33 | # E113 - 4 spaces per indentation level 34 | # E901 - SyntaxError or IndentationError 35 | # E902 - IOError 36 | select = E101,W191,W291,W292,W293,W391,E111,E112,E113,E901,E902 37 | exclude = extern,sphinx,*parsetab.py 38 | 39 | [metadata] 40 | package_name = caltest 41 | description = calibration pipeline testing tool 42 | long_description = 43 | author = Matthew Hill 44 | author_email = mhill@stsci.edu 45 | license = BSD 3-Clause 46 | url = http://astropy.org 47 | edit_on_github = False 48 | github_project = STScI-MESA/calibration-pipeline-testing-tool 49 | # version should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386) 50 | version = 0.0.dev 51 | 52 | [entry_points] 53 | 54 | test_pipeline = caltest.runner:run_test 55 | 56 | -------------------------------------------------------------------------------- /licenses/LICENSE.rst: -------------------------------------------------------------------------------- 1 | Copyright (c) year, Matthew Hill 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright notice, this 10 | list of conditions and the following disclaimer in the documentation and/or 11 | other materials provided with the distribution. 12 | * Neither the name of the Astropy Team nor the names of its contributors may be 13 | used to endorse or promote products derived from this software without 14 | specific prior written permission. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 17 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 20 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 23 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
26 | -------------------------------------------------------------------------------- /docs/caldetector1/jump.rst: -------------------------------------------------------------------------------- 1 | ************** 2 | Jump Detection 3 | ************** 4 | 5 | This step detects jumps (positive or negative, presumably due to cosmic rays) within an exposure by looking for outliers 6 | in the up-the-ramp signal of all pixels in each integration within an exposure. When a jump is found, a cosmic-ray flag 7 | is inserted into the 4-D GROUPDQ array at the location corresponding to the coordinates, group, and integration of the 8 | affected pixel value. For more details refer to JWST-STScI-004355. 9 | 10 | The baseline pipeline uses a two-point difference method which is applied to all pixels in each integration. 11 | The positions of the outliers or jumps are stored in the GROUPDQ array of the data. Subarray data uses the same 12 | reference files as full frame. For more information about these Calibration Pipeline code refer to the JWST Science 13 | Calibration Pipeline documentation at http://jwst-pipeline.readthedocs.io/en/latest/jwst/jump 14 | 15 | Test Requirements 16 | ================= 17 | =========================================================== =============================================================== 18 | Requirement Fulfilled by 19 | =========================================================== =============================================================== 20 | Make sure the JumpStep runs without error. `~caltest.test_caldetector1.test_jump.test_jump_step` 21 | Check how well the Jump step detects injected cosmic rays. `~caltest.test_caldetector1.test_jump.test_jump_performance` 22 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_dq_init.py: -------------------------------------------------------------------------------- 1 | from ..utils import translate_dq 2 | 3 | import numpy as np 4 | import pytest 5 | from jwst.dq_init import DQInitStep 6 | from astropy.io import fits 7 | import os 8 | 9 | @pytest.fixture(scope='module') 10 | def fits_output(fits_input): 11 | fname = fits_input['PRIMARY'].header['filename'].replace('.fits', 12 | '_dqinitstep.fits') 13 | yield fits.open(fname) 14 | os.remove(fname) 15 | 16 | @pytest.fixture(scope='module') 17 | def fits_mask(fits_output): 18 | ref_path = fits_output['PRIMARY'].header['R_MASK'] 19 | ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/') 20 | return fits.open(ref_path) 21 | 22 | def test_dq_init_step(fits_input): 23 | """Make sure the DQInitStep runs without error.""" 24 | fname = fits_input['PRIMARY'].header['filename'].replace('.fits', 25 | '_dqinitstep.fits') 26 | DQInitStep.call(fits_input, output_file=fname, save_results=True) 27 | 28 | def test_pixeldq_initialization(fits_output, fits_mask): 29 | np.all(fits_output['PIXELDQ'].data == translate_dq(fits_mask)) 30 | 31 | def test_groupdq_initialization(fits_output): 32 | """ 33 | Check that the GROUPDQ extension is added to the data and all 34 | values are initialized to zero. 
35 | """ 36 | assert 'GROUPDQ' in fits_output 37 | assert np.all(fits_output['GROUPDQ'].data == 0) 38 | 39 | def test_err_initialization(fits_output): 40 | """Check that the error array is a 4-D array initialized to zero.""" 41 | assert 'ERR' in fits_output 42 | assert fits_output['ERR'].data.ndim == 4 43 | assert np.all(fits_output['ERR'].data == 0) 44 | 45 | # def test_dq_def_initialization(fits_output): 46 | # """ 47 | # Check that a DQ_DEF extension with the definition of DQ flags is present. 48 | # """ 49 | # assert 'DQ_DEF' in fits_output -------------------------------------------------------------------------------- /caltest/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities that are used for a number of different steps 3 | """ 4 | 5 | import numpy as np 6 | 7 | dq_dict = { 8 | 'DO_NOT_USE' : 0, 9 | 'SATURATED' : 1, 10 | 'JUMP_DET' : 2, 11 | 'DROPOUT' : 3, 12 | 'RESERVED' : 4, 13 | 'RESERVED' : 5, 14 | 'RESERVED' : 6, 15 | 'RESERVED' : 7, 16 | 'UNRELIABLE_ERROR' : 8, 17 | 'NON_SCIENCE' : 9, 18 | 'DEAD' : 10, 19 | 'HOT' : 11, 20 | 'WARM' : 12, 21 | 'LOW_QE' : 13, 22 | 'RC' : 14, 23 | 'TELEGRAPH' : 15, 24 | 'NONLINEAR' : 16, 25 | 'BAD_REF_PIXEL' : 17, 26 | 'NO_FLAT_FIELD' : 18, 27 | 'NO_GAIN_VALUE' : 19, 28 | 'NO_LIN_CORR' : 20, 29 | 'NO_SAT_CHECK' : 21, 30 | 'UNRELIABLE_BIAS' : 22, 31 | 'UNRELIABLE_DARK' : 23, 32 | 'UNRELIABLE_SLOPE' : 24, 33 | 'UNRELIABLE_FLAT' : 25, 34 | 'OPEN' : 26, 35 | 'ADJ_OPEN' : 27, 36 | 'UNRELIABLE_RESET' : 28, 37 | 'MSA_FAILED_OPEN' : 29, 38 | 'OTHER_BAD_PIXEL' : 30, 39 | } 40 | 41 | def get_pixeldq_bit(name): 42 | if name in dq_dict: 43 | return dq_dict[name] 44 | else: 45 | return 'N/A' 46 | 47 | def translate_dq(ref_hdul): 48 | 49 | dq = ref_hdul['DQ'].data.astype(np.uint32) 50 | expected_dq = np.zeros_like(dq) 51 | for row in ref_hdul['DQ_DEF'].data: 52 | try: 53 | # find which pixels have the bit set 54 | flagged = (np.bitwise_and(1, np.right_shift(dq, row['BIT']))) 55 | # shift them to the correct bit for PIXELDQ 56 | flagged = np.left_shift(flagged, dq_dict[row['NAME']]) 57 | # propagate into the PIXELDQ extension 58 | expected_dq = np.bitwise_or(expected_dq, flagged) 59 | except KeyError: 60 | pass 61 | # print("No DQ mnemonic "+row['NAME']) 62 | return expected_dq 63 | 64 | def extract_subarray(array, hdul): 65 | xsize = hdul['PRIMARY'].header['SUBSIZE1'] 66 | xstart = hdul['PRIMARY'].header['SUBSTRT1'] 67 | ysize = hdul['PRIMARY'].header['SUBSIZE2'] 68 | ystart = hdul['PRIMARY'].header['SUBSTRT2'] 69 | return array[ystart - 1:ysize + ystart - 1, 70 | xstart - 1:xstart + xsize - 1] 71 | 72 | def dq_summary(pixeldq): 73 | for flag, bit in dq_dict.items(): 74 | n = np.sum((pixeldq & (1 << bit)).astype(bool)) 75 | if n: 76 | print("{} pixels flagged as {}".format(n, flag)) 77 | print("") 78 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_saturation.py: -------------------------------------------------------------------------------- 1 | from ..utils import extract_subarray, translate_dq, dq_summary 2 | 3 | import os 4 | import numpy as np 5 | import pytest 6 | from astropy.io import fits 7 | from jwst.saturation import SaturationStep 8 | from jwst import datamodels 9 | 10 | 11 | @pytest.fixture(scope='module') 12 | def fits_output(fits_input): 13 | fname = fits_input[0].header['filename'].replace('.fits', 14 | '_saturationstep.fits') 15 | yield fits.open(fname) 16 | os.remove(fname) 17 | 18 | @pytest.fixture(scope='module') 
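# The fixture below resolves the saturation reference file that the step
# records in the R_SATURA header keyword; the 'crds://' prefix is translated
# to the local CRDS cache mount (/grp/crds/cache/references/jwst/) used on
# STScI systems.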
19 | def fits_saturation(fits_output): 20 | ref_path = fits_output['PRIMARY'].header['R_SATURA'] 21 | ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/') 22 | return fits.open(ref_path) 23 | 24 | def test_saturation_step(fits_input): 25 | """Make sure the DQInitStep runs without error.""" 26 | fname = fits_input[0].header['filename'].replace('.fits', 27 | '_saturationstep.fits') 28 | SaturationStep.call(datamodels.open(fits_input), output_file=fname, 29 | save_results=True) 30 | 31 | def test_groupdq_flagging(fits_output, fits_saturation): 32 | 33 | satmask = extract_subarray(fits_saturation['SCI'].data, fits_output) 34 | dqmask = translate_dq(fits_saturation) 35 | dqmask = extract_subarray(dqmask, fits_output) 36 | # flag pixels greater than saturation threshold 37 | no_sat_check = (dqmask & (1 << 21)).astype(bool) 38 | not_nan = ~np.isnan(satmask) 39 | expected_groupdq = np.zeros_like(fits_output['GROUPDQ'].data) 40 | flagged = (fits_output['SCI'].data >= satmask) & ~no_sat_check[np.newaxis, np.newaxis, :, :] & not_nan[np.newaxis, np.newaxis, :, :] 41 | expected_groupdq[flagged] = 2 42 | 43 | # make sure that pixels in groups after a flagged pixel are also flagged 44 | flagged = np.cumsum(expected_groupdq == 2, axis=1) > 0 45 | expected_groupdq[flagged] = 2 46 | 47 | assert np.all(fits_output['GROUPDQ'].data == expected_groupdq) 48 | 49 | def test_pixeldq_propagation(fits_input, fits_output, fits_saturation): 50 | 51 | # translate dq flags to standard bits 52 | pixeldq = translate_dq(fits_saturation) 53 | # extract subarray 54 | pixeldq = extract_subarray(pixeldq, fits_input) 55 | 56 | print('For step input') 57 | dq_summary(fits_input['PIXELDQ'].data) 58 | print('For reference file') 59 | dq_summary(pixeldq) 60 | print('For step output') 61 | dq_summary(fits_output['PIXELDQ'].data) 62 | assert np.all(fits_output['PIXELDQ'].data == np.bitwise_or(fits_input['PIXELDQ'].data, pixeldq)) 63 | -------------------------------------------------------------------------------- /docs/caldetector1/superbias.rst: -------------------------------------------------------------------------------- 1 | ********************* 2 | Superbias Subtraction 3 | ********************* 4 | 5 | The superbias subtraction step removes the fixed detector bias from a science data set by subtracting a superbias 6 | reference image. This superbias is subtracted from every group in every integration of the science ramp data. Any NaN’s 7 | present in the superbias image are set to zero before being subtracted from the science data. The superbias correction 8 | should apply to subarray exposures. See Kevin Volk's presentation at the 5/31/2016 JWST Cal WG meeting. 9 | 10 | For more details on this step refer to the JWST Science Pipelines Documentation at http://ssb.stsci.edu/doc/jwst_git/docs/superbias/html/ 11 | 12 | Test Requirements 13 | ================= 14 | This step requires verification only. The outcome of this step depends almost exclusively on the reference file used. 15 | Darks should have enough S/N for all possible ramps for full frame and subarrays. The Guiders should be excluded from 16 | this test. Guiders don't have darks from CV3 because of a large chamber background (5 to 10 ADU/second vs. ~0.01 17 | ADU/second dark current expected). 
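As described at the top of this page, the expected arithmetic is a straightforward group-by-group subtraction, with NaNs in the reference image treated as zero. A minimal sketch of that expectation (not the pipeline implementation; it assumes a full-frame exposure whose geometry matches the reference file, and the file names are placeholders):

.. code-block:: python

    import numpy as np
    from astropy.io import fits

    with fits.open('input.fits') as inp, \
            fits.open('superbias_output.fits') as out, \
            fits.open('superbias_ref.fits') as ref:
        bias = np.nan_to_num(ref['SCI'].data)  # NaNs are subtracted as zero
        # the same 2-D superbias is removed from every group of every integration
        assert np.allclose(out['SCI'].data, inp['SCI'].data - bias)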
18 | 19 | =============================================================================================== ======================================================================== 20 | Requirement Fulfilled by 21 | =============================================================================================== ======================================================================== 22 | Check the bias is correctly subtracted. `~caltest.test_caldetector1.test_superbias.test_superbias_subtraction` 23 | Check that the PIXELDQ array of the science exposure is correctly combined with the DQ array. `~caltest.test_caldetector1.test_superbias.test_pixeldq_propagation` 24 | =============================================================================================== ======================================================================== 25 | 26 | Test Data 27 | ========= 28 | 29 | .. todo:: Determine test data including at least one subarray case. 30 | 31 | Test Procedure 32 | ============== 33 | 34 | To run these tests the ``config.json`` should contain the ``"superbias"`` section for example: 35 | 36 | .. code-block:: json 37 | 38 | { 39 | "superbias": [ 40 | "superbias/jw82600004001_02101_00001_nrcb1_dqinitstep_saturationstep.fits" 41 | ] 42 | } 43 | 44 | Using the above ``config.json`` simply run: 45 | 46 | .. code-block:: bash 47 | 48 | test_pipeline --config config.json 49 | 50 | Reference/API 51 | ============= 52 | 53 | .. automodapi:: caltest.test_caldetector1.test_superbias 54 | 55 | 56 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_ramp_fit.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from jwst.ramp_fitting import RampFitStep 4 | from jwst import datamodels 5 | from astropy.io import fits, ascii 6 | from astropy.stats import sigma_clipped_stats 7 | import os 8 | import matplotlib.pyplot as plt 9 | 10 | @pytest.fixture(scope='module') 11 | def fits_output(fits_input): 12 | fname = fits_input[0].header['filename'].replace('.fits', '_rampfitstep.fits') 13 | yield fits.open(fname) 14 | # delete the output FITS file after this module is finished 15 | os.remove(fname) 16 | 17 | @pytest.fixture(scope='module') 18 | def fits_gain(fits_output): 19 | ref_path = fits_output['PRIMARY'].header['R_GAIN'] 20 | ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/') 21 | return fits.open(ref_path) 22 | 23 | def test_ramp_fit_step(fits_input): 24 | """Make sure the RampFitStep runs without error.""" 25 | fname = fits_input[0].header['filename'].replace('.fits', '_rampfitstep.fits') 26 | RampFitStep.call(datamodels.open(fits_input), output_file=fname, save_results=True) 27 | 28 | def test_ramp_fit_slopes(fits_input, fits_output, fits_gain): 29 | """ 30 | Check that output slope is close to the input slope is within 1-sigma of 31 | input slope. 
32 | """ 33 | _, med, stdev = sigma_clipped_stats((fits_output['SCI'].data * fits_gain['SCI'].data).flatten()) 34 | 35 | print("Sigma-clipped median slope: {:.5f}".format(med)) 36 | print("Sigma-clipped standard deviation of slope: {:.5f}".format(stdev)) 37 | 38 | base = fits_input[0].header['FILENAME'].split('.')[0] 39 | plot_fname = 'test_ramp_fit_slopes_'+base+'.png' 40 | plt.clf() 41 | plt.xlabel("Output Slope (count/sec)") 42 | plt.hist((fits_output['SCI'].data * fits_gain['SCI'].data).flatten(), 43 | range=(0.5, 1.5), bins = 'auto', color = 'k') 44 | plt.savefig(plot_fname) 45 | 46 | assert med - stdev < 1 < med + stdev 47 | 48 | def test_err_combination(fits_output): 49 | """ 50 | Check that values in ERR are the square root of 51 | Poisson variance + Read Noise variance 52 | """ 53 | 54 | n_neg_poisson = np.sum(fits_output['VAR_POISSON'].data < 0) 55 | n_neg_total = np.sum(fits_output['VAR_POISSON'].data 56 | + fits_output['VAR_RNOISE'].data 57 | < 0) 58 | n_nan = np.sum(np.isnan(fits_output['ERR'].data)) 59 | print("Number of pixels with negative possion variance: {}".format(n_neg_poisson)) 60 | print("Number of pixels with negative total variance: {}".format(n_neg_total)) 61 | print("Number of pixels with NaN error: {}".format(n_nan)) 62 | 63 | assert np.allclose(fits_output['ERR'].data, 64 | np.sqrt(fits_output['VAR_POISSON'].data 65 | + fits_output['VAR_RNOISE'].data), 66 | equal_nan=True) -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | JWST Calibration Pipeline Testing 2 | +++++++++++++++++++++++++++++++++ 3 | 4 | This is the documentation for calibration-pipeline-testing-tool. 5 | 6 | Installation 7 | ============ 8 | Create a JWST pipeline environment to install into and install `pytest-html` 9 | 10 | .. code:: 11 | 12 | conda create -n test_jwst --file http://ssb.stsci.edu/releases/jwstdp/0.7.8/latest-osx 13 | source activate test_jwst 14 | pip install pytest-html 15 | 16 | .. code:: bash 17 | 18 | git clone https://github.com/spacetelescope/calibration-pipeline-testing-tool.git 19 | cd calibration-pipeline-testing-tool 20 | python setup.py install 21 | 22 | Basic Usage 23 | =========== 24 | 25 | To setup tests, you specify test input files in a JSON file, with an entry for each step. 26 | You do not need to provide input for every step. The below example shows all currently available steps. 27 | Any of the step names can be omitted and tests associated with that step will be skipped. 28 | Multiple FITS files can be supplied for any given step and the tests will be repeated for each supplied file. 29 | 30 | .. code:: json 31 | 32 | { 33 | "dq_init": [ 34 | "dq_init_input.fits" 35 | ], 36 | 37 | "saturation": [ 38 | "saturation_input.fits" 39 | ], 40 | 41 | "superbias": [ 42 | "superbias_input.fits" 43 | ], 44 | 45 | "dark_current": [ 46 | "dark_current_input.fits" 47 | ], 48 | 49 | "refpix": [ 50 | "refpix_input.fits" 51 | ], 52 | 53 | "linearity": [ 54 | "linearity_input.fits" 55 | ], 56 | 57 | "rscd": [ 58 | "rscd_input.fits" 59 | ], 60 | 61 | "lastframe": [ 62 | "lastframe_input.fits" 63 | ], 64 | 65 | "jump": [ 66 | "jump_input.fits" 67 | ], 68 | 69 | "ramp_fit": [ 70 | "ramp_fit_input.fits" 71 | ] 72 | } 73 | 74 | Then from the command line simply run 75 | 76 | .. code:: bash 77 | 78 | test_pipeline --config confg.json 79 | 80 | This will produce a ``summary.html`` file with the test results as well as plots if any are produced. 
These files are named after the input files and will be overwritten on subsequent runs. 81 | Both the report and any plots are written to the current working directory, so it is advisable to run ``test_pipeline`` in its own directory. 82 | 83 | Contributing 84 | ============ 85 | 86 | If you would like to contribute tests to the package, see the :ref:`developer`. 87 | 88 | CALDETECTOR1 89 | ============ 90 | 91 | .. toctree:: 92 | :maxdepth: 1 93 | 94 | caldetector1/dq_init.rst 95 | caldetector1/saturation.rst 96 | caldetector1/superbias.rst 97 | caldetector1/linearity.rst 98 | caldetector1/dark_current.rst 99 | caldetector1/refpix.rst 100 | caldetector1/rscd.rst 101 | caldetector1/lastframe.rst 102 | caldetector1/jump.rst 103 | caldetector1/ramp_fit.rst 104 | -------------------------------------------------------------------------------- /docs/caldetector1/dq_init.rst: -------------------------------------------------------------------------------- 1 | *************************** 2 | Data Quality Initialization 3 | *************************** 4 | 5 | The Data Quality (DQ) flags track problems in the data. In the initialization step, the PIXELDQ and GROUPDQ extensions 6 | are created. The PIXELDQ extension is filled with information from the static Data Quality mask (Bad Pixel mask 7 | reference file) for the input dataset using a bitwise OR. The GROUPDQ array is initialized to zero. The PIXELDQ 8 | extension is a 2-D array that contains the pixel-dependent flags that are the same for all groups and integrations 9 | within an exposure, while the GROUPDQ extension is a 4-D array that stores flags that can vary from one group or 10 | integration to the next and that will be populated by subsequent steps. For more details refer to JWST-STScI-004355 and 11 | the calibration pipeline online software documentation at http://ssb.stsci.edu/doc/jwst_git/docs/dq_init/html/. 12 | 13 | Test Requirements 14 | ================= 15 | 16 | ====================================================================================== ======================================================================= 17 | Requirement Fulfilled by 18 | ====================================================================================== ======================================================================= 19 | The PIXELDQ is initialized with the information from the reference file. `~caltest.test_caldetector1.test_dq_init.test_pixeldq_initialization` 20 | The GROUPDQ extension is added to the data and all values are initialized to zero. `~caltest.test_caldetector1.test_dq_init.test_groupdq_initialization` 21 | A DQ_DEF extension with the definition of DQ flags should be present in all products. `~caltest.test_caldetector1.test_dq_init.test_dq_def_initialization` 22 | Error array is a 4-D array initialized to zero. `~caltest.test_caldetector1.test_dq_init.test_err_initialization` 23 | ====================================================================================== ======================================================================= 24 | 25 | Test Data 26 | ========= 27 | 28 | The ``dq_init`` step is applied the same way to all instruments and exposure types except the NIRSpec IRS2 mode; therefore, 29 | we choose to test one NIRCam FULL frame image and one SUB640 subarray image. 30 | 31 | .. todo:: Need NIRSpec IRS2. 
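The expected initialization can be written down directly as array checks. A minimal sketch mirroring the assertions in the tests listed above (not the pipeline implementation; ``dqinit_output.fits`` is a placeholder name for a ``DQInitStep`` product):

.. code-block:: python

    import numpy as np
    from astropy.io import fits

    with fits.open('dqinit_output.fits') as out:
        # GROUPDQ is created and starts out all zero
        assert 'GROUPDQ' in out
        assert np.all(out['GROUPDQ'].data == 0)
        # ERR is a 4-D array, also initialized to zero
        assert out['ERR'].data.ndim == 4
        assert np.all(out['ERR'].data == 0)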
32 | 33 | Test Procedure 34 | ============== 35 | 36 | To run these tests the ``config.json`` should contain the ``"dq_init"`` section for example: 37 | 38 | .. code-block:: json 39 | 40 | { 41 | "dq_init": [ 42 | "dq_init/jw82600004001_02101_00001_nrcb1_uncal.fits", 43 | "dq_init/jw82600011001_02103_00001_nrcb1_uncal.fits" 44 | ] 45 | } 46 | 47 | Using the above ``config.json`` simply run: 48 | 49 | .. code-block:: bash 50 | 51 | test_pipeline --config config.json 52 | 53 | Reference/API 54 | ============= 55 | 56 | .. automodapi:: caltest.test_caldetector1.test_dq_init 57 | -------------------------------------------------------------------------------- /docs/caldetector1/saturation.rst: -------------------------------------------------------------------------------- 1 | **************** 2 | Saturation Check 3 | **************** 4 | 5 | This step flags saturated pixels by going through all the groups and integrations within an exposure and comparing them 6 | with the defined saturation threshold for each pixel, as given in the saturation reference file. The saturation limit 7 | can refer to one of three thresholds: 8 | 9 | * the threshold beyond which the linearity correction exceeds a particular accuracy requirement (e.g., 0.25% accuracy in the corrected value), 10 | 11 | * the A/D saturation limit of 65535 ADU in the raw data, or 12 | 13 | * the pixel full-well value. 14 | 15 | At present, the pipeline does not distinguish among these thresholds. Header comments in the reference file should 16 | indicate which threshold is employed. The saturation check is performed on data that have not been bias (first group) 17 | subtracted, so saturation levels should be computed accordingly. 18 | 19 | 20 | 21 | Test Requirements 22 | ================= 23 | 24 | ====================================================================================================================================================================== ==================================================================== 25 | Requirement Fulfilled by 26 | ====================================================================================================================================================================== ==================================================================== 27 | Check that the saturation flag is set when a pixel is above the threshold given by the reference file. `~caltest.test_caldetector1.test_saturation.test_groupdq_flagging` 28 | Once it is flagged as saturated in a group all subsequent groups should also be flagged as saturated. `~caltest.test_caldetector1.test_saturation.test_groupdq_flagging` 29 | Check that pixels in the reference files that have value NaN are not flagged as saturated in the data and that in the PIXELDQ array the pixel is set to NO_SAT_CHECK. `~caltest.test_caldetector1.test_saturation.test_groupdq_flagging` 30 | ====================================================================================================================================================================== ==================================================================== 31 | 32 | Test Procedure 33 | ============== 34 | 35 | To run these tests the ``config.json`` should contain the ``"saturation"`` section for example: 36 | 37 | .. code-block:: json 38 | 39 | { 40 | "saturation": [ 41 | "saturation/jw82600004001_02101_00001_nrcb1_dqinitstep.fits" 42 | ] 43 | } 44 | 45 | Using the above ``config.json`` simply run: 46 | 47 | .. 
code-block:: bash 48 | 49 | test_pipeline --config config.json 50 | 51 | Reference/API 52 | ============= 53 | 54 | .. automodapi:: caltest.test_caldetector1.test_saturation 55 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_superbias.py: -------------------------------------------------------------------------------- 1 | from ..utils import translate_dq, extract_subarray 2 | 3 | import os 4 | import numpy as np 5 | import pytest 6 | from astropy.io import fits 7 | from jwst.superbias import SuperBiasStep 8 | from jwst import datamodels 9 | import numpy as np 10 | from scipy.stats import normaltest 11 | from astropy.stats import sigma_clipped_stats 12 | import matplotlib.pyplot as plt 13 | 14 | @pytest.fixture(scope='module') 15 | def fits_output(fits_input): 16 | fname = fits_input[0].header['filename'].replace('.fits', 17 | '_superbiasstep.fits') 18 | yield fits.open(fname) 19 | os.remove(fname) 20 | 21 | @pytest.fixture(scope='module') 22 | def fits_superbias(fits_output): 23 | ref_path = fits_output['PRIMARY'].header['R_SUPERB'] 24 | ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/') 25 | return fits.open(ref_path) 26 | 27 | def test_superbias_step(fits_input): 28 | """Make sure the DQInitStep runs without error.""" 29 | fname = fits_input[0].header['filename'].replace('.fits', 30 | '_superbiasstep.fits') 31 | SuperBiasStep.call(datamodels.open(fits_input), output_file=fname, 32 | save_results=True) 33 | 34 | def test_superbias_subtraction(fits_input, fits_output, fits_superbias): 35 | 36 | if fits_input[0].header['SUBARRAY'] == fits_superbias[0].header['SUBARRAY']: 37 | bias = fits_superbias['SCI'].data 38 | else: 39 | bias = extract_subarray(fits_superbias['SCI'].data, fits_input) 40 | 41 | bias_to_subtract = np.copy(bias) 42 | bias_to_subtract[np.isnan(bias_to_subtract)] = 0 43 | 44 | assert np.allclose(fits_output['SCI'].data, (fits_input['SCI'].data - bias_to_subtract)) 45 | 46 | def test_superbias_residuals(fits_output, fits_input): 47 | 48 | mean, median, std = sigma_clipped_stats(fits_output['SCI'].data[0,0,:,:], 49 | fits_output['PIXELDQ'].data.astype(bool), 50 | iters=None) 51 | 52 | print("Sigma clipped stats") 53 | print("mean = {}".format(mean)) 54 | print("median = {}".format(median)) 55 | print("standard deviation = {}".format(std)) 56 | 57 | # normaltest(fits_output['SCI'].data) 58 | # make plot 59 | base = fits_input[0].header['FILENAME'].split('.')[0] 60 | plot_fname = 'test_superbias_residuals_'+base+'.png' 61 | plt.clf() 62 | plt.hist(fits_output['SCI'].data[0,0,:,:].flatten(), 63 | range=(median - 5 * std, median + 5 * std), 64 | bins=100) 65 | plt.xlabel('First Frame Counts') 66 | plt.ylabel('Number of Pixels') 67 | plt.savefig(plot_fname) 68 | 69 | 70 | def test_pixeldq_propagation(fits_input, fits_output, fits_superbias): 71 | # translate dq flags to standard bits 72 | pixeldq = translate_dq(fits_superbias) 73 | # extract subarray 74 | if fits_superbias[0].header['SUBARRAY'] == 'GENERIC': 75 | pixeldq = extract_subarray(pixeldq, fits_input) 76 | 77 | assert np.all(fits_output['PIXELDQ'].data == np.bitwise_or(fits_input['PIXELDQ'].data, pixeldq)) 78 | 79 | -------------------------------------------------------------------------------- /docs/caldetector1/linearity.rst: -------------------------------------------------------------------------------- 1 | ******************** 2 | Linearity Correction 3 | ******************** 4 | 5 | This step corrects for the detector 
non-linearity. The algorithm used to perform the linearity correction is described 6 | in JWST-STScI-004355. CalWG states that the linearity correction should be dependent on wavelength, with predictions that 7 | at wavelengths greater than approximately 21 microns the non-linearity will be different from that at shorter wavelengths. 8 | There is one reference file for all filters with wavelengths less than 21 microns and different files for each filter 9 | above 21 microns (imager and coronagraphy). For the LRS and MRS, there will be one file per grating setting. 10 | 11 | For grouped data, the linearity correction derived from non-grouped data should be 12 | applied. 13 | 14 | Pixels flagged as saturated are ignored. Data quality flags are also propagated from the DQ extension of the linearity 15 | reference file (Table 3-3) into the 2-D PIXELDQ array of the science data. 16 | 17 | The correction is applied for each exposure pixel-by-pixel, group-by-group, and integration-by-integration. If a pixel 18 | has at least one coefficient with NaN, it will not have the correction applied. Likewise, pixels that are marked as 19 | saturated within a group or flagged with NO_LIN_CORR (linearity correction not determined for pixel) will not be 20 | corrected. In the case of subarrays and where there is not a specific reference file available, the pipeline will 21 | extract a matching subarray from the full frame reference file data. For more details on this step refer to the JWST 22 | Science Pipelines Documentation at http://ssb.stsci.edu/doc/jwst_git/docs/linearity/html/ 23 | 24 | 25 | Test Requirements 26 | ================= 27 | =================================================== ======================================================================= 28 | Requirement Fulfilled by 29 | =================================================== ======================================================================= 30 | Check that the multiplication is done correctly. `~caltest.test_caldetector1.test_linearity.test_refpix_correction` 31 | Check it works for grouped and un-grouped data. `~caltest.test_caldetector1.test_linearity.test_linearity_correction` 32 | Check that the DQ flags are propagated correctly. `~caltest.test_caldetector1.test_linearity.test_pixeldq_propagation` 33 | =================================================== ======================================================================= 34 | 35 | Test Procedure 36 | ============== 37 | 38 | .. code-block:: json 39 | 40 | { 41 | "linearity": [ 42 | "linearity/jw82600004001_02101_00001_nrcb1_dqinitstep_saturationstep_superbiasstep_refpixstep.fits", 43 | "linearity/jw82600011001_02103_00001_nrcb1_dqinitstep_saturationstep_superbiasstep_refpixstep.fits", 44 | "linearity/jw87600025001_02101_00001_nis_group_scale_dq_init_saturation_superbias_refpix.fits" 45 | ] 46 | } 47 | 48 | Reference/API 49 | ============= 50 | 51 | .. automodapi:: caltest.test_caldetector1.test_linearity 52 | 53 | -------------------------------------------------------------------------------- /docs/caldetector1/dark_current.rst: -------------------------------------------------------------------------------- 1 | *************** 2 | Dark Correction 3 | *************** 4 | 5 | This step removes dark current from the science exposure by subtracting a dark current reference file. For MIRI, the 6 | correction is integration dependent. 
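The frame-averaging arithmetic spelled out in the next paragraph can be sketched in a few lines of NumPy. This illustration follows the check in ``test_dark_current.test_dark_subtraction`` rather than the pipeline code itself; the function name is hypothetical, and it assumes the ``NFRAMES``/``GROUPGAP`` header keywords and a frame-by-frame dark reference file:

.. code-block:: python

    import numpy as np

    def expected_dark(dark_frames, nframes, groupgap, ngroups):
        # total raw frames spanned by the science ramp
        ntot = (nframes + groupgap) * ngroups
        # collect the raw dark frames that fall inside each science group
        per_frame = np.stack([dark_frames[i:ntot:nframes + groupgap]
                              for i in range(nframes)])
        group_dark = per_frame.mean(axis=0)  # average frames within each group
        return np.nan_to_num(group_dark)     # NaNs subtract as zero

    # expectation: output SCI == input SCI - expected_dark(dark, nframes,
    #                                                      groupgap, ngroups)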
7 | 8 | The dark-subtraction step uses dark reference files with NFRAMES=1 and GROUPGAP=0 (i.e., no averaging of frames into 9 | groups and no dropping of frames). It averages and skips dark frames to match the NFRAMES and GROUPGAP values of the 10 | science data, then performs a group-by-group subtraction of the dark data from the science data; extra dark frames are 11 | ignored. If the science exposure contains more frames than the dark reference file, the pipeline issues a warning and 12 | the entire dark subtraction process is skipped. 13 | 14 | Table 3-4 lists the DQ flags used to flag warm and hot pixels, which may change on short timescales, as well as 15 | pixels with unreliable dark corrections. These flags are propagated into the PIXELDQ array of the science data. 16 | 17 | When the optional ``dark_output`` parameter is set to a file name, the frame-averaged dark reference data will be written to 18 | the specified FITS file. Subarrays are handled by having CRDS return the dark reference file appropriate for the subarray mode. 19 | 20 | For more details on this step refer to the JWST Science Pipelines Documentation at http://ssb.stsci.edu/doc/jwst_git/docs/dark_current/html/ 21 | 22 | Test Requirements 23 | ================= 24 | This step requires verification only. The outcome of this step depends almost exclusively on the reference file used. 25 | Darks should have enough S/N for all possible ramps for full frame and subarrays. The Guiders should be excluded from 26 | this test. Guiders don't have darks from CV3 because of a large chamber background (5 to 10 ADU/second vs. ~0.01 27 | ADU/second dark current expected). 28 | 29 | ========================================================================================================================= =========================================================================== 30 | Requirement Fulfilled by 31 | ========================================================================================================================= =========================================================================== 32 | When there are fewer frames in the reference file than in the data, check that there is a warning and the step is skipped `~caltest.test_caldetector1.test_dark_current.test_dark_subtraction` 33 | Check that when there are more frames in the dark reference file the extra frames are ignored. `~caltest.test_caldetector1.test_dark_current.test_dark_subtraction` 34 | Verify that when a dark has NaNs, these are correctly treated as zero and the PIXELDQ is set properly `~caltest.test_caldetector1.test_dark_current.test_dark_subtraction` 35 | Verify that the DQ array of the dark is correctly combined with the PIXELDQ array of the science data. `~caltest.test_caldetector1.test_dark_current.test_pixeldq_propagation` 36 | Verify that when the dark is not applied, the data is correctly flagged as such. 37 | Verify the Dark correction is done by integration for MIRI observations. 38 | ========================================================================================================================= =========================================================================== 39 | 40 | Test Procedure 41 | ============== 42 | 43 | To run these tests the ``config.json`` should contain the ``"dark_current"`` section, for example: 44 | 45 | .. code-block:: json 46 | 47 | { 48 | "dark_current": [ 49 | "dark_current_input.fits" 50 | ] 51 | } 52 | 53 | Using the above ``config.json`` simply run: 54 | 55 | .. code-block:: bash 56 | 57 | test_pipeline --config config.json 58 | 59 | Reference/API 60 | ============= 61 | 62 | .. 
automodapi:: caltest.test_caldetector1.test_dark_current 51 | 52 | -------------------------------------------------------------------------------- /caltest/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from astropy.io import fits 3 | from datetime import datetime 4 | from py.xml import html 5 | import jwst 6 | import crds 7 | import json 8 | import os 9 | import re 10 | 11 | def pytest_addoption(parser): 12 | parser.addoption("--config", 13 | help=("Input files to test")) 14 | 15 | 16 | def pytest_configure(config): 17 | config._metadata['jwst'] = jwst.__version__ 18 | config._metadata['crds_context'] = crds.heavy_client.get_processing_mode('jwst')[1] 19 | 20 | 21 | def pytest_runtest_setup(item): 22 | # read the JSON config 23 | if item.config.getoption("config"): 24 | with open(item.config.option.config) as config_file: 25 | config = json.load(config_file) 26 | 27 | # get the name of the step being testing in current module 28 | module, ext = os.path.splitext(item.fspath.basename) 29 | module = module.replace('test_', '') 30 | 31 | # skip the tests for this step if no data was supplied 32 | if module not in config.keys(): 33 | pytest.skip("No {} section in config".format(module)) 34 | 35 | 36 | def pytest_generate_tests(metafunc): 37 | with open(metafunc.config.option.config) as config_file: 38 | config = json.load(config_file) 39 | steps = ['dq_init', 'saturation', 'superbias', 'persistence', 40 | 'linearity', 'dark_current', 'jump', 'ramp_fit', 41 | 'assign_wcs'] 42 | # parametrize tests with the input files supplied for that step 43 | for step in steps: 44 | if step in metafunc.module.__name__ and config.get(step): 45 | if step != "persistence": 46 | metafunc.parametrize("input_file", config[step], scope='module') 47 | else: 48 | metafunc.parametrize(["input_file","trapsfilled"], config[step], scope='module') 49 | 50 | @pytest.fixture(scope='module') 51 | def fits_input(input_file): 52 | # open the input_file defined above once for each module 53 | yield fits.open(input_file) 54 | 55 | 56 | @pytest.mark.optionalhook 57 | def pytest_html_results_table_header(cells): 58 | cells.insert(0, html.th('Time', class_='sortable time', col='time')) 59 | cells.insert(1, html.th('Test', class_='sortable', col='shortname')) 60 | cells.insert(1, html.th('Module', class_='sortable', col='shortname')) 61 | cells.pop() 62 | cells.pop(-2) 63 | cells.insert(3,html.th('Input Data', class_='sortable', col='data')) 64 | 65 | 66 | @pytest.mark.optionalhook 67 | def pytest_html_results_table_row(report, cells): 68 | # get name of input file from test name 69 | full_string = report.nodeid 70 | module = full_string.split('::')[0].split('/')[-1][:-3].replace('test_', '') 71 | test = full_string.split('::')[1].split('[')[0] 72 | if '[' in full_string: 73 | data = full_string.split('::')[1].split('[')[-1][:-1] 74 | else: 75 | data = '' 76 | 77 | cells.insert(0, html.td(datetime.utcnow(), class_='col-time')) 78 | cells.insert(1, html.td(test, class_='col-time')) 79 | cells.insert(1, html.td(module, class_='col-time')) 80 | cells.pop() 81 | cells.pop(-2) 82 | cells.insert(3, html.td(data, class_='col-time')) 83 | 84 | @pytest.mark.hookwrapper 85 | def pytest_runtest_makereport(item, call): 86 | pytest_html = item.config.pluginmanager.getplugin('html') 87 | outcome = yield 88 | report = outcome.get_result() 89 | extra = getattr(report, 'extra', []) 90 | if report.when == 'call': 91 | # get filename between square brackets 92 | m = 
re.match('^.*\[(.*)\].*$', item.name) 93 | fname = item.name.split('[')[0]+'_'+m.group(1).split('/')[-1][:-5]+'.png' 94 | # always add url to report 95 | if os.path.isfile(fname): 96 | # add plot if it exists 97 | extra.append(pytest_html.extras.image(fname)) 98 | report.extra = extra 99 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_dark_current.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from jwst.dark_current import DarkCurrentStep 4 | from jwst import datamodels 5 | from astropy.io import fits 6 | import matplotlib.pyplot as plt 7 | import os 8 | from ..utils import translate_dq, extract_subarray 9 | 10 | 11 | @pytest.fixture(scope='module') 12 | def fits_output(fits_input): 13 | fname = fits_input[0].header['filename'].replace('.fits', 14 | '_darkcurrentstep.fits') 15 | yield fits.open(fname) 16 | # delete the output FITS file after this module is finished 17 | os.remove(fname) 18 | 19 | @pytest.fixture(scope='module') 20 | def fits_dark(fits_output): 21 | ref_path = fits_output['PRIMARY'].header['R_DARK'] 22 | ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/') 23 | return fits.open(ref_path) 24 | 25 | def test_dark_current_step(fits_input): 26 | """Make sure the DQInitStep runs without error.""" 27 | fname = fits_input[0].header['filename'].replace('.fits', 28 | '_darkcurrentstep.fits') 29 | DarkCurrentStep.call(datamodels.open(fits_input), output_file=fname, 30 | save_results=True) 31 | 32 | def test_dark_subtraction(fits_input, fits_dark, fits_output): 33 | nframes = fits_output[0].header['NFRAMES'] 34 | groupgap = fits_output[0].header['GROUPGAP'] 35 | nints, ngroups, nx, ny = fits_output['SCI'].shape 36 | nframes_tot = (nframes + groupgap) * ngroups 37 | if nframes_tot > fits_dark['SCI'].data.shape[0]: 38 | # data should remain unchanged if there are more frames in the 39 | # science data than the reference file 40 | assert np.all(fits_input['SCI'].data == fits_output['SCI'].data) 41 | 42 | else: 43 | dark_correct = np.zeros((nframes, ngroups, nx, ny)) 44 | data = fits_dark['SCI'].data[:nframes_tot, :, :] 45 | for i in range(nframes): 46 | dark_correct[i] = data[i::(nframes + groupgap), :, :] 47 | 48 | dark_correct = np.average(dark_correct, axis=0) 49 | dark_correct[np.isnan(dark_correct)] = 0 50 | result = fits_input['SCI'].data - dark_correct 51 | assert np.allclose(result, fits_output['SCI'].data) 52 | 53 | 54 | def test_dark_current_quality(fits_input, fits_output): 55 | """ 56 | Check the slope of the median ramp for the detector. The count rate of the 57 | dark subtracted ramp should be small (< 0.1?) 
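    (A successful dark subtraction should leave the median ramp nearly flat;
    the assertion below compares the absolute fitted slope against a fixed
    threshold of 0.1 counts/group.)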
58 | 59 | :param fits_input: astropy.io.fits.HDUList 60 | The FITS HDUList input 61 | :param fits_output: astropy.io.fits.HDUList 62 | The FITS HDUList output 63 | """ 64 | med_in = np.median(fits_input['SCI'].data[0, :, :, :], axis=(1, 2)) 65 | med_out = np.median(fits_output['SCI'].data[0, :, :, :,], axis=(1,2)) 66 | groups = np.arange(med_in.shape[0]) 67 | 68 | slope_in, _ = np.polyfit(groups, med_in, 1) 69 | slope_out, _ = np.polyfit(groups, med_out, 1) 70 | 71 | print( 72 | "Slope of median ramp before dark subtraction: {} counts/group".format( 73 | slope_in)) 74 | print( 75 | "Slope of median ramp after dark subtraction: {} counts/group".format( 76 | slope_out)) 77 | 78 | plt.clf() 79 | plt.plot(med_in, label='input') 80 | plt.plot(med_out, label='output') 81 | base = fits_input[0].header['FILENAME'].split('.')[0] 82 | plot_fname = 'test_dark_current_quality_'+base+'.png' 83 | plt.xlabel('Group Number') 84 | plt.ylabel('Counts') 85 | plt.savefig(plot_fname) 86 | 87 | assert abs(slope_out) < 0.1 88 | 89 | def test_pixeldq_propagation(fits_input, fits_output, fits_dark): 90 | 91 | # translate dq flags to standard bits 92 | pixeldq = translate_dq(fits_dark) 93 | # extract subarray 94 | if fits_dark[0].header['SUBARRAY'] == 'GENERIC': 95 | pixeldq = extract_subarray(pixeldq, fits_input) 96 | 97 | assert np.all(fits_output['PIXELDQ'].data == np.bitwise_or(fits_input['PIXELDQ'].data, pixeldq)) 98 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_linearity.py: -------------------------------------------------------------------------------- 1 | from ..utils import translate_dq, extract_subarray 2 | 3 | import os 4 | import numpy as np 5 | import pytest 6 | from astropy.io import fits 7 | from jwst.linearity import LinearityStep 8 | from jwst import datamodels 9 | import matplotlib.pyplot as plt 10 | import os 11 | 12 | @pytest.fixture(scope='module') 13 | def fits_output(fits_input): 14 | fname = fits_input[0].header['filename'].replace('.fits', 15 | '_linearitystep.fits') 16 | yield fits.open(fname) 17 | os.remove(fname) 18 | 19 | @pytest.fixture(scope='module') 20 | def fits_linearity(fits_output): 21 | ref_path = fits_output['PRIMARY'].header['R_LINEAR'] 22 | ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/') 23 | return fits.open(ref_path) 24 | 25 | def test_linearity_step(fits_input): 26 | """Make sure the LinearityStep runs without error.""" 27 | fname = fits_input[0].header['filename'].replace('.fits', 28 | '_linearitystep.fits') 29 | LinearityStep.call(datamodels.open(fits_input), output_file=fname, 30 | save_results=True) 31 | 32 | def extract_coeffs(coeffs, hdul): 33 | xsize = hdul['PRIMARY'].header['SUBSIZE1'] 34 | xstart = hdul['PRIMARY'].header['SUBSTRT1'] 35 | ysize = hdul['PRIMARY'].header['SUBSIZE2'] 36 | ystart = hdul['PRIMARY'].header['SUBSTRT2'] 37 | return coeffs[::-1, ystart - 1:ysize + ystart - 1, 38 | xstart - 1:xstart + xsize - 1] 39 | 40 | def test_linearity_correction(fits_input, fits_linearity, fits_output): 41 | """ 42 | Check that the linearity correction is properly applied to all relevant pixels. The algorithm 43 | uses a polynomial of the form 44 | .. math:: 45 | F_c = \sum_{i=0}^N C_i F^i 46 | 47 | where :math:`F_c` is the corrected counts, :math:`C` are the correction coefficients, and :math:`F` 48 | is the uncorrected counts. The coefficients of the polynomial at each pixel are given by the 49 | reference file. 
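    As a hypothetical worked example, a pixel with coefficients
    C = [0.0, 1.0, 1e-6] (constant, linear, quadratic terms) and an
    uncorrected count of F = 10000 DN would be corrected to
    F_c = 0.0 + 1.0*10000 + 1e-6*10000**2 = 10100 DN.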
50 | """ 51 | 52 | # # ignore pixels which are saturated (GROUPDQ = 2) or NO_LIN_CORR (DQ = 2) 53 | no_lin_corr = (translate_dq(fits_linearity) & (1 << 20)).astype(bool) 54 | no_lin_corr = extract_subarray(no_lin_corr, fits_input) 55 | saturated = (fits_input['GROUPDQ'].data & (1 << 2)).astype(bool) 56 | needs_correction = np.logical_not(np.logical_or(saturated, no_lin_corr)) 57 | 58 | linearity_applied = np.allclose( 59 | np.polyval(extract_coeffs(fits_linearity['COEFFS'].data, 60 | fits_input), 61 | fits_input['SCI'].data)[needs_correction], 62 | fits_output['SCI'].data[needs_correction]) 63 | 64 | linearity_ignored = np.allclose(fits_input['SCI'].data[~needs_correction], 65 | fits_output['SCI'].data[~needs_correction]) 66 | 67 | # make sure that the values linearity correction is properly applied to relevant pixels 68 | # and ignored elsewhere 69 | assert linearity_applied and linearity_ignored 70 | 71 | def test_pixeldq_propagation(fits_input, fits_output, fits_linearity): 72 | 73 | # translate dq flags to standard bits 74 | pixeldq = translate_dq(fits_linearity) 75 | # extract subarray 76 | pixeldq = extract_subarray(pixeldq, fits_input) 77 | 78 | assert np.all(fits_output['PIXELDQ'].data == np.bitwise_or(fits_input['PIXELDQ'].data, pixeldq)) 79 | 80 | def test_linearity_residuals(fits_input, fits_output): 81 | """ 82 | Calculate the second difference of the linearity corrected ramp for each 83 | pixel. If the ramp is perfectly linear they should be zero. 84 | """ 85 | 86 | nints, ngroups, nx, ny = fits_output['SCI'].data.shape 87 | data_by_pixel = fits_output['SCI'].data.reshape(nints, ngroups, nx * ny) 88 | groupdq_by_pixel = fits_output['GROUPDQ'].data.reshape(nints, ngroups, 89 | nx * ny) 90 | masked_output = np.ma.array(data_by_pixel, mask=groupdq_by_pixel.astype(bool)) 91 | masked_input = np.ma.array(fits_input['SCI'].data.reshape(nints, ngroups, nx * ny), 92 | mask=groupdq_by_pixel.astype(bool)) 93 | second_diff = np.ma.diff(masked_output, n=2, axis=1) 94 | 95 | # make plot 96 | base = fits_input[0].header['FILENAME'].split('.')[0] 97 | plot_fname = 'test_linearity_residuals_'+base+'.png' 98 | plt.clf() 99 | plt.plot(masked_input.data[0, 1:-1, :].flatten(), 100 | second_diff.data[0, :, :].flatten(), ',k', alpha=.01) 101 | plt.ylim(-250, 250) 102 | plt.ylabel('Second Difference') 103 | plt.xlabel('Uncorrected Counts (DN)') 104 | plt.savefig(plot_fname) 105 | -------------------------------------------------------------------------------- /docs/calibration-pipeline-testing-tool/developer.rst: -------------------------------------------------------------------------------- 1 | .. _developer: 2 | 3 | *************** 4 | Developer Guide 5 | *************** 6 | 7 | This document outlines the process for creating new test modules and contributing back to the package. 8 | 9 | Global test configuration 10 | ========================= 11 | 12 | Configuration which applies to every test module is set in the ``caltest/conftest.py``. For the purposes of adding new 13 | tests the most import detail is that this is where the input for each step is set. This is done in the 14 | ``pytest_generate_tests()`` function. 15 | 16 | .. 
16 | .. code-block:: python
17 | 
18 |     def pytest_generate_tests(metafunc):
19 |         with open(metafunc.config.option.config) as config_file:
20 |             config = json.load(config_file)
21 |         steps = ['dq_init', 'saturation', 'superbias', 'linearity', 'dark_current',
22 |                  'jump', 'ramp_fit']
23 |         # parametrize tests with the input files supplied for that step
24 |         for step in steps:
25 |             if step in metafunc.module.__name__ and config.get(step):
26 |                 metafunc.parametrize("input_file", config[step], scope='module')
27 | 
28 | This function checks whether one of the currently implemented ``steps`` matches the current module's ``__name__`` and
29 | is named in the input JSON ``config`` file. If an input file is supplied in the JSON file, a
30 | fixture_ named ``input_file`` is created to supply the path to the input file.
31 | 
32 | This file path is in turn passed to another fixture, ``fits_input``. This fixture has ``scope='module'``, ensuring
33 | that the input FITS file is opened only once per test module. This fixture is created for every test module and provides
34 | the starting point for testing.
35 | 
36 | Writing a new test module
37 | =========================
38 | 
39 | New test modules should be written in a Python file in ``caltest/test_<pipeline>/test_<step>.py``. For
40 | example, tests for the DQ Initialization step are in ``caltest/test_caldetector1/test_dq_init.py``. In general, each
41 | test module will require at least two functions: a test which runs the relevant pipeline step and saves the output, and
42 | a fixture that opens the output FITS file and makes it available for subsequent tests.
43 | 
44 | The convention for naming the test which runs the step is ``test_<step>_step``, and the fixture is named
45 | ``fits_output``. Using ``test_dq_init.py`` as an example, the first test defined in the module is
46 | 
47 | .. code-block:: python
48 | 
49 |     def test_dq_init_step(fits_input):
50 |         """Make sure the DQInitStep runs without error."""
51 |         DQInitStep.call(fits_input, save_results=True)
52 | 
53 | This test uses the ``fits_input`` fixture defined earlier in the ``conftest.py`` and runs the ``DQInitStep``. Note that
54 | the ``fits_output`` fixture is defined before the step in the file. In pytest, tests are run in the order they appear,
55 | and fixtures are initialized the first time they are used. ``test_dq_init_step`` is always run first and requires only the
56 | ``fits_input`` fixture to run; only when ``test_pixeldq_initialization`` is run subsequently is ``fits_output``
57 | initialized, ensuring that the output file has been created. Thus, ``test_<step>_step`` should always be the
58 | first test defined in the test module.
59 | 
60 | The ``fits_output`` fixture uses a ``yield`` to supply the FITS ``HDUList``; this allows for "clean up" to be done once
61 | all tests requiring the fixture have been run.
62 | 
63 | .. code-block:: python
64 | 
65 |     @pytest.fixture(scope='module')
66 |     def fits_output(fits_input):
67 |         fname = '_dqinitstep.'.join(fits_input[0].header['filename'].split('.'))
68 |         fname = fname.replace('_uncal', '')
69 |         yield fits.open(fname)
70 |         os.remove(fname)
71 | 
72 | In this context we use this to delete the output FITS file after we are done testing.
73 | 
74 | 
75 | Making code contributions
76 | =========================
77 | 
78 | Code contributions should be made through pull requests on GitHub_. To get started, first fork the repository. This
79 | will create a copy of the repository for you to work on. Then clone your repository to your local machine.
80 | 
81 | .. 
code:: 82 | 83 | git clone https://github.com//calibration-pipeline-testing-tool.git 84 | 85 | Development should be done in a separate branch than ``master``. Before beginning work on a new feature create a branch 86 | 87 | .. code:: 88 | 89 | cd calibration-pipeline-testing-tool 90 | git checkout -b new_feature 91 | 92 | When you are ready to merge changes into the official repository open a Pull Request here_. Choose your fork and branch 93 | as the "head". Then click "Create pull request". Give a short and long description of the work done in the pull request 94 | once it is reviewed it can be merged into the official repository. 95 | 96 | .. _fixture: https://docs.pytest.org/en/latest/fixture.html 97 | .. _GitHub: https://github.com/STScI-MESA/calibration-pipeline-testing-tool 98 | .. _here: https://github.com/STScI-MESA/calibration-pipeline-testing-tool/pulls -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | #This is needed with git because git doesn't create a dir if it's empty 18 | $(shell [ -d "_static" ] || mkdir -p _static) 19 | 20 | help: 21 | @echo "Please use \`make ' where is one of" 22 | @echo " html to make standalone HTML files" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " singlehtml to make a single large HTML file" 25 | @echo " pickle to make pickle files" 26 | @echo " json to make JSON files" 27 | @echo " htmlhelp to make HTML files and a HTML help project" 28 | @echo " qthelp to make HTML files and a qthelp project" 29 | @echo " devhelp to make HTML files and a Devhelp project" 30 | @echo " epub to make an epub" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " text to make text files" 34 | @echo " man to make manual pages" 35 | @echo " changes to make an overview of all changed/added/deprecated items" 36 | @echo " linkcheck to check all external links for integrity" 37 | 38 | clean: 39 | -rm -rf $(BUILDDIR) 40 | -rm -rf api 41 | -rm -rf generated 42 | 43 | html: 44 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 45 | @echo 46 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 47 | 48 | dirhtml: 49 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 50 | @echo 51 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 52 | 53 | singlehtml: 54 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 55 | @echo 56 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 57 | 58 | pickle: 59 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 60 | @echo 61 | @echo "Build finished; now you can process the pickle files." 
62 | 63 | json: 64 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 65 | @echo 66 | @echo "Build finished; now you can process the JSON files." 67 | 68 | htmlhelp: 69 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 70 | @echo 71 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 72 | ".hhp project file in $(BUILDDIR)/htmlhelp." 73 | 74 | qthelp: 75 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 76 | @echo 77 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 78 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 79 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Astropy.qhcp" 80 | @echo "To view the help file:" 81 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Astropy.qhc" 82 | 83 | devhelp: 84 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 85 | @echo 86 | @echo "Build finished." 87 | @echo "To view the help file:" 88 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Astropy" 89 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Astropy" 90 | @echo "# devhelp" 91 | 92 | epub: 93 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 94 | @echo 95 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 96 | 97 | latex: 98 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 99 | @echo 100 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 101 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 102 | "(use \`make latexpdf' here to do that automatically)." 103 | 104 | latexpdf: 105 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 106 | @echo "Running LaTeX files through pdflatex..." 107 | make -C $(BUILDDIR)/latex all-pdf 108 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 109 | 110 | text: 111 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 112 | @echo 113 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 114 | 115 | man: 116 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 117 | @echo 118 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 119 | 120 | changes: 121 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 122 | @echo 123 | @echo "The overview file is in $(BUILDDIR)/changes." 124 | 125 | linkcheck: 126 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 127 | @echo 128 | @echo "Link check complete; look for any errors in the above output " \ 129 | "or in $(BUILDDIR)/linkcheck/output.txt." 130 | 131 | doctest: 132 | @echo "Run 'python setup.py test' in the root directory to run doctests " \ 133 | @echo "in the documentation." 134 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | if NOT "%PAPER%" == "" ( 11 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 12 | ) 13 | 14 | if "%1" == "" goto help 15 | 16 | if "%1" == "help" ( 17 | :help 18 | echo.Please use `make ^` where ^ is one of 19 | echo. html to make standalone HTML files 20 | echo. dirhtml to make HTML files named index.html in directories 21 | echo. singlehtml to make a single large HTML file 22 | echo. pickle to make pickle files 23 | echo. 
json to make JSON files 24 | echo. htmlhelp to make HTML files and a HTML help project 25 | echo. qthelp to make HTML files and a qthelp project 26 | echo. devhelp to make HTML files and a Devhelp project 27 | echo. epub to make an epub 28 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 29 | echo. text to make text files 30 | echo. man to make manual pages 31 | echo. changes to make an overview over all changed/added/deprecated items 32 | echo. linkcheck to check all external links for integrity 33 | echo. doctest to run all doctests embedded in the documentation if enabled 34 | goto end 35 | ) 36 | 37 | if "%1" == "clean" ( 38 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 39 | del /q /s %BUILDDIR%\* 40 | goto end 41 | ) 42 | 43 | if "%1" == "html" ( 44 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 45 | if errorlevel 1 exit /b 1 46 | echo. 47 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 48 | goto end 49 | ) 50 | 51 | if "%1" == "dirhtml" ( 52 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 53 | if errorlevel 1 exit /b 1 54 | echo. 55 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 56 | goto end 57 | ) 58 | 59 | if "%1" == "singlehtml" ( 60 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 61 | if errorlevel 1 exit /b 1 62 | echo. 63 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 64 | goto end 65 | ) 66 | 67 | if "%1" == "pickle" ( 68 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 69 | if errorlevel 1 exit /b 1 70 | echo. 71 | echo.Build finished; now you can process the pickle files. 72 | goto end 73 | ) 74 | 75 | if "%1" == "json" ( 76 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished; now you can process the JSON files. 80 | goto end 81 | ) 82 | 83 | if "%1" == "htmlhelp" ( 84 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished; now you can run HTML Help Workshop with the ^ 88 | .hhp project file in %BUILDDIR%/htmlhelp. 89 | goto end 90 | ) 91 | 92 | if "%1" == "qthelp" ( 93 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 94 | if errorlevel 1 exit /b 1 95 | echo. 96 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 97 | .qhcp project file in %BUILDDIR%/qthelp, like this: 98 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\Astropy.qhcp 99 | echo.To view the help file: 100 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\Astropy.ghc 101 | goto end 102 | ) 103 | 104 | if "%1" == "devhelp" ( 105 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 106 | if errorlevel 1 exit /b 1 107 | echo. 108 | echo.Build finished. 109 | goto end 110 | ) 111 | 112 | if "%1" == "epub" ( 113 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 114 | if errorlevel 1 exit /b 1 115 | echo. 116 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 117 | goto end 118 | ) 119 | 120 | if "%1" == "latex" ( 121 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 122 | if errorlevel 1 exit /b 1 123 | echo. 124 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 125 | goto end 126 | ) 127 | 128 | if "%1" == "text" ( 129 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 130 | if errorlevel 1 exit /b 1 131 | echo. 132 | echo.Build finished. The text files are in %BUILDDIR%/text. 
133 | goto end 134 | ) 135 | 136 | if "%1" == "man" ( 137 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 141 | goto end 142 | ) 143 | 144 | if "%1" == "changes" ( 145 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.The overview file is in %BUILDDIR%/changes. 149 | goto end 150 | ) 151 | 152 | if "%1" == "linkcheck" ( 153 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Link check complete; look for any errors in the above output ^ 157 | or in %BUILDDIR%/linkcheck/output.txt. 158 | goto end 159 | ) 160 | 161 | if "%1" == "doctest" ( 162 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 163 | if errorlevel 1 exit /b 1 164 | echo. 165 | echo.Testing of doctests in the sources finished, look at the ^ 166 | results in %BUILDDIR%/doctest/output.txt. 167 | goto end 168 | ) 169 | 170 | :end 171 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst 3 | 4 | import glob 5 | import os 6 | import sys 7 | 8 | import ah_bootstrap 9 | from setuptools import setup 10 | 11 | # A dirty hack to get around some early import/configurations ambiguities 12 | if sys.version_info[0] >= 3: 13 | import builtins 14 | else: 15 | import __builtin__ as builtins 16 | builtins._ASTROPY_SETUP_ = True 17 | 18 | from astropy_helpers.setup_helpers import (register_commands, get_debug_option, 19 | get_package_info) 20 | from astropy_helpers.git_helpers import get_git_devstr 21 | from astropy_helpers.version_helpers import generate_version_py 22 | 23 | # Get some values from the setup.cfg 24 | try: 25 | from ConfigParser import ConfigParser 26 | except ImportError: 27 | from configparser import ConfigParser 28 | 29 | conf = ConfigParser() 30 | conf.read(['setup.cfg']) 31 | metadata = dict(conf.items('metadata')) 32 | 33 | PACKAGENAME = metadata.get('package_name', 'caltest') 34 | DESCRIPTION = metadata.get('description', 'calibration pipeline testing tool') 35 | AUTHOR = metadata.get('author', 'Matthew Hill') 36 | AUTHOR_EMAIL = metadata.get('author_email', '') 37 | LICENSE = metadata.get('license', 'unknown') 38 | URL = metadata.get('url', 'http://astropy.org') 39 | 40 | # order of priority for long_description: 41 | # (1) set in setup.cfg, 42 | # (2) load LONG_DESCRIPTION.rst, 43 | # (3) load README.rst, 44 | # (4) package docstring 45 | readme_glob = 'README*' 46 | _cfg_long_description = metadata.get('long_description', '') 47 | if _cfg_long_description: 48 | LONG_DESCRIPTION = _cfg_long_description 49 | 50 | elif os.path.exists('LONG_DESCRIPTION.rst'): 51 | with open('LONG_DESCRIPTION.rst') as f: 52 | LONG_DESCRIPTION = f.read() 53 | 54 | elif len(glob.glob(readme_glob)) > 0: 55 | with open(glob.glob(readme_glob)[0]) as f: 56 | LONG_DESCRIPTION = f.read() 57 | 58 | else: 59 | # Get the long description from the package's docstring 60 | __import__(PACKAGENAME) 61 | package = sys.modules[PACKAGENAME] 62 | LONG_DESCRIPTION = package.__doc__ 63 | 64 | # Store the package name in a built-in variable so it's easy 65 | # to get from other parts of the setup infrastructure 66 | builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME 67 | 68 | # VERSION should be 
PEP440 compatible (http://www.python.org/dev/peps/pep-0440) 69 | VERSION = metadata.get('version', '0.0.dev') 70 | 71 | # Indicates if this version is a release version 72 | RELEASE = 'dev' not in VERSION 73 | 74 | if not RELEASE: 75 | VERSION += get_git_devstr(False) 76 | 77 | # Populate the dict of setup command overrides; this should be done before 78 | # invoking any other functionality from distutils since it can potentially 79 | # modify distutils' behavior. 80 | cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE) 81 | 82 | # Freeze build information in version.py 83 | generate_version_py(PACKAGENAME, VERSION, RELEASE, 84 | get_debug_option(PACKAGENAME)) 85 | 86 | # Treat everything in scripts except README* as a script to be installed 87 | scripts = [fname for fname in glob.glob(os.path.join('scripts', '*')) 88 | if not os.path.basename(fname).startswith('README')] 89 | 90 | 91 | # Get configuration information from all of the various subpackages. 92 | # See the docstring for setup_helpers.update_package_files for more 93 | # details. 94 | package_info = get_package_info() 95 | 96 | # Add the project-global data 97 | package_info['package_data'].setdefault(PACKAGENAME, []) 98 | package_info['package_data'][PACKAGENAME].append('data/*') 99 | 100 | # Define entry points for command-line scripts 101 | entry_points = {'console_scripts': []} 102 | 103 | if conf.has_section('entry_points'): 104 | entry_point_list = conf.items('entry_points') 105 | for entry_point in entry_point_list: 106 | entry_points['console_scripts'].append('{0} = {1}'.format( 107 | entry_point[0], entry_point[1])) 108 | 109 | # Include all .c files, recursively, including those generated by 110 | # Cython, since we can not do this in MANIFEST.in with a "dynamic" 111 | # directory name. 112 | c_files = [] 113 | for root, dirs, files in os.walk(PACKAGENAME): 114 | for filename in files: 115 | if filename.endswith('.c'): 116 | c_files.append( 117 | os.path.join( 118 | os.path.relpath(root, PACKAGENAME), filename)) 119 | package_info['package_data'][PACKAGENAME].extend(c_files) 120 | 121 | # Note that requires and provides should not be included in the call to 122 | # ``setup``, since these are now deprecated. 
See this link for more details: 123 | # https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM 124 | 125 | setup(name=PACKAGENAME, 126 | version=VERSION, 127 | description=DESCRIPTION, 128 | scripts=scripts, 129 | install_requires=metadata.get('install_requires', 'astropy').strip().split(), 130 | author=AUTHOR, 131 | author_email=AUTHOR_EMAIL, 132 | license=LICENSE, 133 | url=URL, 134 | long_description=LONG_DESCRIPTION, 135 | cmdclass=cmdclassd, 136 | zip_safe=False, 137 | use_2to3=False, 138 | entry_points=entry_points, 139 | **package_info 140 | ) 141 | -------------------------------------------------------------------------------- /caltest/_astropy_init.py: -------------------------------------------------------------------------------- 1 | # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 | 3 | __all__ = ['__version__', '__githash__', 'test'] 4 | 5 | # this indicates whether or not we are in the package's setup.py 6 | try: 7 | _ASTROPY_SETUP_ 8 | except NameError: 9 | from sys import version_info 10 | if version_info[0] >= 3: 11 | import builtins 12 | else: 13 | import __builtin__ as builtins 14 | builtins._ASTROPY_SETUP_ = False 15 | 16 | try: 17 | from .version import version as __version__ 18 | except ImportError: 19 | __version__ = '' 20 | try: 21 | from .version import githash as __githash__ 22 | except ImportError: 23 | __githash__ = '' 24 | 25 | 26 | # set up the test command 27 | def _get_test_runner(): 28 | import os 29 | from astropy.tests.helper import TestRunner 30 | return TestRunner(os.path.dirname(__file__)) 31 | 32 | 33 | def test(package=None, test_path=None, args=None, plugins=None, 34 | verbose=False, pastebin=None, remote_data=False, pep8=False, 35 | pdb=False, coverage=False, open_files=False, **kwargs): 36 | """ 37 | Run the tests using `py.test `__. A proper set 38 | of arguments is constructed and passed to `pytest.main`_. 39 | 40 | .. _py.test: http://pytest.org/latest/ 41 | .. _pytest.main: http://pytest.org/latest/builtin.html#pytest.main 42 | 43 | Parameters 44 | ---------- 45 | package : str, optional 46 | The name of a specific package to test, e.g. 'io.fits' or 'utils'. 47 | If nothing is specified all default tests are run. 48 | 49 | test_path : str, optional 50 | Specify location to test by path. May be a single file or 51 | directory. Must be specified absolutely or relative to the 52 | calling directory. 53 | 54 | args : str, optional 55 | Additional arguments to be passed to pytest.main_ in the ``args`` 56 | keyword argument. 57 | 58 | plugins : list, optional 59 | Plugins to be passed to pytest.main_ in the ``plugins`` keyword 60 | argument. 61 | 62 | verbose : bool, optional 63 | Convenience option to turn on verbose output from py.test_. Passing 64 | True is the same as specifying ``'-v'`` in ``args``. 65 | 66 | pastebin : {'failed','all',None}, optional 67 | Convenience option for turning on py.test_ pastebin output. Set to 68 | ``'failed'`` to upload info for failed tests, or ``'all'`` to upload 69 | info for all tests. 70 | 71 | remote_data : bool, optional 72 | Controls whether to run tests marked with @remote_data. These 73 | tests use online data and are not run by default. Set to True to 74 | run these tests. 75 | 76 | pep8 : bool, optional 77 | Turn on PEP8 checking via the `pytest-pep8 plugin 78 | `_ and disable normal 79 | tests. Same as specifying ``'--pep8 -k pep8'`` in ``args``. 80 | 81 | pdb : bool, optional 82 | Turn on PDB post-mortem analysis for failing tests. 
Same as 83 | specifying ``'--pdb'`` in ``args``. 84 | 85 | coverage : bool, optional 86 | Generate a test coverage report. The result will be placed in 87 | the directory htmlcov. 88 | 89 | open_files : bool, optional 90 | Fail when any tests leave files open. Off by default, because 91 | this adds extra run time to the test suite. Requires the 92 | `psutil `_ package. 93 | 94 | parallel : int, optional 95 | When provided, run the tests in parallel on the specified 96 | number of CPUs. If parallel is negative, it will use the all 97 | the cores on the machine. Requires the 98 | `pytest-xdist `_ plugin 99 | installed. Only available when using Astropy 0.3 or later. 100 | 101 | kwargs 102 | Any additional keywords passed into this function will be passed 103 | on to the astropy test runner. This allows use of test-related 104 | functionality implemented in later versions of astropy without 105 | explicitly updating the package template. 106 | 107 | """ 108 | test_runner = _get_test_runner() 109 | return test_runner.run_tests( 110 | package=package, test_path=test_path, args=args, 111 | plugins=plugins, verbose=verbose, pastebin=pastebin, 112 | remote_data=remote_data, pep8=pep8, pdb=pdb, 113 | coverage=coverage, open_files=open_files, **kwargs) 114 | 115 | if not _ASTROPY_SETUP_: # noqa 116 | import os 117 | from warnings import warn 118 | from astropy.config.configuration import ( 119 | update_default_config, 120 | ConfigurationDefaultMissingError, 121 | ConfigurationDefaultMissingWarning) 122 | 123 | # add these here so we only need to cleanup the namespace at the end 124 | config_dir = None 125 | 126 | if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False): 127 | config_dir = os.path.dirname(__file__) 128 | config_template = os.path.join(config_dir, __package__ + ".cfg") 129 | if os.path.isfile(config_template): 130 | try: 131 | update_default_config( 132 | __package__, config_dir, version=__version__) 133 | except TypeError as orig_error: 134 | try: 135 | update_default_config(__package__, config_dir) 136 | except ConfigurationDefaultMissingError as e: 137 | wmsg = (e.args[0] + 138 | " Cannot install default profile. If you are " 139 | "importing from source, this is expected.") 140 | warn(ConfigurationDefaultMissingWarning(wmsg)) 141 | del e 142 | except Exception: 143 | raise orig_error 144 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # We set the language to c because python isn't supported on the MacOS X nodes 2 | # on Travis. However, the language ends up being irrelevant anyway, since we 3 | # install Python ourselves using conda. 4 | language: c 5 | 6 | os: 7 | - linux 8 | 9 | # Setting sudo to false opts in to Travis-CI container-based builds. 10 | sudo: false 11 | 12 | # The apt packages below are needed for sphinx builds. A full list of packages 13 | # that can be included can be found here: 14 | # 15 | # https://github.com/travis-ci/apt-package-whitelist/blob/master/ubuntu-precise 16 | 17 | addons: 18 | apt: 19 | packages: 20 | - graphviz 21 | - texlive-latex-extra 22 | - dvipng 23 | 24 | env: 25 | global: 26 | 27 | # The following versions are the 'default' for tests, unless 28 | # overridden underneath. They are defined here in order to save having 29 | # to repeat them for all configurations. 
30 | - PYTHON_VERSION=3.6 31 | - NUMPY_VERSION=stable 32 | - ASTROPY_VERSION=stable 33 | - MAIN_CMD='python setup.py' 34 | - SETUP_CMD='test' 35 | - PIP_DEPENDENCIES='' 36 | - EVENT_TYPE='pull_request push' 37 | 38 | 39 | # List other runtime dependencies for the package that are available as 40 | # conda packages here. 41 | - CONDA_DEPENDENCIES='' 42 | 43 | # List other runtime dependencies for the package that are available as 44 | # pip packages here. 45 | # - PIP_DEPENDENCIES='' 46 | 47 | # Conda packages for affiliated packages are hosted in channel 48 | # "astropy" while builds for astropy LTS with recent numpy versions 49 | # are in astropy-ci-extras. If your package uses either of these, 50 | # add the channels to CONDA_CHANNELS along with any other channels 51 | # you want to use. 52 | - CONDA_CHANNELS='astropy-ci-extras astropy' 53 | 54 | # If there are matplotlib or other GUI tests, uncomment the following 55 | # line to use the X virtual framebuffer. 56 | # - SETUP_XVFB=True 57 | 58 | matrix: 59 | # Make sure that egg_info works without dependencies 60 | - PYTHON_VERSION=2.7 SETUP_CMD='egg_info' 61 | - PYTHON_VERSION=3.4 SETUP_CMD='egg_info' 62 | - PYTHON_VERSION=3.5 SETUP_CMD='egg_info' 63 | - PYTHON_VERSION=3.6 SETUP_CMD='egg_info' 64 | 65 | matrix: 66 | 67 | # Don't wait for allowed failures 68 | fast_finish: true 69 | 70 | include: 71 | # Try MacOS X 72 | - os: osx 73 | env: SETUP_CMD='test' 74 | 75 | # Do a coverage test. 76 | - os: linux 77 | env: SETUP_CMD='test --coverage' 78 | 79 | # Check for sphinx doc build warnings - we do this first because it 80 | # may run for a long time 81 | - os: linux 82 | env: SETUP_CMD='build_docs -w' 83 | 84 | # Now try Astropy dev and LTS vesions with the latest 3.x and 2.7. 85 | - os: linux 86 | env: PYTHON_VERSION=2.7 ASTROPY_VERSION=development 87 | EVENT_TYPE='pull_request push cron' 88 | - os: linux 89 | env: ASTROPY_VERSION=development 90 | EVENT_TYPE='pull_request push cron' 91 | - os: linux 92 | env: PYTHON_VERSION=2.7 ASTROPY_VERSION=lts 93 | - os: linux 94 | env: ASTROPY_VERSION=lts 95 | 96 | # Try all python versions and Numpy versions. Since we can assume that 97 | # the Numpy developers have taken care of testing Numpy with different 98 | # versions of Python, we can vary Python and Numpy versions at the same 99 | # time. 100 | 101 | - os: linux 102 | env: PYTHON_VERSION=2.7 NUMPY_VERSION=1.9 103 | - os: linux 104 | env: PYTHON_VERSION=3.4 NUMPY_VERSION=1.10 105 | - os: linux 106 | env: PYTHON_VERSION=3.5 NUMPY_VERSION=1.11 107 | 108 | # Try numpy pre-release 109 | - os: linux 110 | env: NUMPY_VERSION=prerelease 111 | EVENT_TYPE='pull_request push cron' 112 | 113 | # Do a PEP8 test with pycodestyle 114 | - os: linux 115 | env: MAIN_CMD='pycodestyle caltest --count' SETUP_CMD='' 116 | 117 | allow_failures: 118 | # Do a PEP8 test with pycodestyle 119 | # (allow to fail unless your code completely compliant) 120 | - os: linux 121 | env: MAIN_CMD='pycodestyle caltest --count' SETUP_CMD='' 122 | 123 | install: 124 | 125 | # We now use the ci-helpers package to set up our testing environment. 126 | # This is done by using Miniconda and then using conda and pip to install 127 | # dependencies. Which dependencies are installed using conda and pip is 128 | # determined by the CONDA_DEPENDENCIES and PIP_DEPENDENCIES variables, 129 | # which should be space-delimited lists of package names. 
See the README 130 | # in https://github.com/astropy/ci-helpers for information about the full 131 | # list of environment variables that can be used to customize your 132 | # environment. In some cases, ci-helpers may not offer enough flexibility 133 | # in how to install a package, in which case you can have additional 134 | # commands in the install: section below. 135 | 136 | - git clone git://github.com/astropy/ci-helpers.git 137 | - source ci-helpers/travis/setup_conda.sh 138 | 139 | # As described above, using ci-helpers, you should be able to set up an 140 | # environment with dependencies installed using conda and pip, but in some 141 | # cases this may not provide enough flexibility in how to install a 142 | # specific dependency (and it will not be able to install non-Python 143 | # dependencies). Therefore, you can also include commands below (as 144 | # well as at the start of the install section or in the before_install 145 | # section if they are needed before setting up conda) to install any 146 | # other dependencies. 147 | 148 | script: 149 | - $MAIN_CMD $SETUP_CMD 150 | 151 | after_success: 152 | # If coveralls.io is set up for this package, uncomment the line below. 153 | # The coveragerc file may be customized as needed for your package. 154 | # - if [[ $SETUP_CMD == *coverage* ]]; then coveralls --rcfile='caltest/tests/coveragerc'; fi 155 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Licensed under a 3-clause BSD style license - see LICENSE.rst 3 | # 4 | # Astropy documentation build configuration file. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this file. 9 | # 10 | # All configuration values have a default. Some values are defined in 11 | # the global Astropy configuration which is loaded here before anything else. 12 | # See astropy.sphinx.conf for which values are set there. 13 | 14 | # If extensions (or modules to document with autodoc) are in another directory, 15 | # add these directories to sys.path here. If the directory is relative to the 16 | # documentation root, use os.path.abspath to make it absolute, like shown here. 17 | # sys.path.insert(0, os.path.abspath('..')) 18 | # IMPORTANT: the above commented section was generated by sphinx-quickstart, but 19 | # is *NOT* appropriate for astropy or Astropy affiliated packages. It is left 20 | # commented out with this explanation to make it clear why this should not be 21 | # done. If the sys.path entry above is added, when the astropy.sphinx.conf 22 | # import occurs, it will import the *source* version of astropy instead of the 23 | # version installed (if invoked as "make html" or directly with sphinx), or the 24 | # version in the build directory (if "python setup.py build_sphinx" is used). 25 | # Thus, any C-extensions that are needed to build the documentation will *not* 26 | # be accessible, and the documentation will not build correctly. 27 | 28 | import datetime 29 | import os 30 | import sys 31 | 32 | try: 33 | import astropy_helpers 34 | except ImportError: 35 | # Building from inside the docs/ directory? 
36 | if os.path.basename(os.getcwd()) == 'docs': 37 | a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers')) 38 | if os.path.isdir(a_h_path): 39 | sys.path.insert(1, a_h_path) 40 | 41 | # Load all of the global Astropy configuration 42 | from astropy_helpers.sphinx.conf import * 43 | 44 | # Get configuration information from setup.cfg 45 | try: 46 | from ConfigParser import ConfigParser 47 | except ImportError: 48 | from configparser import ConfigParser 49 | conf = ConfigParser() 50 | 51 | conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')]) 52 | setup_cfg = dict(conf.items('metadata')) 53 | 54 | # -- General configuration ---------------------------------------------------- 55 | 56 | # By default, highlight as Python 3. 57 | highlight_language = 'python3' 58 | 59 | # If your documentation needs a minimal Sphinx version, state it here. 60 | #needs_sphinx = '1.2' 61 | 62 | # To perform a Sphinx version check that needs to be more specific than 63 | # major.minor, call `check_sphinx_version("x.y.z")` here. 64 | # check_sphinx_version("1.2.1") 65 | 66 | # List of patterns, relative to source directory, that match files and 67 | # directories to ignore when looking for source files. 68 | exclude_patterns.append('_templates') 69 | 70 | # This is added to the end of RST files - a good place to put substitutions to 71 | # be used globally. 72 | rst_epilog += """ 73 | """ 74 | 75 | # -- Project information ------------------------------------------------------ 76 | 77 | # This does not *have* to match the package name, but typically does 78 | project = setup_cfg['package_name'] 79 | author = setup_cfg['author'] 80 | copyright = '{0}, {1}'.format( 81 | datetime.datetime.now().year, setup_cfg['author']) 82 | 83 | # The version info for the project you're documenting, acts as replacement for 84 | # |version| and |release|, also used in various other places throughout the 85 | # built documents. 86 | 87 | __import__(setup_cfg['package_name']) 88 | package = sys.modules[setup_cfg['package_name']] 89 | 90 | # The short X.Y version. 91 | version = package.__version__.split('-', 1)[0] 92 | # The full version, including alpha/beta/rc tags. 93 | release = package.__version__ 94 | 95 | 96 | # -- Options for HTML output -------------------------------------------------- 97 | 98 | # A NOTE ON HTML THEMES 99 | # The global astropy configuration uses a custom theme, 'bootstrap-astropy', 100 | # which is installed along with astropy. A different theme can be used or 101 | # the options for this theme can be modified by overriding some of the 102 | # variables set in the global configuration. The variables set in the 103 | # global configuration are listed below, commented out. 104 | import stsci_sphinx_theme 105 | 106 | 107 | # Add any paths that contain custom themes here, relative to this directory. 108 | # To use a different custom theme, add the directory containing the theme. 109 | html_theme_path = [stsci_sphinx_theme.get_html_theme_path()] 110 | 111 | # The theme to use for HTML and HTML Help pages. See the documentation for 112 | # a list of builtin themes. To override the custom theme, set this to the 113 | # name of a builtin theme or the name of a custom theme in html_theme_path. 114 | html_theme = "stsci_sphinx_theme" 115 | 116 | # Please update these texts to match the name of your package. 
117 | html_theme_options = { 118 | 'logotext1': 'Calibration Pipeline Testing Tool', # white, semi-bold 119 | 'logotext2': '', # orange, light 120 | 'logotext3': '' # white, light 121 | } 122 | 123 | 124 | 125 | # Custom sidebar templates, maps document names to template names. 126 | #html_sidebars = {} 127 | 128 | # The name of an image file (relative to this directory) to place at the top 129 | # of the sidebar. 130 | #html_logo = '' 131 | 132 | # The name of an image file (within the static path) to use as favicon of the 133 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 134 | # pixels large. 135 | #html_favicon = '' 136 | 137 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 138 | # using the given strftime format. 139 | #html_last_updated_fmt = '' 140 | 141 | # The name for this set of Sphinx documents. If None, it defaults to 142 | # " v documentation". 143 | html_title = '{0} v{1}'.format(project, release) 144 | 145 | # Output file base name for HTML help builder. 146 | htmlhelp_basename = project + 'doc' 147 | 148 | 149 | # -- Options for LaTeX output ------------------------------------------------- 150 | 151 | # Grouping the document tree into LaTeX files. List of tuples 152 | # (source start file, target name, title, author, documentclass [howto/manual]). 153 | latex_documents = [('index', project + '.tex', project + u' Documentation', 154 | author, 'manual')] 155 | 156 | 157 | # -- Options for manual page output ------------------------------------------- 158 | 159 | # One entry per manual page. List of tuples 160 | # (source start file, name, description, authors, manual section). 161 | man_pages = [('index', project.lower(), project + u' Documentation', 162 | [author], 1)] 163 | 164 | 165 | # -- Options for the edit_on_github extension --------------------------------- 166 | 167 | if eval(setup_cfg.get('edit_on_github')): 168 | extensions += ['astropy_helpers.sphinx.ext.edit_on_github'] 169 | 170 | versionmod = __import__(setup_cfg['package_name'] + '.version') 171 | edit_on_github_project = setup_cfg['github_project'] 172 | if versionmod.version.release: 173 | edit_on_github_branch = "v" + versionmod.version.version 174 | else: 175 | edit_on_github_branch = "master" 176 | 177 | edit_on_github_source_root = "" 178 | edit_on_github_doc_root = "docs" 179 | 180 | # -- Resolving issue number to links in changelog ----------------------------- 181 | github_issues_url = 'https://github.com/{0}/issues/'.format(setup_cfg['github_project']) 182 | 183 | todo_include_todos = True -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_persistence.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | 3 | ''' 4 | Test the persistence step of the pipeline. Written during 5 | testing of build 7.1 6 | 7 | Validation Part 1: 8 | Check that trapsfilled file is generated correctly 9 | Check that it’s (the step?) used correctly 10 | 11 | 12 | SSB documentation: 13 | Based on a model, this step computes the number of traps 14 | that are expected to have captured or released a charge 15 | during an exposure. The released charge is proportional 16 | to the persistence signal, and this will be subtracted 17 | (group by group) from the science data. 
An image of the 18 | number of filled traps at the end of the exposure will 19 | be written as an output file, in order to be used as input 20 | for correcting the persistence of a subsequent exposure. 21 | 22 | Input 23 | The input science file is a RampModel. 24 | 25 | A trapsfilled file (TrapsFilledModel) may optionally be 26 | passed as input as well. This normally would be specified 27 | unless the previous exposure with the current detector was 28 | taken more than several hours previously, that is, so long 29 | ago that persistence from that exposure could be ignored. 30 | 31 | Output 32 | The output science file is a RampModel, a persistence-corrected 33 | copy of the input data. 34 | 35 | A second output file will be written, with suffix “_trapsfilled”. 36 | This is a TrapsFilledModel, the number of filled traps at each 37 | pixel at the end of the exposure. This takes into account the 38 | capture of charge by traps due to the current science exposure, 39 | as well as the release of charge from traps shown in the input 40 | trapsfilled file, if one was specified. 41 | 42 | If the user specified save_persistence=True, a third output file 43 | will be written, with suffix “_output_pers”. This is a RampModel 44 | matching the output science file, but this gives the persistence 45 | that was subtracted from each group in each integration. 46 | 47 | input file -> run persistence step -> output hdu and file -> 48 | run tests against...what truth? 49 | 50 | ''' 51 | 52 | import pytest 53 | import os 54 | import numpy as np 55 | from astropy.io import fits 56 | from jwst import datamodels 57 | from jwst.persistence import PersistenceStep 58 | #from jwst.datamodels import TrapsFilledModel 59 | from jwst.datamodels import dqflags 60 | 61 | 62 | #@pytest.fixture(scope="module") 63 | #def input_hdul(request, config): 64 | # if config.has_option("persistence", "input_file"): 65 | # curdir = os.getcwd() 66 | # config_dir = os.path.dirname(request.config.getoption("--config_file")) 67 | # os.chdir(config_dir) 68 | # hdul = fits.open(config.get("persistence", "input_file")) 69 | # os.chdir(curdir) 70 | # return hdul 71 | # else: 72 | # pytest.skip("needs persistence input_file") 73 | 74 | 75 | @pytest.fixture(scope="module") 76 | def out_hdul(fits_input): 77 | fname = '_persist.'.join(fits_input[0].header['filename'].split('.')) 78 | yield fits.open(fname) 79 | #os.remove(fname) 80 | 81 | 82 | @pytest.fixture(scope="module") 83 | def trapsfilled_hdul(trapsfilled): 84 | yield fits.open(trapsfilled) 85 | 86 | 87 | @pytest.fixture(scope='module') 88 | def traps_hdul(fits_input): 89 | fname = '_trapsfilled.'.join(fits_input[0].header['filename'].split('.')) 90 | yield fits.open(fname) 91 | #os.remove(fname) 92 | 93 | 94 | @pytest.fixture(scope='module') 95 | def pers_hdul(fits_input): 96 | fname = '_output_pers.'.join(fits_input[0].header['filename'].split('.')) 97 | try: 98 | hdul = fits.open(fname) 99 | except: 100 | print("output_pers file not present") 101 | hdul = None 102 | yield hdul 103 | #os.remove(fname) 104 | 105 | 106 | @pytest.fixture(scope="module") 107 | def persat_hdul(out_hdul): 108 | CRDS = '/grp/crds/cache/references/jwst/' 109 | ref_file = output_hdul[0].header['R_PERSAT'] 110 | if 'crds://' in ref_file: 111 | ref_file = ref_file.replace('crds://',CRDS) 112 | return fits.open(ref_file) 113 | 114 | 115 | @pytest.fixture(scope="module") 116 | def trpden_hdul(output_hdul): 117 | CRDS = '/grp/crds/cache/references/jwst/' 118 | ref_file = output_hdul[0].header['R_TRPDEN'] 119 | if 
'crds://' in ref_file: 120 | ref_file = ref_file.replace('crds://',CRDS) 121 | return fits.open(ref_file) 122 | 123 | 124 | @pytest.fixture(scope="module") 125 | def trppar_hdul(output_hdul): 126 | CRDS = '/grp/crds/cache/references/jwst/' 127 | ref_file = output_hdul[0].header['R_TRPPAR'] 128 | if 'crds://' in ref_file: 129 | ref_file = ref_file.replace('crds://',CRDS) 130 | return fits.open(ref_file) 131 | 132 | 133 | def test_run_persist_step(fits_input,trapsfilled): 134 | outfile = fits_input[0].header['FILENAME'].replace('.fits','_persist.fits') 135 | if trapsfilled.lower() in ["none",""]: 136 | PersistenceStep.call(fits_input,save_persistence=True,\ 137 | output_file=outfile,save_results=True) 138 | else: 139 | PersistenceStep.call(fits_input,save_persistence=True,\ 140 | output_file=outfile,save_results=True,\ 141 | input_trapsfilled=trapsfilled) 142 | 143 | 144 | def test_persistence_trapsfilled_shape(fits_input,traps_hdul,trapsfilled): 145 | '''Check to see that the OUPUT trapsfilled 146 | file was created.''' 147 | x,y = fits_input['SCI'].data.shape[-2:] 148 | print("Science data shape (x,y) = ({},{})".format(x,y)) 149 | assert traps_hdul['SCI'].data.shape == (3,y,x) 150 | 151 | 152 | def test_persistence_output_pers_shape(fits_input,pers_hdul,trapsfilled): 153 | '''Check that the optional output file 154 | "_output_pers.fits" was created if 155 | the save_persistence option in the persistence 156 | step was set to True. (Assume this test will 157 | only be called in instances when save_persistence 158 | is True''' 159 | opshape = pers_hdul['SCI'].data.shape 160 | print("Output_pers data shape: {}".format(opshape)) 161 | assert opshape == fits_input['SCI'].data.shape 162 | 163 | 164 | def test_persistence_subtracted_signal(fits_input, out_hdul, pers_hdul, trapsfilled): 165 | '''Check that the signal values contained in the 166 | output_pers file are indeed subtracted from the original 167 | input file.''' 168 | assert np.allclose(out_hdul[1].data,fits_input[1].data - pers_hdul[1].data) 169 | 170 | 171 | def test_persistence_dq_flagged_pix(out_hdul,pers_hdul,trapsfilled,flagthresh=40): 172 | '''Pixels that have more persistence signal than flag_pers_cutoff 173 | should be flagged in the DQ array of the output file. The default 174 | value of flag_pers_cutoff is 40 DN''' 175 | # Check only integration #1 176 | pdata = pers_hdul['SCI'].data[0,:,:,:] 177 | # Keep only the maximum persistence value 178 | # for each pixel 179 | if ((flagthresh is not None) and (flagthresh > 0)): 180 | collapsed = np.max(pdata,axis=0) 181 | flagged = collapsed > flagthresh 182 | dq_data = out_hdul['PIXELDQ'].data 183 | print(("{} pixels have persistence values above the threshold " 184 | "of {}.".format(np.sum(flagged),flagthresh))) 185 | assert np.all(dq_data[flagged] & dqflags.pixel['DO_NOT_USE'] > 0) 186 | else: 187 | print("Flagthresh is {}".format(flagthresh)) 188 | assert True == True 189 | 190 | 191 | #def test_calculated_persistence(fits_input,pers_hdul,persat_hdul,trapsfilled): 192 | # '''Using Regan's paper (JWST-STScI-005689), manually 193 | # calculate the expected amount of persistence in the input 194 | # file, and compare to the pipeline's calculations 195 | # 196 | # Not sure how to do this without simply copying the 197 | # code in the jwst cal pipeline step. 
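# As a rough, hypothetical sketch only (the variable definitions follow
# below, and this is not the pipeline's actual implementation), the
# capture term could be evaluated as:
#
#     import numpy as np
#     def captured_traps(s, T, trape, trapi, tau):
#         """Traps captured during an integration of length T (Regan model)."""
#         return s * (T * (trape + trapi) + trape * tau * (np.exp(-T / tau) - 1.0))
#
# The persistence signal subtracted group-by-group from a later exposure
# would then be proportional to the gradual release of this trapped charge.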
198 | # ''' 199 | 200 | #data = fits_input['SCI'].data[0,:,:,:] 201 | #f21 = data[1,:,:] = data[0,:,:] 202 | #fw_frac = f21 / persat_hdul['SCI'] 203 | 204 | #trapc - total number of traps captured 205 | #trape - num of traps that fit exponential decay (?) 206 | #tau - time constant of capture 207 | #trapi - num traps instantaneously captured 208 | #S - rate of change in the depletion region in units of fraction of full 209 | # well per unit time 210 | #T - integration time 211 | 212 | #trapc = s*(T*(trape + trapi) + trape*tau*(exp(-T/tau) - 1)) 213 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_refpix.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from jwst.refpix import RefPixStep 4 | from jwst import datamodels 5 | from astropy.io import fits 6 | from scipy.stats import sigmaclip 7 | import matplotlib.pyplot as plt 8 | import os 9 | from ..utils import translate_dq, extract_subarray 10 | 11 | @pytest.fixture(scope='module') 12 | def fits_output(fits_input): 13 | fname = fits_input[0].header['filename'].replace('.fits', 14 | '_refpixstep.fits') 15 | yield fits.open(fname) 16 | # delete the output FITS file after this module is finished 17 | os.remove(fname) 18 | 19 | def test_refpix_step(fits_input): 20 | """Make sure the DQInitStep runs without error.""" 21 | fname = fits_input[0].header['filename'].replace('.fits', 22 | '_refpixstep.fits') 23 | 24 | RefPixStep.call(datamodels.open(fits_input), output_file=fname, 25 | save_results=True) 26 | 27 | def test_refpix_correction(fits_input, fits_output, use_side_ref_pixels=True, 28 | odd_even_columns=True, side_smoothing_length=11, 29 | side_gain=1.0): 30 | """ 31 | Reference pixel correction implementation by Julia Duval. 32 | 33 | Parameters 34 | ---------- 35 | fits_input: astropy.io.fits.HDUList 36 | Input data for RefPixStep 37 | fits_output: astropy.io.fits.HDUList 38 | Output data after RefPixStep is run. 
39 |     use_side_ref_pixels: bool, optional
40 |         Whether the RefPixStep was run with `use_side_ref_pixels`
41 |         (default is True, same as `jwst.refpix.RefPixStep`)
42 |     odd_even_columns: bool
43 |         Whether the RefPixStep was run with `odd_even_columns`
44 |         (default is True, same as `jwst.refpix.RefPixStep`)
45 |     side_smoothing_length: int
46 |         `side_smoothing_length` used by `RefPixStep`
47 |         (default is 11, same as `jwst.refpix.RefPixStep`)
48 |     side_gain: float
49 |         `side_gain` used by `RefPixStep`
50 |         (default is 1.0, same as `jwst.refpix.RefPixStep`)
51 |     """
52 | 
53 |     delta_amp = 512
54 |     if not odd_even_columns:
55 |         xs = [np.arange(delta_amp, dtype='uint32')]
56 |     else:
57 |         xs = [np.arange(delta_amp//2, dtype='uint32')*2, np.arange(delta_amp//2, dtype='uint32')*2 + 1]
58 | 
59 |     data_in = fits_input
60 |     data_out = fits_output
61 | 
62 |     subarray = data_in[0].header['SUBARRAY']
63 | 
64 |     if subarray == 'FULL':
65 |         sci_in = data_in[1].data
66 |         sci_out = data_out[1].data
67 | 
68 |         gdq_in = data_in[3].data
69 |         pdq_in = data_in[2].data
70 | 
71 | 
72 |         sci_shape = sci_in.shape
73 |         niter = sci_shape[0]
74 |         ngroup = sci_shape[1]
75 | 
76 |         if data_in[0].header['INSTRUME'] != 'NIRISS':
77 |             pytest.skip('This test has only been implemented for NIRISS')
78 | 
79 |         # change to detector coordinates
80 |         # TODO make coordinate changes for other instruments
81 |         fsci_in = np.swapaxes(sci_in, 2, 3)[:, :, ::-1, ::-1]
82 |         fsci_out = np.swapaxes(sci_out, 2, 3)[:, :, ::-1, ::-1]
83 | 
84 |         fgdq_in = np.swapaxes(gdq_in, 2, 3)[:, :, ::-1, ::-1]
85 |         fpdq_in = np.swapaxes(pdq_in, 0, 1)[::-1, ::-1]
86 | 
87 | 
88 |         fpdq_rep = np.array([fpdq_in, ] * ngroup)
89 | 
90 |         fsci_shape = fsci_in.shape
91 | 
92 |         fexp_sci_out = np.zeros(fsci_shape, dtype='float32')
93 | 
94 |         if odd_even_columns:
95 |             top_means = np.zeros([niter, ngroup, 4, 2], dtype='float32')
96 |             bottom_means = np.zeros([niter, ngroup, 4, 2], dtype='float32')
97 |             means = np.zeros([niter, ngroup, 4, 2], dtype='float32')
98 | 
99 |         else:
100 |             top_means = np.zeros([niter, ngroup, 4, 1], dtype='float32')
101 |             bottom_means = np.zeros([niter, ngroup, 4, 1], dtype='float32')
102 |             means = np.zeros([niter, ngroup, 4, 1], dtype='float32')
103 | 
104 |         for it in range(niter):
105 |             subg_fsci_in = fsci_in[it, :, :, :]
106 |             subm_fsci_in = subg_fsci_in.copy()
107 | 
108 |             for ig in range(ngroup):
109 |                 for ia in range(4):
110 | 
111 |                     zerox = ia * delta_amp
112 | 
113 |                     for io in range(len(xs)):
114 |                         sub_pdq_top = fpdq_rep[ig, 2044:2048, zerox + xs[io]]
115 |                         sub_gdq_top = fgdq_in[it, ig, 2044:2048, zerox + xs[io]]
116 |                         sub_sci_top = subg_fsci_in[ig, 2044:2048,
117 |                                       zerox + xs[io]]
118 | 
119 |                         sub_pdq_bottom = fpdq_rep[ig, 0:4, zerox + xs[io]]
120 |                         sub_gdq_bottom = fgdq_in[it, ig, 0:4, zerox + xs[io]]
121 |                         sub_sci_bottom = subg_fsci_in[ig, 0:4, zerox + xs[io]]
122 | 
123 |                         valid_top = np.where(
124 |                             (sub_pdq_top != 1) & (sub_gdq_top != 1))
125 |                         valid_bottom = np.where(
126 |                             (sub_pdq_bottom != 1) & (sub_gdq_bottom != 1))
127 | 
128 |                         top_means[it, ig, ia, io] = np.mean(
129 |                             sigmaclip(sub_sci_top[valid_top], low=3.0,
130 |                                       high=3.0).clipped)
131 |                         bottom_means[it, ig, ia, io] = np.mean(
132 |                             sigmaclip(sub_sci_bottom[valid_bottom], low=3.0,
133 |                                       high=3.0).clipped)
134 |                         means[it, ig, ia, io] = (top_means[it, ig, ia, io] +
135 |                                                  bottom_means[
136 |                                                      it, ig, ia, io]) / 2.
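                        # Remove the combined top/bottom reference level from
                        # every row of this amplifier's odd/even columns: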
137 | 138 | subm_fsci_in[ig, :, zerox + xs[io]] = subg_fsci_in[ig, 139 | :, 140 | zerox + xs[io]] - \ 141 | means[ 142 | it, ig, ia, io] 143 | 144 | if use_side_ref_pixels == True: 145 | sub_pdq_left = fpdq_rep[ig, :, 0:4] 146 | sub_sci_left = subm_fsci_in[ig, :, 0:4] 147 | sub_pdq_right = fpdq_rep[ig, :, 2044:2048] 148 | sub_sci_right = subm_fsci_in[ig, :, 2044:2048] 149 | 150 | left_means = median_refpix(sub_sci_left, 151 | side_smoothing_length, 152 | sub_pdq_left) 153 | right_means = median_refpix(sub_sci_right, 154 | side_smoothing_length, 155 | sub_pdq_right) 156 | 157 | lr_means = 0.5 * (left_means + right_means) * side_gain 158 | 159 | mrep = np.array([lr_means, ] * 2048) 160 | mrep = np.swapaxes(mrep, 0, 1) 161 | 162 | subm_fsci_in[ig, :, :] = subm_fsci_in[ig, :, :] - mrep 163 | 164 | fexp_sci_out[it, :, :, :] = subm_fsci_in 165 | 166 | exp_sci_out = np.swapaxes(fexp_sci_out, 2, 3)[:, :, ::-1, ::-1] 167 | 168 | dif = sci_out - exp_sci_out 169 | mins = np.min(dif) 170 | maxs = np.max(dif) 171 | good = np.where(sci_out != 0.) 172 | if len(good[0]) > 0: 173 | fmins = np.min(dif[good] / sci_out[good]) 174 | fmaxs = np.max(dif[good] / sci_out[good]) 175 | print('mins maxs frac_min frac_max') 176 | print('{} {} {} {}'.format(mins, maxs, fmins, fmaxs)) 177 | 178 | assert np.allclose(sci_out, exp_sci_out) 179 | 180 | def median_refpix(array, smoothing_length, pixel_dq): 181 | # This code computes the median reference pixel value in teh "use_side_ref_pix = True" option of the reference pixel correction. 182 | # array must be 2048x4 183 | 184 | 185 | # first pad array with reflect 186 | 187 | parray = np.pad(array, 188 | ((smoothing_length // 2, smoothing_length // 2), (0, 0)), 189 | 'reflect') 190 | ppdq = np.pad(pixel_dq, 191 | ((smoothing_length // 2, smoothing_length // 2), (0, 0)), 192 | 'constant', constant_values=0) 193 | xmin = smoothing_length 194 | xmax = 2048 + smoothing_length - 1 195 | 196 | med_arr = np.zeros(2048) 197 | 198 | for i in range(2048): 199 | sub_array = parray[ 200 | i + smoothing_length // 2 - smoothing_length // 2:i + smoothing_length // 2 + smoothing_length // 2 + 1, 201 | :] 202 | sub_pdq = ppdq[ 203 | i + smoothing_length // 2 - smoothing_length // 2:i + smoothing_length // 2 + smoothing_length // 2 + 1, 204 | :] 205 | good = np.where(sub_pdq != 1) 206 | med_arr[i] = np.median(sub_array[good]) 207 | 208 | return (med_arr) -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_jump.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import numpy as np 3 | from jwst.jump import JumpStep 4 | from jwst import datamodels 5 | from astropy.io import fits, ascii 6 | from astropy.table import Table 7 | import os 8 | import matplotlib.pyplot as plt 9 | 10 | @pytest.fixture(scope='module') 11 | def fits_output(fits_input): 12 | fname = fits_input[0].header['filename'].replace('.fits', '_jumpstep.fits') 13 | yield fits.open(fname) 14 | # delete the output FITS file after this module is finished 15 | os.remove(fname) 16 | 17 | def test_jump_step(fits_input): 18 | """Make sure the JumpStep runs without error.""" 19 | fname = fits_input[0].header['filename'].replace('.fits', '_jumpstep.fits') 20 | JumpStep.call(datamodels.open(fits_input), output_file=fname, 21 | save_results=True) 22 | 23 | 24 | def test_jump_performance(fits_input, fits_output, rejection_threshold=4.0, 25 | do_yintercept=False, yint_threshold=1.0): 26 | """ 27 | Check how well the Jump step 
detects injected cosmic rays.
28 | 
29 |     Parameters
30 |     ----------
31 |     fits_input
32 |     fits_output
33 |     rejection_threshold
34 |     do_yintercept
35 |     yint_threshold
36 | 
37 |     """
38 |     # Test function for the jump correction. fits_output is the file written by the pipeline after the jump correction; the optional parameters are the same as for the jump correction step.
39 | 
40 |     rej_key = str(rejection_threshold).strip()
41 |     yint_key = str(yint_threshold).strip()
42 |     if do_yintercept:
43 |         doyint_key = '_doyint'
44 |     else:
45 |         doyint_key = ''
46 | 
47 |     # First read in the output file, get the GROUPDQ extension, and transform it from DMS to detector coordinates, because the images of CRs from the library and the CR position list from the simulations are in detector coordinates.
48 |     # file = output_file
49 | 
50 |     data = fits_output
51 |     gdq_out = data['GROUPDQ'].data
52 |     gdq_out = np.swapaxes(gdq_out, 2, 3)[:, :, ::-1, ::-1]
53 | 
54 |     # Read in the list of CRs injected in the simulation
55 | 
56 |     cr_list = ascii.read("/grp/jwst/ins/calibration-pipeline-testing-tool/test_data/jump/NIRISS/cosmicrays.list")
57 |     # CR coordinates, amplitude, set and sample (from the library) from which they are extracted.
58 |     cr_x = np.array(cr_list['x'])
59 |     cr_y = np.array(cr_list['y'])
60 |     cr_z = np.array(cr_list['z'])
61 |     cr_ampl = np.array(cr_list['amplitude'])
62 |     cr_set = np.array(cr_list['set'])
63 |     cr_sample = np.array(cr_list['sample'])
64 |     nset = np.max(cr_set) + 1
65 |     ncr = cr_x.shape[0]
66 | 
67 |     # binary decomp of GDQ array, useful to tell whether or not the jump flag is set.
68 |     # binary, powers = binary_decomp(gdq_out,
69 |     #                               gdq=True)  # array that is size ( it, ig, ny, ny, 32)
70 | 
71 |     # just get the jump bit (JUMP_DET is bit 2 of GROUPDQ)
72 |     # print("BIN DECOMP ", binary.shape, powers.shape)
73 |     binary_jump = (gdq_out & (1 << 2)).astype(bool).astype('uint32')[0, :, :, :]
74 |     print("TRUNC BIN ", binary_jump.shape)
75 | 
76 |     # Create a zero array that will contain where the jump flags should be
77 |     exp_binary_jump = np.zeros(binary_jump.shape, dtype='uint32')
78 | 
79 |     # create the array that will contain the amplitudes of the jumps
80 |     jump_values = np.zeros(binary_jump.shape, dtype='float32')
81 | 
82 |     # Below:
83 |     # check that all CRs in the list are detected in GDQ
84 |     # check that all CRs detected in GDQ are in the list
85 | 
86 |     print(exp_binary_jump.shape)
87 | 
88 |     # Loop over the sets because the CR images from the library take a long time to open. So open each set, then loop over all CRs in that set.
89 | 
90 |     for iset in range(nset):
91 |         cr_image = fits.open(
92 |             "/grp/jwst/nis1/JRD/rampsim/CR_lib/CRs_MCD5.5_SUNMIN_0" + str(
93 |                 iset).strip() + "_IPC.fits")
94 |         cr_image = cr_image[1].data
95 | 
96 |         # identify all the CRs in the list that were from this set
97 |         index = np.where(cr_set == iset)
98 |         index = index[0]
99 |         ncr_set = index.shape[0]
100 | 
101 |         # for each cosmic ray in that set
102 |         for i in range(ncr_set):
103 |             ind = index[i]
104 |             xi = cr_x[ind] - 1
105 |             yi = cr_y[ind] - 1
106 |             zi = cr_z[ind] - 1
107 |             si = cr_sample[ind] - 1
108 | 
109 |             # check size of affected area. A CR has a halo around it because of IPC correction. Identify pixels around the main CR that are not zero.
110 | 
111 |             this_image = cr_image[si, :, :]
112 |             shapim = this_image.shape
113 |             nx = shapim[1]
114 |             ny = shapim[0]
115 |             affected = np.where(this_image != 0)
116 |             xaffected = affected[1] - nx // 2 + xi
117 |             yaffected = affected[0] - ny // 2 + yi
118 | 
119 |             valid = np.where(
120 |                 (xaffected >= 0) & (xaffected < 2048) & (yaffected >= 0) & (
121 |                     yaffected < 2048))
122 | 
123 |             # all the non-zero values in the CR image should be flagged by the pipeline, ideally, so the "expected" GDQ should have 1 in those pixels.
124 |             exp_binary_jump[zi, yaffected[valid], xaffected[valid]] = 1
125 | 
126 |             # Record the amplitudes of the CRs
127 |             jump_values[zi, yaffected[valid], xaffected[valid]] = \
128 |                 jump_values[zi, yaffected[valid], xaffected[valid]] + \
129 |                 this_image[affected[0][valid], affected[1][valid]] / 1.61
130 | 
131 |     # compute the difference between the jump flags derived from the known CR coordinates injected in the simulations and the flags found by the pipeline
132 |     dif = exp_binary_jump - binary_jump
133 | 
134 |     min_dif = np.min(dif)
135 |     max_dif = np.max(dif)
136 | 
137 |     # dif_file = file.replace(".fits", "jump_dif_" + type + ".fits")
138 |     # dif_hdu = fits.PrimaryHDU(dif)
139 |     # hdulist = fits.HDUList([dif_hdu])
140 |     # hdulist.writeto(dif_file, clobber=True)
141 |     #
142 |     # gdq_file = file.replace(".fits", "jump_bgdq_" + type + ".fits")
143 |     # exp_hdu = fits.PrimaryHDU(exp_binary_jump)
144 |     # gdq_hdu = fits.ImageHDU(binary_jump)
145 |     # hdulist = fits.HDUList([exp_hdu, gdq_hdu])
146 |     # hdulist.writeto(gdq_file, overwrite=True)
147 |     #
148 |     # val_file = file.replace(".fits", "jump_val_" + type + ".fits")
149 |     # val_hdu = fits.PrimaryHDU(jump_values)
150 |     # hdulist = fits.HDUList([val_hdu])
151 |     # hdulist.writeto(val_file, clobber=True)
152 | 
153 |     results = Table({'min': [min_dif], 'max': [max_dif]},
154 |                     names=['min', 'max'])
155 |     print(results)
156 |     # ascii.write(results,
157 |     #             'one_ramp_' + type + '/jump_testing_results_' + type + "_" + rej_key + "_" + yint_key + doyint_key + '.dat')
158 | 
159 | 
160 | 
161 |     # Look into the jumps not detected or false positives
162 |     bad = np.where(dif != 0)
163 |     zbad = bad[0]
164 |     ybad = bad[1]
165 |     xbad = bad[2]
166 | 
167 |     nbad = len(xbad)
168 | 
169 |     closest_x = np.zeros(nbad, dtype='uint32')
170 |     closest_y = np.zeros(nbad, dtype='uint32')
171 |     closest_z = np.zeros(nbad, dtype='uint32')
172 |     closest_set = np.zeros(nbad, dtype='uint32')
173 |     closest_sample = np.zeros(nbad, dtype='uint32')
174 |     closest_ampl = np.zeros(nbad, dtype='uint32')
175 |     jump_val = np.zeros(nbad, dtype='float32')
176 |     gdq_val = np.zeros(nbad, dtype='uint32')
177 | 
178 |     for i in range(nbad):
179 |         # match a discrepant pixel/group to the closest CR in the simulations' list; record the coordinates, set, sample, and amplitude.
180 | dist = np.sqrt((cr_x - xbad[i]) ** 2 + (cr_y - ybad[i]) ** 2 + ( 181 | cr_z - zbad[i]) ** 2) 182 | closest = np.argmin(dist) 183 | # closest_xyz = np.unravel_index(closest) 184 | closest_z[i] = cr_z[closest] 185 | closest_y[i] = cr_y[closest] 186 | closest_x[i] = cr_x[closest] 187 | closest_set[i] = cr_set[closest] 188 | closest_sample[i] = cr_sample[closest] 189 | closest_ampl[i] = cr_ampl[closest] 190 | jump_val[i] = jump_values[zbad[i], ybad[i], xbad[i]] 191 | gdq_val[i] = gdq_out[0, zbad[i], ybad[i], xbad[i]] 192 | 193 | t = Table( 194 | {'x': xbad, 'cl_x': closest_x, 'y': ybad, 'cl_y': closest_y, 'z': zbad, 195 | 'cl_z': closest_z, 'jump_val': jump_val, 'gdq_val': gdq_val, 196 | 'cl_set': closest_set, 'cl_sample': closest_sample, 197 | 'cl_ampl': closest_ampl}, 198 | names=['x', 'cl_x', 'y', 'cl_y', 'z', 'cl_z', 'jump_val', 'gdq_val', 199 | 'cl_set', 'cl_sample', 'cl_ampl']) 200 | print(t) 201 | z = t['z'].data 202 | x = t['x'].data 203 | y = t['y'].data 204 | val = t['jump_val'].data 205 | gdq_val = t['gdq_val'].data 206 | 207 | allcr = np.where(exp_binary_jump != 0) 208 | badcr = np.where((exp_binary_jump != 0) & (binary_jump == 0)) 209 | 210 | print("Number of CR pixels", len(allcr[0])) 211 | print("Number of undetected CR pixels", len(badcr[0])) 212 | 213 | 214 | allbad = np.where((x > 3) & (x < 2044) & (y > 3) & (y < 2044)) 215 | badsat = np.where((x > 3) & (x < 2044) & (y > 3) & (y < 2044) & (gdq_val == 2)) 216 | bad_nonsat = np.where((x > 3) & (x < 2044) & (y > 3) & (y < 2044) & (gdq_val == 0)) 217 | 218 | verybad = np.where((x > 3) & (x < 2044) & (y > 3) & (y < 2044) & (val > 100) & (gdq_val == 0)) 219 | verybad_lowz = np.where((x > 3) & (x < 2044) & (y > 3) & (y < 2044) & (val > 100) & (gdq_val == 0) & (z < 10)) 220 | 221 | low_ampl = np.where((x > 3) & (x < 2044) & (y > 3) & (y < 2044) & (val < 100) & (gdq_val == 0)) 222 | 223 | print("Number of bad undetected CRs", len(verybad[0])) 224 | print("Number of bad undetected CRs at low z", len(verybad_lowz[0])) 225 | 226 | print("Number of low undetected CRs", len(low_ampl[0])) 227 | 228 | base = fits_input[0].header['FILENAME'].split('.')[0] 229 | 230 | plot_fname = 'test_jump_performance_'+base+'.png' 231 | plt.clf() 232 | plt.cla() 233 | plt.close() 234 | plt.plot(z[bad_nonsat[0]], val[bad_nonsat[0]], 'k.') 235 | plt.yscale('log') 236 | plt.title("Amplitude of undetected jumps") 237 | plt.ylabel("Amplitude") 238 | plt.xlabel("Group Number") 239 | plt.savefig(plot_fname) 240 | -------------------------------------------------------------------------------- /caltest/test_caldetector1/test_assign_wcs.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import sys, os 3 | import numpy as np 4 | from jwst.assign_wcs import AssignWcsStep 5 | from jwst import datamodels 6 | from asdf import AsdfFile 7 | from astropy.coordinates import SkyCoord 8 | from astropy.io import fits 9 | 10 | 11 | @pytest.fixture(scope='module') 12 | def in_datamodel(fits_input): 13 | '''open input file as a datamodel''' 14 | yield datamodels.open(fits_input[0].header['FILENAME']) 15 | 16 | 17 | @pytest.fixture(scope='module') 18 | def out_datamodel(in_datamodel): 19 | '''open output file''' 20 | outname = '_assign_wcs.'.join(in_datamodel.meta.filename.split('.')) 21 | yield datamodels.open(outname) 22 | os.remove(outname) 23 | 24 | 25 | @pytest.fixture(scope='module') 26 | def dist_reffile(out_datamodel): 27 | '''determine the distortion reference file used''' 28 | origname = 
out_datamodel.meta.ref_file.distortion.name
29 |     if 'crds://' in origname:
30 |         origname = origname.replace('crds://','/grp/crds/cache/references/jwst/')
31 |     yield origname
32 | 
33 | 
34 | def test_assign_wcs_step(in_datamodel):
35 |     '''Run the assign_wcs pipeline step'''
36 |     outfile = in_datamodel.meta.filename.replace('.fits','_assign_wcs.fits')
37 |     AssignWcsStep.call(in_datamodel, output_file=outfile, save_results=True)
38 | 
39 | 
40 | def test_asdf_extension(fits_input):
41 |     '''Make sure there is a new ASDF extension in the output file'''
42 |     print("Test to be sure the ASDF extension was added to the file")
43 |     outname = '_assign_wcs.'.join(fits_input[0].header['FILENAME'].split('.'))
44 |     h = fits.open(outname)
45 |     exts = []
46 |     for i in range(1,len(h)):
47 |         exts.append(h[i].header['EXTNAME'])
48 |     assert 'ASDF' in exts
49 | 
50 | 
51 | def test_inserted_wcs_model(out_datamodel):
52 |     '''Check RA, Dec values from the output file WCS model
53 |     are correct for the reference location. Check the pixel scale
54 |     implied by the WCS model.'''
55 | 
56 |     expectedra = out_datamodel.meta.wcsinfo.ra_ref
57 |     expecteddec = out_datamodel.meta.wcsinfo.dec_ref
58 | 
59 |     refloc_x = out_datamodel.meta.wcsinfo.crpix1 - 1
60 |     refloc_y = out_datamodel.meta.wcsinfo.crpix2 - 1
61 | 
62 |     # pixel scale in arcsec per pixel
63 |     xpixscale = out_datamodel.meta.wcsinfo.cdelt1 * 3600.
64 |     ypixscale = out_datamodel.meta.wcsinfo.cdelt2 * 3600.
65 |     #distscale = np.sqrt(xpixscale**2 + ypixscale**2)
66 | 
67 |     # Tolerance to use when checking if values are close enough
68 |     atol_pix = 0.01
69 |     xatol_arcsec = atol_pix * xpixscale
70 |     yatol_arcsec = atol_pix * ypixscale
71 | 
72 |     # check pixel scale by reporting RA,Dec of adjacent pixels
73 |     exp_type = out_datamodel.meta.exposure.type
74 |     if 'GRISM' not in exp_type: # imaging data
75 |         refra,refdec = out_datamodel.meta.wcs(refloc_x,refloc_y)
76 |         adra,addec = out_datamodel.meta.wcs(refloc_x+1,refloc_y)
77 |         ad2ra,ad2dec = out_datamodel.meta.wcs(refloc_x,refloc_y+1)
78 | 
79 |         pos1 = SkyCoord(ra=adra, dec=addec, unit='deg')
80 |         refpos_skycoords = SkyCoord(ra=refra, dec=refdec, unit='deg')
81 |         dist = refpos_skycoords.separation(pos1).value * 3600
82 | 
83 |         pos2 = SkyCoord(ra=ad2ra, dec=ad2dec, unit='deg')
84 |         dist2 = refpos_skycoords.separation(pos2).value * 3600
85 | 
86 |         print('Expected RA, Dec at reference location (deg):',expectedra,expecteddec)
87 |         print('Ref loc. RA, Dec (deg):',refra,refdec)
88 |         print('RA, Dec of adjacent pixel (deg):',adra,addec)
89 |         print('Delta Distance, horiz. adjacent pix: (arcsec)',dist)
90 |         print('Delta Distance, vertical adjacent pix: (arcsec)',dist2)
91 | 
92 |         assert np.allclose(expectedra,refra,atol=xatol_arcsec,rtol=0.)
93 |         assert np.allclose(expecteddec,refdec,atol=yatol_arcsec,rtol=0)
94 |         assert np.allclose(dist,xpixscale,atol=yatol_arcsec,rtol=0)
95 |         assert np.allclose(dist2,ypixscale,atol=yatol_arcsec,rtol=0)
96 | 
97 | 
98 |     else: # WFSS data
99 |         pupil = out_datamodel.meta.instrument.pupil
100 |         filter = out_datamodel.meta.instrument.filter #to support NIRISS grism
101 |         if pupil[0] == 'G':
102 |             pass
103 |         elif filter[0] == 'G':
104 |             pupil = filter
105 |         grisms_r = ['GRISMR','GR150R']
106 |         grisms_c = ['GRISMC','GR150C']
107 |         adjpix = (None,None)
108 |         adjwave = (None,None)
109 |         if pupil in grisms_r:
110 |             adjpix = (0,1) # delta x, delta y
111 |             adjwave = (1,0)
112 |         elif pupil in grisms_c:
113 |             adjpix = (1,0)
114 |             adjwave = (0,1)
115 |         else:
116 |             print("Grism value of {} is not recognized. 
Skipping testing.".format(pupil)) 117 | 118 | if adjwave[0] is not None: 119 | # RA, Dec, Wavelength, Order for the reference location 120 | # pixel in the dispersed image 121 | refra,refdec,refwave,reforder = out_datamodel.meta.wcs(refloc_x 122 | ,refloc_y 123 | ,refloc_x 124 | ,refloc_y,1) 125 | 126 | # Move one pixel in the dispersion direction in the dispersed 127 | # image (refloc in direct image). Should have same RA, Dec as 128 | # reference location in direct image. 129 | adwavera,adwavedec,adwavewave,adwaveord = out_datamodel.meta.wcs(refloc_x+adjwave[0] 130 | ,refloc_y+adjwave[1] 131 | ,refloc_x,refloc_y,1) 132 | 133 | # Move one pixel perp to dispersion direction in dispersed 134 | # image (refloc in direct image). Should have same RA, Dec as 135 | # reference location in direct image. Should also have same 136 | # wavelength as reference location. 137 | adpixra,adpixdec,adpixwave,adpixord = out_datamodel.meta.wcs(refloc_x+adjpix[0], 138 | refloc_y+adjpix[1], 139 | refloc_x,refloc_y,1) 140 | 141 | # Move one pixel in the dispersion direction in both the dispersed 142 | # AND direct image. The resulting wavelength should be the same as 143 | # refwave case above. 144 | dra,ddec,dwave,dord = out_datamodel.meta.wcs(refloc_x+adjwave[0],refloc_y+adjwave[1], 145 | refloc_x+adjwave[0],refloc_y+adjwave[1],1) 146 | 147 | # Calculate distances to compare with the stated pixel scale 148 | pos1 = SkyCoord(ra=dra, dec=ddec, unit='deg') 149 | refpos_skycoords = SkyCoord(ra=refra, dec=refdec, unit='deg') 150 | ddist = refpos_skycoords.separation(pos1).value * 3600 151 | 152 | print('Expected RA, Dec at reference location (deg):',expectedra,expecteddec) 153 | print('Ref loc. RA, Dec (deg):',refra,refdec) 154 | print('RA, Dec of adjacent pixel (deg):',dra,ddec) 155 | print('Delta Distance, horiz. adjacent pix: (arcsec)',ddist) 156 | 157 | assert np.allclose(expectedra,refra,atol=xatol_arcsec,rtol=0.) 158 | assert np.allclose(expecteddec,refdec,atol=yatol_arcsec,rtol=0) 159 | assert np.allclose(refra,adwavera,atol=1e-8,rtol=0.) 160 | assert np.allclose(refdec,adwavedec,atol=1e-8,rtol=0.) 161 | assert np.allclose(refra,adpixra,atol=1e-8,rtol=0.) 162 | assert np.allclose(refdec,adpixdec,atol=1e-8,rtol=0.) 163 | assert np.allclose(refwave,adpixwave,atol=1e-10,rtol=0.) 164 | assert np.allclose(refwave,dwave,atol=1e-10,rtol=0.) 165 | assert np.allclose(ddist,xpixscale,atol=0.0005,rtol=0.) 
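
# Illustrative helper (a sketch only; not used by the tests above): for a WCS
# callable that maps pixel coordinates to (RA, Dec) in degrees, the on-sky
# separation between adjacent pixels should match the CDELT-derived pixel
# scale checked in test_inserted_wcs_model.
def _approx_pixel_scale(wcs, x, y, axis='x'):
    """Estimate the pixel scale (arcsec/pixel) at (x, y) along one axis."""
    dx, dy = (1, 0) if axis == 'x' else (0, 1)
    ra0, dec0 = wcs(x, y)
    ra1, dec1 = wcs(x + dx, y + dy)
    p0 = SkyCoord(ra=ra0, dec=dec0, unit='deg')
    p1 = SkyCoord(ra=ra1, dec=dec1, unit='deg')
    return p0.separation(p1).arcsec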
166 | 167 | 168 | def test_wcs_vs_reffile(out_datamodel,dist_reffile): 169 | '''Test WCS model in the distortion reference file 170 | matches that in the output file''' 171 | 172 | print("Distortion reference file used: {}".format(dist_reffile)) 173 | distortion = AsdfFile.open(dist_reffile).tree['model'] 174 | reverse = distortion.inverse 175 | 176 | refx = out_datamodel.meta.wcsinfo.crpix1 - 1 177 | refy = out_datamodel.meta.wcsinfo.crpix2 - 1 178 | 179 | shape = out_datamodel.data.shape 180 | if len(shape) == 2: 181 | ylen,xlen = shape 182 | elif len(shape) == 3: 183 | nint,ylen,xlen = shape 184 | 185 | inx = [refx] 186 | iny = [refy] 187 | fractions = [0.1,0.3,0.5,0.7,0.9] 188 | for f in fractions: 189 | xup = int(xlen * f) 190 | yup = int(ylen * f) 191 | ydown = ylen - yup 192 | inx.append(xup) 193 | iny.append(yup) 194 | inx.append(xup) 195 | iny.append(ydown) 196 | 197 | # Convert x,y positions to RA, Dec and back, to see if 198 | # you recover the same x,y 199 | reffilex = np.array([]) 200 | reffiley = np.array([]) 201 | modx = np.array([]) 202 | mody = np.array([]) 203 | exp_type = out_datamodel.meta.exposure.type 204 | if 'GRISM' not in exp_type: # imaging data 205 | for x,y in zip(inx,iny): 206 | refra,refdec = distortion(x,y) 207 | refnewx,refnewy = reverse(refra,refdec) 208 | reffilex = np.append(reffilex,refnewx) 209 | reffiley = np.append(reffiley,refnewy) 210 | fra, fdec = out_datamodel.meta.wcs(x,y) 211 | fnewx, fnewy = out_datamodel.meta.wcs.backward_transform(fra,fdec) 212 | modx = np.append(modx,fnewx) 213 | mody = np.append(mody,fnewy) 214 | 215 | print("Input x coords:") 216 | print(inx) 217 | print("X coords from reference file WCS model:") 218 | print(reffilex) 219 | print("X coords from output file WCS model:") 220 | print(modx) 221 | 222 | print("Input y coords:") 223 | print(iny) 224 | print("Y coords from reference file WCS model:") 225 | print(reffiley) 226 | print("Y coords from output file WCS model:") 227 | print(mody) 228 | assert np.allclose(reffilex,modx,atol=1e-8,rtol=0.) 229 | assert np.allclose(reffiley,mody,atol=1e-8,rtol=0.) 230 | 231 | else: 232 | print("This test not implemented for GRISM data.") 233 | 234 | # The GRISM comparison below would need more work before 235 | # it could be used. 
The creation of the WCS model to go into 236 | # the output file is more complicated than in the imaging case 237 | # and it's not clear how to create this WCS without simply 238 | # copying the code in the JWST pipeline 239 | #else: # GRISM data 240 | # refdirectx = np.array([]) 241 | # refdirecty = np.array([]) 242 | # reffilewave = np.array([]) 243 | # reforder = np.array([]) 244 | # moddirectx = np.array([]) 245 | # moddirecty = np.array([]) 246 | # modwave = np.array([]) 247 | # modorder = np.array([]) 248 | # 249 | # pupil = out_datamodel.meta.instrument.pupil 250 | # filter = out_datamodel.meta.instrument.filter #to support NIRISS grism 251 | # if pupil[0] == 'G': 252 | # pass 253 | # elif filter[0] == 'G': 254 | # pupil = filter 255 | # grisms_r = ['GRISMR','G150R'] 256 | # grisms_c = ['GRISMC','G150C'] 257 | # 258 | # for x,y in zip(inx,iny): 259 | # #refra,refdec,refwave,reford = distortion(x,y,refx,refy,1) 260 | # #refnewx,refnewy,refdirx,refdiry,refneworder = reverse(refra,refdec,refwave,reford) 261 | # #reffilex = np.append(reffilex,refnewx) 262 | # #reffiley = np.append(reffiley,refnewy) 263 | # #refdirectx = np.append(refdirectx,refdirx) 264 | # #refdirecty = np.append(refdirecty,refdiry) 265 | # #reffilewave = np.append(reffilewave,refwave) 266 | # #reforder = np.append(reforder,refneworder) 267 | # refra,refdec = distortion(x,y) 268 | # refnewx,refnewy = reverse(refra,refdec) 269 | # if pupil in grisms_r: 270 | # reffilex = np.append(reffilex,refnewx) 271 | # reffiley = np.append(reffiley,refy) 272 | # elif pupil in grisms_c: 273 | # reffilex = np.append(reffilex,refx) 274 | # reffiley = np.append(reffiley,refnewy) 275 | # 276 | # fra, fdec, fwave, forder = out_datamodel.meta.wcs(x,y,refx,refy,1) 277 | # fdispx, fdispy, fdirectx, fdirecty, ford = out_datamodel.meta.wcs.backward_transform(fra,fdec,fwave,forder) 278 | # 279 | # print(x,fdispx,y,fdispy) 280 | # 281 | # 282 | # if pupil in grisms_r: 283 | # modx = np.append(modx,fdispx) 284 | # mody = np.append(mody,refy) 285 | # moddirectx = np.append(moddirectx,fdirectx) 286 | # moddirecty = np.append(moddirecty,fdirecty) 287 | # modwave = np.append(modwave,fwave) 288 | # modorder = np.append(modorder,ford) 289 | # elif pupil in grisms_c: 290 | # modx = np.append(modx,refx) 291 | # mody = np.append(mody,fdispy) 292 | # moddirectx = np.append(moddirectx,fdirectx) 293 | # moddirecty = np.append(moddirecty,fdirecty) 294 | # modwave = np.append(modwave,fwave) 295 | # modorder = np.append(modorder,ford) 296 | # 297 | # 298 | # print('reffilex, modx, reffiley, mody') 299 | # print(reffilex) 300 | # print(modx) 301 | # print(reffiley) 302 | # print(mody) 303 | # 304 | # assert np.allclose(reffilex,modx,atol=1e-8,rtol=0.) 305 | # assert np.allclose(reffiley,mody,atol=1e-8,rtol=0.) 306 | # #assert np.allclose(refdirectx,moddirectx,atol=1e-8,rtol=0.) 307 | # #assert np.allclose(refdirecty,moddirecty,atol=1e-8,rtol=0.) 308 | # #assert np.allclose(reffilewave,modwave,atol=1e-8,rtol=0.) 309 | # #assert np.allclose(reforder,modorder,atol=1e-8,rtol=0.) 310 | 311 | -------------------------------------------------------------------------------- /ez_setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Setuptools bootstrapping installer. 5 | 6 | Maintained at https://github.com/pypa/setuptools/tree/bootstrap. 7 | 8 | Run this script to install or upgrade setuptools. 9 | 10 | This method is DEPRECATED. 
Check https://github.com/pypa/setuptools/issues/581 for more details. 11 | """ 12 | 13 | import os 14 | import shutil 15 | import sys 16 | import tempfile 17 | import zipfile 18 | import optparse 19 | import subprocess 20 | import platform 21 | import textwrap 22 | import contextlib 23 | 24 | from distutils import log 25 | 26 | try: 27 | from urllib.request import urlopen 28 | except ImportError: 29 | from urllib2 import urlopen 30 | 31 | try: 32 | from site import USER_SITE 33 | except ImportError: 34 | USER_SITE = None 35 | 36 | # 33.1.1 is the last version that supports setuptools self upgrade/installation. 37 | DEFAULT_VERSION = "33.1.1" 38 | DEFAULT_URL = "https://pypi.io/packages/source/s/setuptools/" 39 | DEFAULT_SAVE_DIR = os.curdir 40 | DEFAULT_DEPRECATION_MESSAGE = "ez_setup.py is deprecated and when using it setuptools will be pinned to {0} since it's the last version that supports setuptools self upgrade/installation, check https://github.com/pypa/setuptools/issues/581 for more info; use pip to install setuptools" 41 | 42 | MEANINGFUL_INVALID_ZIP_ERR_MSG = 'Maybe {0} is corrupted, delete it and try again.' 43 | 44 | log.warn(DEFAULT_DEPRECATION_MESSAGE.format(DEFAULT_VERSION)) 45 | 46 | 47 | def _python_cmd(*args): 48 | """ 49 | Execute a command. 50 | 51 | Return True if the command succeeded. 52 | """ 53 | args = (sys.executable,) + args 54 | return subprocess.call(args) == 0 55 | 56 | 57 | def _install(archive_filename, install_args=()): 58 | """Install Setuptools.""" 59 | with archive_context(archive_filename): 60 | # installing 61 | log.warn('Installing Setuptools') 62 | if not _python_cmd('setup.py', 'install', *install_args): 63 | log.warn('Something went wrong during the installation.') 64 | log.warn('See the error message above.') 65 | # exitcode will be 2 66 | return 2 67 | 68 | 69 | def _build_egg(egg, archive_filename, to_dir): 70 | """Build Setuptools egg.""" 71 | with archive_context(archive_filename): 72 | # building an egg 73 | log.warn('Building a Setuptools egg in %s', to_dir) 74 | _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) 75 | # returning the result 76 | log.warn(egg) 77 | if not os.path.exists(egg): 78 | raise IOError('Could not build the egg.') 79 | 80 | 81 | class ContextualZipFile(zipfile.ZipFile): 82 | 83 | """Supplement ZipFile class to support context manager for Python 2.6.""" 84 | 85 | def __enter__(self): 86 | return self 87 | 88 | def __exit__(self, type, value, traceback): 89 | self.close() 90 | 91 | def __new__(cls, *args, **kwargs): 92 | """Construct a ZipFile or ContextualZipFile as appropriate.""" 93 | if hasattr(zipfile.ZipFile, '__exit__'): 94 | return zipfile.ZipFile(*args, **kwargs) 95 | return super(ContextualZipFile, cls).__new__(cls) 96 | 97 | 98 | @contextlib.contextmanager 99 | def archive_context(filename): 100 | """ 101 | Unzip filename to a temporary directory, set to the cwd. 102 | 103 | The unzipped target is cleaned up after. 
104 | """ 105 | tmpdir = tempfile.mkdtemp() 106 | log.warn('Extracting in %s', tmpdir) 107 | old_wd = os.getcwd() 108 | try: 109 | os.chdir(tmpdir) 110 | try: 111 | with ContextualZipFile(filename) as archive: 112 | archive.extractall() 113 | except zipfile.BadZipfile as err: 114 | if not err.args: 115 | err.args = ('', ) 116 | err.args = err.args + ( 117 | MEANINGFUL_INVALID_ZIP_ERR_MSG.format(filename), 118 | ) 119 | raise 120 | 121 | # going in the directory 122 | subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) 123 | os.chdir(subdir) 124 | log.warn('Now working in %s', subdir) 125 | yield 126 | 127 | finally: 128 | os.chdir(old_wd) 129 | shutil.rmtree(tmpdir) 130 | 131 | 132 | def _do_download(version, download_base, to_dir, download_delay): 133 | """Download Setuptools.""" 134 | py_desig = 'py{sys.version_info[0]}.{sys.version_info[1]}'.format(sys=sys) 135 | tp = 'setuptools-{version}-{py_desig}.egg' 136 | egg = os.path.join(to_dir, tp.format(**locals())) 137 | if not os.path.exists(egg): 138 | archive = download_setuptools(version, download_base, 139 | to_dir, download_delay) 140 | _build_egg(egg, archive, to_dir) 141 | sys.path.insert(0, egg) 142 | 143 | # Remove previously-imported pkg_resources if present (see 144 | # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details). 145 | if 'pkg_resources' in sys.modules: 146 | _unload_pkg_resources() 147 | 148 | import setuptools 149 | setuptools.bootstrap_install_from = egg 150 | 151 | 152 | def use_setuptools( 153 | version=DEFAULT_VERSION, download_base=DEFAULT_URL, 154 | to_dir=DEFAULT_SAVE_DIR, download_delay=15): 155 | """ 156 | Ensure that a setuptools version is installed. 157 | 158 | Return None. Raise SystemExit if the requested version 159 | or later cannot be installed. 160 | """ 161 | to_dir = os.path.abspath(to_dir) 162 | 163 | # prior to importing, capture the module state for 164 | # representative modules. 165 | rep_modules = 'pkg_resources', 'setuptools' 166 | imported = set(sys.modules).intersection(rep_modules) 167 | 168 | try: 169 | import pkg_resources 170 | pkg_resources.require("setuptools>=" + version) 171 | # a suitable version is already installed 172 | return 173 | except ImportError: 174 | # pkg_resources not available; setuptools is not installed; download 175 | pass 176 | except pkg_resources.DistributionNotFound: 177 | # no version of setuptools was found; allow download 178 | pass 179 | except pkg_resources.VersionConflict as VC_err: 180 | if imported: 181 | _conflict_bail(VC_err, version) 182 | 183 | # otherwise, unload pkg_resources to allow the downloaded version to 184 | # take precedence. 185 | del pkg_resources 186 | _unload_pkg_resources() 187 | 188 | return _do_download(version, download_base, to_dir, download_delay) 189 | 190 | 191 | def _conflict_bail(VC_err, version): 192 | """ 193 | Setuptools was imported prior to invocation, so it is 194 | unsafe to unload it. Bail out. 195 | """ 196 | conflict_tmpl = textwrap.dedent(""" 197 | The required version of setuptools (>={version}) is not available, 198 | and can't be installed while this script is running. Please 199 | install a more recent version first, using 200 | 'easy_install -U setuptools'. 
201 | 202 | (Currently using {VC_err.args[0]!r}) 203 | """) 204 | msg = conflict_tmpl.format(**locals()) 205 | sys.stderr.write(msg) 206 | sys.exit(2) 207 | 208 | 209 | def _unload_pkg_resources(): 210 | sys.meta_path = [ 211 | importer 212 | for importer in sys.meta_path 213 | if importer.__class__.__module__ != 'pkg_resources.extern' 214 | ] 215 | del_modules = [ 216 | name for name in sys.modules 217 | if name.startswith('pkg_resources') 218 | ] 219 | for mod_name in del_modules: 220 | del sys.modules[mod_name] 221 | 222 | 223 | def _clean_check(cmd, target): 224 | """ 225 | Run the command to download target. 226 | 227 | If the command fails, clean up before re-raising the error. 228 | """ 229 | try: 230 | subprocess.check_call(cmd) 231 | except subprocess.CalledProcessError: 232 | if os.access(target, os.F_OK): 233 | os.unlink(target) 234 | raise 235 | 236 | 237 | def download_file_powershell(url, target): 238 | """ 239 | Download the file at url to target using Powershell. 240 | 241 | Powershell will validate trust. 242 | Raise an exception if the command cannot complete. 243 | """ 244 | target = os.path.abspath(target) 245 | ps_cmd = ( 246 | "[System.Net.WebRequest]::DefaultWebProxy.Credentials = " 247 | "[System.Net.CredentialCache]::DefaultCredentials; " 248 | '(new-object System.Net.WebClient).DownloadFile("%(url)s", "%(target)s")' 249 | % locals() 250 | ) 251 | cmd = [ 252 | 'powershell', 253 | '-Command', 254 | ps_cmd, 255 | ] 256 | _clean_check(cmd, target) 257 | 258 | 259 | def has_powershell(): 260 | """Determine if Powershell is available.""" 261 | if platform.system() != 'Windows': 262 | return False 263 | cmd = ['powershell', '-Command', 'echo test'] 264 | with open(os.path.devnull, 'wb') as devnull: 265 | try: 266 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 267 | except Exception: 268 | return False 269 | return True 270 | download_file_powershell.viable = has_powershell 271 | 272 | 273 | def download_file_curl(url, target): 274 | cmd = ['curl', url, '--location', '--silent', '--output', target] 275 | _clean_check(cmd, target) 276 | 277 | 278 | def has_curl(): 279 | cmd = ['curl', '--version'] 280 | with open(os.path.devnull, 'wb') as devnull: 281 | try: 282 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 283 | except Exception: 284 | return False 285 | return True 286 | download_file_curl.viable = has_curl 287 | 288 | 289 | def download_file_wget(url, target): 290 | cmd = ['wget', url, '--quiet', '--output-document', target] 291 | _clean_check(cmd, target) 292 | 293 | 294 | def has_wget(): 295 | cmd = ['wget', '--version'] 296 | with open(os.path.devnull, 'wb') as devnull: 297 | try: 298 | subprocess.check_call(cmd, stdout=devnull, stderr=devnull) 299 | except Exception: 300 | return False 301 | return True 302 | download_file_wget.viable = has_wget 303 | 304 | 305 | def download_file_insecure(url, target): 306 | """Use Python to download the file, without connection authentication.""" 307 | src = urlopen(url) 308 | try: 309 | # Read all the data in one block. 310 | data = src.read() 311 | finally: 312 | src.close() 313 | 314 | # Write all the data in one block to avoid creating a partial file. 
315 | with open(target, "wb") as dst: 316 | dst.write(data) 317 | download_file_insecure.viable = lambda: True 318 | 319 | 320 | def get_best_downloader(): 321 | downloaders = ( 322 | download_file_powershell, 323 | download_file_curl, 324 | download_file_wget, 325 | download_file_insecure, 326 | ) 327 | viable_downloaders = (dl for dl in downloaders if dl.viable()) 328 | return next(viable_downloaders, None) 329 | 330 | 331 | def download_setuptools( 332 | version=DEFAULT_VERSION, download_base=DEFAULT_URL, 333 | to_dir=DEFAULT_SAVE_DIR, delay=15, 334 | downloader_factory=get_best_downloader): 335 | """ 336 | Download setuptools from a specified location and return its filename. 337 | 338 | `version` should be a valid setuptools version number that is available 339 | as an sdist for download under the `download_base` URL (which should end 340 | with a '/'). `to_dir` is the directory where the egg will be downloaded. 341 | `delay` is the number of seconds to pause before an actual download 342 | attempt. 343 | 344 | ``downloader_factory`` should be a function taking no arguments and 345 | returning a function for downloading a URL to a target. 346 | """ 347 | # making sure we use the absolute path 348 | to_dir = os.path.abspath(to_dir) 349 | zip_name = "setuptools-%s.zip" % version 350 | url = download_base + zip_name 351 | saveto = os.path.join(to_dir, zip_name) 352 | if not os.path.exists(saveto): # Avoid repeated downloads 353 | log.warn("Downloading %s", url) 354 | downloader = downloader_factory() 355 | downloader(url, saveto) 356 | return os.path.realpath(saveto) 357 | 358 | 359 | def _build_install_args(options): 360 | """ 361 | Build the arguments to 'python setup.py install' on the setuptools package. 362 | 363 | Returns list of command line arguments. 
364 | """ 365 | return ['--user'] if options.user_install else [] 366 | 367 | 368 | def _parse_args(): 369 | """Parse the command line for options.""" 370 | parser = optparse.OptionParser() 371 | parser.add_option( 372 | '--user', dest='user_install', action='store_true', default=False, 373 | help='install in user site package') 374 | parser.add_option( 375 | '--download-base', dest='download_base', metavar="URL", 376 | default=DEFAULT_URL, 377 | help='alternative URL from where to download the setuptools package') 378 | parser.add_option( 379 | '--insecure', dest='downloader_factory', action='store_const', 380 | const=lambda: download_file_insecure, default=get_best_downloader, 381 | help='Use internal, non-validating downloader' 382 | ) 383 | parser.add_option( 384 | '--version', help="Specify which version to download", 385 | default=DEFAULT_VERSION, 386 | ) 387 | parser.add_option( 388 | '--to-dir', 389 | help="Directory to save (and re-use) package", 390 | default=DEFAULT_SAVE_DIR, 391 | ) 392 | options, args = parser.parse_args() 393 | # positional arguments are ignored 394 | return options 395 | 396 | 397 | def _download_args(options): 398 | """Return args for download_setuptools function from cmdline args.""" 399 | return dict( 400 | version=options.version, 401 | download_base=options.download_base, 402 | downloader_factory=options.downloader_factory, 403 | to_dir=options.to_dir, 404 | ) 405 | 406 | 407 | def main(): 408 | """Install or upgrade setuptools and EasyInstall.""" 409 | options = _parse_args() 410 | archive = download_setuptools(**_download_args(options)) 411 | return _install(archive, _build_install_args(options)) 412 | 413 | if __name__ == '__main__': 414 | sys.exit(main()) 415 | -------------------------------------------------------------------------------- /ah_bootstrap.py: -------------------------------------------------------------------------------- 1 | """ 2 | This bootstrap module contains code for ensuring that the astropy_helpers 3 | package will be importable by the time the setup.py script runs. It also 4 | includes some workarounds to ensure that a recent-enough version of setuptools 5 | is being used for the installation. 6 | 7 | This module should be the first thing imported in the setup.py of distributions 8 | that make use of the utilities in astropy_helpers. If the distribution ships 9 | with its own copy of astropy_helpers, this module will first attempt to import 10 | from the shipped copy. However, it will also check PyPI to see if there are 11 | any bug-fix releases on top of the current version that may be useful to get 12 | past platform-specific bugs that have been fixed. When running setup.py, use 13 | the ``--offline`` command-line option to disable the auto-upgrade checks. 14 | 15 | When this module is imported or otherwise executed it automatically calls a 16 | main function that attempts to read the project's setup.cfg file, which it 17 | checks for a configuration section called ``[ah_bootstrap]`` the presences of 18 | that section, and options therein, determine the next step taken: If it 19 | contains an option called ``auto_use`` with a value of ``True``, it will 20 | automatically call the main function of this module called 21 | `use_astropy_helpers` (see that function's docstring for full details). 22 | Otherwise no further action is taken (however, 23 | ``ah_bootstrap.use_astropy_helpers`` may be called manually from within the 24 | setup.py script). 
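
For example, a minimal ``setup.cfg`` section that enables the bootstrap
(a sketch; ``auto_use`` is the option described above) would be::

    [ah_bootstrap]
    auto_use = True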
25 | 26 | Additional options in the ``[ah_boostrap]`` section of setup.cfg have the same 27 | names as the arguments to `use_astropy_helpers`, and can be used to configure 28 | the bootstrap script when ``auto_use = True``. 29 | 30 | See https://github.com/astropy/astropy-helpers for more details, and for the 31 | latest version of this module. 32 | """ 33 | 34 | import contextlib 35 | import errno 36 | import imp 37 | import io 38 | import locale 39 | import os 40 | import re 41 | import subprocess as sp 42 | import sys 43 | 44 | try: 45 | from ConfigParser import ConfigParser, RawConfigParser 46 | except ImportError: 47 | from configparser import ConfigParser, RawConfigParser 48 | 49 | 50 | if sys.version_info[0] < 3: 51 | _str_types = (str, unicode) 52 | _text_type = unicode 53 | PY3 = False 54 | else: 55 | _str_types = (str, bytes) 56 | _text_type = str 57 | PY3 = True 58 | 59 | 60 | # What follows are several import statements meant to deal with install-time 61 | # issues with either missing or misbehaving pacakges (including making sure 62 | # setuptools itself is installed): 63 | 64 | 65 | # Some pre-setuptools checks to ensure that either distribute or setuptools >= 66 | # 0.7 is used (over pre-distribute setuptools) if it is available on the path; 67 | # otherwise the latest setuptools will be downloaded and bootstrapped with 68 | # ``ez_setup.py``. This used to be included in a separate file called 69 | # setuptools_bootstrap.py; but it was combined into ah_bootstrap.py 70 | try: 71 | import pkg_resources 72 | _setuptools_req = pkg_resources.Requirement.parse('setuptools>=0.7') 73 | # This may raise a DistributionNotFound in which case no version of 74 | # setuptools or distribute is properly installed 75 | _setuptools = pkg_resources.get_distribution('setuptools') 76 | if _setuptools not in _setuptools_req: 77 | # Older version of setuptools; check if we have distribute; again if 78 | # this results in DistributionNotFound we want to give up 79 | _distribute = pkg_resources.get_distribution('distribute') 80 | if _setuptools != _distribute: 81 | # It's possible on some pathological systems to have an old version 82 | # of setuptools and distribute on sys.path simultaneously; make 83 | # sure distribute is the one that's used 84 | sys.path.insert(1, _distribute.location) 85 | _distribute.activate() 86 | imp.reload(pkg_resources) 87 | except: 88 | # There are several types of exceptions that can occur here; if all else 89 | # fails bootstrap and use the bootstrapped version 90 | from ez_setup import use_setuptools 91 | use_setuptools() 92 | 93 | 94 | # typing as a dependency for 1.6.1+ Sphinx causes issues when imported after 95 | # initializing submodule with ah_boostrap.py 96 | # See discussion and references in 97 | # https://github.com/astropy/astropy-helpers/issues/302 98 | 99 | try: 100 | import typing # noqa 101 | except ImportError: 102 | pass 103 | 104 | 105 | # Note: The following import is required as a workaround to 106 | # https://github.com/astropy/astropy-helpers/issues/89; if we don't import this 107 | # module now, it will get cleaned up after `run_setup` is called, but that will 108 | # later cause the TemporaryDirectory class defined in it to stop working when 109 | # used later on by setuptools 110 | try: 111 | import setuptools.py31compat # noqa 112 | except ImportError: 113 | pass 114 | 115 | 116 | # matplotlib can cause problems if it is imported from within a call of 117 | # run_setup(), because in some circumstances it will try to write to the user's 118 | 
# home directory, resulting in a SandboxViolation. See 119 | # https://github.com/matplotlib/matplotlib/pull/4165 120 | # Making sure matplotlib, if it is available, is imported early in the setup 121 | # process can mitigate this (note importing matplotlib.pyplot has the same 122 | # issue) 123 | try: 124 | import matplotlib 125 | matplotlib.use('Agg') 126 | import matplotlib.pyplot 127 | except: 128 | # Ignore if this fails for *any* reason* 129 | pass 130 | 131 | 132 | # End compatibility imports... 133 | 134 | 135 | # In case it didn't successfully import before the ez_setup checks 136 | import pkg_resources 137 | 138 | from setuptools import Distribution 139 | from setuptools.package_index import PackageIndex 140 | from setuptools.sandbox import run_setup 141 | 142 | from distutils import log 143 | from distutils.debug import DEBUG 144 | 145 | 146 | # TODO: Maybe enable checking for a specific version of astropy_helpers? 147 | DIST_NAME = 'astropy-helpers' 148 | PACKAGE_NAME = 'astropy_helpers' 149 | 150 | # Defaults for other options 151 | DOWNLOAD_IF_NEEDED = True 152 | INDEX_URL = 'https://pypi.python.org/simple' 153 | USE_GIT = True 154 | OFFLINE = False 155 | AUTO_UPGRADE = True 156 | 157 | # A list of all the configuration options and their required types 158 | CFG_OPTIONS = [ 159 | ('auto_use', bool), ('path', str), ('download_if_needed', bool), 160 | ('index_url', str), ('use_git', bool), ('offline', bool), 161 | ('auto_upgrade', bool) 162 | ] 163 | 164 | 165 | class _Bootstrapper(object): 166 | """ 167 | Bootstrapper implementation. See ``use_astropy_helpers`` for parameter 168 | documentation. 169 | """ 170 | 171 | def __init__(self, path=None, index_url=None, use_git=None, offline=None, 172 | download_if_needed=None, auto_upgrade=None): 173 | 174 | if path is None: 175 | path = PACKAGE_NAME 176 | 177 | if not (isinstance(path, _str_types) or path is False): 178 | raise TypeError('path must be a string or False') 179 | 180 | if PY3 and not isinstance(path, _text_type): 181 | fs_encoding = sys.getfilesystemencoding() 182 | path = path.decode(fs_encoding) # path to unicode 183 | 184 | self.path = path 185 | 186 | # Set other option attributes, using defaults where necessary 187 | self.index_url = index_url if index_url is not None else INDEX_URL 188 | self.offline = offline if offline is not None else OFFLINE 189 | 190 | # If offline=True, override download and auto-upgrade 191 | if self.offline: 192 | download_if_needed = False 193 | auto_upgrade = False 194 | 195 | self.download = (download_if_needed 196 | if download_if_needed is not None 197 | else DOWNLOAD_IF_NEEDED) 198 | self.auto_upgrade = (auto_upgrade 199 | if auto_upgrade is not None else AUTO_UPGRADE) 200 | 201 | # If this is a release then the .git directory will not exist so we 202 | # should not use git. 
203 | git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git')) 204 | if use_git is None and not git_dir_exists: 205 | use_git = False 206 | 207 | self.use_git = use_git if use_git is not None else USE_GIT 208 | # Declared as False by default--later we check if astropy-helpers can be 209 | # upgraded from PyPI, but only if not using a source distribution (as in 210 | # the case of import from a git submodule) 211 | self.is_submodule = False 212 | 213 | @classmethod 214 | def main(cls, argv=None): 215 | if argv is None: 216 | argv = sys.argv 217 | 218 | config = cls.parse_config() 219 | config.update(cls.parse_command_line(argv)) 220 | 221 | auto_use = config.pop('auto_use', False) 222 | bootstrapper = cls(**config) 223 | 224 | if auto_use: 225 | # Run the bootstrapper, otherwise the setup.py is using the old 226 | # use_astropy_helpers() interface, in which case it will run the 227 | # bootstrapper manually after reconfiguring it. 228 | bootstrapper.run() 229 | 230 | return bootstrapper 231 | 232 | @classmethod 233 | def parse_config(cls): 234 | if not os.path.exists('setup.cfg'): 235 | return {} 236 | 237 | cfg = ConfigParser() 238 | 239 | try: 240 | cfg.read('setup.cfg') 241 | except Exception as e: 242 | if DEBUG: 243 | raise 244 | 245 | log.error( 246 | "Error reading setup.cfg: {0!r}\n{1} will not be " 247 | "automatically bootstrapped and package installation may fail." 248 | "\n{2}".format(e, PACKAGE_NAME, _err_help_msg)) 249 | return {} 250 | 251 | if not cfg.has_section('ah_bootstrap'): 252 | return {} 253 | 254 | config = {} 255 | 256 | for option, type_ in CFG_OPTIONS: 257 | if not cfg.has_option('ah_bootstrap', option): 258 | continue 259 | 260 | if type_ is bool: 261 | value = cfg.getboolean('ah_bootstrap', option) 262 | else: 263 | value = cfg.get('ah_bootstrap', option) 264 | 265 | config[option] = value 266 | 267 | return config 268 | 269 | @classmethod 270 | def parse_command_line(cls, argv=None): 271 | if argv is None: 272 | argv = sys.argv 273 | 274 | config = {} 275 | 276 | # For now we just pop recognized ah_bootstrap options out of the 277 | # arg list. This is imperfect; in the unlikely case that a setup.py 278 | # custom command or even custom Distribution class defines an argument 279 | # of the same name then we will break that. However there's a catch22 280 | # here that we can't just do full argument parsing right here, because 281 | # we don't yet know *how* to parse all possible command-line arguments. 
282 | if '--no-git' in argv: 283 | config['use_git'] = False 284 | argv.remove('--no-git') 285 | 286 | if '--offline' in argv: 287 | config['offline'] = True 288 | argv.remove('--offline') 289 | 290 | return config 291 | 292 | def run(self): 293 | strategies = ['local_directory', 'local_file', 'index'] 294 | dist = None 295 | 296 | # First, remove any previously imported versions of astropy_helpers; 297 | # this is necessary for nested installs where one package's installer 298 | # is installing another package via setuptools.sandbox.run_setup, as in 299 | # the case of setup_requires 300 | for key in list(sys.modules): 301 | try: 302 | if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'): 303 | del sys.modules[key] 304 | except AttributeError: 305 | # Sometimes mysterious non-string things can turn up in 306 | # sys.modules 307 | continue 308 | 309 | # Check to see if the path is a submodule 310 | self.is_submodule = self._check_submodule() 311 | 312 | for strategy in strategies: 313 | method = getattr(self, 'get_{0}_dist'.format(strategy)) 314 | dist = method() 315 | if dist is not None: 316 | break 317 | else: 318 | raise _AHBootstrapSystemExit( 319 | "No source found for the {0!r} package; {0} must be " 320 | "available and importable as a prerequisite to building " 321 | "or installing this package.".format(PACKAGE_NAME)) 322 | 323 | # This is a bit hacky, but if astropy_helpers was loaded from a 324 | # directory/submodule its Distribution object gets a "precedence" of 325 | # "DEVELOP_DIST". However, in other cases it gets a precedence of 326 | # "EGG_DIST". However, when activing the distribution it will only be 327 | # placed early on sys.path if it is treated as an EGG_DIST, so always 328 | # do that 329 | dist = dist.clone(precedence=pkg_resources.EGG_DIST) 330 | 331 | # Otherwise we found a version of astropy-helpers, so we're done 332 | # Just active the found distribution on sys.path--if we did a 333 | # download this usually happens automatically but it doesn't hurt to 334 | # do it again 335 | # Note: Adding the dist to the global working set also activates it 336 | # (makes it importable on sys.path) by default. 337 | 338 | try: 339 | pkg_resources.working_set.add(dist, replace=True) 340 | except TypeError: 341 | # Some (much) older versions of setuptools do not have the 342 | # replace=True option here. These versions are old enough that all 343 | # bets may be off anyways, but it's easy enough to work around just 344 | # in case... 345 | if dist.key in pkg_resources.working_set.by_key: 346 | del pkg_resources.working_set.by_key[dist.key] 347 | pkg_resources.working_set.add(dist) 348 | 349 | @property 350 | def config(self): 351 | """ 352 | A `dict` containing the options this `_Bootstrapper` was configured 353 | with. 354 | """ 355 | 356 | return dict((optname, getattr(self, optname)) 357 | for optname, _ in CFG_OPTIONS if hasattr(self, optname)) 358 | 359 | def get_local_directory_dist(self): 360 | """ 361 | Handle importing a vendored package from a subdirectory of the source 362 | distribution. 
363 | """ 364 | 365 | if not os.path.isdir(self.path): 366 | return 367 | 368 | log.info('Attempting to import astropy_helpers from {0} {1!r}'.format( 369 | 'submodule' if self.is_submodule else 'directory', 370 | self.path)) 371 | 372 | dist = self._directory_import() 373 | 374 | if dist is None: 375 | log.warn( 376 | 'The requested path {0!r} for importing {1} does not ' 377 | 'exist, or does not contain a copy of the {1} ' 378 | 'package.'.format(self.path, PACKAGE_NAME)) 379 | elif self.auto_upgrade and not self.is_submodule: 380 | # A version of astropy-helpers was found on the available path, but 381 | # check to see if a bugfix release is available on PyPI 382 | upgrade = self._do_upgrade(dist) 383 | if upgrade is not None: 384 | dist = upgrade 385 | 386 | return dist 387 | 388 | def get_local_file_dist(self): 389 | """ 390 | Handle importing from a source archive; this also uses setup_requires 391 | but points easy_install directly to the source archive. 392 | """ 393 | 394 | if not os.path.isfile(self.path): 395 | return 396 | 397 | log.info('Attempting to unpack and import astropy_helpers from ' 398 | '{0!r}'.format(self.path)) 399 | 400 | try: 401 | dist = self._do_download(find_links=[self.path]) 402 | except Exception as e: 403 | if DEBUG: 404 | raise 405 | 406 | log.warn( 407 | 'Failed to import {0} from the specified archive {1!r}: ' 408 | '{2}'.format(PACKAGE_NAME, self.path, str(e))) 409 | dist = None 410 | 411 | if dist is not None and self.auto_upgrade: 412 | # A version of astropy-helpers was found on the available path, but 413 | # check to see if a bugfix release is available on PyPI 414 | upgrade = self._do_upgrade(dist) 415 | if upgrade is not None: 416 | dist = upgrade 417 | 418 | return dist 419 | 420 | def get_index_dist(self): 421 | if not self.download: 422 | log.warn('Downloading {0!r} disabled.'.format(DIST_NAME)) 423 | return None 424 | 425 | log.warn( 426 | "Downloading {0!r}; run setup.py with the --offline option to " 427 | "force offline installation.".format(DIST_NAME)) 428 | 429 | try: 430 | dist = self._do_download() 431 | except Exception as e: 432 | if DEBUG: 433 | raise 434 | log.warn( 435 | 'Failed to download and/or install {0!r} from {1!r}:\n' 436 | '{2}'.format(DIST_NAME, self.index_url, str(e))) 437 | dist = None 438 | 439 | # No need to run auto-upgrade here since we've already presumably 440 | # gotten the most up-to-date version from the package index 441 | return dist 442 | 443 | def _directory_import(self): 444 | """ 445 | Import astropy_helpers from the given path, which will be added to 446 | sys.path. 447 | 448 | Must return True if the import succeeded, and False otherwise. 
443 |     def _directory_import(self):
444 |         """
445 |         Import astropy_helpers from the given path, which will be added to
446 |         sys.path.
447 | 
448 |         Returns the Distribution for astropy_helpers if the import succeeded,
449 |         or `None` otherwise.
450 |         """
451 | 
452 |         # Returns the distribution on success; on a `None` return the caller
453 |         # may still fall back to downloading
454 |         path = os.path.abspath(self.path)
455 | 
456 |         # Use an empty WorkingSet rather than the main
457 |         # pkg_resources.working_set, since on older versions of setuptools this
458 |         # will invoke a VersionConflict when trying to install an upgrade
459 |         ws = pkg_resources.WorkingSet([])
460 |         ws.add_entry(path)
461 |         dist = ws.by_key.get(DIST_NAME)
462 | 
463 |         if dist is None:
464 |             # We didn't find an egg-info/dist-info in the given path, but if a
465 |             # setup.py exists we can generate it
466 |             setup_py = os.path.join(path, 'setup.py')
467 |             if os.path.isfile(setup_py):
468 |                 with _silence():
469 |                     run_setup(os.path.join(path, 'setup.py'),
470 |                               ['egg_info'])
471 | 
472 |                 for dist in pkg_resources.find_distributions(path, True):
473 |                     # There should be only one...
474 |                     return dist
475 | 
476 |         return dist
477 | 
478 |     def _do_download(self, version='', find_links=None):
479 |         if find_links:
480 |             allow_hosts = ''
481 |             index_url = None
482 |         else:
483 |             allow_hosts = None
484 |             index_url = self.index_url
485 | 
486 |         # Annoyingly, setuptools will not handle other arguments to
487 |         # Distribution (such as options) before handling setup_requires, so it
488 |         # is not straightforward to programmatically augment the arguments which
489 |         # are passed to easy_install
490 |         class _Distribution(Distribution):
491 |             def get_option_dict(self, command_name):
492 |                 opts = Distribution.get_option_dict(self, command_name)
493 |                 if command_name == 'easy_install':
494 |                     if find_links is not None:
495 |                         opts['find_links'] = ('setup script', find_links)
496 |                     if index_url is not None:
497 |                         opts['index_url'] = ('setup script', index_url)
498 |                     if allow_hosts is not None:
499 |                         opts['allow_hosts'] = ('setup script', allow_hosts)
500 |                 return opts
501 | 
502 |         if version:
503 |             req = '{0}=={1}'.format(DIST_NAME, version)
504 |         else:
505 |             req = DIST_NAME
506 | 
507 |         attrs = {'setup_requires': [req]}
508 | 
509 |         try:
510 |             if DEBUG:
511 |                 _Distribution(attrs=attrs)
512 |             else:
513 |                 with _silence():
514 |                     _Distribution(attrs=attrs)
515 | 
516 |             # If the setup_requires succeeded it will have added the new dist
517 |             # to the main working_set
518 |             return pkg_resources.working_set.by_key.get(DIST_NAME)
519 |         except Exception as e:
520 |             if DEBUG:
521 |                 raise
522 | 
523 |             msg = 'Error retrieving {0} from {1}:\n{2}'
524 |             if find_links:
525 |                 source = find_links[0]
526 |             elif index_url != INDEX_URL:
527 |                 source = index_url
528 |             else:
529 |                 source = 'PyPI'
530 | 
531 |             raise Exception(msg.format(DIST_NAME, source, repr(e)))
532 | 
533 |     def _do_upgrade(self, dist):
534 |         # Build up a requirement for a newer bugfix release within the same
535 |         # minor release series (so API compatibility is preserved)
536 |         next_version = _next_version(dist.parsed_version)
537 | 
538 |         req = pkg_resources.Requirement.parse(
539 |             '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))
540 | 
541 |         package_index = PackageIndex(index_url=self.index_url)
542 | 
543 |         upgrade = package_index.obtain(req)
544 | 
545 |         if upgrade is not None:
546 |             return self._do_download(version=upgrade.version)
547 | 
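    # For illustration only: given a dist at version 1.2.3, _next_version()
    # (defined further down) returns '1.3.0', so _do_upgrade() above asks the
    # package index to satisfy:
    #
    #     astropy-helpers>1.2.3,<1.3.0
    #
    # i.e. only bugfix releases within the same minor series are considered.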
548 |     def _check_submodule(self):
549 |         """
550 |         Check if the given path is a git submodule.
551 | 
552 |         See the docstrings for ``_check_submodule_using_git`` and
553 |         ``_check_submodule_no_git`` for further details.
554 |         """
555 | 
556 |         if (self.path is None or
557 |                 (os.path.exists(self.path) and not os.path.isdir(self.path))):
558 |             return False
559 | 
560 |         if self.use_git:
561 |             return self._check_submodule_using_git()
562 |         else:
563 |             return self._check_submodule_no_git()
564 | 
565 |     def _check_submodule_using_git(self):
566 |         """
567 |         Check if the given path is a git submodule.  If so, attempt to
568 |         initialize and/or update the submodule if needed.
569 | 
570 |         This function makes calls to the ``git`` command in subprocesses.  The
571 |         ``_check_submodule_no_git`` fallback uses pure Python to check if the
572 |         given path looks like a git submodule, but it cannot perform updates.
573 |         """
574 | 
575 |         cmd = ['git', 'submodule', 'status', '--', self.path]
576 | 
577 |         try:
578 |             log.info('Running `{0}`; use the --no-git option to disable git '
579 |                      'commands'.format(' '.join(cmd)))
580 |             returncode, stdout, stderr = run_cmd(cmd)
581 |         except _CommandNotFound:
582 |             # The git command simply wasn't found; this is most likely the
583 |             # case on user systems that don't have git and are simply
584 |             # trying to install the package from PyPI or a source
585 |             # distribution.  Silently ignore this case and simply don't try
586 |             # to use submodules
587 |             return False
588 | 
589 |         stderr = stderr.strip()
590 | 
591 |         if returncode != 0 and stderr:
592 |             # Unfortunately the return code alone cannot be relied on, as
593 |             # earlier versions of git returned 0 even if the requested
594 |             # submodule does not exist
595 | 
596 |             # This is a warning printed by perl (from running git submodule)
597 |             # when the locale setting is malformed, which can happen sometimes
598 |             # on OSX.  See again
599 |             # https://github.com/astropy/astropy/issues/2749
600 |             perl_warning = ('perl: warning: Falling back to the standard locale '
601 |                             '("C").')
602 |             if not stderr.strip().endswith(perl_warning):
603 |                 # Some other unknown error condition occurred
604 |                 log.warn('git submodule command failed '
605 |                          'unexpectedly:\n{0}'.format(stderr))
606 |                 return False
607 | 
608 |         # Output of `git submodule status` is as follows:
609 |         #
610 |         # 1. Status indicator: '-' if the submodule is uninitialized, '+' if
611 |         # the submodule is initialized but is not at the commit currently
612 |         # indicated in .gitmodules (and thus needs to be updated), or 'U' if
613 |         # the submodule is in an unstable state (i.e. has merge conflicts)
614 |         #
615 |         # 2. SHA-1 hash of the current commit of the submodule (we don't really
616 |         # need this information but it's useful for checking that the output is
617 |         # correct)
618 |         #
619 |         # 3. The output of `git describe` for the submodule's current commit
620 |         # hash (this includes for example what branches the commit is on) but
621 |         # only if the submodule is initialized.  We ignore this information
622 |         # for now
623 |         _git_submodule_status_re = re.compile(
624 |             '^(?P<status>[+-U ])(?P<commit>[0-9a-f]{40}) '
625 |             '(?P<submodule>\S+)( .*)?$')
626 | 
627 |         # The stdout should only contain one line--the status of the
628 |         # requested submodule
629 |         m = _git_submodule_status_re.match(stdout)
630 |         if m:
631 |             # Yes, the path *is* a git submodule
632 |             self._update_submodule(m.group('submodule'), m.group('status'))
633 |             return True
634 |         else:
635 |             log.warn(
636 |                 'Unexpected output from `git submodule status`:\n{0}\n'
637 |                 'Will attempt import from {1!r} regardless.'.format(
638 |                     stdout, self.path))
639 |             return False
640 | 
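    # For illustration only: sample `git submodule status` lines the pattern
    # above is meant to match (status flag, 40-character SHA-1, path, optional
    # `git describe` output; the hashes and tags here are made up):
    #
    #     -d0746e1acf6d6e5b9a6debc2b4e0b7b4b4ef7a2c astropy_helpers
    #     +d0746e1acf6d6e5b9a6debc2b4e0b7b4b4ef7a2c astropy_helpers (v2.0-5-gd0746e1)
    #      d0746e1acf6d6e5b9a6debc2b4e0b7b4b4ef7a2c astropy_helpers (v2.0)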
641 |     def _check_submodule_no_git(self):
642 |         """
643 |         Like ``_check_submodule_using_git``, but simply parses the .gitmodules
644 |         file to determine if the supplied path is a git submodule, and does
645 |         not exec any subprocesses.
646 | 
647 |         This can only determine if a path is a submodule--it does not perform
648 |         updates, etc.  This function may need to be updated if the format of
649 |         the .gitmodules file is changed between git versions.
650 |         """
651 | 
652 |         gitmodules_path = os.path.abspath('.gitmodules')
653 | 
654 |         if not os.path.isfile(gitmodules_path):
655 |             return False
656 | 
657 |         # This is a minimal reader for gitconfig-style files.  It handles a
658 |         # few of the quirks that make gitconfig files incompatible with
659 |         # ConfigParser-style files, but does not support the full gitconfig
660 |         # syntax (just enough needed to read a .gitmodules file).
661 |         gitmodules_fileobj = io.StringIO()
662 | 
663 |         # Must use io.open for cross-Python-compatible behavior wrt unicode
664 |         with io.open(gitmodules_path) as f:
665 |             for line in f:
666 |                 # gitconfig files are more flexible with leading whitespace;
667 |                 # just go ahead and remove it
668 |                 line = line.lstrip()
669 | 
670 |                 # comments can start with either # or ;
671 |                 if line and line[0] in ('#', ';'):
672 |                     continue
673 | 
674 |                 gitmodules_fileobj.write(line)
675 | 
676 |         gitmodules_fileobj.seek(0)
677 | 
678 |         cfg = RawConfigParser()
679 | 
680 |         try:
681 |             cfg.readfp(gitmodules_fileobj)
682 |         except Exception as exc:
683 |             log.warn('Malformed .gitmodules file: {0}\n'
684 |                      '{1} cannot be assumed to be a git submodule.'.format(
685 |                          exc, self.path))
686 |             return False
687 | 
688 |         for section in cfg.sections():
689 |             if not cfg.has_option(section, 'path'):
690 |                 continue
691 | 
692 |             submodule_path = cfg.get(section, 'path').rstrip(os.sep)
693 | 
694 |             if submodule_path == self.path.rstrip(os.sep):
695 |                 return True
696 | 
697 |         return False
698 | 
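    # For illustration only: a minimal .gitmodules entry that the parser above
    # accepts (a section carrying a 'path' option that matches self.path); the
    # submodule name, path, and URL here are hypothetical:
    #
    #     [submodule "vendored_helpers"]
    #         path = vendored_helpers
    #         url = https://example.org/vendored_helpers.git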
699 |     def _update_submodule(self, submodule, status):
700 |         if status == ' ':
701 |             # The submodule is up to date; no action necessary
702 |             return
703 |         elif status == '-':
704 |             if self.offline:
705 |                 raise _AHBootstrapSystemExit(
706 |                     "Cannot initialize the {0} submodule in --offline mode; "
707 |                     "this requires being able to clone the submodule from an "
708 |                     "online repository.".format(submodule))
709 |             cmd = ['update', '--init']
710 |             action = 'Initializing'
711 |         elif status == '+':
712 |             cmd = ['update']
713 |             action = 'Updating'
714 |             if self.offline:
715 |                 cmd.append('--no-fetch')
716 |         elif status == 'U':
717 |             raise _AHBootstrapSystemExit(
718 |                 'Error: Submodule {0} contains unresolved merge conflicts.  '
719 |                 'Please complete or abandon any changes in the submodule so '
720 |                 'that it is in a usable state, then try again.'.format(submodule))
721 |         else:
722 |             log.warn('Unknown status {0!r} for git submodule {1!r}.  Will '
723 |                      'attempt to use the submodule as-is, but try to ensure '
724 |                      'that the submodule is in a clean state and contains no '
725 |                      'conflicts or errors.\n{2}'.format(status, submodule,
726 |                                                        _err_help_msg))
727 |             return
728 | 
729 |         err_msg = None
730 |         cmd = ['git', 'submodule'] + cmd + ['--', submodule]
731 |         log.warn('{0} {1} submodule with: `{2}`'.format(
732 |             action, submodule, ' '.join(cmd)))
733 | 
734 |         try:
735 |             log.info('Running `{0}`; use the --no-git option to disable git '
736 |                      'commands'.format(' '.join(cmd)))
737 |             returncode, stdout, stderr = run_cmd(cmd)
738 |         except OSError as e:
739 |             err_msg = str(e)
740 |         else:
741 |             if returncode != 0:
742 |                 err_msg = stderr
743 | 
744 |         if err_msg is not None:
745 |             log.warn('An unexpected error occurred updating the git submodule '
746 |                      '{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
747 |                                                _err_help_msg))
748 | 
749 | class _CommandNotFound(OSError):
750 |     """
751 |     An exception raised when a command run with run_cmd is not found on the
752 |     system.
753 |     """
754 | 
755 | 
756 | def run_cmd(cmd):
757 |     """
758 |     Run a command in a subprocess, given as a list of command-line
759 |     arguments.
760 | 
761 |     Returns a ``(returncode, stdout, stderr)`` tuple.
762 |     """
763 | 
764 |     try:
765 |         p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
766 |         # XXX: May block if either stdout or stderr fill their buffers;
767 |         # however for the commands this is currently used for that is
768 |         # unlikely (they should have very brief output)
769 |         stdout, stderr = p.communicate()
770 |     except OSError as e:
771 |         if DEBUG:
772 |             raise
773 | 
774 |         if e.errno == errno.ENOENT:
775 |             msg = 'Command not found: `{0}`'.format(' '.join(cmd))
776 |             raise _CommandNotFound(msg, cmd)
777 |         else:
778 |             raise _AHBootstrapSystemExit(
779 |                 'An unexpected error occurred when running the '
780 |                 '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))
781 | 
782 | 
783 |     # Can fail if the default locale is not configured properly.  See
784 |     # https://github.com/astropy/astropy/issues/2749.  For the purposes under
785 |     # consideration 'latin1' is an acceptable fallback.
786 |     try:
787 |         stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
788 |     except ValueError:
789 |         # Due to an OSX oddity locale.getdefaultlocale() can also crash
790 |         # depending on the user's locale/language settings.  See:
791 |         # http://bugs.python.org/issue18378
792 |         stdio_encoding = 'latin1'
793 | 
794 |     # Unlikely to fail at this point but even then let's be flexible
795 |     if not isinstance(stdout, _text_type):
796 |         stdout = stdout.decode(stdio_encoding, 'replace')
797 |     if not isinstance(stderr, _text_type):
798 |         stderr = stderr.decode(stdio_encoding, 'replace')
799 | 
800 |     return (p.returncode, stdout, stderr)
801 | 
802 | 
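# For illustration only: run_cmd() above returns decoded text, so a caller
# can do, e.g. (the exact output will vary by system):
#
#     returncode, stdout, stderr = run_cmd(['git', '--version'])
#     if returncode == 0:
#         print(stdout)   # e.g. 'git version 2.x.y'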
803 | def _next_version(version):
804 |     """
805 |     Given a parsed version from pkg_resources.parse_version, returns a new
806 |     version string with the next minor version.
807 | 
808 |     Examples
809 |     ========
810 |     >>> _next_version(pkg_resources.parse_version('1.2.3'))
811 |     '1.3.0'
812 |     """
813 | 
814 |     if hasattr(version, 'base_version'):
815 |         # New version parsing from setuptools >= 8.0
816 |         if version.base_version:
817 |             parts = version.base_version.split('.')
818 |         else:
819 |             parts = []
820 |     else:
821 |         parts = []
822 |         for part in version:
823 |             if part.startswith('*'):
824 |                 break
825 |             parts.append(part)
826 | 
827 |     parts = [int(p) for p in parts]
828 | 
829 |     if len(parts) < 3:
830 |         parts += [0] * (3 - len(parts))
831 | 
832 |     major, minor, micro = parts[:3]
833 | 
834 |     return '{0}.{1}.{2}'.format(major, minor + 1, 0)
835 | 
836 | 
837 | class _DummyFile(object):
838 |     """A noop writeable object."""
839 | 
840 |     errors = ''  # Required for Python 3.x
841 |     encoding = 'utf-8'
842 | 
843 |     def write(self, s):
844 |         pass
845 | 
846 |     def flush(self):
847 |         pass
848 | 
849 | 
850 | @contextlib.contextmanager
851 | def _silence():
852 |     """A context manager that silences sys.stdout and sys.stderr."""
853 | 
854 |     old_stdout = sys.stdout
855 |     old_stderr = sys.stderr
856 |     sys.stdout = _DummyFile()
857 |     sys.stderr = _DummyFile()
858 |     exception_occurred = False
859 |     try:
860 |         yield
861 |     except:
862 |         exception_occurred = True
863 |         # Go ahead and clean up so that exception handling can work normally
864 |         sys.stdout = old_stdout
865 |         sys.stderr = old_stderr
866 |         raise
867 | 
868 |     if not exception_occurred:
869 |         sys.stdout = old_stdout
870 |         sys.stderr = old_stderr
871 | 
872 | 
873 | _err_help_msg = """
874 | If the problem persists consider installing astropy_helpers manually using pip
875 | (`pip install astropy_helpers`) or by manually downloading the source archive,
876 | extracting it, and installing by running `python setup.py install` from the
877 | root of the extracted source code.
878 | """
879 | 
880 | 
881 | class _AHBootstrapSystemExit(SystemExit):
882 |     def __init__(self, *args):
883 |         if not args:
884 |             msg = 'An unknown problem occurred bootstrapping astropy_helpers.'
885 |         else:
886 |             msg = args[0]
887 | 
888 |         msg += '\n' + _err_help_msg
889 | 
890 |         super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
891 | 
892 | 
893 | BOOTSTRAPPER = _Bootstrapper.main()
894 | 
895 | 
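# For illustration only: a project's setup.py would typically import this
# module and, optionally, call use_astropy_helpers() (defined below) before
# calling setup(); the path and keyword values here are hypothetical:
#
#     import ah_bootstrap
#     ah_bootstrap.use_astropy_helpers(path='astropy_helpers', offline=False)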
896 | def use_astropy_helpers(**kwargs):
897 |     """
898 |     Ensure that the `astropy_helpers` module is available and is importable.
899 |     This supports automatic submodule initialization if astropy_helpers is
900 |     included in a project as a git submodule, or will download it from PyPI if
901 |     necessary.
902 | 
903 |     Parameters
904 |     ----------
905 | 
906 |     path : str or None, optional
907 |         A filesystem path relative to the root of the project's source code
908 |         that should be added to `sys.path` so that `astropy_helpers` can be
909 |         imported from that path.
910 | 
911 |         If the path is a git submodule it will automatically be initialized
912 |         and/or updated.
913 | 
914 |         The path may also be to a ``.tar.gz`` archive of the astropy_helpers
915 |         source distribution.  In this case the archive is automatically
916 |         unpacked and made temporarily available on `sys.path` as a ``.egg``
917 |         archive.
918 | 
919 |         If `None` skip straight to downloading.
920 | 
921 |     download_if_needed : bool, optional
922 |         If the provided filesystem path is not found, an attempt will be made
923 |         to download astropy_helpers from PyPI.  It will then be made
924 |         temporarily available on `sys.path` as a ``.egg`` archive (using the
925 |         ``setup_requires`` feature of setuptools).  If the ``--offline``
926 |         option is given at the command line the value of this argument is
927 |         overridden to `False`.
928 | 
929 |     index_url : str, optional
930 |         If provided, use a different URL for the Python package index than the
931 |         main PyPI server.
932 | 
933 |     use_git : bool, optional
934 |         If `False` no git commands will be used--this effectively disables
935 |         support for git submodules.  If the ``--no-git`` option is given at
936 |         the command line the value of this argument is overridden to `False`.
937 | 
938 |     auto_upgrade : bool, optional
939 |         By default, when installing a package from a non-development source
940 |         distribution ah_bootstrap will try to automatically check for patch
941 |         releases to astropy-helpers on PyPI and use the patched version over
942 |         any bundled versions.  Setting this to `False` will disable that
943 |         functionality.  If the ``--offline`` option is given at the command
944 |         line the value of this argument is overridden to `False`.
945 | 
946 |     offline : bool, optional
947 |         If `True`, disable all actions that require an internet connection,
948 |         including downloading packages from the package index and fetching
949 |         updates to any git submodule.  Defaults to `False`.
950 |     """
951 | 
952 |     global BOOTSTRAPPER
953 | 
954 |     config = BOOTSTRAPPER.config
955 |     config.update(**kwargs)
956 | 
957 |     # Create a new bootstrapper with the updated configuration and run it
958 |     BOOTSTRAPPER = _Bootstrapper(**config)
959 |     BOOTSTRAPPER.run()
--------------------------------------------------------------------------------