├── .gitignore
├── .github
│   ├── CODEOWNERS
│   └── workflows
│       ├── build.yml
│       └── python_testing.yml
├── docs
│   ├── rtd_environment.yaml
│   ├── source
│   │   ├── index.rst
│   │   └── conf.py
│   ├── Makefile
│   └── make.bat
├── calcos
│   ├── pars
│   │   ├── calcos.cfg
│   │   └── calcos.cfgspc
│   ├── airglow.py
│   ├── shiftfile.py
│   ├── phot.py
│   ├── __init__.py
│   ├── average.py
│   ├── orbit.py
│   ├── calcosparam.py
│   ├── osmstep.py
│   ├── dispersion.py
│   ├── calcos.help
│   ├── xd_search.py
│   ├── getinfo.py
│   ├── burst.py
│   └── spwcs.py
├── README.md
├── requirements-dev.txt
├── .readthedocs.yaml
├── setup.py
├── tests
│   ├── test_acq_peakd_nuv.py
│   ├── test_acq_peakxd.py
│   ├── test_acq_search.py
│   ├── test_acq_peakd_both.py
│   ├── test_acq_peakd_fuva.py
│   ├── test_acq_image.py
│   ├── test_average.py
│   ├── test_flat_nuv.py
│   ├── test_dark_nuv.py
│   ├── test_flat_fuva.py
│   ├── test_wavecal_nuv_g185m.py
│   ├── test_wavecal_nuv_g225m.py
│   ├── test_wavecal_nuv_g230l.py
│   ├── test_wavecal_nuv_g285m.py
│   ├── test_wavecal_nuv_mirrora.py
│   ├── test_wavecal_nuv_mirrorb.py
│   ├── test_wavecal_fuva_g130m.py
│   ├── test_dark_fuvb.py
│   ├── test_flat_relmvreq.py
│   ├── test_dark_fuva.py
│   ├── conftest.py
│   ├── test_wavecal_fuva_f140l.py
│   ├── test_wavecal_fuvb_g160m.py
│   ├── test_wavecal_fuvb_g160m_relmvreq.py
│   ├── test_nuv_sci_g185m.py
│   ├── test_nuv_sci_g230l.py
│   ├── test_nuv_sci_g285m.py
│   ├── test_airglow.py
│   ├── test_fuv_timetag.py
│   ├── test_shiftfile.py
│   ├── test_extract.py
│   ├── README.md
│   └── helpers.py
├── pyproject.toml
├── JenkinsfileRT
├── tox.ini
└── CODE_OF_CONDUCT.md

/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | *.pyc
3 | *.pyo
4 | *.so
5 | build/
6 | dist/
7 | *.eggs
8 | *.egg-info/
9 | __pycache__/
10 | .cache/
11 | pip-wheel-metadata
12 | *idea
13 | **/version.py
14 | 
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # automatically requests pull request reviews for files matching the given pattern; the last match takes precedence
2 | 
3 | * @spacetelescope/calcos-maintainers
4 | 
--------------------------------------------------------------------------------
/docs/rtd_environment.yaml:
--------------------------------------------------------------------------------
1 | name: rtd311
2 | channels:
3 |   - conda-forge
4 |   - defaults
5 | dependencies:
6 |   - python=3.11
7 |   - pip
8 |   - graphviz
9 |   - pip:
10 |     - stsci_rtd_theme
11 | 
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. calcos documentation master file, created by
2 |    sphinx-quickstart on Mon Sep 27 15:35:19 2010.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | Welcome to calcos's documentation!
7 | ==================================
8 | 
9 | Please see https://hst-docs.stsci.edu/cosdhb/chapter-3-cos-calibration .
10 | 
--------------------------------------------------------------------------------
/calcos/pars/calcos.cfg:
--------------------------------------------------------------------------------
1 | _task_name_ = calcos
2 | input = ""
3 | verbosity = 1
4 | savetmp = False
5 | outdir = ""
6 | find_target = False
7 | cutoff = None
8 | shift_file = ""
9 | csum = False
10 | raw_csum = False
11 | compress = False
12 | comp_param = "gzip,-0.01"
13 | binx = 1
14 | biny = 1
15 | stimfile = ""
16 | livefile = ""
17 | burstfile = ""
18 | print_version = False
19 | print_revision = False
20 | [_RULES_]
21 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CALCOS
2 | 
3 | Calibration software for HST/COS.
4 | 
5 | [![CalCOS Pytest](https://github.com/spacetelescope/calcos/actions/workflows/python_testing.yml/badge.svg)](https://github.com/spacetelescope/calcos/actions/workflows/python_testing.yml)
6 | [![test CalCOS](https://github.com/spacetelescope/RegressionTests/actions/workflows/calcos.yml/badge.svg)](https://github.com/spacetelescope/RegressionTests/actions/workflows/calcos.yml)
7 | 
8 | [Documentation](https://hst-docs.stsci.edu/cosdhb/chapter-3-cos-calibration)
9 | 
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | # Use bi-weekly numpy/scipy dev builds
2 | --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple
3 | numpy>=0.0.dev0
4 | scipy>=0.0.dev0
5 | 
6 | # Use nightly astropy dev builds
7 | --extra-index-url https://pypi.anaconda.org/astropy/simple
8 | astropy>=0.0.dev0
9 | 
10 | # Other important upstream packages
11 | git+https://github.com/spacetelescope/stsci.tools
12 | 
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 | version: 2
4 | 
5 | sphinx:
6 |   builder: html
7 |   configuration: docs/source/conf.py
8 |   fail_on_warning: true
9 | 
10 | build:
11 |   os: ubuntu-22.04
12 |   tools:
13 |     python: mambaforge-4.10
14 | 
15 | conda:
16 |   environment: docs/rtd_environment.yaml
17 | 
18 | # Set the Python requirements required to build your docs
19 | python:
20 |   install:
21 |     - method: pip
22 |       path: .
23 |       extra_requirements:
24 |         - docs
25 | 
26 | # Don't build any extra formats
27 | formats: []
28 | 
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 | 
3 | on:
4 |   release:
5 |     types: [ released ]
6 |   pull_request:
7 |   workflow_dispatch:
8 | 
9 | concurrency:
10 |   group: ${{ github.workflow }}-${{ github.ref }}
11 |   cancel-in-progress: true
12 | 
13 | jobs:
14 |   build:
15 |     uses: OpenAstronomy/github-actions-workflows/.github/workflows/publish.yml@v1
16 |     with:
17 |       targets: |
18 |         # Linux wheels
19 |         - cp3*-manylinux_x86_64
20 |         # macOS wheels
21 |         - cp3*-macosx_x86_64
22 |         # Until we have arm64 runners, we can't automatically test arm64 wheels
23 |         - cp3*-macosx_arm64
24 |       sdist: true
25 |       test_command: python -c "from calcos import ccos"
26 |       upload_to_pypi: ${{ (github.event_name == 'release') && (github.event.action == 'released') }}
27 |     secrets:
28 |       pypi_token: ${{ secrets.PYPI_PASSWORD_STSCI_MAINTAINER }}
29 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | from setuptools import setup, Extension
4 | from numpy import get_include as numpy_includes
5 | from pathlib import Path
6 | 
7 | 
8 | def c_sources(parent: str) -> list[str]:
9 |     return [str(filename) for filename in Path(parent).glob("*.c")]
10 | 
11 | 
12 | def c_includes(parent: str, depth: int = 1) -> list[str]:
13 |     return [
14 |         parent,
15 |         *(
16 |             str(filename)
17 |             for filename in Path(parent).iterdir()
18 |             if filename.is_dir() and len(filename.parts) - 1 <= depth
19 |         ),
20 |     ]
21 | 
22 | 
23 | PACKAGENAME = "calcos"
24 | SOURCES = c_sources("src")
25 | INCLUDES = c_includes("src") + [numpy_includes()]
26 | 
27 | 
28 | setup(
29 |     ext_modules=[
30 |         Extension(
31 |             PACKAGENAME + ".ccos",
32 |             sources=SOURCES,
33 |             include_dirs=INCLUDES,
34 |         ),
35 |     ],
36 | )
37 | 
--------------------------------------------------------------------------------
/tests/test_acq_peakd_nuv.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/NUV ACQ/PEAKD."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestNUVACQPEAKD(BaseCOS):
13 |     detector = 'nuv'
14 | 
15 |     def test_nuv_acq_peakd(self):
16 |         """
17 |         NUV COS regression test
18 |         """
19 |         files_to_download = ['la8q99l7q_rawacq.fits',
20 |                              'la8q99l7q_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'la8q99l7q_rawacq.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # No need to compare results as this test doesn't
30 |         # produce any products. We are just testing that the
31 |         # code runs to completion
32 | 
--------------------------------------------------------------------------------
/tests/test_acq_peakxd.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/FUV ACQ/PEAKXD."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestFUVACQPEAKXD(BaseCOS):
13 |     detector = 'fuv'
14 | 
15 |     def test_fuv_acq_peakxd(self):
16 |         """
17 |         FUV COS regression test
18 |         """
19 |         files_to_download = ['la9t01naq_rawacq.fits',
20 |                              'la9t01naq_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'la9t01naq_rawacq.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # No need to compare results as this test doesn't
30 |         # produce any products. We are just testing that the
31 |         # code runs to completion
32 | 
--------------------------------------------------------------------------------
/tests/test_acq_search.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/FUV ACQ/SEARCH."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestFUVACQSEARCH(BaseCOS):
13 |     detector = 'fuv'
14 | 
15 |     def test_fuv_acq_search(self):
16 |         """
17 |         FUV COS regression test
18 |         """
19 |         files_to_download = ['la9t01n9q_rawacq.fits',
20 |                              'la9t01n9q_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'la9t01n9q_rawacq.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # No need to compare results as this test doesn't
30 |         # produce any products. We are just testing that the
31 |         # code runs to completion
32 | 
--------------------------------------------------------------------------------
/tests/test_acq_peakd_both.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/BOTH ACQ/PEAKD."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestBOTHACQPEAKD(BaseCOS):
13 |     detector = 'fuv'
14 | 
15 |     def test_both_acq_peakd(self):
16 |         """
17 |         FUV COS regression test
18 |         """
19 |         files_to_download = ['ld7y02rrq_rawacq.fits',
20 |                              'ld7y02rrq_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'ld7y02rrq_rawacq.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # No need to compare results as this test doesn't
30 |         # produce any products. We are just testing that the
31 |         # code runs to completion
32 | 
--------------------------------------------------------------------------------
/tests/test_acq_peakd_fuva.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/FUVA ACQ/PEAKD."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestFUVAACQPEAKD(BaseCOS):
13 |     detector = 'fuv'
14 | 
15 |     def test_fuva_acq_peakd(self):
16 |         """
17 |         FUV COS regression test
18 |         """
19 |         files_to_download = ['lbx503kfq_rawacq.fits',
20 |                              'lbx503kfq_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'lbx503kfq_rawacq.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # No need to compare results as this test doesn't
30 |         # produce any products. We are just testing that the
31 |         # code runs to completion
32 | 
--------------------------------------------------------------------------------
/tests/test_acq_image.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/NUV ACQ/IMAGE."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestNUVACQIMAGE(BaseCOS):
13 |     detector = 'nuv'
14 | 
15 |     def test_nuv_acq_image(self):
16 |         """
17 |         NUV COS regression test
18 |         """
19 |         files_to_download = ['ldji01ggq_rawacq.fits',
20 |                              'ldji01ggq_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'ldji01ggq_rawacq.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # Compare results.
30 |         # The first outroot is the output from whole ASN,
31 |         # the rest are individual members.
32 |         outroots = ['ldji01ggq']
33 |         outputs = []
34 |         for outroot in outroots:
35 |             for sfx in ('counts', 'flt'):
36 |                 fname = '{}_{}.fits'.format(outroot, sfx)
37 |                 outputs.append((fname, 'ref_' + fname))
38 |         self.compare_outputs(outputs, rtol=1e-7)
39 | 
40 | 
--------------------------------------------------------------------------------
/tests/test_average.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | import numpy as np
4 | from astropy.io import fits
5 | 
6 | from calcos import average
7 | from generate_tempfiles import create_count_file
8 | 
9 | 
10 | def test_avg_image(tmp_path):
11 |     """
12 |     tests avg_image() in average.py
13 |     explanation of the test
14 |     - create temporary count files to be used as inputs
15 |     - expected values in the output file are the average of the input values
16 |     - loop through the values to check if the math holds.
17 |     Returns
18 |     -------
19 |     pass if expected == actual, fail otherwise.
20 |     """
21 |     # Setup
22 |     infile = [str(tmp_path / "test_count1.fits"), str(tmp_path / "test_count2.fits")]
23 |     outfile = str(tmp_path / "test_output.fits")
24 |     create_count_file(infile[0])
25 |     create_count_file(infile[1])
26 |     inhdr1, inhdr2 = fits.open(infile[0]), fits.open(infile[1])
27 |     # Test
28 |     average.avgImage(infile, outfile)
29 |     out_hdr = fits.open(outfile)
30 | 
31 |     # Verify
32 |     assert os.path.exists(outfile)
33 |     for (i, j, k) in zip(inhdr1[1].header, inhdr2[1].header, out_hdr[1].header):
34 |         assert i == j == k
35 |     np.testing.assert_array_equal((inhdr1[1].data + inhdr2[1].data) / 2, out_hdr[1].data)
36 | 
--------------------------------------------------------------------------------
/tests/test_flat_nuv.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/NUV flat."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow 12 | class TestNUVFlat(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_nuv_flat(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la7u05xoq_rawtag.fits', 20 | 'la7u05xoq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la7u05xoq_rawtag.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['la7u05xoq'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag', 'counts', 36 | 'flt'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=3e-7) 40 | -------------------------------------------------------------------------------- /tests/test_dark_nuv.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV dark.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestNUVDark(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_nuv_dark(self): 16 | """ 17 | NUV COS regression test 18 | """ 19 | files_to_download = ['la7u04w0q_rawtag.fits', 20 | 'la7u04w0q_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la7u04w0q_rawtag.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['la7u04w0q'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag', 'counts', 36 | 'flt'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=3e-7) 40 | -------------------------------------------------------------------------------- /tests/test_flat_fuva.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUV flat.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestFUVAFlat(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuva_flat(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la8n01qkq_rawtag_a.fits', 20 | 'la8n01qkq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la8n01qkq_rawtag_a.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 
32 |         outroots = ['la8n01qkq']
33 |         outputs = []
34 |         for outroot in outroots:
35 |             for sfx in ('corrtag_a', 'counts_a',
36 |                         'flt_a'):
37 |                 fname = '{}_{}.fits'.format(outroot, sfx)
38 |                 outputs.append((fname, 'ref_' + fname))
39 |         self.compare_outputs(outputs, rtol=3e-7)
40 | 
--------------------------------------------------------------------------------
/tests/test_wavecal_nuv_g185m.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/NUV G185M wavecal."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestWavecalNUVG185M(BaseCOS):
13 |     detector = 'nuv'
14 | 
15 |     def test_wavecal_nuv_g185m(self):
16 |         """
17 |         COS regression test
18 |         """
19 |         files_to_download = ['la7v01dmq_rawtag.fits',
20 |                              'la7v01dmq_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'la7v01dmq_rawtag.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # Compare results.
30 |         # The first outroot is the output from whole ASN,
31 |         # the rest are individual members.
32 |         outroots = ['la7v01dmq']
33 |         outputs = []
34 |         for outroot in outroots:
35 |             for sfx in ('corrtag', 'counts',
36 |                         'flt', 'x1d'):
37 |                 fname = '{}_{}.fits'.format(outroot, sfx)
38 |                 outputs.append((fname, 'ref_' + fname))
39 |         self.compare_outputs(outputs, rtol=1e-7)
40 | 
--------------------------------------------------------------------------------
/tests/test_wavecal_nuv_g225m.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/NUV G225M wavecal."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestWavecalNUVG225M(BaseCOS):
13 |     detector = 'nuv'
14 | 
15 |     def test_wavecal_nuv_g225m(self):
16 |         """
17 |         COS regression test
18 |         """
19 |         files_to_download = ['la7v01doq_rawtag.fits',
20 |                              'la7v01doq_spt.fits']
21 | 
22 |         # Prepare input files.
23 |         self.get_input_files(files_to_download)
24 | 
25 |         input_file = 'la7v01doq_rawtag.fits'
26 |         # Run CALCOS
27 |         calcos.calcos(input_file)
28 | 
29 |         # Compare results.
30 |         # The first outroot is the output from whole ASN,
31 |         # the rest are individual members.
32 |         outroots = ['la7v01doq']
33 |         outputs = []
34 |         for outroot in outroots:
35 |             for sfx in ('corrtag', 'counts',
36 |                         'flt', 'x1d'):
37 |                 fname = '{}_{}.fits'.format(outroot, sfx)
38 |                 outputs.append((fname, 'ref_' + fname))
39 |         self.compare_outputs(outputs, rtol=1e-7)
40 | 
--------------------------------------------------------------------------------
/tests/test_wavecal_nuv_g230l.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/NUV G230L wavecal."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow 12 | class TestWavecalNUVG230L(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_wavecal_nuv_g230l(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la7v01e4q_rawtag.fits', 20 | 'la7v01e4q_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la7v01e4q_rawtag.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['la7v01e4q'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag', 'counts', 36 | 'flt', 'x1d'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=1e-7) 40 | -------------------------------------------------------------------------------- /tests/test_wavecal_nuv_g285m.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV G285M wavecal.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestWavecalNUVG285M(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_wavecal_nuv_g285m(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la7v01dqq_rawtag.fits', 20 | 'la7v01dqq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la7v01dqq_rawtag.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['la7v01dqq'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag', 'counts', 36 | 'flt', 'x1d'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=1e-7) 40 | -------------------------------------------------------------------------------- /tests/test_wavecal_nuv_mirrora.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV MIRRORA wavecal.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestWavecalNUVMirrorA(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_wavecal_nuv_mirrora(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la7u04w0q_rawtag.fits', 20 | 'la7u04w0q_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la7u04w0q_rawtag.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 
32 | outroots = ['la7u04w0q'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag', 'counts', 36 | 'flt'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=1e-7) 40 | -------------------------------------------------------------------------------- /tests/test_wavecal_nuv_mirrorb.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV MIRRORB wavecal.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestWavecalNUVMirrorB(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_wavecal_nuv_mirrorb(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['labq02osq_rawtag.fits', 20 | 'labq02osq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'labq02osq_rawtag.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['labq02osq'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag', 'counts', 36 | 'flt'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=1e-7) 40 | -------------------------------------------------------------------------------- /tests/test_wavecal_fuva_g130m.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUVA wavecal, G130M.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestFUVAWavecalG130M(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuva_wavecal_g130m(self): 16 | """ 17 | FUV COS regression test 18 | """ 19 | files_to_download = ['lce823m7q_rawtag_a.fits', 20 | 'lce823m7q_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'lce823m7q_rawtag_a.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['lce823m7q'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag_a', 'counts_a', 36 | 'flt_a', 'x1d'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=3e-7) 40 | -------------------------------------------------------------------------------- /tests/test_dark_fuvb.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUV dark.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 
11 | @pytest.mark.slow 12 | class TestFUVBDark(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuvb_dark(self): 16 | """ 17 | FUV COS regression test #3 18 | """ 19 | files_to_download = ['ldd306cbq_rawtag_a.fits', 'ldd306cbq_rawtag_b.fits', 20 | 'ldd306cbq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'ldd306cbq_rawtag_a.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['ldd306cbq'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag_a', 'corrtag_b', 'counts_a', 'counts_b', 36 | 'flt_a', 'flt_b'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=3e-7) 40 | -------------------------------------------------------------------------------- /tests/test_flat_relmvreq.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUV RelMvRec Flat""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestRelMvRecFlat(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_relmvrec_flat(self): 16 | """ 17 | FUV COS regression test 18 | """ 19 | files_to_download = ['ldc1b1waq_rawtag_a.fits', 'ldc1b1waq_rawtag_b.fits', 20 | 'ldc1b1waq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'ldc1b1waq_rawtag_a.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 32 | outroots = ['ldc1b1waq'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag_a', 'corrtag_b', 'counts_a', 'counts_b', 36 | 'flt_a', 'flt_b'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | outputs.append((fname, 'ref_' + fname)) 39 | self.compare_outputs(outputs, rtol=3e-7) 40 | -------------------------------------------------------------------------------- /tests/test_dark_fuva.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUV dark.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestFUVADark(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuva_dark(self): 16 | """ 17 | FUV COS regression test #2 18 | """ 19 | files_to_download = ['la7803fiq_rawtag_a.fits', 'la7803fiq_rawtag_b.fits', 20 | 'la7803fiq_spt.fits'] 21 | 22 | # Prepare input files. 23 | self.get_input_files(files_to_download) 24 | 25 | input_file = 'la7803fiq_rawtag_a.fits' 26 | # Run CALCOS 27 | calcos.calcos(input_file) 28 | 29 | # Compare results. 30 | # The first outroot is the output from whole ASN, 31 | # the rest are individual members. 
32 | outroots = ['la7803fiq'] 33 | outputs = [] 34 | for outroot in outroots: 35 | for sfx in ('corrtag_a', 'corrtag_b', 'counts_a', 'counts_b', 36 | 'flt_a', 'flt_b'): 37 | fname = '{}_{}.fits'.format(outroot, sfx) 38 | comparison_name = 'ref_' + fname 39 | outputs.append((fname, comparison_name)) 40 | self.compare_outputs(outputs, rtol=3e-7) 41 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """Custom ``pytest`` configurations.""" 2 | import pytest 3 | import os 4 | 5 | #from astropy.tests.helper import enable_deprecations_as_exceptions 6 | 7 | # Turn deprecation warnings into exceptions. 8 | #enable_deprecations_as_exceptions() 9 | 10 | # Require these pytest plugins to run. 11 | pytest_plugins = ["pytest_ciwatson"] 12 | 13 | 14 | # For easy inspection on what dependencies were used in test. 15 | def pytest_report_header(config): 16 | import sys 17 | import warnings 18 | import importlib 19 | 20 | s = "\nFull Python Version: \n{0}\n\n".format(sys.version) 21 | 22 | for module_name in ('numpy', 'astropy', 'scipy', 'matplotlib', 23 | 'stsci.tools'): 24 | try: 25 | with warnings.catch_warnings(): 26 | warnings.simplefilter("ignore", DeprecationWarning) 27 | module = importlib.import_module(module_name) 28 | except ImportError: 29 | s += "{0}: not available\n".format(module_name) 30 | else: 31 | try: 32 | version = module.__version__ 33 | except AttributeError: 34 | version = 'unknown (no __version__ attribute)' 35 | s += "{0}: {1}\n".format(module_name, version) 36 | 37 | return s 38 | 39 | 40 | @pytest.fixture(scope='session') 41 | def test_data(): 42 | return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') 43 | -------------------------------------------------------------------------------- /tests/test_wavecal_fuva_f140l.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUVA wavecal, G140L.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestFUVAWavecalG140L(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuva_wavecal_g140l(self): 16 | """ 17 | FUV COS regression test 18 | """ 19 | files_to_download = ['la8n01qqq_rawtag_a.fits', 20 | 'la8n01qqq_rawtag_b.fits', 21 | 'la8n01qqq_spt.fits'] 22 | 23 | # Prepare input files. 24 | self.get_input_files(files_to_download) 25 | 26 | input_file = 'la8n01qqq_rawtag_a.fits' 27 | # Run CALCOS 28 | calcos.calcos(input_file) 29 | 30 | # Compare results. 31 | # The first outroot is the output from whole ASN, 32 | # the rest are individual members. 
33 | outroots = ['la8n01qqq'] 34 | outputs = [] 35 | for outroot in outroots: 36 | for sfx in ('corrtag_a', 'corrtag_b', 37 | 'counts_a', 'counts_b', 38 | 'flt_a', 'flt_b', 'x1d'): 39 | fname = '{}_{}.fits'.format(outroot, sfx) 40 | outputs.append((fname, 'ref_' + fname)) 41 | self.compare_outputs(outputs, rtol=3e-7) 42 | -------------------------------------------------------------------------------- /tests/test_wavecal_fuvb_g160m.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUVB wavecal, G160M.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestFUVBWavecalG160M(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuvb_wavecal_g160m(self): 16 | """ 17 | FUV COS regression test 18 | """ 19 | files_to_download = ['la7803fkq_rawtag_a.fits', 20 | 'la7803fkq_rawtag_b.fits', 21 | 'la7803fkq_spt.fits'] 22 | 23 | # Prepare input files. 24 | self.get_input_files(files_to_download) 25 | 26 | input_file = 'la7803fkq_rawtag_a.fits' 27 | # Run CALCOS 28 | calcos.calcos(input_file) 29 | 30 | # Compare results. 31 | # The first outroot is the output from whole ASN, 32 | # the rest are individual members. 33 | outroots = ['la7803fkq'] 34 | outputs = [] 35 | for outroot in outroots: 36 | for sfx in ('corrtag_a', 'counts_a', 37 | 'corrtag_b', 'counts_b', 38 | 'flt_a', 'flt_b', 'x1d'): 39 | fname = '{}_{}.fits'.format(outroot, sfx) 40 | outputs.append((fname, 'ref_' + fname)) 41 | self.compare_outputs(outputs, rtol=1e-7) 42 | -------------------------------------------------------------------------------- /tests/test_wavecal_fuvb_g160m_relmvreq.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/FUVB wavecal, G160M, relmvreq""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestFUVBWavecalG160MRelMvReq(BaseCOS): 13 | detector = 'fuv' 14 | 15 | def test_fuvb_wavecal_g160m_relmvreq(self): 16 | """ 17 | FUV COS regression test 18 | """ 19 | files_to_download = ['ldd9a3h6q_rawtag_a.fits', 20 | 'ldd9a3h6q_rawtag_b.fits', 21 | 'ldd9a3h6q_spt.fits'] 22 | 23 | # Prepare input files. 24 | self.get_input_files(files_to_download) 25 | 26 | input_file = 'ldd9a3h6q_rawtag_a.fits' 27 | # Run CALCOS 28 | calcos.calcos(input_file) 29 | 30 | # Compare results. 31 | # The first outroot is the output from whole ASN, 32 | # the rest are individual members. 
33 | outroots = ['ldd9a3h6q'] 34 | outputs = [] 35 | for outroot in outroots: 36 | for sfx in ('corrtag_a', 'counts_a', 37 | 'corrtag_b', 'counts_b', 38 | 'flt_a', 'flt_b', 'x1d'): 39 | fname = '{}_{}.fits'.format(outroot, sfx) 40 | outputs.append((fname, 'ref_' + fname)) 41 | self.compare_outputs(outputs, rtol=1e-7) 42 | -------------------------------------------------------------------------------- /calcos/pars/calcos.cfgspc: -------------------------------------------------------------------------------- 1 | _task_name_ = string_kw(default="calcos") 2 | input = string_kw(default="", comment="Input asn or raw file(s)") 3 | verbosity = option_kw(-1, 0, 1, 2, default=1, comment="Print a little or a lot?") 4 | savetmp = boolean_kw(default=False, comment="Save temporary files?") 5 | outdir = string_kw(default="", comment="Optional output directory") 6 | find_target = boolean_kw(default=False, comment="Find target in XD?") 7 | cutoff = float_or_none_kw(default=None, comment="N-sigma cutoff for find") 8 | shift_file = string_kw(default="", comment="File to specify OSM shift values") 9 | csum = boolean_kw(default=False, comment="Write 'calcos sum' (CSUM) file?") 10 | raw_csum = boolean_kw(default=False, comment="Use raw coords for CSUM file?") 11 | compress = boolean_kw(default=False, comment="Compress the CSUM file?") 12 | comp_param = string_kw(default="gzip,-0.01", comment="Compression parameters") 13 | binx = option_kw(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, default=1, comment="Bin CSUM in X by this factor") 14 | biny = option_kw(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, default=1, comment="Bin CSUM in Y by this factor") 15 | stimfile = string_kw(default="", comment="Append stim locations to file") 16 | livefile = string_kw(default="", comment="Append livetime factors to file") 17 | burstfile = string_kw(default="", comment="Append burst information to file") 18 | print_version = boolean_kw(default=False, comment="Print version number?") 19 | print_revision = boolean_kw(default=False, comment="Print full version string?") 20 | [ _RULES_ ] 21 | -------------------------------------------------------------------------------- /.github/workflows/python_testing.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: CalCOS Pytest 5 | 6 | on: 7 | push: 8 | branches: 9 | - main 10 | pull_request: 11 | branches: 12 | - main 13 | 14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.ref }} 16 | cancel-in-progress: true 17 | 18 | jobs: 19 | check: 20 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 21 | with: 22 | envs: | 23 | - linux: check-style 24 | - linux: check-build 25 | test: 26 | uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1 27 | with: 28 | setenv: | 29 | TEST_BIGDATA: https://bytesalad.stsci.edu/artifactory 30 | lref: /grp/hst/cdbs/lref/ 31 | envs: | 32 | - linux: py39-xdist 33 | - linux: py310-xdist 34 | - linux: py311-xdist 35 | - linux: py312-xdist 36 | # `tox` does not currently respect `requires-python` versions when creating testing environments; 37 | # if this breaks, add an upper pin to `requires-python` and revert this py3 to the latest working version 38 | - linux: py3-cov-xdist 39 | coverage: codecov 40 | pytest-results-summary: true 41 | - 
macos: py3-xdist 42 | pytest-results-summary: true 43 | - linux: py3-devdeps-xdist 44 | toxdeps: tox-uv 45 | -------------------------------------------------------------------------------- /tests/test_nuv_sci_g185m.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV G185M sci data.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestNUVSciG185M(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_nuv_sci_g185m(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la8q99050_asn.fits', 20 | 'la8q99jbq_rawtag.fits', 21 | 'la8q99jbq_spt.fits'] 22 | 23 | # Prepare input files. 24 | self.get_input_files(files_to_download) 25 | 26 | input_file = 'la8q99050_asn.fits' 27 | # Run CALCOS 28 | calcos.calcos(input_file) 29 | 30 | # Compare results. 31 | # The first outroot is the output from whole ASN, 32 | # the rest are individual members. 33 | outroots = ['la8q99050', 'la8q99jbq'] 34 | outputs = [] 35 | for sfx in ['x1dsum', 'x1dsum3']: 36 | fname = f'{outroots[0]}_{sfx}.fits' 37 | comparison_name = 'ref_' + fname 38 | outputs.append((fname, comparison_name)) 39 | for outroot in outroots[1:]: 40 | for sfx in ('corrtag', 'counts', 41 | 'flt', 'lampflash', 'x1d'): 42 | fname = '{}_{}.fits'.format(outroot, sfx) 43 | outputs.append((fname, 'ref_' + fname)) 44 | self.compare_outputs(outputs, rtol=1e-7) 45 | -------------------------------------------------------------------------------- /tests/test_nuv_sci_g230l.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV G230L sci data.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 11 | @pytest.mark.slow 12 | class TestNUVSciG230L(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_nuv_sci_g230l(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la8p93030_asn.fits', 20 | 'la8p93a7q_rawtag.fits', 21 | 'la8p93a7q_spt.fits'] 22 | 23 | # Prepare input files. 24 | self.get_input_files(files_to_download) 25 | 26 | input_file = 'la8p93030_asn.fits' 27 | # Run CALCOS 28 | calcos.calcos(input_file) 29 | 30 | # Compare results. 31 | # The first outroot is the output from whole ASN, 32 | # the rest are individual members. 33 | outroots = ['la8p93030', 'la8p93a7q'] 34 | outputs = [] 35 | for sfx in ['x1dsum', 'x1dsum3']: 36 | fname = f'{outroots[0]}_{sfx}.fits' 37 | comparison_name = 'ref_' + fname 38 | outputs.append((fname, comparison_name)) 39 | for outroot in outroots[1:]: 40 | for sfx in ('corrtag', 'counts', 41 | 'flt', 'lampflash', 'x1d'): 42 | fname = '{}_{}.fits'.format(outroot, sfx) 43 | outputs.append((fname, 'ref_' + fname)) 44 | self.compare_outputs(outputs, rtol=1e-7) 45 | -------------------------------------------------------------------------------- /tests/test_nuv_sci_g285m.py: -------------------------------------------------------------------------------- 1 | """Tests for COS/NUV G285M sci data.""" 2 | 3 | import pytest 4 | 5 | import calcos 6 | from helpers import BaseCOS 7 | 8 | 9 | # TODO: Mark this as slow when there are faster tests added for CI tests 10 | # so that this only runs in nightly tests. 
11 | @pytest.mark.slow 12 | class TestNUVSciG285M(BaseCOS): 13 | detector = 'nuv' 14 | 15 | def test_nuv_sci_g285m(self): 16 | """ 17 | COS regression test 18 | """ 19 | files_to_download = ['la8q99030_asn.fits', 20 | 'la8q99ixq_rawtag.fits', 21 | 'la8q99ixq_spt.fits'] 22 | 23 | # Prepare input files. 24 | self.get_input_files(files_to_download) 25 | 26 | input_file = 'la8q99030_asn.fits' 27 | # Run CALCOS 28 | calcos.calcos(input_file) 29 | 30 | # Compare results. 31 | # The first outroot is the output from whole ASN, 32 | # the rest are individual members. 33 | outroots = ['la8q99030', 'la8q99ixq'] 34 | outputs = [] 35 | for sfx in ['x1dsum', 'x1dsum3']: 36 | fname = f'{outroots[0]}_{sfx}.fits' 37 | comparison_name = 'ref_' + fname 38 | outputs.append((fname, comparison_name)) 39 | for outroot in outroots[1:]: 40 | for sfx in ('corrtag', 'counts', 41 | 'flt', 'lampflash', 'x1d'): 42 | fname = '{}_{}.fits'.format(outroot, sfx) 43 | outputs.append((fname, 'ref_' + fname)) 44 | self.compare_outputs(outputs, rtol=1e-7) 45 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "calcos" 3 | description = "Calibration software for COS (Cosmic Origins Spectrograph)" 4 | requires-python = ">=3.9" 5 | authors = [ 6 | { name = "Phil Hodge", email = "help@stsci.edu" }, 7 | { name = "Robert Jedrzejewski" }, 8 | ] 9 | classifiers = [ 10 | "Intended Audience :: Science/Research", 11 | "License :: OSI Approved :: BSD License", 12 | "Operating System :: OS Independent", 13 | "Programming Language :: Python :: 3", 14 | "Programming Language :: C", 15 | "Topic :: Software Development :: Libraries :: Python Modules", 16 | ] 17 | dependencies = ["astropy>=5.0.4", "numpy", "scipy", "stsci.tools>=4.0.0"] 18 | dynamic = ["version"] 19 | 20 | [project.readme] 21 | file = "README.md" 22 | content-type = "text/markdown" 23 | 24 | [project.scripts] 25 | calcos = "calcos:main" 26 | 27 | [project.optional-dependencies] 28 | docs = ["sphinx<7"] 29 | test = ["ci-watson", "pytest", "pytest-cov"] 30 | 31 | [build-system] 32 | requires = [ 33 | "setuptools>=61.2", 34 | "setuptools_scm[toml]>=3.4", 35 | "wheel", 36 | "numpy>=2.0.0", 37 | ] 38 | build-backend = "setuptools.build_meta" 39 | 40 | [tool.setuptools] 41 | include-package-data = false 42 | 43 | [tool.setuptools.packages.find] 44 | 45 | [tool.setuptools.package-data] 46 | calcos = ["pars/*", "*.help"] 47 | 48 | [tool.setuptools_scm] 49 | version_file = "calcos/version.py" 50 | 51 | [tool.pytest.ini_options] 52 | minversion = "3.0" 53 | norecursedirs = ["build", "doc/build", "src"] 54 | junit_family = "xunit2" 55 | 56 | [tool.ruff.lint] 57 | exclude = ["setup.py", "__init__.py"] 58 | ignore = ["E265", "F821", "F841"] 59 | -------------------------------------------------------------------------------- /tests/test_airglow.py: -------------------------------------------------------------------------------- 1 | from calcos import airglow 2 | from generate_tempfiles import create_disptab_file 3 | import os 4 | 5 | 6 | def test_find_airglow_limits(tmp_path): 7 | """ 8 | unit test for find_airglow_limits() 9 | test ran 10 | - By providing certain values as dict to be used as filter for finding the dispersion 11 | - testing for both FUV segments 12 | - creating a temporary disptab ref file. 
13 |     - testing for 5 airglow lines
14 |     - calculating the expected pixel numbers by following the math in the module itself
15 |       and referring to the values in the ref file, which reproduces the values to a decent number of decimal places.
16 | 
17 |     Returns
18 |     -------
19 |     pass if expected == actual, fail otherwise.
20 | 
21 |     """
22 |     # Setup
23 |     inf = {"obstype": "SPECTROSCOPIC", "cenwave": 1055, "aperture": "PSA", "detector": "FUV",
24 |            "opt_elem": "G130M", "segment": "FUVA"}
25 |     seg = ["FUVA", "FUVB"]
26 |     disptab = create_disptab_file(str(tmp_path / '49g17153l_disp.fits'))
27 |     airglow_lines = ["Lyman_alpha", "N_I_1200", "O_I_1304", "O_I_1356", "N_I_1134"]
28 |     actual_pxl = [
29 |         [], [], (15421.504705213156, 15738.02214190493), (8853.838672375898, 9135.702216258482)]
30 |     # Test
31 |     test_pxl = [[], []]
32 |     # only works for FUV
33 |     for segment in seg:
34 |         for line in airglow_lines:
35 |             limits = airglow.findAirglowLimits(inf, segment, disptab, line)
36 |             if limits is not None:
37 |                 x, y = limits
38 |                 test_pxl.append((x, y))
39 |     # Verify
40 |     for i in range(len(actual_pxl)):
41 |         assert actual_pxl[i] == test_pxl[i]
42 | 
--------------------------------------------------------------------------------
/tests/test_fuv_timetag.py:
--------------------------------------------------------------------------------
1 | """Tests for COS/FUV timetag."""
2 | 
3 | import pytest
4 | 
5 | import calcos
6 | from helpers import BaseCOS
7 | 
8 | 
9 | # TODO: Mark this as slow when there are faster tests added for CI tests
10 | # so that this only runs in nightly tests.
11 | @pytest.mark.slow
12 | class TestFUVTimetag(BaseCOS):
13 |     detector = 'fuv'
14 | 
15 |     def test_fuv_timetag_1(self):
16 |         """
17 |         FUV COS regression test #1
18 |         """
19 |         files_to_download = ['lckg01070_asn.fits', 'lckg01czq_spt.fits',
20 |                              'lckg01d4q_spt.fits', 'lckg01d9q_spt.fits',
21 |                              'lckg01dcq_spt.fits']
22 | 
23 |         # Prepare input files.
24 |         self.get_input_files(files_to_download)
25 | 
26 |         # Run CALCOS
27 |         input_file = 'lckg01070_asn.fits'
28 |         calcos.calcos(input_file)
29 | 
30 |         # Compare results.
31 |         # The first outroot is the output from whole ASN,
32 |         # the rest are individual members.
33 |         outroots = ['lckg01070', 'lckg01czq', 'lckg01d4q', 'lckg01d9q',
34 |                     'lckg01dcq']
35 |         outputs = []
36 |         for sfx in ('x1dsum', 'x1dsum1', 'x1dsum2', 'x1dsum3', 'x1dsum4'):
37 |             fname = '{}_{}.fits'.format(outroots[0], sfx)
38 |             comparison_name = 'ref_' + fname
39 |             outputs.append((fname, comparison_name))
40 |         for outroot in outroots[1:]:
41 |             for sfx in ('corrtag_a', 'corrtag_b', 'counts_a', 'counts_b',
42 |                         'flt_a', 'flt_b', 'lampflash', 'x1d'):
43 |                 fname = '{}_{}.fits'.format(outroot, sfx)
44 |                 comparison_name = 'ref_' + fname
45 |                 outputs.append((fname, comparison_name))
46 |         self.compare_outputs(outputs, rtol=3e-7)
47 | 
--------------------------------------------------------------------------------
/JenkinsfileRT:
--------------------------------------------------------------------------------
1 | // Obtain files from source control system.
2 | // [skip ci] and [ci skip] have no effect here.
3 | if (utils.scm_checkout(['skip_disable':true])) return
4 | 
5 | // Allow modification of the job configuration, affects all relevant build configs.
6 | // Pass this object in the argument list to the `run()` function below to apply these settings to the job's execution.
7 | jobconfig = new JobConfig()
8 | jobconfig.post_test_summary = true
9 | 
10 | // Run nightly tests, which include the slow ones.
11 | bc = new BuildConfig() 12 | bc.nodetype = "linux" 13 | bc.name = "release" 14 | bc.env_vars = ['TEST_BIGDATA=https://bytesalad.stsci.edu/artifactory', 15 | 'lref=/grp/hst/cdbs/lref/'] 16 | bc.conda_channels = ['http://conda.anaconda.org/conda-forge/'] 17 | bc.conda_packages = ['python=3.9'] 18 | bc.build_cmds = ["pip install codecov pytest-cov ci-watson", 19 | "pip install -e .[test]"] 20 | bc.test_cmds = ["pytest tests --basetemp=tests_output --junitxml results.xml --bigdata --slow -v"] 21 | bc.failedUnstableThresh = 1 22 | bc.failedFailureThresh = 6 23 | 24 | // Dev dependencies 25 | bc1 = utils.copy(bc) 26 | bc1.name = "dev" 27 | bc1.conda_packages[0] = "python=3.10" 28 | bc1.build_cmds[1] = "pip install -r requirements-dev.txt --upgrade -e '.[test]'" 29 | //bc1.build_cmds = ["pip install -e .[test]", 30 | // "pip install astropy>=0.0.dev0 --upgrade --no-deps", 31 | // "pip install pyyaml"] 32 | 33 | bc2 = utils.copy(bc) 34 | bc2.name = '3.11' 35 | bc2.conda_packages = ['python=3.11'] 36 | 37 | // Iterate over configurations that define the (distributed) build matrix. 38 | // Spawn a host of the given nodetype for each combination and run in parallel. 39 | // Also apply the job configuration defined in `jobconfig` above. 40 | utils.run([bc, bc1, bc2, jobconfig]) 41 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | check-{style,build} 4 | test{,-pyargs,-warnings,-regtests,-cov}-xdist 5 | build-{docs,dist} 6 | 7 | # tox environments are constructed with so-called 'factors' (or terms) 8 | # separated by hyphens, e.g. test-devdeps-cov. Lines below starting with factor: 9 | # will only take effect if that factor is included in the environment name. To 10 | # see a list of example environments that can be run, along with a description, 11 | # run: 12 | # 13 | # tox -l -v 14 | # 15 | 16 | [testenv:check-style] 17 | description = check code style, e.g. with flake8 18 | skip_install = true 19 | deps = 20 | flake8 21 | commands = 22 | flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics {posargs} 23 | flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics {posargs} 24 | 25 | [testenv:check-build] 26 | description = check build sdist/wheel and a strict twine check for metadata 27 | skip_install = true 28 | deps = 29 | build 30 | twine>=3.3 31 | commands = 32 | python -m build . 
33 | twine check --strict dist/* 34 | 35 | [testenv] 36 | description = 37 | run tests 38 | devdeps: with the latest developer version of key dependencies 39 | warnings: treating warnings as errors 40 | regtests: with --bigdata and --slow flags 41 | cov: with coverage 42 | xdist: using parallel processing 43 | passenv = 44 | HOME 45 | CRDS_* 46 | CODECOV_* 47 | TEST_BIGDATA 48 | lref 49 | extras = 50 | test 51 | deps = 52 | cov: pytest-cov 53 | xdist: pytest-xdist 54 | commands_pre = 55 | devdeps: pip install -r requirements-dev.txt -U --upgrade-strategy eager 56 | pip freeze 57 | commands = 58 | pytest --slow \ 59 | regtests: --bigdata \ 60 | cov: --cov --cov-report=xml \ 61 | warnings: -W error \ 62 | xdist: -n auto \ 63 | {posargs} 64 | 65 | [testenv:build-docs] 66 | description = invoke sphinx-build to build the HTML docs 67 | skip_install = true 68 | extras = 69 | docs 70 | commands = 71 | sphinx-build -W docs/source docs/_build 72 | 73 | [testenv:build-dist] 74 | description = build wheel and sdist 75 | skip_install = true 76 | deps = 77 | build 78 | commands = 79 | python -m build . -------------------------------------------------------------------------------- /tests/test_shiftfile.py: -------------------------------------------------------------------------------- 1 | from calcos import shiftfile 2 | 3 | def create_shift_file(filename): 4 | # Create the shift file for use in tests 5 | with open(filename, "w") as file: 6 | file.write("#dataset\tfpoffset\tflash #\tstripe\tshift1\tshift2\n") 7 | for i in range(10): 8 | if i % 3 == 0: 9 | file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format("abc123def", "any", "1", "NUVA", "45.234435", "7")) 10 | elif i % 5 == 0: 11 | file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format("ghi456jkl", "any", "2", "NUVB", "34.543453", "7")) 12 | elif i % 6 == 0 or i % 8 == 0: 13 | file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format("ghi456jkl", "any", "2", "FUVA", "19.543453", "5")) 14 | elif i == 9: 15 | file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format("mno789pqr", "any", "2", "FUVB", "52.723453", "6")) 16 | else: 17 | file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format("mno789pqr", "any", "1", "NUVC", "-34.543453", "7")) 18 | 19 | return 20 | 21 | def test_shift_file(tmp_path): 22 | shift_file = str(tmp_path / "shift_file.txt") 23 | create_shift_file(shift_file) 24 | # Test 25 | ob = shiftfile.ShiftFile(shift_file, 'abc123def', 'any') 26 | # Verify 27 | assert len(ob.user_shift_dict) > 0 28 | 29 | 30 | def test_get_shifts(tmp_path): 31 | # Setup 32 | shift_file = str(tmp_path / "shift_file.txt") 33 | create_shift_file(shift_file) 34 | ob1 = shiftfile.ShiftFile(shift_file, 'ghi456jkl', 'any') 35 | ob2 = shiftfile.ShiftFile(shift_file, 'abc123def', 'any') 36 | keys = [('any', 'nuva'), ('any', 'nuvb'), (2, 'nuvc'), ('any', 'any'), ('any', 'fuva'), ('any', 'fuvb')] 37 | expected_values1 = [((None, None), 0), ((34.543453, 7.0), 1), ((None, None), 0), ((19.543453, 5.0), 2), ((19.543453, 5.0), 1), ((None, None), 0)] 38 | expected_values2 = [((45.234435, 7.0), 1), ((None, None), 0), ((None, None), 0), ((45.234435, 7.0), 1), ((None, None), 0), ((None, None), 0)] 39 | # Test 40 | test_values1 = [] 41 | test_values2 = [] 42 | for key in keys: 43 | test_values1.append(shiftfile.ShiftFile.getShifts(ob1, key)) 44 | for key in keys: 45 | test_values2.append(shiftfile.ShiftFile.getShifts(ob2, key)) 46 | # Verify 47 | for i in range(len(expected_values1)): 48 | assert expected_values1[i] == test_values1[i] 49 | assert expected_values2[i] == test_values2[i] 50 | 51 | 
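
As an aside to the shift-file tests above: the shift file is a plain tab-separated table whose columns are dataset, fpoffset, flash number, stripe, shift1 and shift2, as written by create_shift_file(). Within calcos itself this format is parsed by calcos.shiftfile.ShiftFile; the sketch below is an illustrative, standard-library-only reader (the helper name and dictionary layout are assumptions for illustration, not part of the package).

# shift_file_reader.py -- illustrative sketch only; not part of calcos.
import csv


def read_shift_rows(filename):
    """Return the rows of a tab-separated shift file as dictionaries."""
    rows = []
    with open(filename, newline="") as fh:
        for record in csv.reader(fh, delimiter="\t"):
            # Skip blank lines and the '#dataset ...' header comment.
            if not record or record[0].startswith("#"):
                continue
            dataset, fpoffset, flash, stripe, shift1, shift2 = record
            rows.append({
                "dataset": dataset,
                "fpoffset": fpoffset,
                "flash": flash,
                "stripe": stripe.lower(),
                "shift1": float(shift1),
                "shift2": float(shift2),
            })
    return rows

For example, calling read_shift_rows() on the file written by create_shift_file() above would yield ten rows, with stripe values such as 'nuva' and 'fuvb' matching the keys used in test_get_shifts().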
-------------------------------------------------------------------------------- /tests/test_extract.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from calcos.x1d import * 4 | import numpy as np 5 | from generate_tempfiles import generate_fits_file 6 | 7 | def test_get_columns(tmp_path): 8 | """ 9 | Test if the function is returning the right column fields 10 | """ 11 | # Setup 12 | test_data = generate_fits_file(str(tmp_path / "lbgu17qnq_corrtag_a.fits")) 13 | dt = test_data[1].data 14 | detector = "FUV" 15 | 16 | # Truth actual values 17 | # Testing for FUV 18 | xfull = dt.field("xfull") 19 | if cosutil.findColumn(dt, "yfull"): 20 | yfull = dt.field("yfull") 21 | else: 22 | yfull = dt.field("ycorr") 23 | dq = dt.field("dq") 24 | epsilon = dt.field("epsilon") 25 | 26 | # Test function 27 | (xf, yf, dq2, epsilon2) = extract.getColumns(test_data, detector) 28 | 29 | # Verify 30 | np.testing.assert_array_equal(xfull, xf) 31 | np.testing.assert_array_equal(yfull, yf) 32 | np.testing.assert_array_equal(dq, dq2) 33 | np.testing.assert_array_equal(epsilon, epsilon2) 34 | 35 | 36 | def test_remove_unwanted_column(tmp_path): 37 | """ 38 | Old column length should be equal to new column length + amount of the removed columns 39 | """ 40 | # Setup 41 | target_cols = ['XFULL', 'YFULL'] 42 | # Truth 43 | fd = generate_fits_file(str(tmp_path / "lbgu17qnq_lampflash.fits")) 44 | table = fd[1].data 45 | cols = table.columns 46 | 47 | # Test 48 | fd = extract.remove_unwanted_columns(fd) 49 | new_cols = fd[1].data.columns 50 | # Verify 51 | deleted_cols = set(cols) - set(new_cols) 52 | deleted_cols = np.array(list(deleted_cols)) 53 | temp_cols = [d.name for d in deleted_cols] 54 | deleted_cols = deleted_cols[np.argsort(temp_cols)] 55 | # assert target_cols[0] == deleted_cols[0].name 56 | # assert target_cols[1] == deleted_cols[1].name 57 | 58 | 59 | def test_next_power_of_two(): 60 | """ 61 | Test the next_power_of_two 62 | @return: none 63 | """ 64 | # Truth 65 | next_power = 8 66 | 67 | # Verify 68 | assert next_power == extract.next_power_of_two(7) 69 | 70 | 71 | def test_add_column_comment(tmp_path): 72 | # verify if entered comment to a header is present in the fits file. 73 | # Setup 74 | ofd = generate_fits_file(str(tmp_path / "myFitsFile.fits")) 75 | comment = "This comment is generated by a unit-test." 76 | 77 | # Exercise 78 | test_table = extract.add_column_comment(ofd, 'TIME', comment) 79 | 80 | # Verify 81 | assert comment == test_table[1].header.comments['TTYPE1'] 82 | 83 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Spacetelescope Open Source Code of Conduct 2 | 3 | We expect all "spacetelescope" organization projects to adopt a code of conduct that ensures a productive, respectful environment for all open source contributors and participants. We are committed to providing a strong and enforced code of conduct and expect everyone in our community to follow these guidelines when interacting with others in all forums. Our goal is to keep ours a positive, inclusive, successful, and growing community. The community of participants in open source Astronomy projects is made up of members from around the globe with a diverse set of skills, personalities, and experiences. It is through these differences that our community experiences success and continued growth. 
4 | 5 | 6 | As members of the community, 7 | 8 | - We pledge to treat all people with respect and provide a harassment- and bullying-free environment, regardless of sex, sexual orientation and/or gender identity, disability, physical appearance, body size, race, nationality, ethnicity, and religion. In particular, sexual language and imagery, sexist, racist, or otherwise exclusionary jokes are not appropriate. 9 | 10 | - We pledge to respect the work of others by recognizing acknowledgment/citation requests of original authors. As authors, we pledge to be explicit about how we want our own work to be cited or acknowledged. 11 | 12 | - We pledge to welcome those interested in joining the community, and realize that including people with a variety of opinions and backgrounds will only serve to enrich our community. In particular, discussions relating to pros/cons of various technologies, programming languages, and so on are welcome, but these should be done with respect, taking proactive measure to ensure that all participants are heard and feel confident that they can freely express their opinions. 13 | 14 | - We pledge to welcome questions and answer them respectfully, paying particular attention to those new to the community. We pledge to provide respectful criticisms and feedback in forums, especially in discussion threads resulting from code contributions. 15 | 16 | - We pledge to be conscientious of the perceptions of the wider community and to respond to criticism respectfully. We will strive to model behaviors that encourage productive debate and disagreement, both within our community and where we are criticized. We will treat those outside our community with the same respect as people within our community. 17 | 18 | - We pledge to help the entire community follow the code of conduct, and to not remain silent when we see violations of the code of conduct. We will take action when members of our community violate this code such as such as contacting conduct@stsci.edu (all emails sent to this address will be treated with the strictest confidence) or talking privately with the person. 19 | 20 | This code of conduct applies to all community situations online and offline, including mailing lists, forums, social media, conferences, meetings, associated social events, and one-to-one interactions. 21 | 22 | Parts of this code of conduct have been adapted from the Astropy and Numfocus codes of conduct. 23 | http://www.astropy.org/code_of_conduct.html 24 | https://www.numfocus.org/about/code-of-conduct/ 25 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables. 
11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 14 | 15 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " pickle to make pickle files" 22 | @echo " json to make JSON files" 23 | @echo " htmlhelp to make HTML files and a HTML help project" 24 | @echo " qthelp to make HTML files and a qthelp project" 25 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 26 | @echo " changes to make an overview of all changed/added/deprecated items" 27 | @echo " linkcheck to check all external links for integrity" 28 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 29 | 30 | clean: 31 | -rm -rf $(BUILDDIR)/* 32 | 33 | html: 34 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 35 | @echo 36 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 37 | 38 | dirhtml: 39 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 40 | @echo 41 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 42 | 43 | pickle: 44 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 45 | @echo 46 | @echo "Build finished; now you can process the pickle files." 47 | 48 | json: 49 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 50 | @echo 51 | @echo "Build finished; now you can process the JSON files." 52 | 53 | htmlhelp: 54 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 55 | @echo 56 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 57 | ".hhp project file in $(BUILDDIR)/htmlhelp." 58 | 59 | qthelp: 60 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 61 | @echo 62 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 63 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 64 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/calcos.qhcp" 65 | @echo "To view the help file:" 66 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/calcos.qhc" 67 | 68 | latex: 69 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 70 | @echo 71 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 72 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 73 | "run these through (pdf)latex." 74 | 75 | changes: 76 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 77 | @echo 78 | @echo "The overview file is in $(BUILDDIR)/changes." 79 | 80 | linkcheck: 81 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 82 | @echo 83 | @echo "Link check complete; look for any errors in the above output " \ 84 | "or in $(BUILDDIR)/linkcheck/output.txt." 85 | 86 | doctest: 87 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 88 | @echo "Testing of doctests in the sources finished, look at the " \ 89 | "results in $(BUILDDIR)/doctest/output.txt." 
90 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | set SPHINXBUILD=sphinx-build 6 | set BUILDDIR=build 7 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source 8 | if NOT "%PAPER%" == "" ( 9 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 10 | ) 11 | 12 | if "%1" == "" goto help 13 | 14 | if "%1" == "help" ( 15 | :help 16 | echo.Please use `make ^` where ^ is one of 17 | echo. html to make standalone HTML files 18 | echo. dirhtml to make HTML files named index.html in directories 19 | echo. pickle to make pickle files 20 | echo. json to make JSON files 21 | echo. htmlhelp to make HTML files and a HTML help project 22 | echo. qthelp to make HTML files and a qthelp project 23 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 24 | echo. changes to make an overview over all changed/added/deprecated items 25 | echo. linkcheck to check all external links for integrity 26 | echo. doctest to run all doctests embedded in the documentation if enabled 27 | goto end 28 | ) 29 | 30 | if "%1" == "clean" ( 31 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 32 | del /q /s %BUILDDIR%\* 33 | goto end 34 | ) 35 | 36 | if "%1" == "html" ( 37 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 38 | echo. 39 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 40 | goto end 41 | ) 42 | 43 | if "%1" == "dirhtml" ( 44 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 45 | echo. 46 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 47 | goto end 48 | ) 49 | 50 | if "%1" == "pickle" ( 51 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 52 | echo. 53 | echo.Build finished; now you can process the pickle files. 54 | goto end 55 | ) 56 | 57 | if "%1" == "json" ( 58 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 59 | echo. 60 | echo.Build finished; now you can process the JSON files. 61 | goto end 62 | ) 63 | 64 | if "%1" == "htmlhelp" ( 65 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 66 | echo. 67 | echo.Build finished; now you can run HTML Help Workshop with the ^ 68 | .hhp project file in %BUILDDIR%/htmlhelp. 69 | goto end 70 | ) 71 | 72 | if "%1" == "qthelp" ( 73 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 74 | echo. 75 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 76 | .qhcp project file in %BUILDDIR%/qthelp, like this: 77 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\calcos.qhcp 78 | echo.To view the help file: 79 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\calcos.ghc 80 | goto end 81 | ) 82 | 83 | if "%1" == "latex" ( 84 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 85 | echo. 86 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 87 | goto end 88 | ) 89 | 90 | if "%1" == "changes" ( 91 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 92 | echo. 93 | echo.The overview file is in %BUILDDIR%/changes. 94 | goto end 95 | ) 96 | 97 | if "%1" == "linkcheck" ( 98 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 99 | echo. 100 | echo.Link check complete; look for any errors in the above output ^ 101 | or in %BUILDDIR%/linkcheck/output.txt. 102 | goto end 103 | ) 104 | 105 | if "%1" == "doctest" ( 106 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 107 | echo. 
108 | echo.Testing of doctests in the sources finished, look at the ^ 109 | results in %BUILDDIR%/doctest/output.txt. 110 | goto end 111 | ) 112 | 113 | :end 114 | -------------------------------------------------------------------------------- /calcos/airglow.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, print_function 2 | from . import cosutil 3 | from . import dispersion 4 | from .calcosparam import * # parameter definitions 5 | 6 | # Half width (pixels) of airglow region to be excluded. 7 | AIRGLOW_LyA = 250. # Lyman alpha 8 | AIRGLOW_FUV = 100. # anything but Lyman alpha, but still FUV 9 | AIRGLOW_NUV = 30. # any NUV airglow line 10 | 11 | # Wavelengths in Angstroms of airglow lines. 12 | # The values in the tuple are the wavelengths of the lines in the multiplet. 13 | AIRGLOW_WAVELENGTHS = {"Lyman_alpha": (1215.67,), 14 | "N_I_1200": (1199.550, 1200.223, 1200.710), 15 | "O_I_1304": (1302.168, 1304.858, 1306.029), 16 | "O_I_1356": (1355.598, 1358.512), 17 | "N_I_1134": (1134.165, 1134.415, 1134.980)} 18 | 19 | # ? "O_I_2973": (2973.154,)} 20 | 21 | def findAirglowLimits(info, segment, disptab, airglow_line): 22 | """Find the pixel region corresponding to a set of airglow lines. 23 | 24 | Parameters 25 | ---------- 26 | info: dictionary 27 | Keywords and values. 28 | 29 | segment: str 30 | Segment or stripe name: "FUVA", "FUVB", "NUVA", "NUVB", "NUVC". 31 | 32 | disptab: str 33 | Name of reference table for dispersion solution. 34 | 35 | airglow_line: str 36 | The key for extracting an element from AIRGLOW_WAVELENGTHS. 37 | 38 | Returns 39 | ------- 40 | tuple (x0, x1) of floats, or None 41 | x0 and x1 are the left and right pixel numbers of the region 42 | that should be omitted to avoid contamination by an airglow line. 43 | These are inclusive limits (pixels), not the elements of a slice. 44 | None will be returned if the specified line (or multiplet) is off 45 | the detector, the mode was not found in a reference table, or 46 | the obstype is not spectroscopic. 47 | """ 48 | if info["obstype"] != "SPECTROSCOPIC": 49 | print("Data is not spectroscopic") 50 | return None 51 | 52 | wl_airglow = AIRGLOW_WAVELENGTHS[airglow_line] 53 | 54 | if info["detector"] == "FUV": 55 | axis_length = FUV_X 56 | if airglow_line == "Lyman_alpha": 57 | exclude = AIRGLOW_LyA 58 | else: 59 | exclude = AIRGLOW_FUV 60 | else: 61 | axis_length = NUV_X 62 | exclude = AIRGLOW_NUV 63 | 64 | # This filter is used for both xtractab and disptab. 65 | filter = {"opt_elem": info["opt_elem"], 66 | "cenwave": info["cenwave"], 67 | "segment": segment, 68 | "aperture": info["aperture"]} 69 | 70 | # currently not necessary: filter["fpoffset"] = info["fpoffset"] 71 | disp_rel = dispersion.Dispersion(disptab, filter) 72 | if not disp_rel.isValid(): 73 | cosutil.printWarning("Dispersion relation is not valid; filter is:") 74 | cosutil.printContinuation(str(filter)) 75 | disp_rel.close() 76 | return None 77 | 78 | min_wl = min(wl_airglow) 79 | max_wl = max(wl_airglow) 80 | # First check whether the airglow line is off the detector. 81 | # NOTE that we assume that wavelength increases with x. 82 | wl_left_edge = disp_rel.evalDisp(-exclude) 83 | if max_wl < wl_left_edge: 84 | disp_rel.close() 85 | return None 86 | wl_right_edge = disp_rel.evalDisp(axis_length - 1. 
+ exclude) 87 | if min_wl > wl_right_edge: 88 | disp_rel.close() 89 | return None 90 | 91 | # x_left and x_right are the pixel coordinates for the minimum 92 | # and maximum airglow wavelengths in the multiplet. 93 | x_left = float(disp_rel.evalInvDisp(min_wl, tiny=1.e-8)) 94 | x_right = float(disp_rel.evalInvDisp(max_wl, tiny=1.e-8)) 95 | x0 = x_left - exclude 96 | x1 = x_right + exclude 97 | disp_rel.close() 98 | 99 | return (x0, x1) 100 | -------------------------------------------------------------------------------- /calcos/shiftfile.py: -------------------------------------------------------------------------------- 1 | from __future__ import division # confidence high 2 | 3 | class ShiftFile(object): 4 | """Read shift_file. 5 | 6 | The shift_file is a text file supplied by the user to specify either 7 | shift1 or shift2 (or both). Blank lines and lines beginning with '#' 8 | will be ignored; otherwise, every line must have either five or six 9 | words: 10 | 11 | rootname, fpoffset, flash_number, segment/stripe, shift1, shift2 12 | 13 | All words given as strings are case insensitive (internally they will be 14 | converted to lower case). 15 | flash_number is one-indexed, to agree with the header keywords and with 16 | the information written to the trailer file. 17 | If shift1 is not to be specified, use the value "N/A". 18 | If shift2 is not to be specified, it may be given as "N/A" or simply left 19 | off (i.e. only five words on the line). 20 | Any or all of rootname, fpoffset, flash_number and segment/stripe may be 21 | given as "ANY", which is interpreted as a wildcard, i.e. it matches any 22 | rootname, fpoffset, etc. 23 | 24 | user_shifts = shiftfile.ShiftFile(shift_file, rootname, fpoffset) 25 | 26 | getShifts is a public method: 27 | ((shift1, shift2), nfound) = user_shifts.getShifts(key) 28 | key is a tuple of flash number (or "any") and segment/stripe name. 29 | 30 | Parameters 31 | ---------- 32 | shift_file: str 33 | Name of text file supplied by user. 34 | 35 | rootname: str 36 | Rootname of the current exposure. 37 | 38 | fpoffset: int 39 | Fpoffset of the current exposure. 40 | """ 41 | 42 | def __init__(self, shift_file, rootname, fpoffset): 43 | """Constructor.""" 44 | 45 | # This is a dictionary of shifts for the current exposure, with 46 | # keys (flash_number, segment) and values (shift1, shift2). 47 | self.user_shift_dict = None 48 | 49 | fd = open(shift_file, "r") 50 | lines = fd.readlines() 51 | fd.close() 52 | 53 | user_shift_dict = {} 54 | for line in lines: 55 | line = line.strip() 56 | if not line: # ignore blank lines 57 | continue 58 | if line[0] == '#': # ignore comments 59 | continue 60 | words = line.split() 61 | if not words: # ignore blank lines 62 | continue 63 | nwords = len(words) 64 | if nwords < 5 or nwords > 6: 65 | raise RuntimeError("error reading this line of " 66 | "shift_file: '%s'" % line) 67 | for i in range(nwords): 68 | words[i] = words[i].lower() 69 | # Select rows matching rootname and fpoffset. 
70 | if words[0] != "any" and rootname != words[0]: 71 | continue 72 | if words[1] != "any" and fpoffset != int(words[1]): 73 | continue 74 | if words[2] == "any": 75 | flash_number = "any" 76 | else: 77 | flash_number = int(words[2]) 78 | segment = words[3].lower() # could be "any" 79 | key = (flash_number, segment) 80 | if words[4] != "n/a": 81 | shift1 = float(words[4]) 82 | else: 83 | shift1 = None 84 | if nwords == 6 and words[5] != "n/a": 85 | shift2 = float(words[5]) 86 | else: 87 | shift2 = None 88 | user_shift_dict[key] = (shift1, shift2) 89 | 90 | self.user_shift_dict = user_shift_dict 91 | 92 | def getShifts(self, key): 93 | """Return the shifts corresponding to key, if any. 94 | 95 | Parameters 96 | ---------- 97 | key: tuple 98 | Flash number (one indexed) and segment; if flash number 99 | is "any" it matches any flash number (use "any" for auto/GO 100 | wavecals), and if segment is "any" it matches any segment or 101 | stripe (strings are case insensitive) 102 | 103 | Returns 104 | ------- 105 | tuple 106 | ((shift1, shift2), nfound), where nfound is the number of 107 | elements--which should be either 0 or 1--that match key; 108 | either shift1 or shift2 may be None, and they will both be 109 | None if nfound is 0 110 | """ 111 | 112 | (flash_number, segment) = key 113 | segment = segment.lower() 114 | if isinstance(flash_number, str): 115 | flash_number = flash_number.lower() 116 | 117 | nfound = 0 118 | shifts = (None, None) 119 | # sf_key is the flash number and segment read from the shift file 120 | for sf_key in self.user_shift_dict.keys(): 121 | if sf_key[0] == "any" or flash_number == "any" or \ 122 | flash_number == sf_key[0]: 123 | if sf_key[1] == "any" or segment == "any" or \ 124 | segment == sf_key[1]: 125 | shifts = self.user_shift_dict[sf_key] 126 | nfound += 1 127 | 128 | return (shifts, nfound) 129 | -------------------------------------------------------------------------------- /calcos/phot.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division # confidence high 2 | 3 | from . import cosutil 4 | 5 | def doPhot(imphttab, obsmode, hdr): 6 | """Update photometry parameter keywords for imaging data. 7 | 8 | PHOTFLAM, inverse sensitivity, ergs/s/cm2/Ang per count/s 9 | PHOTFNU, inverse sensitivity, ergs/s/cm2/Hz per count/s 10 | PHOTBW, RMS bandwidth of filter plus detector (Angstroms) 11 | PHOTPLAM, Pivot wavelength (Angstroms) 12 | PHOTZPT = -21.10, ST magnitude system zero point 13 | 14 | Parameters 15 | ---------- 16 | imphttab: str 17 | The name of the imaging photometric parameters table. 18 | 19 | obsmode: str 20 | Observation mode (e.g. "cos,nuv,mirrora,psa"). 21 | 22 | hdr: pyfits Header object 23 | The first extension header, updated in-place. 24 | """ 25 | 26 | (photflam, photfnu, photbw, photplam, photzpt) = \ 27 | readImPhtTab(imphttab, obsmode) 28 | 29 | hdr["photflam"] = photflam 30 | hdr["photfnu"] = photfnu 31 | hdr["photbw"] = photbw 32 | hdr["photplam"] = photplam 33 | hdr["photzpt"] = photzpt 34 | 35 | def readImPhtTab(imphttab, obsmode): 36 | """Read the photometry parameters for imaging data from the imphttab. 37 | 38 | This version has hardcoded values, since the imphttab hasn't been 39 | created yet. 
The values were determined using pysynphot (and synphot 40 | for the bandwidth) as follows: 41 | 42 | % setenv PYSYN_CDBS 43 | % python 44 | > import pysynphot as S 45 | > for obsmode in ["cos,nuv,mirrora,psa", 46 | > "cos,nuv,mirrora,boa", 47 | > "cos,nuv,mirrorb,psa", 48 | > "cos,nuv,mirrorb,boa"]: 49 | > sp = S.FlatSpectrum(1., fluxunits="flam") 50 | > bp = S.ObsBandpass(obsmode) 51 | > obs = S.Observation(sp, bp) 52 | > print "#", fluxunits, obsmode 53 | > print 1. / obs.countrate() # photflam 54 | > print obs.pivot() # photplam 55 | 56 | obs.pivot() gave different values for flam vs fnu, so the values 57 | obtained via bandpar were used instead. The bandwidth was also 58 | gotten via bandpar. Here is an example (showing only the first lines): 59 | --> bandpar "cos,nuv,mirrora,psa" 60 | # OBSMODE URESP PIVWV BANDW 61 | cos,nuv,mirrora,psa 4.8214E-18 2319.7 382.88 62 | 63 | The values of photfnu were gotten from photflam as follows: 64 | 65 | bp = S.ObsBandpass("cos,nuv,mirrora,psa") 66 | sp = S.FlatSpectrum(4.816554456084e-18, fluxunits="flam") 67 | print obs.effstim("fnu") 68 | photfnu = 8.64540709538e-30 69 | 70 | bp = S.ObsBandpass("cos,nuv,mirrora,boa") 71 | sp = S.FlatSpectrum(1.107251346369e-15, fluxunits="flam") 72 | photfnu = 1.90968620531e-27 73 | 74 | bp = S.ObsBandpass("cos,nuv,mirrorb,psa") 75 | sp = S.FlatSpectrum(9.720215320058e-17, fluxunits="flam") 76 | photfnu = 1.48789056193e-28 77 | 78 | bp = S.ObsBandpass("cos,nuv,mirrorb,boa") 79 | sp = S.FlatSpectrum(1.866877735677e-14, fluxunits="flam") 80 | photfnu = 2.68068135014e-26 81 | 82 | Parameters 83 | ---------- 84 | imphttab: str 85 | The name of the imaging photometry parameters table. 86 | 87 | obsmode: str 88 | Observation mode. 89 | 90 | Returns 91 | ------- 92 | param: tuple of floats 93 | Photflam, photfnu, photbw, photplam, photpzt. 
94 | """ 95 | 96 | # These values are photflam, photfnu, photbw, photplam, photzpt: 97 | 98 | photdict = { 99 | "mirrora,psa": [4.816554456084e-18, 100 | 8.64540709538e-30, 101 | 382.88, 102 | 2319.7, 103 | -21.1], 104 | "mirrora,boa": [1.107251346369e-15, 105 | 1.90968620531e-27, 106 | 370.65, 107 | 2273.9, 108 | -21.1], 109 | "mirrorb,psa": [9.720215320058e-17, 110 | 1.48789056193e-28, 111 | 466.56, 112 | 2142.4, 113 | -21.1], 114 | "mirrorb,boa": [1.866877735677e-14, 115 | 2.68068135014e-26, 116 | 451.56, 117 | 2075.3, 118 | -21.1] 119 | } 120 | 121 | if obsmode.find(",") >= 0: 122 | words = obsmode.split(",") 123 | else: 124 | words = obsmode.split() 125 | w = [] 126 | for word in words: 127 | w.append(word.strip()) 128 | words = w 129 | 130 | keylist = ["dummy", "dummy"] 131 | for word_orig in words: 132 | word = word_orig.lower() 133 | if word == "cos": 134 | continue 135 | elif word == "nuv": 136 | continue 137 | elif word == "mirrora" or word == "mirrorb": 138 | keylist[0] = word 139 | elif word == "psa" or word == "boa": 140 | keylist[1] = word 141 | else: 142 | cosutil.printWarning("Don't recognize obsmode component %s" % 143 | word_orig) 144 | 145 | if keylist[1] == "dummy": 146 | cosutil.printWarning("No valid aperture found in obsmode %s;" 147 | % obsmode) 148 | cosutil.printContinuation("assuming PSA instead.") 149 | keylist[1] = "psa" 150 | 151 | key = keylist[0] + "," + keylist[1] 152 | 153 | if key in photdict: 154 | param = photdict[key] 155 | else: 156 | raise RuntimeError("obsmode '%s' not recognized, expected " 157 | "'mirrora' or 'mirrorb', 'psa' or 'boa'" % obsmode) 158 | 159 | return param 160 | -------------------------------------------------------------------------------- /calcos/__init__.py: -------------------------------------------------------------------------------- 1 | from importlib.metadata import version 2 | 3 | __version__ = version(__name__) 4 | 5 | # Hack fix for RTD 6 | try: 7 | from .calcos import * 8 | except ImportError: 9 | pass 10 | 11 | from stsci.tools import teal 12 | 13 | __taskname__ = "calcos" 14 | 15 | __usage__ = """ 16 | 17 | 1. To run this task from within Python:: 18 | 19 | >>> import calcos 20 | >>> calcos.calcos("rootname_asn.fits") 21 | >>> calcos.calcos("rootname_rawtag_a.fits") 22 | 23 | >>> from stsci.tools import teal 24 | >>> teal.teal("calcos") 25 | 26 | 2. To run this task using the TEAL GUI to set the parameters under PyRAF:: 27 | 28 | >>> import calcos 29 | >>> teal calcos # or 'epar calcos' 30 | 31 | 3. To run this task from the operating system command line:: 32 | 33 | # Calibrate an entire association. 34 | % calcos rootname_asn.fits 35 | 36 | # Calibrate xyz_rawtag_a.fits (and xyz_rawtag_b.fits, if present) 37 | % calcos xyz_rawtag_a.fits 38 | """ 39 | 40 | if __doc__: 41 | __doc__ += __usage__ 42 | else: 43 | __doc__ = __usage__ 44 | 45 | def localcalcos(input, 46 | verbosity=1, savetmp=False, 47 | outdir="", 48 | find=False, cutoff=None, 49 | shift_file=None, 50 | csum=False, raw_csum=False, 51 | compress=False, 52 | comp_param="gzip,-0.01", 53 | binx=None, biny=None, 54 | stimfile=None, livetimefile=None, burstfile=None, 55 | print_version=False, print_revision=False): 56 | 57 | if print_version: 58 | print("%s" % CALCOS_VERSION_NUMBER) 59 | return 60 | if print_revision: 61 | print("%s" % CALCOS_VERSION) 62 | return 63 | 64 | # Split the input string into words, expand environment variables and 65 | # wildcards, delete duplicates. 
66 | words = splitInputString(input) 67 | infiles = uniqueInput(words) 68 | 69 | if not outdir: 70 | outdir = None 71 | 72 | if not shift_file: 73 | shift_file = None 74 | if not stimfile: 75 | stimfile = None 76 | if not livetimefile: 77 | livetimefile = None 78 | if not burstfile: 79 | burstfile = None 80 | 81 | only_csum = False 82 | 83 | status = 0 84 | for input in infiles: 85 | stat = calcos(input, outdir=outdir, verbosity=verbosity, 86 | find_target={"flag": find, "cutoff": cutoff}, 87 | create_csum_image=csum, 88 | raw_csum_coords=raw_csum, 89 | only_csum=only_csum, 90 | binx=binx, biny=biny, 91 | compress_csum=compress, 92 | compression_parameters=comp_param, 93 | shift_file=shift_file, 94 | save_temp_files=savetmp, 95 | stimfile=stimfile, 96 | livetimefile=livetimefile, 97 | burstfile=burstfile) 98 | status |= stat 99 | 100 | return status 101 | 102 | def splitInputString(input): 103 | """Split on comma and/or space. 104 | 105 | Parameters 106 | ---------- 107 | input: str 108 | One or more values (e.g. file names), separated by a comma and/or 109 | a space. 110 | 111 | Returns 112 | ------- 113 | words: list of strings 114 | """ 115 | 116 | if isinstance(input, str): 117 | if input.strip() == "": 118 | words = [""] 119 | else: 120 | # First split on comma, then check for blanks. 121 | temp_words = input.split(",") 122 | words = [] 123 | for word in temp_words: 124 | word = word.strip() 125 | if word == "": 126 | words.append("") 127 | else: 128 | words.extend(word.split()) 129 | else: 130 | words = input 131 | 132 | return words 133 | 134 | # 135 | #### Interfaces used by TEAL 136 | # 137 | def run(configobj=None): 138 | """TEAL interface for running this code.""" 139 | ### version 2013 November 25 140 | 141 | localcalcos(input=configobj["input"], 142 | verbosity=configobj["verbosity"], 143 | savetmp=configobj["savetmp"], 144 | outdir=configobj["outdir"], 145 | find=configobj["find_target"], 146 | cutoff=configobj["cutoff"], 147 | shift_file=configobj["shift_file"], 148 | csum=configobj["csum"], 149 | raw_csum=configobj["raw_csum"], 150 | compress=configobj["compress"], 151 | comp_param=configobj["comp_param"], 152 | binx=int(configobj["binx"]), 153 | biny=int(configobj["biny"]), 154 | stimfile=configobj["stimfile"], 155 | livetimefile=configobj["livefile"], 156 | burstfile=configobj["burstfile"], 157 | print_version=configobj["print_version"], 158 | print_revision=configobj["print_revision"]) 159 | 160 | def getHelpAsString(fulldoc=True): 161 | """Return help info from .help in the script directory""" 162 | 163 | if fulldoc: 164 | basedoc = __doc__ 165 | else: 166 | basedoc = "" 167 | helpString = basedoc + "\n" 168 | helpString += "Version " + __version__ + "\n" 169 | 170 | helpString += teal.getHelpFileAsString(__taskname__, __file__) 171 | 172 | return helpString 173 | 174 | # Set up doc string without the module level docstring included for 175 | # use with Sphinx, since Sphinx will already include module level docstring 176 | # xxx calcos.__doc__ = getHelpAsString(fulldoc=False) 177 | 178 | def help(): 179 | print(getHelpAsString()) 180 | -------------------------------------------------------------------------------- /calcos/average.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division # confidence high 2 | import numpy as np 3 | import astropy.io.fits as fits 4 | from . 
import cosutil 5 | from .calcosparam import * # parameter definitions 6 | 7 | def avgImage(input, output): 8 | """Average 2-D image sets, assumed to be aligned. 9 | 10 | Parameters 11 | ---------- 12 | input: str 13 | Name of the input file. 14 | 15 | output: str 16 | Name of the output file. 17 | """ 18 | 19 | nimages = len(input) 20 | 21 | assert nimages >= 1 22 | 23 | cosutil.printIntro("Average images") 24 | names = [("Input", repr(input)), ("Output", output)] 25 | cosutil.printFilenames(names) 26 | 27 | if nimages == 1: 28 | cosutil.copyFile(input[0], output) 29 | if cosutil.isProduct(output): 30 | fd = fits.open(output, mode="update") 31 | asn_mtyp = fd[1].header.get("asn_mtyp", "missing") 32 | asn_mtyp = cosutil.modifyAsnMtyp(asn_mtyp) 33 | if asn_mtyp != "missing": 34 | fd[1].header["asn_mtyp"] = asn_mtyp 35 | fd.close() 36 | return 37 | 38 | # Average the SCI extensions. 39 | 40 | got_data = 0 # initial values 41 | sum_exptime = 0. 42 | sum_plantime = 0. 43 | sum_globrate = 0. 44 | 45 | # Open the first file just to get some header keywords. 46 | ifd = fits.open(input[0], mode="copyonwrite") 47 | phdr = ifd[0].header 48 | sci_extn = ifd["SCI"] 49 | statflag = phdr.get("statflag", False) 50 | if phdr["detector"] == "FUV": 51 | segment = phdr["segment"] 52 | globrate_keyword = "globrt_" + segment[-1].lower() 53 | else: 54 | globrate_keyword = "globrate" 55 | expstart = sci_extn.header["expstart"] 56 | expend = sci_extn.header["expend"] 57 | ifd.close() 58 | 59 | for i in range(nimages): 60 | ifd = fits.open(input[i], mode="copyonwrite") 61 | sci_extn = ifd["SCI"] 62 | exptime = sci_extn.header["exptime"] 63 | sum_plantime += sci_extn.header.get("plantime", exptime) 64 | expstart = min(expstart, sci_extn.header["expstart"]) 65 | expend = max(expend, sci_extn.header["expend"]) 66 | if sci_extn.data is not None: 67 | if got_data: 68 | sci_data += (sci_extn.data * exptime) 69 | else: 70 | hdr = sci_extn.header 71 | sci_data = sci_extn.data * exptime 72 | got_data = 1 73 | sum_exptime += exptime 74 | sum_globrate += (sci_extn.header[globrate_keyword] * exptime) 75 | ifd.close() 76 | del ifd 77 | 78 | if got_data: 79 | if sum_exptime <= 0.: 80 | raise RuntimeError("ERROR in avgImage; invalid EXPTIME.") 81 | sci_data /= sum_exptime 82 | globrate = sum_globrate / sum_exptime 83 | else: 84 | sci_data = None 85 | globrate = 0. 86 | 87 | # Create the output file, and write the averaged SCI extension. 88 | primary_hdu = fits.PrimaryHDU(header=phdr) 89 | cosutil.updateFilename(primary_hdu.header, output) 90 | ofd = fits.HDUList(primary_hdu) 91 | scihdu = fits.ImageHDU(data=sci_data, header=hdr, name="SCI") 92 | if cosutil.isProduct(output): 93 | asn_mtyp = scihdu.header.get("asn_mtyp", "missing") 94 | asn_mtyp = cosutil.modifyAsnMtyp(asn_mtyp) 95 | if asn_mtyp != "missing": 96 | scihdu.header["asn_mtyp"] = asn_mtyp 97 | scihdu.header["exptime"] = sum_exptime 98 | scihdu.header["expstart"] = expstart 99 | scihdu.header["expend"] = expend 100 | scihdu.header["expstrtj"] = expstart + MJD_TO_JD 101 | scihdu.header["expendj"] = expend + MJD_TO_JD 102 | scihdu.header["plantime"] = sum_plantime 103 | scihdu.header[globrate_keyword] = round(globrate, 4) 104 | ofd.append(scihdu) 105 | ofd.writeto(output, output_verify='silentfix') 106 | del ofd, phdr, hdr, primary_hdu, sci_data, scihdu 107 | 108 | # Average the ERR extensions in quadrature. 
109 | 110 | got_data = 0 111 | for i in range(nimages): 112 | ifd = fits.open(input[i], mode="copyonwrite") 113 | sci_extn = ifd["SCI"] 114 | err_extn = ifd["ERR"] 115 | exptime = sci_extn.header["exptime"] # exptime is in SCI extension 116 | if err_extn.data is not None: 117 | if got_data: 118 | err_data += (err_extn.data * exptime)**2 119 | else: 120 | hdr = err_extn.header 121 | err_data = (err_extn.data * exptime)**2 122 | got_data = 1 123 | elif i == 0: 124 | hdr = err_extn.header 125 | ifd.close() 126 | del ifd 127 | 128 | if got_data: 129 | np.sqrt(err_data, err_data) 130 | err_data /= sum_exptime 131 | else: 132 | err_data = None 133 | 134 | ofd = fits.open(output, mode="append") 135 | errhdu = fits.ImageHDU(data=err_data, header=hdr, name="ERR") 136 | ofd.append(errhdu) 137 | ofd.close() 138 | del ofd, hdr, err_data, errhdu 139 | 140 | # Combine the DQ extensions. 141 | 142 | got_data = 0 143 | for i in range(nimages): 144 | ifd = fits.open(input[i], mode="copyonwrite") 145 | dq_extn = ifd["DQ"] 146 | if dq_extn.data is not None: 147 | if got_data: 148 | np.bitwise_or(dq_data, dq_extn.data, dq_data) 149 | else: 150 | hdr = dq_extn.header 151 | dq_data = dq_extn.data 152 | got_data = 1 153 | elif i == 0: 154 | hdr = dq_extn.header 155 | ifd.close() 156 | del ifd 157 | 158 | ofd = fits.open(output, mode="append") 159 | dqhdu = fits.ImageHDU(data=dq_data, header=hdr, name="DQ") 160 | ofd.append(dqhdu) 161 | ofd.close() 162 | del ofd, hdr, dq_data, dqhdu 163 | 164 | if statflag: 165 | cosutil.doImageStat(output) 166 | -------------------------------------------------------------------------------- /calcos/orbit.py: -------------------------------------------------------------------------------- 1 | from __future__ import division # confidence unknown 2 | import math 3 | import numpy as np 4 | import astropy.io.fits as fits 5 | 6 | TWOPI = 2. * math.pi 7 | SEC_PER_DAY = 86400.0 8 | 9 | class HSTOrbit(object): 10 | """Orbital parameters. 11 | 12 | The public methods are getOrbitper and getPos. 13 | 14 | This was originially written in IDL (hst_pos_mjd.pro) by Tom Ake. 15 | 16 | Parameters 17 | ---------- 18 | sptfile: str 19 | The name of the support file (rootname_spt.fits). 20 | """ 21 | 22 | def __init__(self, sptfile): 23 | """Constructor.""" 24 | 25 | # attributes 26 | self.argperig = 0. # argument of perigee (revolutions) 27 | self.cirveloc = 0. # circular orbit linear velocity (meters/second) 28 | self.cosincli = 0. # cosine of inclination 29 | self.ecbdx3 = 0. # eccentricity cubed times 3 30 | self.eccentry = 0. # eccentricity 31 | self.eccentx2 = 0. # eccentricity times 2 32 | self.ecbdx4d3 = 0. # eccentricity cubed times 4/3 33 | self.epchtime = 0. # epoch time of parameters (secs since 1/1/85) 34 | self.esqdx5d2 = 0. # eccentricity squared times 5/2 35 | self.fdmeanan = 0. # 1st derivative coef for mean anomly (revs/sec) 36 | self.hsthorb = 0. # half the duration of the ST orbit (seconds) 37 | self.meananom = 0. # mean anomaly (radians) 38 | self.rascascn = 0. # right ascension of ascending node (revolutions) 39 | self.rcargper = 0. # rate change of argument of perigee (revs/sec) 40 | self.rcascnrv = 0. # rt chge right ascension ascend node (revs/sec) 41 | self.sdmeanan = 0. # 2nd deriv coef for mean anomaly (revs/sec/sec) 42 | self.semilrec = 0. # semi-latus rectum (meters) 43 | self.sineincl = 0. 
# sine of inclination 44 | 45 | self._readOrbitalParameters(sptfile) 46 | 47 | def _readOrbitalParameters(self, sptfile): 48 | """Get the orbital parameters from the spt primary header. 49 | 50 | Parameters 51 | ---------- 52 | sptfile: str 53 | The name of the support file. 54 | """ 55 | 56 | fd = fits.open(sptfile, mode="readonly") 57 | phdr = fd[0].header 58 | 59 | # Orbital elements for HST. 60 | self.argperig = phdr["argperig"] 61 | self.cirveloc = phdr["cirveloc"] 62 | self.cosincli = phdr["cosincli"] 63 | self.ecbdx3 = phdr["ecbdx3"] 64 | self.eccentry = phdr["eccentry"] 65 | self.eccentx2 = phdr["eccentx2"] 66 | self.ecbdx4d3 = phdr["ecbdx4d3"] 67 | self.epchtime = phdr["epchtime"] 68 | self.esqdx5d2 = phdr["esqdx5d2"] 69 | self.fdmeanan = phdr["fdmeanan"] 70 | self.hsthorb = phdr["hsthorb"] 71 | self.meananom = phdr["meananom"] 72 | self.rascascn = phdr["rascascn"] 73 | self.rcargper = phdr["rcargper"] 74 | self.rcascnrv = phdr["rcascnrv"] 75 | self.sdmeanan = phdr["sdmeanan"] 76 | self.semilrec = phdr["semilrec"] 77 | self.sineincl = phdr["sineincl"] 78 | 79 | fd.close() 80 | 81 | def getOrbitper(self): 82 | """Return the orbital period. 83 | 84 | Returns 85 | ------- 86 | float 87 | The orbital period in seconds. 88 | """ 89 | 90 | return 2. * self.hsthorb 91 | 92 | def getPos(self, mjd): 93 | """Get position and velocity at a given time. 94 | 95 | # S. Hulbert, Oct 91 Original 96 | # PEH, 2008 Oct 3 Converted from SPP to Python 97 | 98 | Parameters 99 | ---------- 100 | mjd: float 101 | The time (MJD) at which to compute the position and velocity. 102 | 103 | Returns 104 | ------- 105 | tuple of two array_like 106 | The first array is the position vector (km), the second array 107 | is the velocity vector (km/s). 108 | """ 109 | 110 | # These will be returned, after assigning the actual values. 
111 | x_hst = np.zeros(3, dtype=np.float64) 112 | v_hst = np.zeros(3, dtype=np.float64) 113 | 114 | argperig = self.argperig 115 | cirveloc = self.cirveloc 116 | cosincli = self.cosincli 117 | ecbdx3 = self.ecbdx3 118 | eccentry = self.eccentry 119 | eccentx2 = self.eccentx2 120 | ecbdx4d3 = self.ecbdx4d3 121 | epchtime = self.epchtime 122 | esqdx5d2 = self.esqdx5d2 123 | fdmeanan = self.fdmeanan 124 | hsthorb = self.hsthorb 125 | meananom = self.meananom 126 | rascascn = self.rascascn 127 | rcargper = self.rcargper 128 | rcascnrv = self.rcascnrv 129 | sdmeanan = self.sdmeanan 130 | semilrec = self.semilrec 131 | sineincl = self.sineincl 132 | 133 | # convert time from MJD to seconds since 1985 Jan 1 134 | sec85 = (mjd - 46066.0) * SEC_PER_DAY 135 | 136 | # calculate time difference between observation and epoch time 137 | deltim = sec85 - epchtime 138 | 139 | # mean anomaly 140 | temp2 = fdmeanan * deltim 141 | temp3 = 0.5 * sdmeanan * deltim*deltim 142 | m = meananom + TWOPI * (temp2 + temp3) 143 | 144 | sin_m = math.sin(m) 145 | cos_m = math.cos(m) 146 | 147 | # true anomaly (equation of the center) 148 | v = m + sin_m * (eccentx2 + ecbdx3 * cos_m * cos_m - 149 | ecbdx4d3 * sin_m * sin_m + esqdx5d2 * cos_m) 150 | sin_v = math.sin(v) 151 | cos_v = math.cos(v) 152 | 153 | # distance 154 | r = semilrec / (1.0 + eccentry * cos_v) 155 | 156 | # argument of perigee 157 | wsmall = TWOPI * (argperig + rcargper * deltim) 158 | 159 | # longitude of the ascending node 160 | wbig = TWOPI * (rascascn + rcascnrv * deltim) 161 | sin_wbig = math.sin(wbig) 162 | cos_wbig = math.cos(wbig) 163 | 164 | # calculate the rectangular coordinates 165 | # (see Smart, Spherical Astronomy, section 75, page 122-124) 166 | 167 | f = wsmall + v 168 | sin_f = math.sin(f) 169 | cos_f = math.cos(f) 170 | 171 | x_hst[0] = r * (cos_wbig * cos_f - cosincli * sin_wbig * sin_f) 172 | x_hst[1] = r * (sin_wbig * cos_f + cosincli * cos_wbig * sin_f) 173 | x_hst[2] = r * sineincl * sin_f 174 | 175 | a0 = cirveloc * eccentry * sin_v / r 176 | a1 = cirveloc * (1.0 + eccentry * cos_v) + \ 177 | TWOPI * rcargper * r 178 | v_hst[0] = a0 * x_hst[0] - \ 179 | a1 * (cos_wbig * sin_f + cosincli * sin_wbig * cos_f) - \ 180 | TWOPI * rcascnrv * x_hst[1] 181 | v_hst[1] = a0 * x_hst[1] - \ 182 | a1 * (sin_wbig * sin_f - cosincli * cos_wbig * cos_f) + \ 183 | TWOPI * rcascnrv * x_hst[0] 184 | v_hst[2] = a0 * x_hst[2] + a1 * sineincl * cos_f 185 | 186 | # Convert from meters to kilometers. 187 | x_hst /= 1000.0 188 | v_hst /= 1000.0 189 | 190 | return (x_hst, v_hst) 191 | -------------------------------------------------------------------------------- /tests/README.md: -------------------------------------------------------------------------------- 1 | # CalCOS Unit tests 2 | # 3 | 4 | The purpose of this directory is for generating unit tests for CalCOS and this is done by using "pytest". 5 | * All python scripts with names that start with "test_" are unit test module for CalCOS modules. 6 | * A calcos module with the name "average.py" would have a unit test module with the name "test_average.py" 7 | * Every test script is created with the assumption that all the dependencies that calcos requires are satisfied. 8 | 9 | Below are descriptions of each test module and the steps used to test some of the CalCOS functions. 
10 | 11 | | Script name | Script type | 12 | |:---------------------:|:-----------------:| 13 | | test_cosutil.py | Test script | 14 | | test_extract.py | Test script | 15 | | test_airglow.py | Test script | 16 | | test_average.py | Test script | 17 | | test_shift_file.py | Test script | 18 | | generate_tempfiles.py | Supporting script | 19 | 20 | > Most unit tests follow 3 simple steps 4 in some special cases 21 | >1. Setup: prepare expected values (values the function is supposed to return) 22 | >2. Test: get test values from the target function (values the function actually returned) 23 | >3. Verify: compare expected value against test values 24 | >4. Clean-up: remove any temp files created during the test. 25 | 26 | ## 1. test_cosutil.py 27 | Unit tests that have the word "print" in their name using the same algorithm. 28 | - open an IO stream 29 | - initialize the message you want to print 30 | - call the function that is being tested and pass the message string to it. 31 | - redirect the output stream towards the function to catch the printed message 32 | - write the value to a variable 33 | - assert the captured message with the original message. 34 | 35 | ### test_center_of_quadratic(): 36 | unit test for center_of_quadratic(coeff, var) 37 | - create a randomized coeff and var arrays 38 | - follow the math to calculate x_min and x_min_sigma aka center of the quadratic 39 | - x_min = -coeff[1] / (2 * coeff[2]) 40 | - x_min_sigma = 0.5 * math.sqrt(var1 / coeff[2] ** 2 + var2 * coeff[1] ** 2 / coeff[2] ** 4) 41 | - assert the expected result with the functions return. 42 | 43 | ### test_precess(): 44 | unit test for precess(t, target) 45 | - set a time in MJD 46 | - create a unit vector toward the target 47 | - calculate the expected coordinates 48 | - assert expected with the actual. 49 | 50 | ### test_err_frequentist(): 51 | unit test for err_frequentist(counts) 52 | - create 3 arrays similar to the test in err_gehrels(). 53 | - find the poisson confidence interval for each array. 54 | - assert the result with the expected err_lower and err_upper. 55 | 56 | ### test_err_gehrels(): 57 | unit test for err_gehrels(counts) 58 | test ran 59 | - create 3 arrays, one should be random float values, the other two should be the lower (zero) and upper (one) error estimates 60 | - following the math for calculating the upper limit by taking the sqrt of counts + 0.5 and then adding 1 to the result. 61 | - similarly for the lower we add counts + 0.5 and then counts - counts * (1.0 - 1.0 / (9.0 * counts) - 1.0 / (3.0 * np.sqrt(counts))) ** 3 62 | we will be able to get the lower array. 63 | - finally assert the upper array and the lower array with the results obtained from err_gehrels(). 64 | 65 | ### test_is_product(): 66 | - NOTE: 67 | no test to be done here since we're checking the file if its a product or not 68 | the return of the function isProduct() is a boolean hence, assert it directly. 69 | ###------------------------------------------------------------------------------------------------------------------------------------ 70 | >Note: Generaly unit test functions that end with the word "exception" follow a different way of testing. 71 | > we use pytest.raises("Type of error") where we specify the error type we expect and if that error is returned the function passes. 
72 | ###------------------------------------------------------------------------------------------------------------------------------------- 73 | 74 | ### test_guess_aper_from_locn(): 75 | unit test for guessAperFromLocn() 76 | - create lists for LPs and aperture positions (2 in this case). 77 | - use the ranges provided to guess which aperture is being used 78 | 1. LP: 1 79 | - (116.0, 135) ---> PSA 80 | - (-163.0, -143.0) ---> BOA 81 | 2. LP: 2 82 | - (52.0, 72.0) ---> PSA 83 | - (-227.0, -207.0) ---> BOA 84 | 3. LP: 3 and above 85 | - aperture will be none 86 | - assert expected positions with the actual position. 87 | 88 | 89 | ## 2. test_extract.py 90 | ### test_get_columns(): 91 | Test if the function is returning the right column fields 92 | 93 | ### test_remove_unwanted_column(): 94 | Old column length should be equal to new column length + amount of the removed columns 95 | 96 | ### test_next_power_of_two(): 97 | check if function returns the next_power_of_two 98 | 99 | ### test_add_column_comment(): 100 | 101 | 102 | ## 3. test_airglow.py 103 | ### test_find_airglow_limits(): 104 | unit test for find_airglow_limits() 105 | test ran 106 | - By providing certain values as dict to be used as filter for finding the dispersion 107 | - testing for both FUV segments 108 | - creating a temporary disptab ref file. 109 | - testing for 5 airglow lines 110 | - calculating the expected pixel numbers by following the math involved in the actual file 111 | and referring to the values in the ref file we can get the values upto a descent decimal points. 112 | 113 | ## 4. test_average.py 114 | ### test_avg_image(): 115 | tests avg_image() in average.py 116 | explanation of the test 117 | - create temporary count files to be used as inputs 118 | - expected values in the output file are the average of the input values 119 | - loop though the values to check if the math holds. 120 | 121 | ## 5. test_shift_file.py 122 | ### test_shift_file(): 123 | Creates a temporary txt file with some arbitrary values and verify if the shift_file objects are created with the neccessary variables initialized with the right values. 124 | 125 | ### test_get_shifts(): 126 | Instantiate 2 objects with different dataset name and fpoffset and also create a key to use as a filter 127 | the function getShifts() returns a tuple so using a loop find the shift for different combinations of key and finally assert it with the expected values list. 128 | 129 | # Supporting script 130 | ## 1. generate_tempfiles.py 131 | 132 | ### create_count_file(file=None): 133 | Creates a temp count file for testing avg_image. 134 | 135 | Parameters 136 | ---------- 137 | file: str 138 | the filename string 139 | 140 | Returns 141 | ------- 142 | filename string 143 | 144 | ### create_disptab_file(file=None): 145 | Creates a disptab file. 146 | 147 | Parameters 148 | ---------- 149 | file: str 150 | name of the temp file to be created. 151 | 152 | Returns 153 | ------- 154 | name of the temp file created. 155 | ### generate_fits_file(file): 156 | Creates a corrtag file for testing. 157 | 158 | Parameters 159 | ---------- 160 | file: str 161 | the file path. 162 | Returns 163 | ------- 164 | the HDU_List 165 | 166 | 167 | 168 | ### All files listed in mentioned here are written by Michael Asfaw - masfaw@stsci.edu. 
169 | -------------------------------------------------------------------------------- /calcos/calcosparam.py: -------------------------------------------------------------------------------- 1 | from __future__ import division # confidence high 2 | 3 | # This file defines parameters used by calcos. 4 | 5 | 6 | # Version numbers used to be defined here. The d2to1 based install 7 | # keeps the version numbers in setup.cfg, where they get copied to 8 | # version.py at install time. In principle, everybody who needs 9 | # version numbers could get them from version.py, but notice that 10 | # CALCOS_VERSION does not correspond to anything that is there. 11 | # Rather than having some versions in one file and some versions 12 | # in another, it seems conceptually cleaner to continue making 13 | # all the version information available here in calcosparam. 14 | from . import __version__ 15 | 16 | CALCOS_VERSION_NUMBER = __version__ 17 | CALCOS_VERSION = "%s" % (CALCOS_VERSION_NUMBER) 18 | 19 | # These are the values to indicate the detector (original) and user 20 | # (flipped or rotated) COS coordinates. 21 | DETECTOR_COORDINATES = "DETECTOR" 22 | USER_COORDINATES = "USER" 23 | 24 | SPEED_OF_LIGHT = 299792.458 # km/s 25 | 26 | DAYS_PER_YEAR = 365.25 27 | SEC_PER_DAY = 86400. 28 | 29 | MJD_TO_JD = 2400000.5 # add to MJD to get Julian Day Number 30 | 31 | # Live time estimates should not differ by more than this fraction of 32 | # the live time. 33 | LIVETIME_CRITERION = 0.1 34 | 35 | # This is the wavelength below which no significant flux could be detected. 36 | MIN_WAVELENGTH = 900. # Angstroms 37 | 38 | # These give the axis lengths of the FUV and NUV detectors, in pixels. 39 | FUV_X = 16384 # more rapidly varying axis 40 | FUV_Y = 1024 41 | NUV_X = 1024 # more rapidly varying axis 42 | NUV_Y = 1024 43 | 44 | # X_OFFSET is the offset of the detector in a calibrated image. 45 | # Pixel X in a calibrated image = XFULL + X_OFFSET 46 | FUV_X_OFFSET = 0 47 | FUV_EXTENDED_X = FUV_X 48 | NUV_X_OFFSET = 100 49 | NUV_EXTENDED_X = NUV_X + 250 50 | 51 | # These are the default binning factors for FUV and NUV "calcos sum" images. 52 | FUV_BIN_X = 1 53 | FUV_BIN_Y = 1 54 | NUV_BIN_X = 1 55 | NUV_BIN_Y = 1 56 | 57 | # These give the number of spectra per detector (used in extract.py). 58 | FUV_SPECTRA = 1 # one spectrum on one FUV segment 59 | NUV_SPECTRA = 3 # three stripes on NUV detector 60 | 61 | # These are the possible values for verbosity. 62 | QUIET = 0 63 | VERBOSE = 1 64 | VERY_VERBOSE = 2 65 | 66 | # These are the possible values for the TAGFLASH keyword, and corresponding 67 | # integer codes. 68 | TAGFLASH_NONE = "NONE" 69 | TAGFLASH_AUTO = "AUTO" 70 | TAGFLASH_UNIFORMLY_SPACED = "UNIFORMLY SPACED" 71 | TAGFLASH_TYPE_NONE = 0 72 | TAGFLASH_TYPE_AUTO = 1 73 | TAGFLASH_TYPE_UNIFORMLY_SPACED = 2 74 | 75 | # The following section pertains to changes in the position of the aperture 76 | # block to move the target on the detector, to extend the lifetime of the 77 | # FUV detector. 78 | 79 | # These are the names of the apertures. 80 | APERTURE_NAMES = ["PSA", "WCA", "BOA", "FCA"] 81 | # The aperture keyword for a dark exposure may have this value instead. 82 | OTHER_APERTURE_NAMES = ["N/A"] 83 | 84 | # Nominal values of aperypos for life_adj = 1. 
85 | APERTURE_POSN1 = {"PSA": 126., 86 | "WCA": 126., 87 | "BOA": -153., 88 | "FCA": -153.} 89 | 90 | # pixels per arcsecond in the cross-dispersion direction 91 | XD_PLATE_SCALE = { 92 | "G130M": 11.9, # email from Charles, 2012 Feb 2 93 | "G160M": 11.9, 94 | "G140L": 11.9, 95 | "G185M": 41.85, 96 | "G225M": 41.89, 97 | "G285M": 41.80, 98 | "G230L": 42.27, 99 | "MIRRORA": 42.5, # COS ISR 2010-10 100 | "MIRRORB": 42.5} 101 | 102 | # arcseconds per step of the aperture block in the cross-dispersion direction 103 | ARCSEC_PER_XD_APER_STEP = -0.0476 104 | 105 | # This value is read/write. It can be set to a value other than zero 106 | # if LIFE_ADJ = -1. 107 | LIFE_ADJ_OFFSET = 0. # pixels 108 | 109 | # This is the list of segment-specific (or in some cases stripe-specific) 110 | # keywords, with "X" (case sensitive) replaced by "a", "b" or "c". 111 | segment_specific_keywords = \ 112 | ["stimX_lx", "stimX_ly", "stimX_rx", "stimX_ry", 113 | "stimX0lx", "stimX0ly", "stimX0rx", "stimX0ry", 114 | "stimXslx", "stimXsly", "stimXsrx", "stimXsry", 115 | "npha_X", "phalowrX", "phaupprX", 116 | "tbrst_X", "nbrst_X", "tbadt_X", "nbadt_X", 117 | "nout_X", "nbadevtX", 118 | "exptimeX", "neventsX", 119 | "globrt_X", 120 | "deadrt_X", "deadmt_X", "livetm_X", 121 | "sp_loc_X", "sp_off_X", "sp_nom_X", "sp_slp_X", "sp_hgt_X", "sp_err_X", 122 | "b_bkg1_X", "b_bkg2_X", 123 | "b_hgt1_X", "b_hgt2_X", 124 | "shift1X", "shift2X", "dpixel1X", 125 | "chi_sq_X", "ndf_X"] 126 | 127 | # The pulse height values range from 0 to 127. The values in the PHA 128 | # column of an EVENTS table, however, come from a five-bit value, i.e. 129 | # the last two bits have been truncated, resulting in their being a 130 | # factor of four smaller. 131 | TWO_BITS = 4 132 | 133 | # The following three parameters are used by getTable. 134 | # NOT_APPLICABLE will be assigned as the value of a keyword that is 135 | # missing from the header; this is done because some keywords may 136 | # actually not be present, while others that are not relevant will be 137 | # present but have the value "N/A". 138 | STRING_WILDCARD = "ANY" 139 | NOT_APPLICABLE = "N/A" 140 | INT_WILDCARD = -1 141 | 142 | # These are the data quality flags. 143 | DQ_OK = 0 # no anomalous condition noted 144 | DQ_SOFTERR = 1 # Reed-Solomon error 145 | DQ_UNUSED_2 = 2 # [currently unused] 146 | DQ_DETECTOR_SHADOW = 4 # FUV grid shadow mark or NUV vignetting 147 | DQ_POORLY_CALIBRATED = 8 # poorly calibrated (incl. detector edge) 148 | DQ_VERY_LOW_RESPONSE = 16 # > 80% depression 149 | DQ_BACKGROUND_FEATURE = 32 # background feature 150 | DQ_BURST = 64 # count rate implies a burst (FUV) 151 | DQ_PIXEL_OUT_OF_BOUNDS = 128 # pixel out of bounds 152 | DQ_DATA_FILL = 256 # fill data 153 | DQ_PHA_OUT_OF_BOUNDS = 512 # pulse height is either too low or too high 154 | DQ_LOW_RESPONSE_REGION = 1024 # > 50% depression 155 | DQ_BAD_TIME = 2048 # time is within a bad time interval 156 | DQ_LOW_PHA_FEATURE = 4096 # low PHA feature 157 | DQ_GAIN_SAG_HOLE = 8192 # low gain area 158 | DQ_UNUSED_16384 = 16384 # [currently unused] 159 | 160 | # Use this when binning TIME-TAG data to images, or extracting spectra from 161 | # TIME-TAG data. 162 | SERIOUS_DQ_FLAGS = (DQ_BURST | DQ_BAD_TIME | DQ_PHA_OUT_OF_BOUNDS) 163 | 164 | # Define an exception for the case that the APERTURE keyword is not recognized. 
165 | class BadApertureError(Exception): 166 | def __init__(self, message=None): 167 | self.message = message 168 | def __str__(self): 169 | return self.message 170 | 171 | # Define an exception for a missing row in a reference file. 172 | class MissingRowError(Exception): 173 | def __init__(self, message=None): 174 | self.message = message 175 | def __str__(self): 176 | return self.message 177 | 178 | # Define an exception for a missing column in a reference file. 179 | class MissingColumnError(Exception): 180 | def __init__(self, message=None): 181 | self.message = message 182 | def __str__(self): 183 | return self.message 184 | -------------------------------------------------------------------------------- /calcos/osmstep.py: -------------------------------------------------------------------------------- 1 | from __future__ import division # confidence high 2 | 3 | ta1image_range = [8952, 8965] 4 | ta1bright_range = [9419, 9425] 5 | nuv_tv_dayrange = [264, 294] 6 | 7 | fuv_osm1_dict = \ 8 | {7999: ("G130M", 1291, -2), 9 | 8000: ("G130M", 1291, -1), 10 | 8001: ("G130M", 1291, 0), 11 | 8002: ("G130M", 1291, 1), 12 | 7995: ("G130M", 1300, -2), 13 | 7996: ("G130M", 1300, -1), 14 | 7997: ("G130M", 1300, 0), 15 | 7998: ("G130M", 1300, 1), 16 | 7991: ("G130M", 1309, -2), 17 | 7992: ("G130M", 1309, -1), 18 | 7993: ("G130M", 1309, 0), 19 | 7994: ("G130M", 1309, 1), 20 | 7987: ("G130M", 1318, -2), 21 | 7988: ("G130M", 1318, -1), 22 | 7989: ("G130M", 1318, 0), 23 | 7990: ("G130M", 1318, 1), 24 | 7983: ("G130M", 1327, -2), 25 | 7984: ("G130M", 1327, -1), 26 | 7985: ("G130M", 1327, 0), 27 | 7986: ("G130M", 1327, 1), 28 | 11201: ("G160M", 1577, -2), 29 | 11202: ("G160M", 1577, -1), 30 | 11203: ("G160M", 1577, 0), 31 | 11204: ("G160M", 1577, 1), 32 | 11197: ("G160M", 1589, -2), 33 | 11198: ("G160M", 1589, -1), 34 | 11199: ("G160M", 1589, 0), 35 | 11200: ("G160M", 1589, 1), 36 | 11193: ("G160M", 1600, -2), 37 | 11194: ("G160M", 1600, -1), 38 | 11195: ("G160M", 1600, 0), 39 | 11196: ("G160M", 1600, 1), 40 | 11189: ("G160M", 1611, -2), 41 | 11190: ("G160M", 1611, -1), 42 | 11191: ("G160M", 1611, 0), 43 | 11192: ("G160M", 1611, 1), 44 | 11185: ("G160M", 1623, -2), 45 | 11186: ("G160M", 1623, -1), 46 | 11187: ("G160M", 1623, 0), 47 | 11188: ("G160M", 1623, 1), 48 | 1596: ("G140L", 1105, -2), 49 | 1597: ("G140L", 1105, -1), 50 | 1598: ("G140L", 1105, 0), 51 | 1599: ("G140L", 1105, 1), 52 | 1589: ("G140L", 1230, -2), 53 | 1590: ("G140L", 1230, -1), 54 | 1591: ("G140L", 1230, 0), 55 | 1592: ("G140L", 1230, 1), 56 | 1593: ("G140L", 1230, 0) 57 | } 58 | 59 | nuv_osm2_dict_early = \ 60 | {1304: ("G185M", 1817, 0), 61 | 1283: ("G185M", 1850, -2), 62 | 1284: ("G185M", 1850, -1), 63 | 1285: ("G185M", 1850, 0), 64 | 1286: ("G185M", 1850, 1), 65 | 1267: ("G185M", 1882, 0), 66 | 1245: ("G185M", 1921, 0), 67 | 1226: ("G185M", 1953, 0), 68 | 1208: ("G185M", 1986, 0), 69 | 6421: ("G225M", 2217, 0), 70 | 6399: ("G225M", 2250, -2), 71 | 6400: ("G225M", 2250, -1), 72 | 6401: ("G225M", 2250, 0), 73 | 6402: ("G225M", 2250, 1), 74 | 6390: ("G225M", 2268, 0), 75 | 6381: ("G225M", 2283, 0), 76 | 6356: ("G225M", 2325, 0), 77 | 6337: ("G225M", 2357, 0), 78 | 6344: ("G225M", 2357, 7), 79 | 6318: ("G225M", 2390, 0), 80 | 3949: ("G285M", 2637, 0), 81 | 3939: ("G285M", 2657, 0), 82 | 3929: ("G285M", 2676, 0), 83 | 3912: ("G285M", 2709, 0), 84 | 3838: ("G285M", 2850, -2), 85 | 3839: ("G285M", 2850, -1), 86 | 3840: ("G285M", 2850, 0), 87 | 3841: ("G285M", 2850, 1), 88 | 3793: ("G285M", 2952, 5), 89 | 3788: ("G285M", 2952, 0), 90 | 
3774: ("G285M", 2979, 0), 91 | 3754: ("G285M", 3018, 0), 92 | 3751: ("G285M", 3035, 0), 93 | 3734: ("G285M", 3057, 0), 94 | 3728: ("G285M", 3074, -2), 95 | 3729: ("G285M", 3074, -1), 96 | 3730: ("G285M", 3074, 0), 97 | 3731: ("G285M", 3074, 1), 98 | 11540: ("G230L", 2635, 0), 99 | 11519: ("G230L", 3000, -2), 100 | 11520: ("G230L", 3000, -1), 101 | 11521: ("G230L", 3000, 0), 102 | 11522: ("G230L", 3000, 1), 103 | 11502: ("G230L", 3360, 0) 104 | } 105 | 106 | nuv_osm2_dict_middle = \ 107 | {1324: ("G185M", 1786, 0), 108 | 1306: ("G185M", 1817, -2), 109 | 1307: ("G185M", 1817, -1), 110 | 1308: ("G185M", 1817, 0), 111 | 1309: ("G185M", 1817, 1), 112 | 1298: ("G185M", 1835, 0), 113 | 1287: ("G185M", 1850, -2), 114 | 1288: ("G185M", 1850, -1), 115 | 1289: ("G185M", 1850, 0), 116 | 1290: ("G185M", 1850, 1), 117 | 1282: ("G185M", 1864, 0), 118 | 1272: ("G185M", 1882, 0), 119 | 1267: ("G185M", 1890, 0), 120 | 1261: ("G185M", 1900, -2), 121 | 1262: ("G185M", 1900, -1), 122 | 1263: ("G185M", 1900, 0), 123 | 1264: ("G185M", 1900, 1), 124 | 1255: ("G185M", 1913, 0), 125 | 1249: ("G185M", 1921, -2), 126 | 1250: ("G185M", 1921, -1), 127 | 1251: ("G185M", 1921, 0), 128 | 1252: ("G185M", 1921, 1), 129 | 1240: ("G185M", 1941, 0), 130 | 1231: ("G185M", 1953, -2), 131 | 1232: ("G185M", 1953, -1), 132 | 1233: ("G185M", 1953, 0), 133 | 1234: ("G185M", 1953, 1), 134 | 1223: ("G185M", 1971, 0), 135 | 1214: ("G185M", 1986, 0), 136 | 1150: ("G185M", 2010, -50), 137 | 1175: ("G185M", 2010, -25), 138 | 1190: ("G185M", 2010, -10), 139 | 1200: ("G185M", 2010, 0), 140 | 6432: ("G225M", 2186, -10), 141 | 6442: ("G225M", 2186, 0), 142 | 6424: ("G225M", 2217, 0), 143 | 6412: ("G225M", 2233, -2), 144 | 6413: ("G225M", 2233, -1), 145 | 6414: ("G225M", 2233, 0), 146 | 6415: ("G225M", 2233, 1), 147 | 6404: ("G225M", 2250, 0), 148 | 6393: ("G225M", 2268, 0), 149 | 6384: ("G225M", 2283, 0), 150 | 6372: ("G225M", 2306, 0), 151 | 6358: ("G225M", 2325, -2), 152 | 6359: ("G225M", 2325, -1), 153 | 6360: ("G225M", 2325, 0), 154 | 6361: ("G225M", 2325, 1), 155 | 6352: ("G225M", 2339, 0), 156 | 6340: ("G225M", 2357, 0), 157 | 6329: ("G225M", 2373, -2), 158 | 6330: ("G225M", 2373, -1), 159 | 6331: ("G225M", 2373, 0), 160 | 6332: ("G225M", 2373, 1), 161 | 6321: ("G225M", 2390, 0), 162 | 6309: ("G225M", 2410, 0), 163 | 3959: ("G285M", 2617, 0), 164 | 3947: ("G285M", 2637, -2), 165 | 3948: ("G285M", 2637, -1), 166 | 3949: ("G285M", 2637, 0), 167 | 3950: ("G285M", 2637, 1), 168 | 3940: ("G285M", 2657, 0), 169 | 3931: ("G285M", 2676, 0), 170 | 3919: ("G285M", 2695, -2), 171 | 3920: ("G285M", 2695, -1), 172 | 3921: ("G285M", 2695, 0), 173 | 3922: ("G285M", 2695, 1), 174 | 3914: ("G285M", 2709, 0), 175 | 3908: ("G285M", 2719, -2), 176 | 3909: ("G285M", 2719, -1), 177 | 3910: ("G285M", 2719, 0), 178 | 3911: ("G285M", 2719, 1), 179 | 3900: ("G285M", 2739, 0), 180 | 3834: ("G285M", 2850, -10), 181 | 3842: ("G285M", 2850, -2), 182 | 3843: ("G285M", 2850, -1), 183 | 3844: ("G285M", 2850, 0), 184 | 3845: ("G285M", 2850, 1), 185 | 3854: ("G285M", 2850, 10), 186 | 3791: ("G285M", 2952, -2), 187 | 3792: ("G285M", 2952, -1), 188 | 3793: ("G285M", 2952, 0), 189 | 3794: ("G285M", 2952, 1), 190 | 3780: ("G285M", 2979, 0), 191 | 3771: ("G285M", 2996, 0), 192 | 3760: ("G285M", 3018, 0), 193 | 3739: ("G285M", 3057, 0), 194 | 3720: ("G285M", 3094, 0), 195 | 11541: ("G230L", 2635, -2), 196 | 11542: ("G230L", 2635, -1), 197 | 11543: ("G230L", 2635, 0), 198 | 11544: ("G230L", 2635, 1), 199 | 11527: ("G230L", 2950, 0), 200 | 11522: ("G230L", 3000, -2), 201 | 11523: 
("G230L", 3000, -1), 202 | 11524: ("G230L", 3000, 0), 203 | 11525: ("G230L", 3000, 1), 204 | 11503: ("G230L", 3360, -2), 205 | 11504: ("G230L", 3360, -1), 206 | 11505: ("G230L", 3360, 0), 207 | 11506: ("G230L", 3360, 1) 208 | } 209 | 210 | nuv_osm2_dict_late = \ 211 | {1319: ("G185M", 1786, 0), 212 | 1303: ("G185M", 1817, 0), 213 | 1293: ("G185M", 1835, 0), 214 | 1284: ("G185M", 1850, 0), 215 | 1277: ("G185M", 1864, 0), 216 | 1267: ("G185M", 1882, 0), 217 | 1262: ("G185M", 1890, 0), 218 | 1258: ("G185M", 1900, 0), 219 | 1250: ("G185M", 1913, 0), 220 | 1246: ("G185M", 1921, 0), 221 | 1235: ("G185M", 1941, 0), 222 | 1228: ("G185M", 1953, 0), 223 | 1218: ("G185M", 1971, 0), 224 | 1209: ("G185M", 1986, 0), 225 | 1195: ("G185M", 2010, 0), 226 | 6437: ("G225M", 2186, 0), 227 | 6419: ("G225M", 2217, 0), 228 | 6409: ("G225M", 2233, 0), 229 | 6399: ("G225M", 2250, 0), 230 | 6388: ("G225M", 2268, 0), 231 | 6379: ("G225M", 2283, 0), 232 | 6367: ("G225M", 2306, 0), 233 | 6355: ("G225M", 2325, 0), 234 | 6347: ("G225M", 2339, 0), 235 | 6335: ("G225M", 2357, 0), 236 | 6326: ("G225M", 2373, 0), 237 | 6316: ("G225M", 2390, 0), 238 | 6304: ("G225M", 2410, 0), 239 | 3839: ("G285M", 2850, 0), 240 | 11519: ("G230L", 3000, 0) 241 | } 242 | -------------------------------------------------------------------------------- /calcos/dispersion.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division # confidence high 2 | import numpy as np 3 | from . import cosutil 4 | 5 | class Dispersion(object): 6 | """Dispersion relation. 7 | 8 | The public methods are: 9 | flag = disprel.isValid() 10 | nrows = disprel.getNRows() 11 | filter = disprel.getFilter() 12 | wavelength = disprel.evalDisp(x) 13 | dwavelength / dx = disprel.evalDerivDisp(x) 14 | x = disprel.evalInvDisp(wavelength, tiny=1.e-8) 15 | disprel.info() 16 | disprel.close() 17 | 18 | Parameters 19 | ---------- 20 | disptab: str 21 | name of table containing dispersion relations 22 | 23 | filter: dictionary 24 | parameters for selecting a row from the disptab 25 | 26 | use_fpoffset: boolean 27 | if True, include fpoffset in the filter; 28 | if False, exclude it from the filter 29 | """ 30 | 31 | def __init__(self, disptab, filter, use_fpoffset=True): 32 | 33 | # This will be a local copy of filter, possibly excluding "fpoffset". 34 | self.filter = {} 35 | self.use_fpoffset = use_fpoffset 36 | self.ncoeff = 0 37 | self.coeff = None 38 | self.delta = 0. 
39 | self.fpoffset = 0 # save for information 40 | self._nrows = 0 # number of matching rows (should be 1) 41 | self._valid = True 42 | 43 | for key in filter.keys(): 44 | key_lower = key.lower() 45 | if key_lower == "fpoffset": 46 | self.fpoffset = filter[key] 47 | continue 48 | self.filter[key_lower] = filter[key] 49 | if use_fpoffset: 50 | self.filter["fpoffset"] = self.fpoffset 51 | 52 | if not cosutil.findColumn(disptab, "fpoffset"): 53 | if "fpoffset" in self.filter: 54 | del(self.filter["fpoffset"]) 55 | 56 | disp_info = cosutil.getTable(disptab, self.filter) 57 | if disp_info is None: 58 | self._valid = False 59 | del disp_info 60 | return 61 | else: 62 | self._nrows = len(disp_info) 63 | 64 | self.ncoeff = disp_info.field("nelem")[0] 65 | if self.ncoeff < 2: 66 | raise ValueError("Dispersion relation has too few coefficients") 67 | self.coeff = disp_info.field("coeff")[0][0:self.ncoeff] 68 | if cosutil.findColumn(disp_info, "delta"): 69 | self.delta = disp_info.field("delta")[0] 70 | else: 71 | if cosutil.findColumn(disp_info, "d_tv03"): 72 | d_tv03 = disp_info.field("d_tv03")[0] 73 | else: 74 | d_tv03 = 0. 75 | if cosutil.findColumn(disp_info, "d"): 76 | d = disp_info.field("d")[0] 77 | else: 78 | d = 0. 79 | self.delta = d_tv03 - d 80 | 81 | del disp_info 82 | 83 | def info(self): 84 | 85 | cosutil.printMsg("filter = %s" % str(self.filter)) 86 | cosutil.printMsg("use_fpoffset = %s" % str(self.use_fpoffset)) 87 | cosutil.printMsg("fpoffset = %d" % self.fpoffset) 88 | cosutil.printMsg("number of coefficients = %d" % self.ncoeff) 89 | cosutil.printMsg("coeff = %s" % str(self.coeff)) 90 | cosutil.printMsg("delta = %.6g" % self.delta) 91 | cosutil.printMsg("number of matching rows = %d" % self._nrows) 92 | cosutil.printMsg("valid = %s" % str(self._valid)) 93 | 94 | def isValid(self): 95 | """Return True if a matching row was found in disptab.""" 96 | 97 | return self._valid 98 | 99 | def getNRows(self): 100 | """Return the number of rows in disptab that match the filter.""" 101 | 102 | return self._nrows 103 | 104 | def getFilter(self): 105 | """Return the filter dictionary.""" 106 | 107 | return self.filter 108 | 109 | def close(self): 110 | """Delete coefficients and reset attributes.""" 111 | 112 | del self.coeff 113 | self.filter = {} 114 | self.ncoeff = 0 115 | self.delta = 0. 116 | self.fpoffset = 0 117 | self._nrows = 0 118 | self._valid = False 119 | 120 | def evalDisp(self, x): 121 | """Evaluate the dispersion relation at x. 122 | 123 | The function value will be the wavelength (or array of wavelengths) 124 | at x, in Angstroms. 125 | 126 | Parameters 127 | ---------- 128 | x: array_like or float 129 | Pixel coordinate (or array of coordinates) 130 | 131 | Returns 132 | ------- 133 | array_like or float 134 | Wavelength (or array of wavelengths) at x 135 | """ 136 | 137 | x_prime = np.float64(x) + np.float64(self.delta) 138 | 139 | sum = self.coeff[self.ncoeff-1] 140 | for i in range(self.ncoeff-2, -1, -1): 141 | sum = sum * x_prime + self.coeff[i] 142 | 143 | return sum 144 | 145 | def evalDerivDisp(self, x): 146 | """Evaluate the derivative of the dispersion relation at x. 147 | 148 | The function value will be the slope (or array of slopes) at x, 149 | in Angstroms per pixel. 
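
        (The dispersion relation is a polynomial in x' = x + delta,
        wavelength = coeff[0] + coeff[1]*x' + ... + coeff[n-1]*x'**(n-1),
        so the slope is coeff[1] + 2*coeff[2]*x' + ... +
        (n-1)*coeff[n-1]*x'**(n-2); the loop below evaluates this by
        Horner's method.)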
150 | 151 | Parameters 152 | ---------- 153 | x: array_like or float 154 | Pixel coordinate (or array of coordinates) 155 | 156 | Returns 157 | ------- 158 | array_like or float 159 | Slope at x, in Angstroms per pixel 160 | """ 161 | 162 | x_prime = np.float64(x) + np.float64(self.delta) 163 | 164 | sum = (self.ncoeff - 1.) * self.coeff[self.ncoeff-1] 165 | for n in range(self.ncoeff-2, 0, -1): 166 | sum = sum * x_prime + n * self.coeff[n] 167 | 168 | return sum 169 | 170 | def evalInvDisp(self, wavelength, tiny=1.e-8): 171 | """Evaluate the inverse of the dispersion relation at wavelength. 172 | 173 | The function value will be the pixel number (or array of pixel numbers) 174 | at the specified wavelength(s). Newton's method is used for finding 175 | the pixel numbers, and the iterations are stopped when the largest 176 | difference between the specified wavelengths and computed wavelengths 177 | is less than tiny. 178 | 179 | Parameters 180 | ---------- 181 | wavelength: array_like or float 182 | Wavelength or array of wavelengths 183 | 184 | tiny: float 185 | Maximum allowed difference between the final pixel number(s) 186 | and the value from the previous iteration 187 | 188 | Returns 189 | ------- 190 | array_like or float 191 | Pixel number (or array of pixel numbers) at wavelength 192 | """ 193 | 194 | tiny = abs(tiny) 195 | 196 | # initial value 197 | try: 198 | nelem = len(wavelength) 199 | x = np.arange(nelem, dtype=np.float64) 200 | except TypeError: 201 | nelem = 0 202 | x = 0. 203 | 204 | # Iterate to find the pixel number(s) x such that evaluating the 205 | # dispersion relation at that point or points gives the specified 206 | # wavelength(s). 207 | done = False 208 | while not done: 209 | if nelem > 0: 210 | x_prev = x.copy() 211 | else: 212 | x_prev = x 213 | wl = self.evalDisp(x) 214 | slope = self.evalDerivDisp(x) 215 | wl_diff = wavelength - wl 216 | x += wl_diff / slope 217 | diff = np.abs(x - x_prev) 218 | if diff.max() < tiny: 219 | done = True 220 | 221 | return x 222 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # calcos documentation build configuration file, created by 4 | # sphinx-quickstart on Mon Sep 27 15:35:19 2010. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # Note that not all possible configuration values are present in this 9 | # autogenerated file. 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import os 15 | import sys 16 | 17 | import stsci_rtd_theme 18 | 19 | def setup(app): 20 | try: 21 | app.add_css_file("stsci.css") 22 | except AttributeError: 23 | app.add_stylesheet("stsci.css") 24 | 25 | # If extensions (or modules to document with autodoc) are in another directory, 26 | # add these directories to sys.path here. If the directory is relative to the 27 | # documentation root, use os.path.abspath to make it absolute, like shown here. 28 | sys.path.insert(1, os.path.abspath('.')) 29 | sys.path.insert(1, os.path.abspath(os.path.join('..'))) 30 | sys.path.insert(1, os.path.abspath(os.path.join('..', '..'))) 31 | 32 | # -- General configuration ----------------------------------------------------- 33 | 34 | # Add any Sphinx extension module names here, as strings. 
They can be extensions 35 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 36 | extensions = [ 37 | 'sphinx.ext.autodoc', 38 | 'sphinx.ext.doctest', 39 | 'sphinx.ext.intersphinx', 40 | 'sphinx.ext.imgmath', 41 | ] 42 | 43 | # Add any paths that contain templates here, relative to this directory. 44 | templates_path = ['_templates'] 45 | 46 | # The suffix of source filenames. 47 | source_suffix = '.rst' 48 | 49 | # The encoding of source files. 50 | #source_encoding = 'utf-8' 51 | 52 | # The master toctree document. 53 | master_doc = 'index' 54 | 55 | # General information about the project. 56 | project = u'calcos' 57 | copyright = u'2010, Phil Hodge' 58 | 59 | # The version info for the project you're documenting, acts as replacement for 60 | # |version| and |release|, also used in various other places throughout the 61 | # built documents. 62 | 63 | from calcos import __version__ as VERSION 64 | # The short X.Y version. 65 | version = VERSION 66 | # The full version, including alpha/beta/rc tags. 67 | release = VERSION 68 | 69 | # The language for content autogenerated by Sphinx. Refer to documentation 70 | # for a list of supported languages. 71 | #language = None 72 | 73 | # There are two options for replacing |today|: either, you set today to some 74 | # non-false value, then it is used: 75 | #today = '' 76 | # Else, today_fmt is used as the format for a strftime call. 77 | #today_fmt = '%B %d, %Y' 78 | 79 | # List of documents that shouldn't be included in the build. 80 | #unused_docs = [] 81 | 82 | # List of directories, relative to source directory, that shouldn't be searched 83 | # for source files. 84 | exclude_trees = [] 85 | 86 | # The reST default role (used for this markup: `text`) to use for all documents. 87 | #default_role = None 88 | 89 | # If true, '()' will be appended to :func: etc. cross-reference text. 90 | #add_function_parentheses = True 91 | 92 | # If true, the current module name will be prepended to all description 93 | # unit titles (such as .. function::). 94 | #add_module_names = True 95 | 96 | # If true, sectionauthor and moduleauthor directives will be shown in the 97 | # output. They are ignored by default. 98 | #show_authors = False 99 | 100 | # The name of the Pygments (syntax highlighting) style to use. 101 | pygments_style = 'sphinx' 102 | 103 | # A list of ignored prefixes for module index sorting. 104 | #modindex_common_prefix = [] 105 | 106 | 107 | # -- Options for HTML output --------------------------------------------------- 108 | 109 | # The theme to use for HTML and HTML Help pages. Major themes that come with 110 | # Sphinx are currently 'default' and 'sphinxdoc'. 111 | html_theme = 'stsci_rtd_theme' 112 | 113 | # Theme options are theme-specific and customize the look and feel of a theme 114 | # further. For a list of options available for each theme, see the 115 | # documentation. 116 | html_theme_options = { 117 | "collapse_navigation": True 118 | # "nosidebar": "false", 119 | # "sidebarbgcolor": "#4db8ff", 120 | # "sidebartextcolor": "black", 121 | # "sidebarlinkcolor": "black", 122 | # "headbgcolor": "white", 123 | } 124 | 125 | # Add any paths that contain custom themes here, relative to this directory. 126 | html_theme_path = [stsci_rtd_theme.get_html_theme_path()] 127 | 128 | # The name for this set of Sphinx documents. If None, it defaults to 129 | # " v documentation". 130 | #html_title = None 131 | 132 | # A shorter title for the navigation bar. Default is the same as html_title. 
133 | #html_short_title = None 134 | 135 | # The name of an image file (relative to this directory) to place at the top 136 | # of the sidebar. 137 | #html_logo = None 138 | 139 | # The name of an image file (within the static path) to use as favicon of the 140 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 141 | # pixels large. 142 | #html_favicon = None 143 | 144 | # Add any paths that contain custom static files (such as style sheets) here, 145 | # relative to this directory. They are copied after the builtin static files, 146 | # so a file named "default.css" will overwrite the builtin "default.css". 147 | html_static_path = [] 148 | 149 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 150 | # using the given strftime format. 151 | #html_last_updated_fmt = '%b %d, %Y' 152 | 153 | # If true, SmartyPants will be used to convert quotes and dashes to 154 | # typographically correct entities. 155 | #html_use_smartypants = True 156 | 157 | # Custom sidebar templates, maps document names to template names. 158 | #html_sidebars = {} 159 | 160 | # Additional templates that should be rendered to pages, maps page names to 161 | # template names. 162 | #html_additional_pages = {} 163 | 164 | # If false, no module index is generated. 165 | #html_use_modindex = True 166 | 167 | # If false, no index is generated. 168 | #html_use_index = True 169 | 170 | # If true, the index is split into individual pages for each letter. 171 | #html_split_index = False 172 | 173 | # If true, links to the reST sources are added to the pages. 174 | #html_show_sourcelink = True 175 | 176 | # If true, an OpenSearch description file will be output, and all pages will 177 | # contain a tag referring to it. The value of this option must be the 178 | # base URL from which the finished HTML is served. 179 | #html_use_opensearch = '' 180 | 181 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 182 | #html_file_suffix = '' 183 | 184 | # Output file base name for HTML help builder. 185 | htmlhelp_basename = 'calcosdoc' 186 | 187 | 188 | # -- Options for LaTeX output -------------------------------------------------- 189 | 190 | # The paper size ('letter' or 'a4'). 191 | #latex_paper_size = 'letter' 192 | 193 | # The font size ('10pt', '11pt' or '12pt'). 194 | #latex_font_size = '10pt' 195 | 196 | # Grouping the document tree into LaTeX files. List of tuples 197 | # (source start file, target name, title, author, documentclass [howto/manual]). 198 | latex_documents = [ 199 | ('index', 'calcos.tex', u'calcos Documentation', 200 | u'Phil Hodge', 'manual'), 201 | ] 202 | 203 | # The name of an image file (relative to this directory) to place at the top of 204 | # the title page. 205 | #latex_logo = None 206 | 207 | # For "manual" documents, if this is true, then toplevel headings are parts, 208 | # not chapters. 209 | #latex_use_parts = False 210 | 211 | # Additional stuff for the LaTeX preamble. 212 | #latex_preamble = '' 213 | 214 | # Documents to append as an appendix to all manuals. 215 | #latex_appendices = [] 216 | 217 | # If false, no module index is generated. 218 | #latex_use_modindex = True 219 | 220 | 221 | # Example configuration for intersphinx: refer to the Python standard library. 
222 | intersphinx_mapping = { 223 | 'python': ('https://docs.python.org/3/', None), 224 | } 225 | -------------------------------------------------------------------------------- /calcos/calcos.help: -------------------------------------------------------------------------------- 1 | This is an interface to CalCOS. 2 | 3 | :Author: Phil Hodge, STScI, November 2013. 4 | 5 | Notes 6 | ----- 7 | The parameters for the calcos function differ somewhat from the parameters 8 | for this TEAL interface. The parameters for the calcos function and their 9 | default values are: 10 | 11 | asntable 12 | outdir = None 13 | verbosity = None 14 | find_target = {"flag": False, "cutoff": None} 15 | create_csum_image = False 16 | raw_csum_coords = False 17 | only_csum = False 18 | binx = None 19 | biny = None 20 | compress_csum = False 21 | compression_parameters = "gzip,-0.01" 22 | shift_file = None 23 | save_temp_files = False 24 | stimfile = None 25 | livetimefile = None 26 | burstfile = None 27 | 28 | Parameters 29 | ---------- 30 | input: str 31 | One or more comma and/or blank separated file names. The names may 32 | be either an association file name or a raw (or corrtag) file name. 33 | Environment variables and wildcards may be used. 34 | 35 | verbosity: int 36 | This indicates that very few (verbosity=0), a lot (1), or even more (2) 37 | messages should be printed to the standard output. The default is 1. 38 | 39 | savetmp: bool 40 | The default is False, meaning that temporary files created by calcos 41 | will be deleted. If `savetmp` is set to True, these files will not 42 | be deleted. 43 | 44 | outdir: str 45 | The name of an optional output directory. If outdir is "", the output 46 | files will be written to the directory that contains the input files. 47 | If outdir was specified but no directory of that name exists, it will 48 | be created. 49 | 50 | find: bool 51 | If `find` is True, the 1-D spectrum or spectra will be extracted at 52 | the Y (XD) location at which the spectra were actually found (but see 53 | `cutoff` below). If `find` is False (the default), calcos will find 54 | the spectra and print the Y locations, but the spectra will be 55 | extracted where they were expected, based on the wavecal (OSM) offsets 56 | and the locations given in the B_SPEC column in the XTRACTAB 57 | (_1dx.fits) table. 58 | 59 | cutoff: float or int or None 60 | If None (the default), calcos will use or not use the found locations 61 | of the spectra, depending on the value of `find`. If `cutoff` has a 62 | positive value, that value will be interpreted as an N-sigma cutoff 63 | for `find`; that is, if the error estimate of the found location is 64 | less than the cutoff, then calcos will extract the spectrum at the 65 | found location. 66 | 67 | shift_file: str 68 | If `shift_file` is not an empty string, this should be a text file 69 | to allow the user to override (i.e. explicitly set) the values of 70 | SHIFT1A, SHIFT1B, or SHIFT1C. This should be a text file with five 71 | columns; the columns are: 72 | 73 | rootname fpoffset flash_number segment/stripe shift1 74 | 75 | The first four columns are used for identifying which particular lamp 76 | exposure is to be overridden; however, any or all of those columns may 77 | be given as "any" (without the quotes), which matches any value. All 78 | strings are case insensitive. Blank lines and lines beginning with 79 | "#" will be ignored. 
A single shift file can be used for an entire 80 | association; that's why rootname is included as a selection criterion. 81 | The rootname in this case is the portion of the file name that precedes 82 | the suffix. Note that this is the name of a particular exposure, not 83 | an association name. The rootname corresponds to the actual name of 84 | the raw file, rather than from the ROOTNAME keyword, so that if the raw 85 | file has been renamed without changing the keyword, the name in the 86 | shift file will be the name that most people would expect. It is 87 | redundant to specify both rootname and fpoffset; both are included to 88 | make it easier to set shift1 to the same value for all rootnames for a 89 | given fpoffset. If rootname is specified, fpoffset can be given as 90 | "any". The flash number is one indexed. The keywords (LMP_ON1, etc.) 91 | for lamp flashes are one indexed, following the FITS convention, and 92 | the information about the flashes and the shifts that is written to the 93 | trailer file (and standard output) also show the flash number starting 94 | with one. For those reasons it was felt that mistakes would be less 95 | likely if one indexing was used for flash number. The segment or 96 | stripe name should be the complete string "FUVA", "FUVB", "NUVA", 97 | "NUVB" or "NUVC", not just the single letter "A", "B" or "C". The 98 | shift itself is the pixel offset from the template lamp spectrum for 99 | FPOFFSET=0 (FP-POS=3); a positive shift means the features in the 100 | observed spectrum are at higher pixel number than the features in the 101 | template. 102 | 103 | csum: bool 104 | The default is False. Set `csum` to True in order to create the 105 | "calcos sum" file. Note that most users would never want to use this 106 | option. The csum file will be a FITS file with suffix "_csum". 107 | 108 | raw_csum: bool 109 | This is only relevant if `csum` is True. If `raw_csum` is True (the 110 | default is False), raw pixel coordinates will be used instead of 111 | thermally and geometrically corrected pixel coordinates when the csum 112 | file is written. Keyword COORDFRM is written to the primary header 113 | of the csum file, and this keyword will be set to "raw" if raw pixel 114 | coordinates were used (otherwise COORDFRM will be "corrected"). 115 | 116 | compress: bool 117 | If `csum` is True, setting 'compress' to True results in a csum FITS 118 | file that contains a compressed image. This uses the on-the-fly 119 | compression in pyfits, but the algorithm and parameters are the same 120 | as in CFITSIO, so reading the resulting image using CFITSIO should be 121 | transparent. 122 | 123 | comp_param: str 124 | If `csum` and `compress` are True, specify the compression parameters 125 | in the `comp_param` string. This consists of two parts separated by 126 | a comma; the default is "gzip,-0.01". The first part is the type of 127 | compression, which may be "gzip", "rice", or "hcompress". The second 128 | part is the quantization level, which specifies how the floating-point 129 | image values are to be converted to integer before compression. If 130 | the quantization level is positive, it is interpreted as a value 131 | relative to the RMS noise level in the image background. A value of 132 | 16, for example, means that the quantization level will be 1/16 of the 133 | noise level. If the quantization level is negative, it's the actual 134 | floating-point increment that corresponds to a difference of one in 135 | the scaled integer image. 
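    For example, to write a csum file compressed with Rice at a
    quantization level of 1/16 of the background noise, a call might look
    like this (a sketch using the calcos function parameter names listed
    in the Notes above; the file name is hypothetical):

        >>> import calcos
        >>> calcos.calcos("rootname_asn.fits", create_csum_image=True,
        ...               compress_csum=True,
        ...               compression_parameters="rice,16")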
136 | 
137 | binx: int
138 |     If `csum` is True, `binx` gives the binning factor in the X axis
139 |     (the dispersion direction, for spectroscopic data).  The default is 1,
140 |     and the allowed values are 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024.
141 | 
142 | biny: int
143 |     If `csum` is True, `biny` gives the binning factor in the Y axis.
144 |     The default is 1, and the allowed values are 1, 2, 4, 8, 16, 32, 64,
145 |     128, 256, 512.
146 | 
147 | stimfile: str
148 |     For FUV data, stim locations will be written (appended) to this text
149 |     file.
150 | 
151 | livetimefile: str
152 |     Livetime factors will be written (appended) to this text file.
153 | 
154 | burstfile: str
155 |     For FUV data, information about bursts will be written (appended) to
156 |     this text file.
157 | 
158 | print_version: bool
159 |     If True, calcos will print the version number and return without
160 |     doing anything else.
161 | 
162 | print_revision: bool
163 |     If True, calcos will print the full version string and return without
164 |     doing anything else.
165 | 
166 | Examples
167 | --------
168 | From Python:
169 | 
170 | >>> from stsci.tools import teal
171 | >>> import calcos
172 | >>> teal.teal("calcos")
173 | 
174 | From PyRAF:
175 | 
176 | --> import calcos
177 | --> teal calcos
178 | --> epar calcos
179 | 
180 | --> calcos.calcos("rootname_rawtag_a.fits", "out/")
181 | 
182 | From the Unix command line:
183 | 
184 | % calcos.py -v -o out/ rootname_rawtag_a.fits
185 | 
-------------------------------------------------------------------------------- /tests/helpers.py: --------------------------------------------------------------------------------
1 | """CALCOS regression test helpers."""
2 | 
3 | import os
4 | import sys
5 | 
6 | import pytest
7 | from ci_watson.artifactory_helpers import get_bigdata
8 | from ci_watson.hst_helpers import raw_from_asn, ref_from_image, download_crds
9 | 
10 | from astropy.io import fits
11 | from astropy.io.fits import FITSDiff
12 | 
13 | __all__ = ['calref_from_image', 'BaseCOS']
14 | 
15 | 
16 | def calref_from_image(input_image):
17 |     """
18 |     Return a list of reference filenames, as defined in the primary
19 |     header of the given input image, necessary for calibration; i.e.,
20 |     only those associated with ``*CORR`` set to ``PERFORM`` will be
21 |     considered.
22 |     """
23 |     # NOTE: Add additional mapping as needed.
24 |     # Map mandatory CRDS reference files for instrument/detector combo.
25 |     # These are for files not tied to any particular *CORR or used throughout.
26 |     det_lookup = {
27 |         ('COS', 'FUV'): ['PROFTAB', 'SPWCSTAB'],
28 |         ('COS', 'NUV'): []}
29 | 
30 |     # NOTE: Add additional mapping as needed.
31 |     # Map *CORR to associated CRDS reference file.
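    # Keys are calibration switch keywords from the primary header; a
    # switch whose value is PERFORM pulls in the reference file keywords
    # listed for it.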
32 |     corr_lookup = {
33 |         'BADTCORR': ['BADTTAB'],
34 |         'TEMPCORR': ['BRFTAB'],
35 |         'GEOCORR': ['GEOFILE'],
36 |         'DGEOCORR': ['DGEOFILE'],
37 |         'YWLKCORR': ['YWLKFILE'],
38 |         'XWLKCORR': ['XWLKFILE'],
39 |         'DEADCORR': ['DEADTAB'],
40 |         'PHACORR': ['PHATAB', 'PHAFILE'],
41 |         'FLATCORR': ['FLATFILE'],
42 |         'WAVECORR': ['LAMPTAB', 'DISPTAB', 'TWOZXTAB', 'XTRACTAB'],
43 |         'BRSTCORR': ['BRSTTAB'],
44 |         'TRCECORR': ['TRACETAB'],
45 |         'ALGNCORR': ['TWOZXTAB'],
46 |         'DQICORR': ['SPOTTAB', 'TRACETAB', 'BPIXTAB', 'GSAGTAB'],
47 |         'X1DCORR': ['WCPTAB', 'TWOZXTAB', 'XTRACTAB'],
48 |         'BACKCORR': ['TWOZXTAB', 'XTRACTAB'],
49 |         'FLUXCORR': ['FLUXTAB', 'TDSTAB', 'PHOTTAB'],
50 |         'WALKCORR': ['WALKTAB']}
51 | 
52 |     hdr = fits.getheader(input_image, ext=0)
53 |     ref_files = ref_from_image(
54 |         input_image, det_lookup[(hdr['INSTRUME'], hdr['DETECTOR'])])
55 | 
56 |     for step in corr_lookup:
57 |         # Not all images have the CORR step and it is not always on.
58 |         if (step not in hdr) or (hdr[step].strip().upper() != 'PERFORM'):
59 |             continue
60 |         ref_files += ref_from_image(input_image, corr_lookup[step])
61 |     # Special case for STATFLAG=T, which requires XTRACTAB, but MissingRefFiles()
62 |     # doesn't know
63 |     if hdr['STATFLAG']:
64 |         ref_files += ref_from_image(input_image, ['XTRACTAB'])
65 | 
66 |     return list(set(ref_files))  # Remove duplicates
67 | 
68 | 
69 | # Base class for actual tests.
70 | # NOTE: Named in a way so pytest will not pick them up here.
71 | # NOTE: bigdata marker requires TEST_BIGDATA environment variable to
72 | #       point to a valid big data directory, whether locally or on Artifactory.
73 | # NOTE: envopt would point tests to "dev" or "stable".
74 | # NOTE: _jail fixture ensures each test runs in a clean tmpdir.
75 | @pytest.mark.bigdata
76 | @pytest.mark.usefixtures('_jail', 'envopt')
77 | class BaseCOS:
78 | 
79 |     instrument = 'cos'
80 |     ignore_keywords = ['DATE', 'CAL_VER']
81 | 
82 |     # To be defined by test class in actual test modules.
83 |     detector = ''
84 | 
85 |     @pytest.fixture(autouse=True)
86 |     def setup_class(self, envopt):
87 |         """
88 |         Class-level setup that is done at the beginning of the test.
89 | 
90 |         Parameters
91 |         ----------
92 |         envopt : {'dev', 'stable'}
93 |             This is a ``pytest`` fixture that defines the test
94 |             environment in which input and truth files reside.
95 | 
96 |         """
97 |         # Since CALCOS still runs in PY2, need to check here because
98 |         # tests can only run in PY3.
99 |         if sys.version_info < (3, ):
100 |             raise SystemError('tests can only run in Python 3')
101 | 
102 |         self.env = envopt
103 | 
104 |     def get_input_files(self, filenames):
105 |         """
106 |         Copy input files (ASN, RAW, etc) into the working directory.
107 |         If ASN is given, RAW files in the ASN table are also copied.
108 |         The associated CRDS reference files are also copied or
109 |         downloaded, if necessary.
110 | 
111 |         Data directory layout for CALCOS::
112 | 
113 |             detector/
114 |                 input/
115 |                 truth/
116 | 
117 |         Parameters
118 |         ----------
119 |         filenames : list
120 |             List of filenames of the ASN/RAW/etc to copy over, along with their
121 |             associated files.
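
        Examples
        --------
        Copy an association table, its member rawtags, and their reference
        files (the rootname is hypothetical)::

            self.get_input_files(['ld1234010_asn.fits'])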
122 | 
123 |         """
124 |         all_raws = []
125 |         for file in filenames:
126 |             if 'rawtag' in file:
127 |                 all_raws.append(file)
128 |             # List of filenames can include _rawtag, _asn and _spt files
129 |             dest = get_bigdata('scsb-calcos', self.env, self.detector, 'input',
130 |                                file)
131 |             # If file is an association table, download raw files specified in the table
132 |             if file.endswith('_asn.fits'):
133 |                 if self.detector == 'nuv':
134 |                     asn_raws = raw_from_asn(file, '_rawtag.fits')
135 |                 else:
136 |                     asn_raws = raw_from_asn(file, '_rawtag_a.fits')
137 |                     asn_raws += raw_from_asn(file, '_rawtag_b.fits')
138 |                 for raw in asn_raws:  # Download RAWs in ASN.
139 |                     get_bigdata('scsb-calcos', self.env, self.detector, 'input',
140 |                                 raw)
141 |                 all_raws += asn_raws
142 | 
143 |         first_pass = ('JENKINS_URL' in os.environ and
144 |                       'ssbjenkins' in os.environ['JENKINS_URL'])
145 |         for raw in all_raws:
146 |             ref_files = calref_from_image(raw)
147 |             for ref_file in ref_files:
148 |                 print("Getting reference file {}".format(ref_file))
149 |                 # Special reference files that live with inputs.
150 |                 if ('$' not in ref_file and
151 |                         os.path.basename(ref_file) == ref_file):
152 |                     get_bigdata('scsb-calcos', self.env, self.detector,
153 |                                 'input', ref_file)
154 |                     print('{} downloaded successfully'.format(ref_file))
155 |                     continue
156 | 
157 |                 # Jenkins cannot see Central Storage on push event,
158 |                 # and somehow setting, say, jref to "." does not work anymore.
159 |                 # So, we need this hack.
160 |                 if '$' in ref_file and first_pass:
161 |                     first_pass = False
162 |                     if not os.path.isdir('/grp/hst/cdbs'):
163 |                         ref_path = os.path.dirname(dest) + os.sep
164 |                         var = ref_file.split('$')[0]
165 |                         os.environ[var] = ref_path  # hacky hack hack
166 | 
167 |                 # Download reference files only if needed.
168 |                 download_crds(ref_file, verbose=True)
169 | 
170 |     def compare_outputs(self, outputs, atol=0, rtol=1e-7, raise_error=True,
171 |                         ignore_keywords_overwrite=None):
172 |         """
173 |         Compare CALXXX output with "truth" using ``fitsdiff``.
174 | 
175 |         Parameters
176 |         ----------
177 |         outputs : list of tuple
178 |             A list of tuples, each containing filename (without path)
179 |             of CALXXX output and truth, in that order. Example::
180 | 
181 |                 [('output1.fits', 'truth1.fits'),
182 |                  ('output2.fits', 'truth2.fits'),
183 |                  ...]
184 | 
185 |         atol, rtol : float
186 |             Absolute and relative tolerance for data comparison.
187 | 
188 |         raise_error : bool
189 |             Raise ``AssertionError`` if difference is found.
190 | 
191 |         ignore_keywords_overwrite : list of str or `None`
192 |             If not `None`, these will overwrite
193 |             ``self.ignore_keywords`` for the calling test.
194 | 
195 |         Returns
196 |         -------
197 |         report : str
198 |             Report from ``fitsdiff``.
199 |             This is part of the error message if ``raise_error=True``.
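
        Examples
        --------
        Compare one calibrated product with its truth file; the truth copy
        is fetched by name from the 'truth' tree (the name is hypothetical)::

            report = self.compare_outputs(
                [('rootname_flt_a.fits', 'rootname_flt_a.fits')])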
200 | 
201 |         """
202 |         all_okay = True
203 |         creature_report = ''
204 | 
205 |         if ignore_keywords_overwrite is None:
206 |             ignore_keywords = self.ignore_keywords
207 |         else:
208 |             ignore_keywords = ignore_keywords_overwrite
209 | 
210 |         for actual, desired in outputs:
211 |             desired = get_bigdata('scsb-calcos', self.env, self.detector,
212 |                                   'truth', desired)
213 |             fdiff = FITSDiff(actual, desired, rtol=rtol, atol=atol,
214 |                              ignore_keywords=ignore_keywords)
215 |             creature_report += fdiff.report()
216 | 
217 |             if not fdiff.identical and all_okay:
218 |                 all_okay = False
219 | 
220 |         if not all_okay and raise_error:
221 |             raise AssertionError(os.linesep + creature_report)
222 | 
223 |         return creature_report
224 | 
-------------------------------------------------------------------------------- /calcos/xd_search.py: --------------------------------------------------------------------------------
1 | from __future__ import absolute_import, division  # confidence unknown
2 | import numpy as np
3 | from scipy.signal.windows import boxcar
4 | from scipy import ndimage
5 | from . import cosutil
6 | from .calcosparam import *  # parameter definitions
7 | from . import ccos
8 | 
9 | MASK_X = 189  # width of region to mask for each geocoronal line
10 | SEARCH_Y = 91  # height of search region
11 | 
12 | # for comparison between values of fwhm
13 | SIGNFICANTLY_LARGER = 2.  # pixels
14 | 
15 | # Lyman alpha, oxygen I, oxygen I
16 | AIRGLOW_WAVELENGTHS = [1215.67, 1304., 1356.]
17 | 
18 | """Info pertaining to geocoronal features:
19 | For G130M, 1309, PSA, the image of geocoronal LyA is about 184 x 60 pixels.
20 | The PSA and WCA apertures are about 100 pixels apart, for all FUV gratings.
21 | NUVA and NUVB are closer, about 94 pixels for the M gratings.
22 | 
23 | The plate scale (arcsec per pixel) in the dispersion direction is roughly
24 | the same for all gratings; it should be OK to mask a region 189 pixels wide,
25 | centered on each geocoronal line.
26 | 
27 | A search range of 91 pixels seems reasonable (-45 to +45 pixels inclusive).
28 | 
29 | In t9h1220sl_phot.fits for NUV, the minimum wavelength for the M gratings
30 | is 1648.58 Angstroms.  Wavelengths go down to 1213.77 for G230L, 2635,
31 | but the throughput at short wavelengths is very low.
32 | """
33 | 
34 | def xdSearch(data, dq_data, wavelength, axis, slope, y_nominal,
35 |              x_offset, detector):
36 |     """Find the cross-dispersion location of the target spectrum.
37 | 
38 |     Parameters
39 |     ----------
40 |     data: array_like, 2-D
41 |         SCI data from the flt file.
42 | 
43 |     dq_data: array_like, 2-D
44 |         DQ data from the flt file.
45 | 
46 |     wavelength: array_like, 1-D
47 |         Wavelength at each pixel (only needed for FUV).
48 | 
49 |     axis: int
50 |         The dispersion axis, 0 (Y) or 1 (X).
51 | 
52 |     slope: float
53 |         Slope of spectrum, pixels per pixel.
54 | 
55 |     y_nominal: float
56 |         Location of spectrum at left edge of detector, i.e. at
57 |         X = x_offset.
58 | 
59 |     x_offset: int
60 |         Offset of the detector in the data array.
61 | 
62 |     detector: str
63 |         Detector name ("FUV" or "NUV").
64 | 
65 |     Returns
66 |     -------
67 |     (offset2, y_locn, y_locn_sigma, fwhm): tuple of four values
68 |         offset2 is the offset of the spectrum from y_nominal (positive
69 |         if the spectrum was found at a larger Y pixel number).  y_locn
70 |         is the Y pixel number at which the spectrum was found (at pixel
71 |         x_offset from the left edge of data).  y_locn_sigma is the
72 |         error estimate for y_locn.
fwhm is the full-width at half 73 | maximum of the peak in the cross-dispersion profile; this can be 74 | either a float or an int. 75 | """ 76 | 77 | (e_j, zero_point) = extractBand(data, dq_data, wavelength, 78 | axis, slope, y_nominal, 79 | x_offset, detector) 80 | 81 | box = 3 82 | 83 | (y_locn, y_locn_sigma, fwhm) = findPeak(e_j, box) 84 | 85 | if y_locn is None: 86 | offset2 = 0. 87 | else: 88 | # Shift y_locn to account for the offset of e_j from Y = 0 in 'data', 89 | # and shift y_locn to where the spectrum crosses X = x_offset. 90 | y_locn += zero_point 91 | y_locn += slope * float(x_offset) 92 | offset2 = y_locn - y_nominal 93 | 94 | return (offset2, y_locn, y_locn_sigma, fwhm) 95 | 96 | def extractBand(data, dq_data, wavelength, axis, slope, y_nominal, 97 | x_offset, detector): 98 | """Extract a 2-D stripe centered on the nominal location of the target. 99 | 100 | Parameters 101 | ---------- 102 | data: array_like 103 | SCI data from the flt file 104 | 105 | dq_data: array_like 106 | DQ data from the flt file 107 | 108 | wavelength: array_like 109 | Wavelength at each pixel (to locate the airglow lines) 110 | 111 | axis: int 112 | The dispersion axis, 0 (Y) or 1 (X) 113 | 114 | slope: float 115 | Slope of spectrum, pixels per pixel 116 | 117 | y_nominal: float 118 | Intercept of spectrum at left edge of detector 119 | 120 | x_offset: int 121 | Offset of the detector in the data array 122 | 123 | detector: str 124 | Detector name ("FUV" or "NUV") 125 | 126 | Returns 127 | ------- 128 | tuple 129 | (e_j, zero_point), where e_j is a 1-D array containing a section of 130 | data collapsed along the dispersion direction and zero_point is 131 | the Y pixel number at the left edge of data corresponding to 132 | pixel 0 of e_j 133 | """ 134 | 135 | extr_height = SEARCH_Y 136 | axis_length = data.shape[axis] 137 | e_ij = np.zeros((extr_height, axis_length), dtype=np.float32) 138 | ccos.extractband(data, axis, slope, y_nominal, x_offset, e_ij) 139 | 140 | # Clobber any region flagged as bad; note that this won't work well if a 141 | # flagged region covers part but not all of a spectral feature. 142 | if dq_data is not None: 143 | dq_ij = np.zeros((extr_height, axis_length), dtype=np.int16) 144 | ccos.extractband(dq_data, axis, slope, y_nominal, x_offset, dq_ij) 145 | dq = np.where(dq_ij == 0, 1, 0) 146 | e_ij *= dq 147 | 148 | if detector == "FUV": 149 | # Block out (i.e. set to zero) regions affected by airglow lines. 150 | for airglow in AIRGLOW_WAVELENGTHS: 151 | pixel_center = findPixelNumber(wavelength, airglow) 152 | pixel0 = pixel_center - (MASK_X // 2) 153 | pixel1 = pixel_center + (MASK_X // 2) 154 | if pixel1 < 0 or pixel0 >= axis_length: 155 | continue 156 | pixel0 = max(pixel0, 0) 157 | pixel1 = min(pixel1, axis_length-1) 158 | e_ij[:,int(pixel0):int(pixel1)] = 0. 159 | 160 | # sum the data along the dispersion direction 161 | e_j = e_ij.sum(axis=1) 162 | 163 | # Y pixel number in data corresponding to e_j[0] 164 | zero_point = int(round(y_nominal - slope * float(x_offset))) - \ 165 | SEARCH_Y // 2 166 | 167 | return (e_j, zero_point) 168 | 169 | def findPixelNumber(wl, wavelength): 170 | """Find the nearest pixel to 'wavelength'. 
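
    If wavelength is outside the range covered by wl, the pixel number is
    extrapolated linearly using the mean dispersion; otherwise the
    bracketing interval is narrowed iteratively using the local slope,
    and the index of the element of wl nearest to wavelength within the
    final interval is returned.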
171 | 172 | Parameters 173 | ---------- 174 | wl: array_like, float64 175 | Wavelength at each pixel, assumed to be increasing 176 | 177 | wavelength: float 178 | A particular wavelength 179 | 180 | Returns 181 | ------- 182 | int 183 | Pixel number closest to wavelength in the array wl 184 | """ 185 | 186 | nelem = len(wl) 187 | 188 | dispersion = (wl[-1] - wl[0]) / float(nelem) 189 | if wavelength < wl[0]: 190 | x = (wavelength - wl[0]) / dispersion 191 | return int(round(x)) 192 | elif wavelength >= wl[-1]: 193 | x = (wavelength - wl[-1]) / dispersion + float(nelem) - 1. 194 | return int(round(x)) 195 | 196 | i0 = 0 197 | i1 = nelem - 1 198 | while (i1 - i0) >= 5: 199 | if i0 == i1: 200 | break 201 | slope = (wl[i1] - wl[i0]) / (i1 - i0) 202 | if slope == 0.: 203 | raise RuntimeError("Bad wavelength array.") 204 | mid = (i1 + i0) // 2 205 | x = int(round((wavelength - wl[mid]) / slope)) + mid 206 | dx = i1 - i0 207 | i0 = x - dx // 16 208 | i1 = x + dx // 16 209 | i0 = max(i0, 0) 210 | i1 = min(i1, nelem-1) 211 | 212 | x = i0 213 | diff = abs(wavelength - wl[x]) 214 | for i in range(i0, i1+1): 215 | if abs(wavelength - wl[i]) < diff: 216 | x = i 217 | diff = abs(wavelength - wl[x]) 218 | 219 | return x 220 | 221 | def findPeak(e_j, box): 222 | """Find the location of the maximum within the subset. 223 | 224 | Note that the data were collapsed to the left edge to get e_j, so 225 | the location is the intercept on the edge, rather than where the 226 | spectrum crosses the middle of the detector or where it crosses 227 | X = x_offset. 228 | Also, e_j is not the full height of the detector, just a subset 229 | centered on the nominal Y location of the spectrum. 230 | 231 | Parameters 232 | ---------- 233 | e_j: array_like 234 | 1-D array of data collapsed along dispersion axis, taking into 235 | account the tilt of the spectrum 236 | 237 | box: int 238 | Smooth e_j with a box of this width before looking for the 239 | maximum 240 | 241 | Returns 242 | ------- 243 | tuple 244 | The location (float) in the cross-dispersion direction relative 245 | to the first pixel in e_j, an estimate of the uncertainty in 246 | that location, and the FWHM of the peak in the cross-dispersion 247 | profile 248 | """ 249 | 250 | boxcar_kernel = boxcar(box) / box 251 | e_j_sm = ndimage.convolve(e_j, boxcar_kernel, mode="nearest") 252 | 253 | index = np.argsort(e_j_sm) 254 | ymax = index[-1] 255 | 256 | nelem = len(e_j) 257 | 258 | # This may be done again later, after we have found the location more 259 | # accurately. 260 | fwhm = findFwhm(e_j, ymax) 261 | 262 | # fit a quadratic to at least five points centered on ymax 263 | MIN_NPTS = 5 264 | npts = int(round(fwhm)) 265 | npts = max(npts, MIN_NPTS) 266 | if npts // 2 * 2 == npts: 267 | npts += 1 268 | x = np.arange(nelem, dtype=np.float64) 269 | j1 = ymax - npts // 2 270 | j1 = max(j1, 0) 271 | j2 = j1 + npts 272 | if j2 > nelem: 273 | j2 = nelem 274 | j1 = j2 - npts 275 | j1 = max(j1, 0) 276 | (coeff, var) = cosutil.fitQuadratic(x[j1:j2], e_j_sm[j1:j2]) 277 | 278 | (y_locn, y_locn_sigma) = cosutil.centerOfQuadratic(coeff, var) 279 | if y_locn is None: 280 | y_locn = ymax 281 | y_locn_sigma = 999. 282 | 283 | # Find the FWHM again if the location is far from the brightest pixel. 284 | if abs(y_locn - ymax) > fwhm / 4.: 285 | fwhm = findFwhm(e_j, y_locn) 286 | 287 | return (y_locn, y_locn_sigma, fwhm) 288 | 289 | def findFwhm(e_j, y_locn): 290 | """Find the FWHM of the cross-dispersion profile of the spectrum. 
291 | 292 | Two different approaches are used to find the FWHM. The first method 293 | is to count the number of elements in the cross-dispersion profile with 294 | values above the half-maximum value; this value will be an integer. 295 | The second method is to follow the profile to the half-maximum value on 296 | either side of the maximum, using linear interpolation to get a better 297 | estimate of where the profile cuts across the half-maximum level; this 298 | value will be a float. The value from the second method is expected to 299 | be more accurate if the target was actually found and has good 300 | signal-to-noise, so normally that value will be returned. If the first 301 | method gives a significantly larger value, however, that value will be 302 | returned because it may indicate that the cross-dispersion profile is 303 | just noise. 304 | 305 | Parameters 306 | ---------- 307 | e_j: array_like 308 | 1-D array of data collapsed along dispersion axis 309 | 310 | y_locn: float 311 | The location in the cross-dispersion direction, relative to the 312 | first pixel in e_j. 313 | 314 | Returns 315 | ------- 316 | float or int 317 | The full width half maximum of the peak in e_j. 318 | """ 319 | 320 | nelem = len(e_j) 321 | y_locn_nint = int(round(y_locn)) 322 | if y_locn_nint < 0 or y_locn_nint >= nelem: 323 | return -1. 324 | 325 | e_max = e_j[y_locn_nint] 326 | if e_max <= 0: 327 | return -1. 328 | 329 | e_j_sorted = np.sort(e_j) 330 | 331 | third = nelem // 3 332 | background = e_j_sorted[0:third].mean(dtype=np.float64) 333 | 334 | find_this_level = (e_max - background) / 2. + background 335 | 336 | # first estimate of FWHM 337 | # Count all elements in the sorted array that are greater than the 338 | # halfway level. This will be large if the array is just noise 339 | # (at least, that's the idea). 340 | j = nelem - 1 341 | while j >= 0: 342 | if e_j_sorted[j] < find_this_level: 343 | break 344 | j -= 1 345 | fwhm_1 = nelem - 1 - j # this is an int 346 | 347 | # second estimate of FWHM 348 | # Find where the cross-dispersion profile crosses the halfway level 349 | # on either side of the maximum. 350 | 351 | j_low = 0 # initial values 352 | j_high = nelem - 1 353 | 354 | # first the low side 355 | j = y_locn_nint 356 | while j >= 0: 357 | if e_j[j] < find_this_level: 358 | j_low = j 359 | break 360 | j -= 1 361 | 362 | # Use linear interpolation to find where e_j would equal find_this_level. 363 | denom = e_j[j_low+1] - e_j[j_low] 364 | if denom == 0.: 365 | low = float(j_low) + 0.5 # 0.5 is an estimate 366 | else: 367 | low = float(j_low) + (find_this_level - e_j[j_low]) / denom 368 | 369 | # now the high side 370 | j = y_locn_nint 371 | while j < nelem: 372 | if e_j[j] < find_this_level: 373 | j_high = j 374 | break 375 | j += 1 376 | 377 | denom = e_j[j_high] - e_j[j_high-1] 378 | if denom == 0.: 379 | high = float(j_high) - 0.5 380 | else: 381 | high = float(j_high-1) + (find_this_level - e_j[j_high-1]) / denom 382 | 383 | fwhm_2 = high - low # this is a float 384 | 385 | if fwhm_1 > fwhm_2 + SIGNFICANTLY_LARGER: 386 | fwhm = fwhm_1 387 | else: 388 | fwhm = fwhm_2 389 | 390 | return fwhm 391 | -------------------------------------------------------------------------------- /calcos/getinfo.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division # confidence high 2 | import os 3 | import astropy.io.fits as fits 4 | from . 
import cosutil 5 | from .calcosparam import * 6 | 7 | def initialInfo(filename): 8 | """Get DETECTOR, OBSMODE, and EXPTYPE from the primary header. 9 | 10 | Parameters 11 | ---------- 12 | filename: str 13 | Name of the input file. 14 | 15 | Returns 16 | ------- 17 | info: dictionary 18 | Keywords and values (just a few) from the headers of the input 19 | file. 20 | """ 21 | 22 | fd = fits.open(filename, mode="readonly") 23 | phdr = fd[0].header 24 | 25 | info = {} 26 | 27 | if "DETECTOR" in phdr: 28 | detector = phdr["DETECTOR"] 29 | else: 30 | raise RuntimeError("File " + filename + 31 | " does not have DETECTOR keyword.") 32 | 33 | if "OBSMODE" in phdr: 34 | obsmode = phdr["OBSMODE"] 35 | else: 36 | raise RuntimeError("File " + filename + 37 | " does not have OBSMODE keyword.") 38 | 39 | if "EXPTYPE" in phdr: 40 | exptype = phdr["EXPTYPE"] 41 | else: 42 | raise RuntimeError("File " + filename + 43 | " does not have EXPTYPE keyword.") 44 | 45 | if detector != "FUV" and detector != "NUV": 46 | raise ValueError("File " + filename + 47 | " has invalid DETECTOR = " + detector) 48 | 49 | if obsmode != "TIME-TAG" and obsmode != "ACCUM": 50 | raise ValueError("File " + filename + 51 | " has invalid OBSMODE = " + obsmode) 52 | 53 | info["detector"] = detector 54 | info["obsmode"] = obsmode 55 | info["exptype"] = exptype 56 | 57 | fd.close() 58 | 59 | return info 60 | 61 | def getGeneralInfo(phdr, hdr): 62 | """Get keyword values from the primary and extension header. 63 | 64 | The input argument phdr is the primary header, and the second hdr is 65 | the first extension header, as provided by the pyfits module. The 66 | function value is a dictionary of keyword = value pairs. If a keyword 67 | is missing from the header, it will still be included in the dictionary, 68 | but its value will be set to the NOT_APPLICABLE string, or a reasonable 69 | default for keyword values that are not text strings. npix (a tuple 70 | giving the output image size) will also be assigned. If the data 71 | portion is empty (based on the NAXIS keyword), the value will be (0,); 72 | otherwise, the value will be assigned an appropriate value for the 73 | detector, rather than being read directly from the header. The 74 | heliocentric velocity will be initialized to zero. 75 | 76 | Parameters 77 | ---------- 78 | phdr: pyfits Header object 79 | Primary header. 80 | 81 | hdr: pyfits Header object 82 | Extension header. 83 | 84 | Returns 85 | ------- 86 | info: dictionary 87 | Keywords and values from the headers of the input file. 88 | """ 89 | 90 | info = {} 91 | 92 | # Get keywords from the primary header. 93 | 94 | # This is a list of primary header keywords and default values. 95 | keylist = { 96 | "detector": NOT_APPLICABLE, 97 | "segment": NOT_APPLICABLE, 98 | "obstype": NOT_APPLICABLE, 99 | "obsmode": NOT_APPLICABLE, 100 | "exptype": NOT_APPLICABLE, 101 | "opt_elem": NOT_APPLICABLE, 102 | "targname": NOT_APPLICABLE, 103 | "lampused": NOT_APPLICABLE, 104 | "lampplan": NOT_APPLICABLE, 105 | "rootname": "", 106 | "subarray": False, 107 | "tagflash": False, 108 | "cenwave": 0, 109 | "randseed": -1, 110 | "fppos": 1, 111 | "fpoffset": 0, 112 | "life_adj": -1, 113 | "aperypos": NOT_APPLICABLE, 114 | "coscoord": DETECTOR_COORDINATES, 115 | "ra_targ": -999., 116 | "dec_targ": -999., 117 | "xtrctalg": "BOXCAR"} 118 | 119 | for key in keylist.keys(): 120 | info[key] = phdr.get(key, default=keylist[key]) 121 | 122 | # Set output image size (variables defined in calcosparam.py). 
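    # For spectroscopic data the output array is wider than the detector
    # (FUV_EXTENDED_X or NUV_EXTENDED_X), and x_offset is the offset of the
    # detector within that padded array; NUV imaging data use the unpadded
    # NUV_X width with x_offset = 0.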
123 | if info["detector"] == "FUV": 124 | info["npix"] = (FUV_Y, FUV_EXTENDED_X) 125 | info["x_offset"] = FUV_X_OFFSET 126 | else: 127 | if info["obstype"] == "IMAGING": 128 | info["npix"] = (NUV_Y, NUV_X) 129 | info["x_offset"] = 0 130 | else: 131 | info["npix"] = (NUV_Y, NUV_EXTENDED_X) 132 | info["x_offset"] = NUV_X_OFFSET 133 | 134 | # Replace the value for npix if there's no data (based on extension header). 135 | if hdr["NAXIS"] == 0: 136 | info["npix"] = (0,) 137 | elif hdr["NAXIS"] == 2 and hdr["NAXIS2"] == 0: 138 | info["npix"] = (0,) 139 | 140 | # Assign an initial value for the heliocentric velocity 141 | info["v_helio"] = 0. 142 | 143 | (info["aperture"], message) = cosutil.getApertureKeyword(phdr) 144 | if message: 145 | cosutil.printWarning(message) 146 | 147 | # Update info["life_adj_offset"], if life_adj = -1. 148 | cosutil.computeLifeAdjOffset(info) 149 | 150 | if info["tagflash"] == TAGFLASH_AUTO: 151 | info["tagflash"] = True 152 | info["tagflash_type"] = TAGFLASH_TYPE_AUTO 153 | elif info["tagflash"] == TAGFLASH_UNIFORMLY_SPACED: 154 | info["tagflash"] = True 155 | info["tagflash_type"] = TAGFLASH_TYPE_UNIFORMLY_SPACED 156 | else: 157 | info["tagflash"] = False 158 | info["tagflash_type"] = TAGFLASH_TYPE_NONE 159 | 160 | # Engineering keywords relevant to deadtime correction. 161 | 162 | if info["detector"] == "FUV": 163 | if info["segment"] == "FUVA": 164 | countrate = phdr.get("DEVENTA", default=0.) 165 | info["countrate"] = hdr.get("DEVENTA", default=countrate) 166 | else: 167 | countrate = phdr.get("DEVENTB", default=0.) 168 | info["countrate"] = hdr.get("DEVENTB", default=countrate) 169 | else: 170 | countrate = phdr.get("MEVENTS", default=0.) 171 | info["countrate"] = hdr.get("MEVENTS", default=countrate) 172 | 173 | # Now get keywords from the extension header. 174 | 175 | if info["detector"] == "FUV": 176 | # The header keyword is the rate for both stims together; we want 177 | # the rate for one stim. 178 | info["stimrate"] = hdr.get("STIMRATE", default=0.) / 2. 179 | else: 180 | info["stimrate"] = 0. 181 | 182 | # This is a list of extension header keywords and default values. 183 | keylist = { 184 | "dispaxis": 0, 185 | "sdqflags": 184, # 8 + 16 + 32 + 128 186 | "nsubarry": 0, 187 | "numflash": 0, 188 | "expstart": -1., 189 | "expend": -1., 190 | "doppon": False, 191 | "doppont": False, 192 | "doppmagv": -1., 193 | "dopmagt": -1., 194 | "doppzero": -1., 195 | "dopzerot": -1., 196 | "orbitper": -1., 197 | "orbtpert": -1., 198 | "ra_aper": 0., 199 | "dec_aper": 0., 200 | "pa_aper": 0.} 201 | 202 | for key in keylist.keys(): 203 | info[key] = hdr.get(key, default=keylist[key]) 204 | 205 | # For FUV, the keyword for exposure time depends on segment. 206 | exptime_key = cosutil.segmentSpecificKeyword("exptime", info["segment"]) 207 | exptime_default = hdr.get("exptime", default=0.) 208 | info["exptime"] = hdr.get(exptime_key, default=exptime_default) 209 | 210 | # FUV detector high voltage level (commanded, raw) 211 | hvlevel_key = cosutil.segmentSpecificKeyword("hvlevel", info["segment"]) 212 | info["hvlevel"] = hdr.get(hvlevel_key, default=NOT_APPLICABLE) 213 | 214 | # Copy exptime to orig_exptime, so we can modify exptime but save the 215 | # original value. NOTE: for TIME-TAG data this value will be replaced 216 | # with the difference between the first and last values in the TIME column. 
217 | info["orig_exptime"] = info["exptime"] 218 | 219 | if info["tagflash"] and info["numflash"] < 1: 220 | info["tagflash"] = False 221 | 222 | # Reset the subarray flag if the "subarray" is the entire detector. 223 | if info["subarray"]: 224 | if info["detector"] == "FUV": 225 | # Indices 0, 1, 2, 3 are for FUVA, while 4, 5, 6, 7 are for FUVB. 226 | if info["segment"] == "FUVA": 227 | sub_number = "0" 228 | else: 229 | sub_number = "4" 230 | else: 231 | sub_number = "0" 232 | xsize = hdr.get("size"+sub_number+"x", default=0) 233 | ysize = hdr.get("size"+sub_number+"y", default=0) 234 | if info["detector"] == "FUV" and xsize == FUV_X and ysize == FUV_Y: 235 | info["subarray"] = False 236 | elif xsize == NUV_X and ysize == NUV_Y: 237 | info["subarray"] = False 238 | 239 | return info 240 | 241 | def getSwitchValues(phdr): 242 | """Get calibration switch values from the primary header. 243 | 244 | The input argument phdr is the primary header, as provided by the fits 245 | module. The function value is a dictionary of keyword = value pairs. 246 | Note that the keyword values will be converted to upper case. If a 247 | keyword is missing from the header, it will still be included in the 248 | dictionary, but its value will be set to the NOT_APPLICABLE string. 249 | 250 | Parameters 251 | ---------- 252 | phdr: pyfits Header object 253 | Primary header. 254 | 255 | Returns 256 | ------- 257 | switches: dictionary 258 | Calibration switch keywords and their values from the primary 259 | header of the input file. 260 | """ 261 | 262 | switches = {} 263 | 264 | for key in ["dqicorr", "randcorr", "tempcorr", "geocorr", "igeocorr", 265 | "dgeocorr", "xwlkcorr", "ywlkcorr", "trcecorr", "algncorr", 266 | "deadcorr", "flatcorr", "doppcorr", "helcorr", "phacorr", 267 | "brstcorr", "badtcorr", "x1dcorr", "wavecorr", "backcorr", 268 | "fluxcorr", "photcorr", "tdscorr", "hvdscorr", "statflag"]: 269 | switches[key] = cosutil.getSwitch(phdr, key) 270 | 271 | return switches 272 | 273 | def getRefFileNames(phdr): 274 | """Get reference file names from the primary header. 275 | 276 | The input argument phdr is the primary header, as provided by the pyfits 277 | module. The function value is a dictionary of keyword = value pairs. 278 | If a keyword is missing from the header, it will still be included in 279 | the dictionary, but its value will be set to the NOT_APPLICABLE string. 280 | If the name includes an environment variable (Unix-style or IRAF-style), 281 | the name will be expanded to a complete pathname. Keys of the form 282 | "bpixtab_hdr" (for example) are the values read directly from the 283 | header, while keys of the form "bpixtab" have been translated to full 284 | path names (operating system dependent). 285 | 286 | Parameters 287 | ---------- 288 | phdr: pyfits Header object 289 | Primary header. 290 | 291 | Returns 292 | ------- 293 | reffiles: dictionary 294 | Reference file keywords and their values from the primary 295 | header of the input file. 
296 | """ 297 | 298 | reffiles = {} 299 | 300 | for key in ["flatfile", "hvtab", "xwlkfile", "ywlkfile", 301 | "bpixtab", "gsagtab", "spottab", "brftab", 302 | "geofile", "dgeofile", "twozxtab", "deadtab", "phafile", 303 | "phatab", "brsttab", "badttab", "tracetab", 304 | "xtractab", "lamptab", "disptab", "fluxtab", 305 | "imphttab", "phottab", "spwcstab", "wcptab", 306 | "tdstab", "proftab", "hvdstab"]: 307 | reffiles[key+"_hdr"] = phdr.get(key, default=NOT_APPLICABLE) 308 | reffiles[key] = cosutil.expandFileName(reffiles[key+"_hdr"]) 309 | 310 | if phdr["obstype"] == "SPECTROSCOPIC": 311 | if phdr.get("fluxtab", "missing") == "missing": 312 | reffiles["fluxtab"] = reffiles["phottab"] 313 | reffiles["fluxtab_hdr"] = reffiles["phottab_hdr"] 314 | else: 315 | if phdr.get("imphttab", "missing") == "missing": 316 | reffiles["imphttab"] = reffiles["phottab"] 317 | reffiles["imphttab_hdr"] = reffiles["phottab_hdr"] 318 | 319 | return reffiles 320 | 321 | def resetSwitches(switches, reffiles): 322 | """Reset calibration switches if required reference file is "N/A". 323 | 324 | If a calibration step needs one or more reference files, and if the 325 | name of any such file is given in the header as "N/A", the calibration 326 | step cannot be done. This function checks some steps and resets the 327 | switch from PERFORM to SKIPPED if a required reference file is "N/A". 328 | 329 | Parameters 330 | ---------- 331 | switches: dictionary 332 | Keyword and value for calibration switches. 333 | 334 | reffiles: dictionary 335 | Keyword and value for reference file names. 336 | """ 337 | 338 | check_these = {"badtcorr": ["badttab"], 339 | "xwlkcorr": ["xwlkfile"], 340 | "ywlkcorr": ["ywlkfile"], 341 | "tdscorr": ["tdstab"]} 342 | #check_these = {"badtcorr": ["badttab"], 343 | # "brstcorr": ["brsttab"], 344 | # "dqicorr": ["bpixtab"], 345 | # "flatcorr": ["flatfile"], 346 | # "deadcorr": ["deadtab"], 347 | # "geocorr": ["geofile"], 348 | # "dgeocorr": ["dgeofile"], 349 | # "x1dcorr": ["xtractab", "disptab"], 350 | # "fluxcorr": ["fluxtab"], 351 | # "photcorr": ["imphttab"]} 352 | 353 | for switch_key in check_these.keys(): 354 | not_specified = [] 355 | if switches[switch_key] == "PERFORM": 356 | for reffile_key in check_these[switch_key]: 357 | if reffiles[reffile_key] == NOT_APPLICABLE: 358 | not_specified.append(reffile_key) 359 | if not_specified: 360 | switches[switch_key] = "SKIPPED" 361 | cosutil.printWarning("%s will be set to SKIPPED because" % 362 | switch_key.upper()) 363 | for (i, reffile_key) in enumerate(not_specified): 364 | keyword = reffile_key.upper() 365 | if i == 0: 366 | message = "%s = %s" % (keyword, NOT_APPLICABLE) 367 | else: 368 | message += ", %s = %s" % (keyword, NOT_APPLICABLE) 369 | cosutil.printContinuation(message) 370 | -------------------------------------------------------------------------------- /calcos/burst.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division # confidence high 2 | import math 3 | import numpy as np 4 | from .calcosparam import * 5 | from . import cosutil 6 | from . import ccos 7 | LARGE_BURST = -20 # flag value in bkg_counts 8 | SMALL_BURST = -10 # flag value in bkg_counts 9 | 10 | def burstFilter(time, y, dq, reffiles, info, burstfile=None, 11 | high_countrate=False): 12 | """Flag regions where the count rate is unreasonably high. 13 | 14 | For each burst interval detected, a flag will be set in the 15 | data quality column for each event within that time interval. 
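
    (Illustrative note, not part of this function: downstream code can
    exclude the flagged events with a bitwise test such as
    good = (dq & DQ_BURST) == 0, where DQ_BURST is the burst flag value
    imported from calcosparam.)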
16 |     This is based on cf_screen_burst.c for FUSE.
17 | 
18 |     Parameters
19 |     ----------
20 |     time: array_like
21 |         The time column in the events table.
22 | 
23 |     y: array_like
24 |         The column of cross dispersion locations of events.
25 | 
26 |     dq: array_like
27 |         The data quality column in the events table (updated in-place).
28 | 
29 |     reffiles: dictionary
30 |         Reference file names.
31 | 
32 |     info: dictionary
33 |         Header keywords and values.
34 | 
35 |     burstfile: str or None
36 |         Name of output text file for burst info.
37 | 
38 |     high_countrate: boolean
39 |         This flag can be set to True to force the time interval to be
40 |         short, as if the data were high count rate.
41 | 
42 |     Returns
43 |     -------
44 |     bursts: list of two-element lists, or None
45 |         List of [bad_start, bad_stop] intervals during which a burst
46 |         was detected (seconds since expstart).
47 |     """
48 | 
49 |     bursts = None
50 | 
51 |     if info["segment"][:3] != "FUV":
52 |         return None
53 | 
54 |     if info["exptime"] <= 0.:
55 |         cosutil.printWarning("burstFilter: Can't screen for bursts because"
56 |                              " exptime = %g" % info["exptime"])
57 |         return None
58 | 
59 |     cosutil.printMsg("Screen for bursts", VERBOSE)
60 | 
61 |     # Read parameters from burst reference table.
62 |     (median_n, delta_t, delta_t_high, median_dt, burst_min,
63 |      stdrej, source_frac, max_iter, high_rate) = \
64 |         getBurstParam(reffiles["brsttab"], info["segment"])
65 | 
66 |     # Read location of active area from baseline reference table and
67 |     # source and background locations from 1-D extraction reference table.
68 |     try:
69 |         (active_low, active_high, src_low, src_high,
70 |          bkg1_low, bkg1_high, bkg2_low, bkg2_high) = \
71 |             getRegionLocations(reffiles, info)
72 |     except:
73 |         cosutil.printWarning("Can't screen for bursts" \
74 |                              " due to missing row in reference table")
75 |         return None
76 | 
77 |     # rows of extraction aperture / background rows
78 |     bkgsf = float(src_high - src_low + 1) / \
79 |             float(bkg1_high - bkg1_low + bkg2_high - bkg2_low + 2)
80 | 
81 |     exptime = info["exptime"]
82 |     countrate = float(len(time)) / exptime
83 |     cosutil.printMsg("Total counts = %d, exposure time = %g s,"
84 |                      " count rate = %g c/s" % (len(time), exptime, countrate), VERBOSE)
85 | 
86 |     t0 = time[0]
87 |     last = time[len(time)-1]
88 |     if last <= t0:
89 |         return None
90 | 
91 |     if countrate > high_rate:
92 |         delta_t = delta_t_high
93 |         cosutil.printMsg("High count rate; time bin set to %g s" % delta_t,
94 |                          VERBOSE)
95 |     elif high_countrate:
96 |         delta_t = delta_t_high
97 |         cosutil.printMsg("time bin set to %g s" % delta_t, VERBOSE)
98 |     nbins = int(math.ceil((last - t0) / delta_t))
99 |     del countrate, last
100 |     if nbins <= 3:
101 |         cosutil.printWarning("There are so few time bins (%d) that "
102 |                              "burst detection is not practical." % nbins)
103 |         return None
104 | 
105 |     printParam(median_n, delta_t, median_dt, burst_min,
106 |                stdrej, source_frac, max_iter, high_rate,
107 |                active_low, active_high, src_low, src_high,
108 |                bkg1_low, bkg1_high, bkg2_low, bkg2_high)
109 | 
110 |     # istart & istop are arrays of indices for slicing up the time and y
111 |     # columns into intervals of length delta_t seconds.
112 |     istart = np.zeros(nbins, dtype=np.int32)
113 |     istop = np.zeros(nbins, dtype=np.int32)
114 |     bkg_counts = np.zeros(nbins, dtype=np.int32)
115 |     src_counts = np.zeros(nbins, dtype=np.int32)
116 | 
117 |     # Find istart & istop for each delta_t interval.
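    # Illustrative pure-Python equivalent of the C call below (a sketch,
    # not the actual ccos implementation), assuming the TIME column is
    # sorted, as it is for TIME-TAG data:
    #
    #     edges = t0 + delta_t * np.arange(nbins + 1)
    #     istart[:] = np.searchsorted(time, edges[:-1])
    #     istop[:] = np.searchsorted(time, edges[1:])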
118 | ccos.getstartstop(time, istart, istop, delta_t) 119 | 120 | # find the counts within the background and source regions, for each 121 | # delta_t interval. 122 | ccos.getbkgcounts(y, dq, istart, istop, 123 | bkg_counts, src_counts, 124 | bkg1_low, bkg1_high, bkg2_low, bkg2_high, 125 | src_low, src_high, bkgsf) 126 | 127 | bkg_counts_save = bkg_counts.copy() 128 | 129 | # Find the median of the values in the background counts array. 130 | index = np.argsort(bkg_counts) 131 | mid = nbins // 2 132 | median = bkg_counts[index[mid]] 133 | del index 134 | if median < 1: 135 | cosutil.printWarning("median = %d is unreasonable, reset to 1." 136 | % median, VERBOSE) 137 | median = 1 138 | cutoff = median_n * median 139 | cosutil.printMsg("Initial check for large bursts,", VERBOSE) 140 | cosutil.printMsg(" median = %d, cutoff = %g; time interval = %.1f" 141 | % (median, cutoff, delta_t), VERBOSE) 142 | # Identify intervals where the counts are greater than median_n * median. 143 | b1_flags = bkg_counts > cutoff 144 | index = np.nonzero(b1_flags)[0] 145 | nreject = len(index) 146 | for k in range(nreject): 147 | i = index[k] 148 | cosutil.printMsg("large burst at time %d, counts = %d" 149 | % (int(time[istart[i]] + delta_t/2.), bkg_counts[i]), 150 | VERBOSE) 151 | # Flag all events in the interval. 152 | dq[istart[i]:istop[i]] |= DQ_BURST 153 | if nreject > 0: 154 | # Set bkg_counts to a negative value for each burst. 155 | bkg_counts = np.where(b1_flags, LARGE_BURST, bkg_counts) 156 | cosutil.printMsg("%d large bursts detected." % nreject, VERBOSE) 157 | else: 158 | cosutil.printMsg("No large burst detected.", VERBOSE) 159 | del b1_flags 160 | 161 | # Search for smaller bursts. 162 | cosutil.printMsg("Check for smaller bursts;", VERBOSE) 163 | cosutil.printMsg(" median filter over time = %d s" % median_dt, VERBOSE) 164 | smallest_burst = burst_min * delta_t 165 | half_block = int(round(median_dt / delta_t)) // 2 166 | ccos.smallerbursts(time, dq, 167 | istart, istop, bkg_counts, src_counts, 168 | delta_t, smallest_burst, stdrej, source_frac, 169 | half_block, max_iter, 170 | LARGE_BURST, SMALL_BURST, DQ_BURST, 171 | cosutil.checkVerbosity(VERBOSE)) 172 | 173 | if burstfile is not None: 174 | # Write the time (middle of the interval), the background counts, 175 | # and whether the interval was flagged as a burst, large or small. 176 | fd = open(burstfile, "a") 177 | for i in range(nbins): 178 | t = t0 + (i+0.5) * delta_t 179 | fd.write("%.3f %d %d %d\n" % 180 | (t, bkg_counts_save[i], 181 | bkg_counts[i] == LARGE_BURST, 182 | bkg_counts[i] == SMALL_BURST)) 183 | fd.close() 184 | 185 | # Construct the list of start, stop intervals containing bursts. 186 | bursts = extractIntervals(time, istart, bkg_counts) 187 | 188 | return bursts 189 | 190 | def getBurstParam(brsttab, segment): 191 | """Read parameters from burst reference table. 192 | 193 | Parameters 194 | ---------- 195 | brsttab: str 196 | The name of the burst reference table. 197 | 198 | segment: str {"FUVA", "FUVB"} 199 | FUV segment name. 200 | 201 | Returns 202 | ------- 203 | tuple 204 | The parameters read from the brsttab. 
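
    The caller is expected to unpack the result in this order, e.g.:

        params = getBurstParam(reffiles["brsttab"], info["segment"])
        (median_n, delta_t, delta_t_high, median_dt, burst_min,
         stdrej, source_frac, max_iter, high_rate) = params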
205 |     """
206 | 
207 |     burst_info = cosutil.getTable(brsttab, filter={"segment": segment},
208 |                                   exactly_one=True)
209 | 
210 |     median_n = burst_info.field("median_n")[0]
211 |     delta_t = burst_info.field("delta_t")[0]
212 |     delta_t_high = burst_info.field("delta_t_high")[0]
213 |     median_dt = burst_info.field("median_dt")[0]
214 |     burst_min = burst_info.field("burst_min")[0]
215 |     stdrej = burst_info.field("stdrej")[0]
216 |     source_frac = burst_info.field("source_frac")[0]
217 |     max_iter = burst_info.field("max_iter")[0]
218 |     high_rate = burst_info.field("high_rate")[0]
219 | 
220 |     return (median_n, delta_t, delta_t_high, median_dt, burst_min,
221 |             stdrej, source_frac, max_iter, high_rate)
222 | 
223 | def getRegionLocations(reffiles, info):
224 |     """Read region locations from reference tables.
225 | 
226 |     The lower and upper limits of the active area will be read from the
227 |     baseline reference table. The location and height of the source
228 |     extraction region will be read from the 1-D extraction parameters
229 |     table, and these will be used to define the source and background
230 |     regions.
231 | 
232 |     Parameters
233 |     ----------
234 |     reffiles: dictionary
235 |         Reference file names.
236 | 
237 |     info: dictionary
238 |         Header keywords and values.
239 | 
240 |     Returns
241 |     -------
242 |     tuple
243 |         A tuple with the lower and upper limits (in the cross-dispersion
244 |         direction) of the active area, the source extraction region, and
245 |         the two background regions.
246 |     """
247 | 
248 |     (active_low, active_high, active_left, active_right) = \
249 |         cosutil.activeArea(info["segment"], reffiles["brftab"])
250 | 
251 |     filter = {"segment": info["segment"],
252 |               "opt_elem": info["opt_elem"],
253 |               "cenwave": info["cenwave"],
254 |               "aperture": info["aperture"]}
255 |     xtract_info = cosutil.getTable(reffiles["xtractab"],
256 |                                    filter, exactly_one=True)
257 |     b_spec = xtract_info.field("b_spec")[0]
258 |     b_spec = int(round(b_spec))
259 |     height = xtract_info.field("height")[0]
260 | 
261 |     src_low = b_spec - height // 2
262 |     src_high = b_spec + height // 2
263 | 
264 |     bkg1_low = max(0, active_low)
265 |     bkg2_high = min(1023, active_high)
266 |     bkg1_high = src_low - height // 4
267 |     bkg2_low = src_high + height // 4
268 | 
269 |     if info["tagflash"]:
270 |         # Reset bkg2_low to a point above the wavecal spectrum.
271 |         filter["aperture"] = "WCA"
272 |         xtract_info = cosutil.getTable(reffiles["xtractab"],
273 |                                        filter, exactly_one=True)
274 |         b_spec = xtract_info.field("b_spec")[0]
275 |         b_spec = int(round(b_spec))
276 |         bkg2_low = b_spec + height * 3 // 4
277 | 
278 |     return (active_low, active_high, src_low, src_high,
279 |             bkg1_low, bkg1_high, bkg2_low, bkg2_high)
280 | 
281 | def extractIntervals(time, istart, bkg_counts):
282 |     """Construct list of bad time intervals.
283 | 
284 |     Parameters
285 |     ----------
286 |     time: array_like
287 |         Time column from events table.
288 | 
289 |     istart: array_like
290 |         Array of indices; time[istart[i]] is the time at the start of bin i.
291 | 
292 |     bkg_counts: array_like
293 |         Negative values are used to flag bursts (otherwise, this is the
294 |         array of background counts within each time bin).
295 | 
296 |     Returns
297 |     -------
298 |     list of two-element lists, or None
299 |         List of [bad_start, bad_stop] intervals during which a burst was
300 |         detected (seconds since expstart). The function value will be
301 |         None if no burst was detected.
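
        For example (illustrative values), a single burst lasting from
        105 s to 120 s after expstart would be returned as
        [[105.0, 120.0]].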
302 | """ 303 | 304 | if bkg_counts.min() >= 0: 305 | return None 306 | 307 | bursts = [] 308 | nbins = len(bkg_counts) 309 | 310 | in_bad_interval = False 311 | for i in range(nbins): 312 | if bkg_counts[i] < 0 and not in_bad_interval: 313 | in_bad_interval = True 314 | t1 = time[istart[i]] # time at start of current bin 315 | elif bkg_counts[i] >= 0 and in_bad_interval: 316 | in_bad_interval = False 317 | t2 = time[istart[i]] # time at end of previous bin 318 | bursts.append([t1, t2]) 319 | 320 | if in_bad_interval: 321 | bursts.append([t1, time[-1]]) 322 | 323 | return bursts 324 | 325 | def printParam(median_n, delta_t, median_dt, burst_min, 326 | stdrej, source_frac, max_iter, high_rate, 327 | active_low, active_high, src_low, src_high, 328 | bkg1_low, bkg1_high, bkg2_low, bkg2_high): 329 | """Print the parameters that will be used.""" 330 | 331 | if not cosutil.checkVerbosity(VERY_VERBOSE): 332 | return 333 | 334 | cosutil.printMsg("The burst parameters are:", VERY_VERBOSE) 335 | 336 | cosutil.printMsg( 337 | "reject counts higher than %.1f times the global median" % median_n, 338 | VERY_VERBOSE) 339 | 340 | cosutil.printMsg( 341 | "%.1f counts/s is considered to be high count rate" % high_rate, 342 | VERY_VERBOSE) 343 | 344 | cosutil.printMsg( 345 | "time interval for binning events = %.1f s" % delta_t, VERY_VERBOSE) 346 | 347 | cosutil.printMsg( 348 | "time interval for median filter = %.1f s" % median_dt, VERY_VERBOSE) 349 | 350 | cosutil.printMsg( 351 | "%.1f counts/s is minimum count rate that can be regarded as a burst" % 352 | burst_min, VERY_VERBOSE) 353 | 354 | cosutil.printMsg( 355 | "reject counts greater than %.1f standard deviations" % stdrej, VERY_VERBOSE) 356 | 357 | cosutil.printMsg( 358 | "burst must exceed %.3f of the source count rate before it is" % source_frac, 359 | VERY_VERBOSE) 360 | 361 | cosutil.printMsg(" considered to be significant", VERY_VERBOSE) 362 | 363 | cosutil.printMsg("maximum number of iterations = %d" % max_iter, 364 | VERY_VERBOSE) 365 | 366 | cosutil.printMsg( 367 | "active area is rows %d to %d inclusive" % (active_low, active_high), 368 | VERY_VERBOSE) 369 | 370 | cosutil.printMsg( 371 | "source extraction region is rows %d to %d inclusive" % (src_low, src_high), 372 | VERY_VERBOSE) 373 | 374 | cosutil.printMsg( 375 | "background regions are %d to %d and %d to %d inclusive" % 376 | (bkg1_low, bkg1_high, bkg2_low, bkg2_high), VERY_VERBOSE) 377 | -------------------------------------------------------------------------------- /calcos/spwcs.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import math 3 | import astropy.io.fits as fits 4 | from .calcosparam import * 5 | from . import cosutil 6 | 7 | # These are the column numbers (one indexed) for the XFULL and YFULL 8 | # columns in a corrtag file. See SpWcsCorrtag.__init__(). 9 | Xi = 7 10 | Eta = 8 11 | 12 | class SpWCS(object): 13 | """Base class for spectroscopic coordinate parameters. 14 | 15 | usage: 16 | 17 | import spwcs 18 | 19 | wcs = spwcs.SpWcsCorrtag(corrtag_filename, info, helcorr, 20 | spwcstab, xtractab) 21 | flag = wcs.writeWCSKeywords() 22 | 23 | wcs = spwcs.SpWcsImage(image_filename, info, helcorr, 24 | spwcstab, xtractab) 25 | flag = wcs.writeWCSKeywords() 26 | 27 | Parameters 28 | ---------- 29 | filename: str 30 | Name of file within which keywords will be updated. 31 | 32 | info: dictionary 33 | Header keywords and values. 
34 | 35 | helcorr: str 36 | PERFORM or COMPLETE if heliocentric correction should be applied 37 | to the wavelengths (CRVAL1). 38 | 39 | spwcstab: str 40 | Name of reference table containing spectroscopic WCS parameters. 41 | 42 | xtractab: str 43 | Name of reference table for extraction parameters. 44 | """ 45 | 46 | def __init__(self, filename, info, helcorr, spwcstab, xtractab): 47 | """Constructor.""" 48 | 49 | self.filename = filename 50 | self.info = info 51 | self.helcorr = helcorr 52 | self.spwcstab = spwcstab 53 | self.xtractab = xtractab 54 | 55 | # These four will be assigned in a subclass. For the dictionaries, 56 | # the key is a generic form for the keyword (e.g. crval1), and the 57 | # value is the actual keyword (e.g. crval1, crval1a, tcrvl7, tcrv7a) 58 | # except that it does not include the trailing letter for the 59 | # alternate WCS. The actual keyword will either be taken directly 60 | # from primary_key_dict, or it will be constructed by appending the 61 | # alternate WCS letter to alternate_key_dict. 62 | self.extension = 1 # default 63 | self.keywords = [] 64 | self.primary_key_dict = {} 65 | self.alternate_key_dict = {} # does not include the letter (A, B, C) 66 | 67 | self.detector = info["detector"] 68 | self.ra_aper = info["ra_aper"] 69 | self.dec_aper = info["dec_aper"] 70 | self.pa_aper = info["pa_aper"] 71 | self.x_offset = info["x_offset"] 72 | self.v_helio = 0. # assigned later 73 | 74 | def writeWCSKeywords(self): 75 | """Update keywords in-place in the extension header. 76 | 77 | Returns 78 | ------- 79 | boolean 80 | True if keywords were actually written. False if the file is 81 | a wavecal or an FCA exposure. 82 | """ 83 | 84 | if self.detector == "FUV": 85 | segment_list = [self.info["segment"]] 86 | else: 87 | # "primary" refers to the primary axis description 88 | segment_list = ["primary", "NUVA", "NUVB", "NUVC"] 89 | 90 | aperture = self.info["aperture"] 91 | if aperture not in ["PSA", "BOA"]: 92 | return False 93 | 94 | fd = fits.open(self.filename, mode="update") 95 | hdr = fd[self.extension].header 96 | self.v_helio = hdr.get("v_helio", 0.) 97 | 98 | # Delete some redundant or unnecessary keywords. 99 | self.deleteKeywords(hdr) 100 | 101 | for segment in segment_list: 102 | if self.detector == "FUV": 103 | alt = "" 104 | else: 105 | if segment == "primary": 106 | segment = "NUVB" # use NUVB for the primary WCS 107 | alt = "" 108 | else: 109 | alt = segment[-1] 110 | 111 | filter = {"opt_elem": self.info["opt_elem"], 112 | "cenwave": self.info["cenwave"], 113 | "segment": segment, 114 | "aperture": aperture} 115 | wcs_info = cosutil.getTable(self.spwcstab, filter, 116 | exactly_one=True) 117 | 118 | wcs_dict = self.computeKeywordValues(wcs_info[0], alt) 119 | self.addKeywords(hdr, wcs_dict) 120 | 121 | fd.close() 122 | return True 123 | 124 | def computeKeywordValues(self, wcs_info, alt): 125 | """Defined in a subclass.""" 126 | pass 127 | 128 | def computeCrpix2(self, wcs_info): 129 | """Determine the value of the crpix2 keyword. 130 | 131 | crpix2 should be the location of the spectrum, at the point where 132 | it crosses the middle (crpix1) of the detector. This depends on 133 | the segment or stripe. 134 | 135 | Parameters 136 | ---------- 137 | wcs_info: array_like 138 | One row from the spwcstab. 139 | 140 | Returns 141 | ------- 142 | crpix2: float 143 | The value for the crpix2 keyword (one indexed). 
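
        For example (illustrative numbers only): with b_spec = 400.0 and
        slope = 0.001 from the xtractab, and crpix1 = 8192.0 from the
        spwcstab, middle is 8191.0, so crpix2 = 400.0 + 8191.0 * 0.001
        + 1.0 = 409.191.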
144 |         """
145 | 
146 |         segment = wcs_info.field("segment")
147 |         filter = {"segment": segment,
148 |                   "opt_elem": self.info["opt_elem"],
149 |                   "cenwave": self.info["cenwave"],
150 |                   "aperture": self.info["aperture"]}
151 |         xtract_info = cosutil.getTable(self.xtractab, filter,
152 |                                        exactly_one=True)
153 |         slope = xtract_info.field("slope")[0]
154 |         b_spec = xtract_info.field("b_spec")[0]
155 | 
156 |         middle = wcs_info.field("crpix1") - 1.  # zero indexing
157 |         crpix2 = b_spec + middle * slope
158 | 
159 |         return (crpix2 + 1.)  # one indexing
160 | 
161 |     def makeKeyword(self, generic_keyword, alt):
162 |         """Construct the actual keyword name.
163 | 
164 |         Parameters
165 |         ----------
166 |         generic_keyword: str
167 |             Generic WCS keyword (e.g. ctype1)
168 | 
169 |         alt: str
170 |             Alternate WCS letter, or "" for the primary WCS
171 | 
172 |         Returns
173 |         -------
174 |         str
175 |             Actual keyword to use in header (e.g. ctype1a, tcty7a)
176 |         """
177 | 
178 |         if alt and alt != " ":
179 |             keyword = self.alternate_key_dict[generic_keyword] + alt
180 |         else:
181 |             keyword = self.primary_key_dict[generic_keyword]
182 | 
183 |         return keyword
184 | 
185 |     def doHelcorr(self, crval1):
186 |         """Apply heliocentric correction (if helcorr is PERFORM or COMPLETE) to crval1.
187 | 
188 |         Parameters
189 |         ----------
190 |         crval1: float
191 |             Wavelength at the reference pixel, as read from the spwcstab
192 | 
193 |         Returns
194 |         -------
195 |         float
196 |             Crval1 with heliocentric velocity correction applied
197 |         """
198 | 
199 |         if self.helcorr == "PERFORM" or self.helcorr == "COMPLETE":
200 |             crval1 -= (crval1 * self.v_helio / SPEED_OF_LIGHT)
201 |         return crval1
202 | 
203 |     def deleteKeywords(self, hdr):
204 |         """Defined in a subclass."""
205 |         pass
206 | 
207 |     def addKeywords(self, hdr, wcs_dict):
208 |         """Add (or update) WCS keywords in the header.
209 | 
210 |         Parameters
211 |         ----------
212 |         hdr: pyfits Header object
213 |             Header to be updated in-place.
214 | 
215 |         wcs_dict: dictionary
216 |             Key is the generic WCS keyword (lower case), value is a tuple
217 |             of the actual keyword (lower case) and the value to assign to
218 |             that keyword in the header
219 |         """
220 | 
221 |         for generic_keyword in self.keywords:
222 |             (actual_keyword, value) = wcs_dict[generic_keyword]
223 |             if generic_keyword == "wcsaxes":
224 |                 # It is a FITS requirement that WCSAXES precede all other
225 |                 # WCS keywords in a header.
226 |                 if actual_keyword in hdr:
227 |                     hdr[actual_keyword] = value
228 |                 else:
229 |                     # GCOUNT is the last of the set of keywords that must be
230 |                     # present at the beginning of an extension header.
231 | if actual_keyword == "wcsaxes": 232 | hdr.set(actual_keyword, value, after="gcount") # xxx 233 | # xxx hdr.insert("gcount", (actual_keyword, value), 234 | # xxx after=True) 235 | elif actual_keyword == "wcsaxesa": 236 | hdr.set(actual_keyword, value, after="wcsaxes") # xxx 237 | # xxx hdr.insert("wcsaxes", (actual_keyword, value), 238 | # xxx after=True) 239 | elif actual_keyword == "wcsaxesb": 240 | hdr.set(actual_keyword, value, after="wcsaxesa") # xxx 241 | # xxx hdr.insert("wcsaxesa", (actual_keyword, value), 242 | # xxx after=True) 243 | elif actual_keyword == "wcsaxesc": 244 | hdr.set(actual_keyword, value, after="wcsaxesb") # xxx 245 | # xxx hdr.insert("wcsaxesb", (actual_keyword, value), 246 | # xxx after=True) 247 | else: # don't really expect anything else 248 | hdr.set(actual_keyword, value, after="gcount") # xxx 249 | # xxx hdr.insert("gcount", (actual_keyword, value), 250 | # xxx after=True) 251 | else: 252 | hdr[actual_keyword] = value 253 | 254 | class SpWcsImage(SpWCS): 255 | """Spectroscopic WCS for image data. 256 | 257 | Parameters 258 | ---------- 259 | filename: str 260 | Name of image file. 261 | 262 | info: dictionary 263 | Header keywords and values. 264 | 265 | helcorr: str 266 | PERFORM or COMPLETE if heliocentric correction should be applied 267 | to the wavelengths (CRVAL1). 268 | 269 | spwcstab: str 270 | Name of reference table containing spectroscopic WCS parameters. 271 | 272 | xtractab: str 273 | Name of reference table for extraction parameters. 274 | """ 275 | 276 | def __init__(self, filename, info, helcorr, spwcstab, xtractab): 277 | """Constructor.""" 278 | 279 | SpWCS.__init__(self, filename, info, helcorr, spwcstab, xtractab) 280 | 281 | self.extension = ("sci",1) 282 | 283 | # The WCS keywords that we'll update in the header. This is in 284 | # a list (copied to a dictionary below) so that the order will be 285 | # well defined, in case the header doesn't have all these keywords. 286 | self.keywords = ["wcsaxes", 287 | "ctype1", "ctype2", "ctype3", 288 | "crpix1", "crpix2", 289 | "crval1", "crval2", "crval3", 290 | "pc1_1", "pc1_2", "pc2_1", "pc2_2", "pc3_1", "pc3_2", 291 | "cdelt1", "cdelt2", "cdelt3", 292 | "cunit1", 293 | "pv1_0", "pv1_1", "pv1_2", "pv1_6"] 294 | 295 | # Keywords for an image array. 296 | for key in self.keywords: 297 | self.primary_key_dict[key] = key 298 | self.alternate_key_dict[key] = key 299 | 300 | def computeKeywordValues(self, wcs_info, alt): 301 | """Determine the values of the WCS keywords. 302 | 303 | Parameters 304 | ---------- 305 | wcs_info: pyfits record object 306 | One row from the spwcstab 307 | 308 | alt: str 309 | Alternate WCS letter, or "" for the primary WCS 310 | 311 | Returns 312 | ------- 313 | dictionary 314 | Key is the generic WCS keyword (but lower case), value is a 315 | tuple of the actual keyword (lower case) and the value to 316 | assign to that keyword in the header 317 | """ 318 | 319 | cos_pa = math.cos(self.pa_aper * math.pi / 180.) 320 | sin_pa = math.sin(self.pa_aper * math.pi / 180.) 321 | pc2_1 = cos_pa 322 | pc2_2 = -sin_pa 323 | pc3_1 = sin_pa 324 | pc3_2 = cos_pa 325 | 326 | # The key will be a generic keyword, and the value will be a tuple 327 | # with the actual keyword and the value to assign for that keyword. 
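        # Illustrative numbers (hypothetical observation): for
        # pa_aper = 30.0, cos_pa is about 0.866 and sin_pa is 0.5, so
        # pc2_1 ~ 0.866, pc2_2 = -0.5, pc3_1 = 0.5, pc3_2 ~ 0.866.  For
        # this class the primary keyword equals the generic one, so the
        # entry would be, e.g., wcs_dict["pc2_1"] = ("pc2_1", 0.866).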
328 | wcs_dict = {} 329 | 330 | wcs_dict["wcsaxes"] = (self.makeKeyword("wcsaxes", alt), 3) 331 | 332 | wcs_dict["ctype1"] = (self.makeKeyword("ctype1", alt), 333 | wcs_info.field("ctype1")) 334 | wcs_dict["ctype2"] = (self.makeKeyword("ctype2", alt), "RA---TAN") 335 | wcs_dict["ctype3"] = (self.makeKeyword("ctype3", alt), "DEC--TAN") 336 | 337 | crval1 = self.doHelcorr(wcs_info.field("crval1")) 338 | wcs_dict["crval1"] = (self.makeKeyword("crval1", alt), crval1) 339 | wcs_dict["crval2"] = (self.makeKeyword("crval2", alt), self.ra_aper) 340 | wcs_dict["crval3"] = (self.makeKeyword("crval3", alt), self.dec_aper) 341 | 342 | wcs_dict["cunit1"] = (self.makeKeyword("cunit1", alt), "angstrom") 343 | 344 | wcs_dict["crpix1"] = (self.makeKeyword("crpix1", alt), 345 | wcs_info.field("crpix1") + self.x_offset) 346 | wcs_dict["crpix2"] = (self.makeKeyword("crpix2", alt), 347 | self.computeCrpix2(wcs_info)) 348 | 349 | wcs_dict["pc1_1"] = (self.makeKeyword("pc1_1", alt), 1.) 350 | wcs_dict["pc1_2"] = (self.makeKeyword("pc1_2", alt), 0.) 351 | wcs_dict["pc2_1"] = (self.makeKeyword("pc2_1", alt), pc2_1) 352 | wcs_dict["pc2_2"] = (self.makeKeyword("pc2_2", alt), pc2_2) 353 | wcs_dict["pc3_1"] = (self.makeKeyword("pc3_1", alt), pc3_1) 354 | wcs_dict["pc3_2"] = (self.makeKeyword("pc3_2", alt), pc3_2) 355 | 356 | wcs_dict["cdelt1"] = (self.makeKeyword("cdelt1", alt), 357 | wcs_info.field("cdelt1")) 358 | wcs_dict["cdelt2"] = (self.makeKeyword("cdelt2", alt), 359 | wcs_info.field("cdelt2")) 360 | wcs_dict["cdelt3"] = (self.makeKeyword("cdelt3", alt), 361 | wcs_info.field("cdelt3")) 362 | 363 | wcs_dict["pv1_0"] = (self.makeKeyword("pv1_0", alt), 364 | wcs_info.field("g")) 365 | wcs_dict["pv1_1"] = (self.makeKeyword("pv1_1", alt), 366 | wcs_info.field("sporder")) 367 | wcs_dict["pv1_2"] = (self.makeKeyword("pv1_2", alt), 368 | wcs_info.field("alpha")) 369 | wcs_dict["pv1_6"] = (self.makeKeyword("pv1_6", alt), 370 | wcs_info.field("theta")) 371 | 372 | return wcs_dict 373 | 374 | def deleteKeywords(self, hdr): 375 | """Delete some keywords (if they're present) in the header.""" 376 | 377 | keyword_list = ["talen2", "talen3", "cunit2", 378 | "cd1_1", "cd1_2", "cd2_1", "cd2_2"] 379 | for keyword in keyword_list: 380 | if keyword in hdr: 381 | del hdr[keyword] 382 | 383 | # The following keywords could be left around if the input file 384 | # was corrtag rather than raw. 385 | keyword_list = ["tctyp7", "tctyp8", "tcrpx7", "tcrpx8", 386 | "tcrvl7", "tcrvl8", "tcdlt7", "tcdlt8", 387 | "tpc7_7", "tpc7_8", "tpc8_7", "tpc8_8", 388 | "tcuni7", "tcuni8", 389 | "tpv7_0", "tpv7_1", "tpv7_2", "tpv7_6"] 390 | if self.detector == "FUV": 391 | for keyword in keyword_list: 392 | if keyword in hdr: 393 | del hdr[keyword] 394 | else: 395 | for alt in ["", "a", "b", "c"]: 396 | for key in keyword_list: 397 | keyword = key + alt 398 | if keyword in hdr: 399 | del hdr[keyword] 400 | more_keywords = ["tcty7", "tcty8", "tcrp7", "tcrp8", 401 | "tcrv7", "tcrv8", "tcde7", "tcde8", 402 | "tcun7", "tcun8"] 403 | # These keywords are only used for an alternate WCS, so drop "". 404 | for alt in ["a", "b", "c"]: 405 | for key in more_keywords: 406 | keyword = key + alt 407 | if keyword in hdr: 408 | del hdr[keyword] 409 | 410 | class SpWcsCorrtag(SpWCS): 411 | """Spectroscopic WCS for pixel list (corrtag) data. 412 | 413 | Parameters 414 | ---------- 415 | filename: str 416 | Name of corrtag file. 417 | 418 | info: dictionary 419 | Header keywords and values. 
420 | 421 | helcorr: str 422 | PERFORM or COMPLETE if heliocentric correction should 423 | be applied to the wavelengths (CRVAL1). 424 | 425 | spwcstab: str 426 | Name of reference table containing spectroscopic WCS 427 | parameters. 428 | 429 | xtractab: str 430 | Name of reference table for extraction parameters. 431 | """ 432 | 433 | def __init__(self, filename, info, helcorr, spwcstab, xtractab): 434 | """Constructor.""" 435 | 436 | SpWCS.__init__(self, filename, info, helcorr, spwcstab, xtractab) 437 | 438 | self.extension = ("events",1) 439 | 440 | # These are the generic names for the keywords that we'll update in 441 | # the header; the actual names are listed below (in the same order!) 442 | # separately for primary and alternate coordinate axes. 443 | # This is in a list (copied to a dictionary below) so that the order 444 | # will be well defined, in case the header doesn't have all these 445 | # keywords. 446 | self.keywords = ["ctype1", "ctype2", 447 | "crpix1", "crpix2", 448 | "crval1", "crval2", 449 | "pc1_1", 450 | "pc1_2", 451 | "pc2_1", 452 | "pc2_2", 453 | "cdelt1", "cdelt2", 454 | "cunit1", "cunit2", 455 | "pv1_0", "pv1_1", "pv1_2", "pv1_6"] 456 | # These are the actual keywords for the primary coordinate system. 457 | primary_keywords = ["tctyp%d" % Xi, "tctyp%d" % Eta, 458 | "tcrpx%d" % Xi, "tcrpx%d" % Eta, 459 | "tcrvl%d" % Xi, "tcrvl%d" % Eta, 460 | "tpc%d_%d" % (Xi, Xi), 461 | "tpc%d_%d" % (Xi, Eta), 462 | "tpc%d_%d" % (Eta, Xi), 463 | "tpc%d_%d" % (Eta, Eta), 464 | "tcdlt%d" % Xi, "tcdlt%d" % Eta, 465 | "tcuni%d" % Xi, "tcuni%d" % Eta, 466 | "tpv%d_0" % Xi, 467 | "tpv%d_1" % Xi, 468 | "tpv%d_2" % Xi, 469 | "tpv%d_6" % Xi] 470 | # These are the actual keywords for an alternate coordinate system, 471 | # except that the letter (A, B, C) indicating the alternate system 472 | # is not included here. 473 | alternate_keywords = ["tcty%d" % Xi, "tcty%d" % Eta, 474 | "tcrp%d" % Xi, "tcrp%d" % Eta, 475 | "tcrv%d" % Xi, "tcrv%d" % Eta, 476 | "tpc%d_%d" % (Xi, Xi), 477 | "tpc%d_%d" % (Xi, Eta), 478 | "tpc%d_%d" % (Eta, Xi), 479 | "tpc%d_%d" % (Eta, Eta), 480 | "tcde%d" % Xi, "tcde%d" % Eta, 481 | "tcun%d" % Xi, "tcun%d" % Eta, 482 | "tpv%d_0" % Xi, 483 | "tpv%d_1" % Xi, 484 | "tpv%d_2" % Xi, 485 | "tpv%d_6" % Xi] 486 | 487 | # Copy keywords from the lists to dictionaries. 488 | for i in range(len(self.keywords)): 489 | key = self.keywords[i] 490 | self.primary_key_dict[key] = primary_keywords[i] 491 | self.alternate_key_dict[key] = alternate_keywords[i] 492 | 493 | def computeKeywordValues(self, wcs_info, alt): 494 | """Determine the values of the WCS keywords. 495 | 496 | wcs_info: pyfits record object 497 | One row from the spwcstab 498 | 499 | alt: str 500 | Alternate WCS letter, or "" for the primary WCS 501 | 502 | dictionary 503 | Key is the generic WCS keyword (but lower case), value is a 504 | tuple of the actual keyword (lower case) and the value to 505 | assign to that keyword in the header 506 | """ 507 | 508 | wcs_dict = {} 509 | 510 | wcs_dict["ctype1"] = (self.makeKeyword("ctype1", alt), 511 | wcs_info.field("ctype1")) 512 | wcs_dict["ctype2"] = (self.makeKeyword("ctype2", alt), "ANGLE") 513 | 514 | crval1 = self.doHelcorr(wcs_info.field("crval1")) 515 | wcs_dict["crval1"] = (self.makeKeyword("crval1", alt), crval1) 516 | wcs_dict["crval2"] = (self.makeKeyword("crval2", alt), 0.) 
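        # For reference (derived from the keyword lists in __init__):
        # with Xi = 7, makeKeyword("crval1", "") yields "tcrvl7" for the
        # primary WCS, and makeKeyword("crval1", "a") yields "tcrv7a"
        # for the NUVA alternate WCS.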
517 | 518 | wcs_dict["cunit1"] = (self.makeKeyword("cunit1", alt), "angstrom") 519 | wcs_dict["cunit2"] = (self.makeKeyword("cunit2", alt), "deg") 520 | 521 | wcs_dict["crpix1"] = (self.makeKeyword("crpix1", alt), 522 | wcs_info.field("crpix1") + self.x_offset) 523 | wcs_dict["crpix2"] = (self.makeKeyword("crpix2", alt), 524 | self.computeCrpix2(wcs_info)) 525 | 526 | wcs_dict["pc1_1"] = (self.makeKeyword("pc1_1", alt), 1.) 527 | wcs_dict["pc1_2"] = (self.makeKeyword("pc1_2", alt), 0.) 528 | wcs_dict["pc2_1"] = (self.makeKeyword("pc2_1", alt), 0.) 529 | wcs_dict["pc2_2"] = (self.makeKeyword("pc2_2", alt), 1.) 530 | 531 | wcs_dict["cdelt1"] = (self.makeKeyword("cdelt1", alt), 532 | wcs_info.field("cdelt1")) 533 | # note that the value is cdelt3 from the table, which is the Y axis 534 | wcs_dict["cdelt2"] = (self.makeKeyword("cdelt2", alt), 535 | wcs_info.field("cdelt3")) 536 | 537 | wcs_dict["pv1_0"] = (self.makeKeyword("pv1_0", alt), 538 | wcs_info.field("g")) 539 | wcs_dict["pv1_1"] = (self.makeKeyword("pv1_1", alt), 540 | wcs_info.field("sporder")) 541 | wcs_dict["pv1_2"] = (self.makeKeyword("pv1_2", alt), 542 | wcs_info.field("alpha")) 543 | wcs_dict["pv1_6"] = (self.makeKeyword("pv1_6", alt), 544 | wcs_info.field("theta")) 545 | 546 | return wcs_dict 547 | 548 | def deleteKeywords(self, hdr): 549 | """Delete some keywords (if they're present) in the header.""" 550 | 551 | keyword_list = ["tctyp2", "tctyp3", "tcrvl2", "tcrvl3", 552 | "tcdlt2", "tcdlt3", "tcrpx2", "tcrpx3", 553 | "tc2_2", "tc2_3", "tc3_2", "tc3_3", 554 | "tcuni2", "tcuni3"] 555 | 556 | for keyword in keyword_list: 557 | if keyword in hdr: 558 | del hdr[keyword] 559 | --------------------------------------------------------------------------------