├── docs
│   ├── source
│   │   ├── _static
│   │   │   ├── .gitignore
│   │   │   ├── stsci_logo.png
│   │   │   └── ci_watson.css
│   │   ├── ref_api.rst
│   │   ├── bigdata.rst
│   │   ├── scripts.rst
│   │   ├── index.rst
│   │   ├── plugin.rst
│   │   └── conf.py
│   ├── Makefile
│   └── make.bat
├── ci_watson
│   ├── __init__.py
│   ├── jwst_helpers.py
│   ├── resource_tracker.py
│   ├── plugin.py
│   ├── hst_helpers.py
│   ├── scripts
│   │   └── okify_regtests.py
│   └── artifactory_helpers.py
├── MANIFEST.in
├── .github
│   ├── workflows
│   │   ├── build.yml
│   │   └── ci_workflows.yml
│   └── dependabot.yml
├── .readthedocs.yaml
├── conftest.py
├── tests
│   ├── test_markers.py
│   ├── test_hst_helpers.py
│   ├── test_fixtures.py
│   ├── test_resource_tracker.py
│   └── test_artifactory_helpers.py
├── README.md
├── CHANGES.rst
├── LICENSE.md
├── tox.ini
├── .gitignore
├── pyproject.toml
└── CODE_OF_CONDUCT.md

--------------------------------------------------------------------------------
/docs/source/_static/.gitignore:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/docs/source/_static/stsci_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/spacetelescope/ci_watson/main/docs/source/_static/stsci_logo.png
--------------------------------------------------------------------------------
/ci_watson/__init__.py:
--------------------------------------------------------------------------------
1 | try:
2 |     from .version import version as __version__
3 | except ImportError:
4 |     __version__ = ''
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include LICENSE.md
3 | include setup.cfg
4 | include pyproject.toml
5 | recursive-include docs *
6 | 
7 | prune build
8 | prune docs/build
9 | 
10 | global-exclude *.pyc *.o
--------------------------------------------------------------------------------
/docs/source/_static/ci_watson.css:
--------------------------------------------------------------------------------
1 | /* Main page overview cards */
2 | 
3 | .sd-card .sd-card-img-top {
4 |     height: 52px;
5 |     width: 52px;
6 |     margin-left: auto;
7 |     margin-right: auto;
8 |     margin-top: 10px;
9 | }
10 | 
11 | /* Dark theme tweaking */
12 | html[data-theme=dark] .sd-card img[src*='.svg'] {
13 |     filter: invert(0.82) brightness(0.8) contrast(1.2);
14 | }
--------------------------------------------------------------------------------
/docs/source/ref_api.rst:
--------------------------------------------------------------------------------
1 | .. _ci_watson_api:
2 | 
3 | Reference/API
4 | =============
5 | 
6 | .. automodapi:: ci_watson.artifactory_helpers
7 |     :no-inheritance-diagram:
8 | 
9 | .. automodapi:: ci_watson.hst_helpers
10 |     :no-inheritance-diagram:
11 | 
12 | .. automodapi:: ci_watson.jwst_helpers
13 |     :no-inheritance-diagram:
14 | 
15 | .. automodapi:: ci_watson.resource_tracker
16 |     :no-inheritance-diagram:
17 | 
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 | 
3 | on:
4 |   release:
5 |     types: [released]
6 |   pull_request:
7 |   workflow_dispatch:
8 | 
9 | jobs:
10 |   build:
11 |     uses: OpenAstronomy/github-actions-workflows/.github/workflows/publish_pure_python.yml@v2
12 |     with:
13 |       upload_to_pypi: ${{ (github.event_name == 'release') && (github.event.action == 'released') }}
14 |     secrets:
15 |       pypi_token: ${{ secrets.PYPI_PASSWORD_STSCI_MAINTAINER }}
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 | version: 2
4 | 
5 | build:
6 |   os: ubuntu-22.04
7 |   tools:
8 |     python: "3.11"
9 | 
10 | sphinx:
11 |   builder: html
12 |   configuration: docs/source/conf.py
13 |   fail_on_warning: true
14 | 
15 | # Set the version of Python and requirements required to build your docs
16 | python:
17 |   install:
18 |     - method: pip
19 |       path: .
20 |       extra_requirements:
21 |         - docs
22 | 
23 | # Don't build any extra formats
24 | formats: []
--------------------------------------------------------------------------------
/docs/source/bigdata.rst:
--------------------------------------------------------------------------------
1 | .. _bigdata_setup:
2 | 
3 | Big Data
4 | ========
5 | 
6 | The ``--bigdata`` option is used together with the ``TEST_BIGDATA``
7 | environment variable, which is read by
8 | :func:`~ci_watson.artifactory_helpers.get_bigdata_root`. For local testing,
9 | set this variable to where you downloaded your Artifactory data.
10 | For remote testing (e.g., with GitHub Actions), set it to your Artifactory path
11 | in the GitHub Actions workflow file, as appropriate. For more details,
12 | please refer to the STScI Innerspace document
13 | "Users Guide: Running Regression Tests".
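14 | 
15 | For example, with ``TEST_BIGDATA`` set, a test can retrieve a file relative
16 | to that root via :func:`~ci_watson.artifactory_helpers.get_bigdata`. This is
17 | a minimal sketch; the repository name and filename below are hypothetical
18 | placeholders::
19 | 
20 |     import os
21 |     from ci_watson.artifactory_helpers import get_bigdata
22 | 
23 |     os.environ['TEST_BIGDATA'] = '/path/to/local/artifactory/data'
24 | 
25 |     # Copies my_data_repo/dev/input/sample.fits from under the data root
26 |     # into the current working directory and returns the local path.
27 |     local_file = get_bigdata('my_data_repo', 'dev', 'input', 'sample.fits')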
--------------------------------------------------------------------------------
/conftest.py:
--------------------------------------------------------------------------------
1 | from pytest_astropy_header.display import (PYTEST_HEADER_MODULES,
2 |                                            TESTED_VERSIONS)
3 | 
4 | 
5 | def pytest_configure(config):
6 |     PYTEST_HEADER_MODULES.pop('Scipy')
7 |     PYTEST_HEADER_MODULES.pop('Matplotlib')
8 |     PYTEST_HEADER_MODULES.pop('h5py')
9 |     PYTEST_HEADER_MODULES.pop('Pandas')
10 |     PYTEST_HEADER_MODULES['astropy'] = 'astropy'
11 |     PYTEST_HEADER_MODULES['requests'] = 'requests'
12 |     PYTEST_HEADER_MODULES['crds'] = 'crds'
13 | 
14 |     from ci_watson.version import version
15 |     TESTED_VERSIONS['ci-watson'] = version
--------------------------------------------------------------------------------
/tests/test_markers.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from ci_watson.artifactory_helpers import get_bigdata, BigdataError
3 | 
4 | 
5 | @pytest.mark.slow
6 | def test_skip_slow(pytestconfig):
7 |     if not pytestconfig.getoption('slow'):
8 |         pytest.fail('@pytest.mark.slow was not skipped')
9 | 
10 | 
11 | @pytest.mark.bigdata
12 | def test_skip_bigdata(pytestconfig):
13 |     if not pytestconfig.getoption('bigdata'):
14 |         pytest.fail('@pytest.mark.bigdata was not skipped')
15 | 
16 |     # User used the bigdata option and decorator but has no big data access.
17 |     else:
18 |         with pytest.raises(BigdataError):
19 |             get_bigdata('foo', 'bar')
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
5 | 
6 | version: 2
7 | updates:
8 |   - package-ecosystem: "github-actions" # See documentation for possible values
9 |     directory: ".github/workflows" # Location of package manifests
10 |     schedule:
11 |       interval: "weekly"
12 |     groups:
13 |       actions:
14 |         patterns:
15 |           - "*"
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ci_watson
2 | 
3 | [![DOI](https://zenodo.org/badge/143923649.svg)](https://zenodo.org/doi/10.5281/zenodo.12699836)
4 | [![CI Status](https://github.com/spacetelescope/ci_watson/workflows/CI/badge.svg)](https://github.com/spacetelescope/ci_watson/actions)
5 | [![Documentation Status](https://readthedocs.org/projects/ci-watson/badge/?version=latest)](https://ci-watson.readthedocs.io/en/latest/?badge=latest)
6 | 
7 | CI helper for STScI regression tests.
8 | If you ask nicely, it might also help you solve crimes.
9 | 
10 | Nightly regression test results are available only from within the STScI
11 | network at this time.
12 | 
13 | Full documentation: https://ci-watson.readthedocs.io/en/latest
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line.
5 | SPHINXOPTS    = -W
6 | SPHINXBUILD   = python -msphinx
7 | SPHINXPROJ    = ci_watson
8 | SOURCEDIR     = source
9 | BUILDDIR      = build
10 | 
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 | 
15 | .PHONY: help Makefile
16 | 
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 | 
--------------------------------------------------------------------------------
/.github/workflows/ci_workflows.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | 
3 | on:
4 |   push:
5 |     branches:
6 |       - main
7 |     tags:
8 |       - '*'
9 |   pull_request:
10 | 
11 | concurrency:
12 |   group: ${{ github.workflow }}-${{ github.ref }}
13 |   cancel-in-progress: true
14 | 
15 | jobs:
16 |   tests:
17 |     uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
18 |     with:
19 |       submodules: false
20 |       coverage: ''
21 |       envs: |
22 |         # Style and build checks
23 |         - linux: check-style
24 |         - linux: check-build
25 | 
26 |         # Basic tests (Windows not supported)
27 |         - linux: py39-test
28 |         - macos: py310-test
29 |         - linux: py311-test
30 | 
31 |         # --bigdata untestable due to Artifactory lockdown
32 |         - linux: py310-slow-stable
33 | 
34 |         # Test with dev versions of upstream dependencies
35 |         - linux: py312-slow-devdeps
36 |           posargs: --verbose
--------------------------------------------------------------------------------
/tests/test_hst_helpers.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | 
3 | from ci_watson.hst_helpers import raw_from_asn, download_crds
4 | 
5 | try:
6 |     from astropy.table import Table
7 |     HAS_ASTROPY = True
8 | except ImportError:
9 |     HAS_ASTROPY = False
10 | 
11 | 
12 | @pytest.mark.skipif(not HAS_ASTROPY, reason='Need astropy to run')
13 | def test_raw_from_asn(_jail):
14 |     # Make a dummy input file (to avoid package data headache)
15 |     tab = Table()
16 |     tab['MEMNAME'] = ['J6LQ01NAQ', 'J6LQ01NDQ', 'J6LQ01011']
17 |     tab['MEMTYPE'] = ['EXP-CRJ', 'EXP-CRJ', 'PROD-CRJ']
18 |     tab['MEMPRSNT'] = [True, True, True]
19 |     datafile = 'dummy_asn.fits'
20 |     tab.write(datafile, format='fits', overwrite=True)
21 | 
22 |     raw_files = raw_from_asn(datafile)
23 |     assert raw_files == ['j6lq01naq_raw.fits', 'j6lq01ndq_raw.fits']
24 | 
25 |     # Make sure we do not re-download an existing file.
26 |     # This will fail if a download is attempted.
27 |     download_crds(datafile)
28 | 
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 | 
3 | pushd %~dp0
4 | 
5 | REM Command file for Sphinx documentation
6 | 
7 | if "%SPHINXBUILD%" == "" (
8 | 	set SPHINXBUILD=python -msphinx
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 | set SPHINXPROJ=ci_watson
13 | 
14 | if "%1" == "" goto help
15 | 
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | 	echo.
19 | 	echo.The Sphinx module was not found. Make sure you have Sphinx installed,
20 | 	echo.then set the SPHINXBUILD environment variable to point to the full
21 | 	echo.path of the 'sphinx-build' executable. Alternatively you may add the
22 | 	echo.Sphinx directory to PATH.
23 | 	echo.
24 | 	echo.If you don't have Sphinx installed, grab it from
25 | 	echo.http://sphinx-doc.org/
26 | 	exit /b 1
27 | )
28 | 
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 | 
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 | 
35 | :end
36 | popd
37 | 
--------------------------------------------------------------------------------
/CHANGES.rst:
--------------------------------------------------------------------------------
1 | 0.11.0 (unreleased)
2 | ===================
3 | 
4 | - ``okify_regtests`` no longer supports ``okify_op="folder_copy"``
5 |   for JWST regression tests because it is no longer used downstream. [#92]
6 | 
7 | 0.10.0 (2025-06-18)
8 | ===================
9 | 
10 | - ``okify_regtests`` now supports new ``okify_op="sdp_pool_copy"``
11 |   for JWST regression tests. [#86]
12 | 
13 | 0.9.0 (2025-06-05)
14 | ==================
15 | 
16 | - Add ``resource_tracker`` and ``log_tracked_resources`` fixtures. [#74]
17 | - Add new ``--version`` and ``--output-dir`` options for
18 |   ``okify_regtests`` CLI. [#84]
19 | 
20 | 0.8.0 (2024-12-09)
21 | ==================
22 | 
23 | - fix Ruff configuration [#67]
24 | - move build configuration into ``pyproject.toml`` [#68]
25 | - write ``okify_regtests`` script, generalizing ``jwst`` and ``romancal`` versions [#69]
26 | 
27 | 0.7.0 (2024-07-09)
28 | ==================
29 | 
30 | - Removed deprecated timeout keyword from ``download_crds``. [#58]
31 | 
--------------------------------------------------------------------------------
/ci_watson/jwst_helpers.py:
--------------------------------------------------------------------------------
1 | """Helper module for JWST tests."""
2 | import pytest
3 | 
4 | __all__ = ['require_crds_context']
5 | 
6 | 
7 | # This is not in the plugin due to CRDS dependency.
8 | def require_crds_context(required_context):
9 |     """
10 |     Ensure CRDS context is at least a certain level.
11 | 
12 |     Parameters
13 |     ----------
14 |     required_context : int
15 |         The minimal level required.
16 | 
17 |     Returns
18 |     -------
19 |     decor : ``pytest.mark.skipif`` decorator
20 |         Decorator to skip if ``CRDS_CONTEXT`` is not at least a certain level.
21 | 
22 |     """
23 |     import re
24 |     import crds
25 | 
26 |     current_context_string = crds.get_context_name('jwst')
27 |     match = re.match(r"jwst_(\d\d\d\d)\.pmap", current_context_string)
28 |     current_context = int(match.group(1))
29 | 
30 |     return pytest.mark.skipif(
31 |         current_context < required_context,
32 |         reason='CRDS context {} less than required context {}'.format(
33 |             current_context_string, required_context
34 |         )
35 |     )
36 | 
--------------------------------------------------------------------------------
/docs/source/scripts.rst:
--------------------------------------------------------------------------------
1 | .. _ci_watson_scripts:
2 | 
3 | Scripts
4 | =======
5 | 
6 | This package also provides the following CLI:
7 | 
8 | * :ref:`ci_watson_okify`: Assist with okifying new outputs
9 |   as new truths to resolve failing tests; only run this
10 |   when you are very sure the new outputs are correct.
11 | 
12 | .. _ci_watson_okify:
13 | 
14 | okify_regtests
15 | --------------
16 | 
17 | The ``okify_regtests`` command "okifies" a set of failing regression test
18 | results by overwriting the corresponding truth files on Artifactory, so
19 | that the new outputs become the accepted truths. It requires
20 | JFrog CLI (https://jfrog.com/getcli/) configured with valid credentials
21 | (``jf login``) and write access to the desired truth file repository
22 | (``jwst-pipeline``, ``roman-pipeline``, etc.).
23 | 
24 | To see the syntax and usage, from a terminal, type::
25 | 
26 |     okify_regtests --help
27 | 
28 | .. okifyregtestsclihelp::
29 | 
30 | Example for ``jwst``::
31 | 
32 |     okify_regtests jwst 956 --dry-run
33 | 
34 | Example for ``roman``::
35 | 
36 |     okify_regtests roman 1317
37 | 
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 | 
3 | Copyright (c) 2018-2025, Space Telescope Science Institute, AURA
4 | All rights reserved.
5 | 
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 | 
9 | * Redistributions of source code must retain the above copyright notice, this
10 |   list of conditions and the following disclaimer.
11 | 
12 | * Redistributions in binary form must reproduce the above copyright notice,
13 |   this list of conditions and the following disclaimer in the documentation
14 |   and/or other materials provided with the distribution.
15 | 
16 | * Neither the name of the copyright holder nor the names of its
17 |   contributors may be used to endorse or promote products derived from
18 |   this software without specific prior written permission.
19 | 
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 | 
--------------------------------------------------------------------------------
/tests/test_fixtures.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | 
4 | 
5 | def test_envopt(pytestconfig, envopt):
6 |     """Test ``envopt`` fixture that is tied to ``--env`` option."""
7 |     input_env = pytestconfig.getoption('env')
8 |     assert envopt == input_env
9 | 
10 | 
11 | # https://docs.pytest.org/en/latest/fixture.html
12 | # Not that different from cleandir example.
13 | @pytest.mark.usefixtures('_jail')
14 | class TestDirectoryInit:
15 |     """Test ``_jail`` fixture."""
16 |     def test_cwd_starts_empty(self):
17 |         assert os.listdir(os.getcwd()) == []
18 |         with open("myfile", "w") as f:
19 |             f.write("hello")
20 | 
21 |     @pytest.mark.parametrize('x', [1, 2])
22 |     def test_cwd_again_starts_empty(self, x):
23 |         assert os.listdir(os.getcwd()) == []
24 | 
25 | 
26 | class TestJail:
27 |     """Test restoring working folders after jailing.
28 | 
29 |     Note that if tests are run in parallel, these results may mean nothing.
30 |     """
31 | 
32 |     @classmethod
33 |     def setup_class(cls):
34 |         cls.cwd = os.getcwd()
35 | 
36 |     def test_notintemp(self):
37 |         """Ensure start state."""
38 |         assert os.getcwd() == self.cwd
39 | 
40 |     @pytest.mark.usefixtures('_jail')
41 |     def test_intemp(self):
42 |         """Ensure that jailing occurred."""
43 |         assert not len(os.listdir(os.getcwd()))
44 |         assert os.getcwd() != self.cwd
45 | 
46 |     def test_notintemppostjail(self):
47 |         """Ensure that the start state was recovered."""
48 |         assert os.getcwd() == self.cwd
49 | 
50 | 
51 | def test_get_jail_as_string(_jail):
52 |     """Test that the _jail fixture returns the cwd as a string."""
53 |     cwd = os.getcwd()
54 |     cwd_jail = _jail
55 | 
56 |     assert cwd == cwd_jail
57 | 
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist =
3 |     check-{style,build}
4 |     py{39,310,311,312}-test{,-slow}{,-stable,-devdeps}
5 | requires =
6 |     setuptools >= 30.3.0
7 |     pip >= 19.3.1
8 | 
9 | [testenv]
10 | # Pass through the following environment variables which are needed for the CI
11 | passenv = HOME,CI
12 | 
13 | setenv =
14 |     devdeps: PIP_EXTRA_INDEX_URL = https://pypi.anaconda.org/astropy/simple https://pypi.anaconda.org/scientific-python-nightly-wheels/simple
15 | 
16 | # tox environments are constructed with so-called 'factors' (or terms)
17 | # separated by hyphens, e.g. test-devdeps-cov. Lines below starting with factor:
18 | # will only take effect if that factor is included in the environment name. To
19 | # see a list of example environments that can be run, along with a description,
20 | # run:
21 | #
22 | #     tox -l -v
23 | #
24 | description =
25 |     run tests
26 |     slow: with slow flag
27 |     stable: with env set to stable
28 |     devdeps: with the latest developer version of key dependencies
29 | 
30 | extras =
31 |     test
32 |     all
33 | 
34 | deps =
35 |     devdeps: numpy>=0.0.dev0
36 |     devdeps: astropy>=0.0.dev0
37 | 
38 | commands =
39 |     pip freeze
40 |     pytest tests \
41 |     slow: --slow --basetemp=tests_output --junitxml results.xml \
42 |     stable: --env stable \
43 |     {posargs}
44 | 
45 | [testenv:check-style]
46 | description = check code style with ruff
47 | skip_install = true
48 | deps =
49 |     ruff
50 | commands =
51 |     ruff check . {posargs}
52 | 
53 | [testenv:check-build]
54 | description = check build sdist/wheel and a strict twine check for metadata
55 | skip_install = true
56 | deps =
57 |     build
58 |     twine>=3.3
59 | commands =
60 |     python -m build .
61 |     twine check --strict dist/*
62 | 
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. ci_watson documentation master file, created by
2 |    sphinx-quickstart on Tue Aug  7 16:43:10 2018.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | .. _ci_watson_index:
7 | 
8 | *********
9 | ci_watson
10 | *********
11 | 
12 | ``ci_watson`` is a test helper for Continuous Integration (CI) testing
13 | at STScI using GitHub Actions and Artifactory.
14 | 
15 | This package has two components:
16 | 
17 | * ``pytest`` plugin containing markers and fixtures.
18 | * Generic CI helpers for STScI tests using GitHub Actions and Artifactory.
19 | 
20 | To install the stable version of this package from PyPI::
21 | 
22 |     pip install ci-watson
23 | 
24 | To cite this package, please use its Zenodo DOI available at
25 | https://zenodo.org/doi/10.5281/zenodo.12699836 .
26 | 
27 | .. toctree::
28 |    :maxdepth: 1
29 |    :hidden:
30 | 
31 |    plugin
32 |    bigdata
33 |    scripts
34 |    ref_api
35 | 
36 | .. grid:: 2
37 | 
38 |     .. grid-item-card::
39 | 
40 |         .. button-ref:: plugin
41 |             :expand:
42 |             :color: primary
43 |             :click-parent:
44 | 
45 |             ``pytest`` plugin
46 | 
47 |     .. grid-item-card::
48 | 
49 |         .. button-ref:: bigdata
50 |             :expand:
51 |             :color: primary
52 |             :click-parent:
53 | 
54 |             Handling big data
55 | 
56 |     .. grid-item-card::
57 | 
58 |         .. button-ref:: scripts
59 |             :expand:
60 |             :color: primary
61 |             :click-parent:
62 | 
63 |             Scripts (e.g., okify_regtests)
64 | 
65 |     .. grid-item-card::
66 | 
67 |         .. button-ref:: ref_api
68 |             :expand:
69 |             :color: primary
70 |             :click-parent:
71 | 
72 |             Reference/API
73 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 | ci_watson/version.py
29 | 
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 | 
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 | 
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | .hypothesis/
50 | .pytest_cache/
51 | 
52 | # Translations
53 | *.mo
54 | *.pot
55 | 
56 | # Django stuff:
57 | *.log
58 | local_settings.py
59 | db.sqlite3
60 | 
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 | 
65 | # Scrapy stuff:
66 | .scrapy
67 | 
68 | # Sphinx documentation
69 | docs/build/
70 | docs/source/api/
71 | 
72 | # PyBuilder
73 | target/
74 | 
75 | # Jupyter Notebook
76 | .ipynb_checkpoints
77 | 
78 | # pyenv
79 | .python-version
80 | 
81 | # celery beat schedule file
82 | celerybeat-schedule
83 | 
84 | # SageMath parsed files
85 | *.sage.py
86 | 
87 | # Environments
88 | .env
89 | .venv
90 | env/
91 | venv/
92 | ENV/
93 | env.bak/
94 | venv.bak/
95 | 
96 | # Spyder project settings
97 | .spyderproject
98 | .spyproject
99 | 
100 | # Rope project settings
101 | .ropeproject
102 | 
103 | # mkdocs documentation
104 | /site
105 | 
106 | # mypy
107 | .mypy_cache/
108 | 
109 | # Misc
110 | *~
--------------------------------------------------------------------------------
/tests/test_resource_tracker.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | 
4 | import pytest
5 | 
6 | from ci_watson.resource_tracker import (
7 |     ResourceTracker,
8 |     _TrackPeakMemory,
9 |     _TrackRuntime,
10 | )
11 | 
12 | 
13 | class FakeNode:
14 |     def __init__(self):
15 |         self.user_properties = []
16 | 
17 | 
18 | class FakeRequest:
19 |     def __init__(self):
20 |         self.node = FakeNode()
21 | 
22 | 
23 | def test_runtime():
24 |     tracker = _TrackRuntime()
25 |     with tracker:
26 |         time.sleep(1.0)
27 |     # a 1 second sleep is sometimes too much to ask
28 |     # of CI runners. Use a wide margin to make this test
29 |     # less brittle in those cases.
30 |     threshold = 0.5 if "CI" in os.environ else 0.1
31 |     assert abs(tracker.log()[1] - 1.0) < threshold
32 | 
33 | 
34 | def test_memory():
35 |     tracker = _TrackPeakMemory()
36 |     N = 1024 * 1024
37 |     with tracker:
38 |         b = b"0" * N  # noqa: F841
39 |     assert abs(tracker.log()[1] - N) / N < 0.01
40 | 
41 | 
42 | def test_resource_tracker():
43 |     tracker = ResourceTracker()
44 |     with tracker.track():
45 |         pass
46 |     fake_request = FakeRequest()
47 |     tracker.log(fake_request)
48 |     keys = {log[0] for log in fake_request.node.user_properties}
49 |     assert keys == {"tracked-time", "tracked-peakmem"}
50 | 
51 | 
52 | def test_log():
53 |     tracker = ResourceTracker()
54 |     fake_request = FakeRequest()
55 |     with tracker.track(log=fake_request):
56 |         pass
57 |     keys = {log[0] for log in fake_request.node.user_properties}
58 |     assert keys == {"tracked-time", "tracked-peakmem"}
59 | 
60 | 
61 | @pytest.fixture(scope="module")
62 | def long_fixture(resource_tracker):
63 |     with resource_tracker.track():
64 |         pass
65 | 
66 | 
67 | def test_fixture_log_tracked_resources(log_tracked_resources, long_fixture, request):
68 |     log_tracked_resources()
69 |     keys = {log[0] for log in request.node.user_properties}
70 |     assert keys == {"tracked-time", "tracked-peakmem"}
71 | 
72 | 
73 | def test_fixture_log_in_test(resource_tracker, request):
74 |     with resource_tracker.track(log=request):
75 |         pass
76 |     keys = {log[0] for log in request.node.user_properties}
77 |     assert keys == {"tracked-time", "tracked-peakmem"}
78 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "ci_watson"
3 | description = "CI helper for STScI regression testing"
4 | requires-python = ">=3.9"
5 | authors = [
6 |     { name = "STScI" },
7 | ]
8 | classifiers = [
9 |     "Framework :: Pytest",
10 |     "Intended Audience :: Science/Research",
11 |     "Operating System :: OS Independent",
12 |     "Programming Language :: Python :: 3",
13 |     "Programming Language :: Python :: Implementation :: CPython",
14 |     "Topic :: Software Development :: Testing",
15 |     "Topic :: Software Development :: Libraries :: Python Modules",
16 | ]
17 | dependencies = [
18 |     "crds",
19 |     "colorama>=0.4.1",
20 |     "pytest>=6",
21 |     "readchar>=3.0",
22 |     "requests",
23 | ]
24 | license-files = ["LICENSE.md"]
25 | dynamic = [
26 |     "version",
27 | ]
28 | 
29 | [project.readme]
30 | file = "README.md"
31 | content-type = "text/markdown"
32 | 
33 | [project.urls]
34 | Homepage = "https://github.com/spacetelescope/ci_watson"
35 | 
36 | [project.entry-points.pytest11]
37 | pytest_ciwatson = "ci_watson.plugin"
38 | 
39 | [project.scripts]
40 | okify_regtests = "ci_watson.scripts.okify_regtests:main"
41 | 
42 | [project.optional-dependencies]
43 | all = [
44 |     "astropy>=6",
45 | ]
46 | test = [
47 |     "pytest-astropy-header",
48 | ]
49 | docs = [
50 |     "numpydoc",
51 |     "pydata-sphinx-theme",
52 |     "sphinx_design",
53 |     "sphinx-copybutton",
54 |     "sphinx-automodapi",
55 | ]
56 | 
57 | [build-system]
58 | requires = [
59 |     "setuptools",
60 |     "setuptools_scm",
61 |     "wheel",
62 | ]
63 | build-backend = "setuptools.build_meta"
64 | 
65 | [tool.setuptools]
66 | zip-safe = false
67 | include-package-data = true
68 | 
69 | [tool.setuptools.packages.find]
70 | exclude = [
71 |     "tests",
72 | ]
73 | namespaces = false
74 | 
75 | [tool.setuptools_scm]
76 | version_file = "ci_watson/version.py"
77 | 
78 | [tool.pytest.ini_options]
79 | minversion = "6"
80 | testpaths = [
81 |     "tests",
82 | ]
83 | norecursedirs = [
84 |     ".eggs",
85 |     "build",
86 | ]
87 | astropy_header = true
88 | xfail_strict = true
89 | inputs_root = "ci-watson"
90 | junit_family = "xunit2"
91 | filterwarnings = [
92 |     "error",
93 |     "ignore:numpy.ndarray size changed:RuntimeWarning",
94 | ]
95 | addopts = "-p no:legacypath"
96 | 
97 | [tool.ruff.lint]
98 | select = [
99 |     "E9", # syntax / io error
100 |     "F63", # assertion traps
101 |     "F7", # keyword outside special block
102 |     "F82", # undefined variables
103 | ]
104 | 
--------------------------------------------------------------------------------
/docs/source/plugin.rst:
--------------------------------------------------------------------------------
1 | .. _ci_watson_plugin:
2 | 
3 | Plugin
4 | ======
5 | 
6 | .. note::
7 | 
8 |     The ``--slow`` option conflicts in marker name with ``--run-slow``
9 |     provided by ``pytest-astropy``. If you have both ``ci-watson``
10 |     and ``pytest-astropy`` installed, you need to provide *both*
11 |     option flags to enable tests marked as slow. See
12 |     https://github.com/spacetelescope/ci_watson/issues/83 .
13 | 
14 | The plugin portion of ``ci_watson`` contains:
15 | 
16 | * ``--slow`` option and ``@pytest.mark.slow`` decorator to run or skip
17 |   tests that are resource intensive. What counts as resource intensive
18 |   is up to the author of the test.
19 | * ``--bigdata`` option and ``@pytest.mark.bigdata`` decorator to run or skip
20 |   tests that require intranet (Artifactory, Central Storage, etc.) access.
21 |   Additional setup is required for these tests to run successfully
22 |   (see :ref:`bigdata_setup`).
23 |   It is up to the author of the test to perform such setup properly.
24 | * ``--env`` option and ``envopt`` fixture to set the test environment to
25 |   ``"dev"`` or ``"stable"``. This plugin only sets the value. It is up to
26 |   the author of the test to use this environment setting properly.
27 | * ``_jail`` fixture to enable a test to run in a pristine temporary working
28 |   directory. This is particularly useful for pipeline tests.
29 | * ``resource_tracker`` and ``log_tracked_resources`` fixtures to track
30 |   memory and runtime and log them in the junit XML results file.
31 | 
32 | Configuration Options
33 | ---------------------
34 | 
35 | ``inputs_root``/``results_root`` - The 'bigdata' remote repository name or
36 | local data root directory for test input/output files. Setting the value of
37 | either option makes it available to tests via the ``pytestconfig`` fixture.
38 | Test code can then obtain the name of the Artifactory repository or local data
39 | root directory to use when accessing locations needed for running tests.
40 | 
41 | .. note::
42 | 
43 |     If used, these values should appear in either ``pytest.ini`` OR the appropriate
44 |     section in ``pyproject.toml``, *not both*.
45 | 
46 | Example configuration within ``pyproject.toml``::
47 | 
48 |     [tool.pytest.ini_options]
49 |     inputs_root = "my_data_repo"
50 |     results_root = "my_results_repo"
51 | 
52 | Example configuration within ``pytest.ini``::
53 | 
54 |     [pytest]
55 |     inputs_root = my_data_repo
56 |     results_root = my_results_repo
57 | 
58 | The value(s) defined in the pytest configuration file may be accessed as a list
59 | by test code via the ``pytestconfig`` fixture, which must be passed in as an
60 | argument to the test method or function that will use the value.
61 | 
62 | Example of accessing configuration values within test code itself:
63 | 
64 | .. code-block:: python
65 | 
66 |     def test_important_thing(pytestconfig):
67 |         setup_cfg_inputs_root = pytestconfig.getini('inputs_root')[0]
68 |         assert setup_cfg_inputs_root == 'my_data_repo'
69 | 
70 | From within a fixture or a test class, the configuration values must be
71 | accessed through the ``request`` fixture:
72 | 
73 | .. code-block:: python
74 | 
75 |     import pytest
76 | 
77 |     @pytest.fixture
78 |     def inputs_root(request):
79 |         return request.config.getini('inputs_root')[0]
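80 | 
81 | Putting the pieces together, here is a minimal sketch of a test that uses the
82 | plugin's marker and fixtures together (the repository name and filename below
83 | are hypothetical placeholders):
84 | 
85 | .. code-block:: python
86 | 
87 |     import pytest
88 |     from ci_watson.artifactory_helpers import get_bigdata
89 | 
90 |     @pytest.mark.bigdata
91 |     def test_with_intranet_data(envopt, _jail):
92 |         # Skipped unless pytest is run with --bigdata; ``envopt`` holds the
93 |         # --env value ("dev" or "stable") and ``_jail`` switches the test
94 |         # into a clean temporary working directory.
95 |         local_file = get_bigdata('my_data_repo', envopt, 'input',
96 |                                  'sample.fits')
97 |         assert local_file.endswith('sample.fits')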
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Spacetelescope Open Source Code of Conduct
2 | 
3 | We expect all "spacetelescope" organization projects to adopt a code of conduct that ensures a productive, respectful environment for all open source contributors and participants. We are committed to providing a strong and enforced code of conduct and expect everyone in our community to follow these guidelines when interacting with others in all forums. Our goal is to keep ours a positive, inclusive, successful, and growing community. The community of participants in open source Astronomy projects is made up of members from around the globe with a diverse set of skills, personalities, and experiences. It is through these differences that our community experiences success and continued growth.
4 | 
5 | 
6 | As members of the community,
7 | 
8 | - We pledge to treat all people with respect and provide a harassment- and bullying-free environment, regardless of sex, sexual orientation and/or gender identity, disability, physical appearance, body size, race, nationality, ethnicity, and religion. In particular, sexual language and imagery, sexist, racist, or otherwise exclusionary jokes are not appropriate.
9 | 
10 | - We pledge to respect the work of others by recognizing acknowledgment/citation requests of original authors. As authors, we pledge to be explicit about how we want our own work to be cited or acknowledged.
11 | 
12 | - We pledge to welcome those interested in joining the community, and realize that including people with a variety of opinions and backgrounds will only serve to enrich our community. In particular, discussions relating to pros/cons of various technologies, programming languages, and so on are welcome, but these should be done with respect, taking proactive measure to ensure that all participants are heard and feel confident that they can freely express their opinions.
13 | 
14 | - We pledge to welcome questions and answer them respectfully, paying particular attention to those new to the community. We pledge to provide respectful criticisms and feedback in forums, especially in discussion threads resulting from code contributions.
15 | 
16 | - We pledge to be conscientious of the perceptions of the wider community and to respond to criticism respectfully. We will strive to model behaviors that encourage productive debate and disagreement, both within our community and where we are criticized. We will treat those outside our community with the same respect as people within our community.
17 | 
18 | - We pledge to help the entire community follow the code of conduct, and to not remain silent when we see violations of the code of conduct. We will take action when members of our community violate this code, such as by contacting conduct@stsci.edu (all emails sent to this address will be treated with the strictest confidence) or by talking privately with the person.
19 | 
20 | This code of conduct applies to all community situations online and offline, including mailing lists, forums, social media, conferences, meetings, associated social events, and one-to-one interactions.
21 | 
22 | Parts of this code of conduct have been adapted from the Astropy and Numfocus codes of conduct.
23 | http://www.astropy.org/code_of_conduct.html
24 | https://www.numfocus.org/about/code-of-conduct/
--------------------------------------------------------------------------------
/ci_watson/resource_tracker.py:
--------------------------------------------------------------------------------
1 | """Resource tracking regtest utilities.
2 | 
3 | Can be used within module-scoped fixtures (often used to
4 | run Steps or Pipelines) or within tests.
5 | 
6 | For uses where the resource usage occurs within a test:
7 | 
8 | .. code-block:: python
9 | 
10 |     def test_long_step(resource_tracker, request):
11 |         with resource_tracker.track(log=request):
12 |             # something that takes memory and time
13 |             pass
14 | 
15 | For a module-scoped fixture the resource tracking can
16 | be performed in the fixture but the logging/reporting of
17 | the resource usage must occur during a test:
18 | 
19 | .. code-block:: python
20 | 
21 |     @pytest.fixture(scope="module")
22 |     def resource_tracker():
23 |         return ResourceTracker()
24 | 
25 |     @pytest.fixture()
26 |     def log_tracked_resources(resource_tracker, request):
27 |         def callback():
28 |             resource_tracker.log(request)
29 | 
30 |         yield callback
31 | 
32 |     @pytest.fixture
33 |     def my_long_fixture(resource_tracker):
34 |         with resource_tracker.track():
35 |             # something that takes memory and time
36 |             pass
37 | 
38 |     def test_log_tracked_resources(log_tracked_resources, my_long_fixture):
39 |         log_tracked_resources()
40 | 
41 | Use of the module-scoped fixture has fixture-reuse
42 | considerations similar to the ``rtdata_module`` fixture. Having
43 | more than one module scoped fixture that uses ``resource_tracker``
44 | per module is discouraged (as both will use the same ``ResourceTracker``
45 | instance). Parameterization of a fixture using ``resource_tracker``
46 | is supported (same as ``rtdata_module``).
47 | """
48 | 
49 | import time
50 | import tracemalloc
51 | from contextlib import ExitStack, contextmanager
52 | 
53 | 
54 | __all__ = ["ResourceTracker"]
55 | 
56 | 
57 | class _TrackRuntime:
58 |     """Runtime tracker context."""
59 | 
60 |     def __enter__(self):
61 |         self._t0 = time.monotonic()
62 | 
63 |     def __exit__(self, exc_type, exc_value, traceback):
64 |         self.value = time.monotonic() - self._t0
65 | 
66 |     def log(self):
67 |         return ("tracked-time", self.value)
68 | 
69 | 
70 | class _TrackPeakMemory:
71 |     """Peak memory tracker context."""
72 | 
73 |     def __enter__(self):
74 |         tracemalloc.start()
75 | 
76 |     def __exit__(self, exc_type, exc_value, traceback):
77 |         _, self.value = tracemalloc.get_traced_memory()
78 |         tracemalloc.stop()
79 | 
80 |     def log(self):
81 |         return ("tracked-peakmem", self.value)
82 | 
83 | 
84 | class ResourceTracker:
85 |     """Track resources used during track context."""
86 | 
87 |     def __init__(self):
88 |         self._trackers = [_TrackPeakMemory(), _TrackRuntime()]
89 | 
90 |     def log(self, request):
91 |         """Log tracked resource usage to the pytest request user properties.
92 | 
93 |         Parameters
94 |         ----------
95 |         request : pytest.FixtureRequest
96 |             Must be a function-scoped pytest request fixture result.
97 |         """
98 |         request.node.user_properties.extend(t.log() for t in self._trackers)
99 | 
100 |     @contextmanager
101 |     def track(self, log=None):
102 |         """Context during which resources are tracked.
103 | 
104 |         Parameters
105 |         ----------
106 |         log : pytest.FixtureRequest, optional
107 |             If provided, log the usage to the provided request fixture result.
108 |         """
109 |         try:
110 |             with ExitStack() as stack:
111 |                 for t in self._trackers:
112 |                     stack.enter_context(t)
113 |                 yield self
114 |         finally:
115 |             if log:
116 |                 self.log(log)
117 | 
--------------------------------------------------------------------------------
/ci_watson/plugin.py:
--------------------------------------------------------------------------------
1 | """
2 | These are automatically available when ``ci_watson`` is used as a
3 | pytest plugin.
4 | """
5 | import os
6 | import pytest
7 | 
8 | from ci_watson.resource_tracker import ResourceTracker
9 | 
10 | __all__ = []
11 | 
12 | 
13 | def pytest_addoption(parser):
14 |     """
15 |     These pytest hooks allow us to mark tests and run the marked tests with
16 |     specific command line options.
17 |     """
18 |     # Add option to run slow tests
19 |     parser.addoption(
20 |         "--slow",
21 |         action="store_true",
22 |         help="run slow tests"
23 |     )
24 | 
25 |     # Add option to use big data sets
26 |     parser.addoption(
27 |         "--bigdata",
28 |         action="store_true",
29 |         help="use big data sets (intranet)"
30 |     )
31 | 
32 |     # Choose to test under dev or stable
33 |     parser.addoption(
34 |         "--env",
35 |         default="dev",
36 |         help="specify what environment to test"
37 |     )
38 | 
39 |     # Data file input/output source/destination customization.
40 |     parser.addini(
41 |         "inputs_root",
42 |         "Root dir (or data repository name) for test input files.",
43 |         type="args",
44 |         default=None,
45 |     )
46 | 
47 |     parser.addini(
48 |         "results_root",
49 |         "Root dir (or data repository name) for test result/output files.",
50 |         type="args",
51 |         default=None,
52 |     )
53 | 
54 | 
55 | def pytest_configure(config):
56 |     config.getini('markers').append(
57 |         'slow: Run tests that are resource intensive')
58 | 
59 |     config.getini('markers').append(
60 |         'bigdata: Run tests that require intranet access')
61 | 
62 | 
63 | def pytest_runtest_setup(item):
64 |     if 'slow' in item.keywords and not item.config.getvalue("slow"):
65 |         pytest.skip("need --slow option to run")
66 | 
67 |     if 'bigdata' in item.keywords and not item.config.getvalue("bigdata"):
68 |         pytest.skip("need --bigdata option to run")
69 | 
70 | 
71 | @pytest.fixture(scope='function')
72 | def _jail(tmp_path):
73 |     """Perform test in a pristine temporary working directory."""
74 |     old_dir = os.getcwd()
75 |     os.chdir(tmp_path)
76 |     try:
77 |         yield str(tmp_path)
78 |     finally:
79 |         os.chdir(old_dir)
80 | 
81 | 
82 | @pytest.fixture(scope='session')
83 | def envopt(request):
84 |     """Get the ``--env`` command-line option specifying the test environment."""
85 |     return request.config.getoption("env")
86 | 
87 | 
88 | @pytest.fixture(scope="module")
89 | def resource_tracker():
90 |     """Fixture to return the current module-scoped ResourceTracker.
91 | 
92 |     Use by calling ``track`` to generate a context in which resource
93 |     usage will be tracked.
94 | 
95 |     .. code-block:: python
96 | 
97 |         with resource_tracker.track():
98 |             # do stuff
99 | 
100 |     For resources used during tests, providing a function-scoped
101 |     request fixture result as the ``log`` argument will also log the
102 |     used resources to the junit results.xml.
103 | 
104 |     .. code-block:: python
105 | 
106 |         def test_something(resource_tracker, request):
107 |             with resource_tracker.track(log=request):
108 |                 # do stuff
109 | 
110 |     For resources used during fixtures, the tracked resources
111 |     can be logged in a separate test using ``log_tracked_resources``.
112 |     """
113 |     return ResourceTracker()
114 | 
115 | 
116 | @pytest.fixture()
117 | def log_tracked_resources(resource_tracker, request):
118 |     """Fixture to log resources tracked by ``resource_tracker``.
119 | 
120 |     .. code-block:: python
121 | 
122 |         @pytest.fixture
123 |         def my_fixture(resource_tracker):
124 |             with resource_tracker.track():
125 |                 # do stuff
126 | 
127 |         def test_write_log(log_tracked_resources, my_fixture):
128 |             log_tracked_resources()
129 |     """
130 | 
131 |     def callback():
132 |         resource_tracker.log(request)
133 | 
134 |     yield callback
135 | 
--------------------------------------------------------------------------------
/ci_watson/hst_helpers.py:
--------------------------------------------------------------------------------
1 | """Helper module for HST tests."""
2 | 
3 | import os
4 | import glob
5 | import shutil
6 | 
7 | import crds
8 | 
9 | 
10 | __all__ = ['ref_from_image', 'raw_from_asn', 'download_crds']
11 | 
12 | CRDS_SERVER_URL = "https://hst-crds.stsci.edu"
13 | HST_INSTRUMENTS = ['acs', 'wfc3', 'stis', 'cos', 'wfpc2']
14 | 
15 | 
16 | def _get_reffile(hdr, key):
17 |     """Get ref file from given key in given FITS header."""
18 |     ref_file = None
19 |     if key in hdr:  # Keyword might not exist
20 |         ref_file = hdr[key].strip()
21 |         if ref_file.upper() == 'N/A':  # Not all ref files are defined
22 |             ref_file = None
23 |     return ref_file
24 | 
25 | 
26 | def ref_from_image(input_image, reffile_lookup):
27 |     """
28 |     Return a list of reference filenames, as defined in the primary
29 |     header of the given input image, necessary for calibration.
30 | 
31 |     Parameters
32 |     ----------
33 |     input_image : str
34 |         FITS image to extract info from.
35 | 
36 |     reffile_lookup : list of str
37 |         List of primary header keywords to check. Example::
38 | 
39 |             ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE']
40 | 
41 |     Returns
42 |     -------
43 |     ref_files : list of str
44 |         List of reference files needed for the test with given
45 |         input file.
46 | 
47 |     """
48 |     from astropy.io import fits
49 | 
50 |     ref_files = []
51 |     hdr = fits.getheader(input_image, ext=0)
52 | 
53 |     for reffile in reffile_lookup:
54 |         s = _get_reffile(hdr, reffile)
55 |         if s is not None:
56 |             ref_files.append(s)
57 | 
58 |     return ref_files
59 | 
60 | 
61 | def raw_from_asn(asn_file, suffix='_raw.fits'):
62 |     """
63 |     Return a list of RAW input files in a given ASN.
64 | 
65 |     Parameters
66 |     ----------
67 |     asn_file : str
68 |         Filename for the ASN file.
69 | 
70 |     suffix : str
71 |         Suffix to append to the filenames in ASN table.
72 | 
73 |     Returns
74 |     -------
75 |     raw_files : list of str
76 |         A list of input files to process.
77 | 
78 |     """
79 |     from astropy.table import Table
80 | 
81 |     raw_files = []
82 |     tab = Table.read(asn_file, format='fits')
83 | 
84 |     for row in tab:
85 |         if row['MEMTYPE'].startswith('PROD'):
86 |             continue
87 |         pfx = row['MEMNAME'].lower().strip().replace('\x00', '')
88 |         raw_files.append(pfx + suffix)
89 | 
90 |     return raw_files
91 | 
92 | 
93 | def download_crds(refname, *, verbose=False):
94 |     """
95 |     Download a CRDS file from HTTP to current directory.
96 | 
97 |     Parameters
98 |     ----------
99 |     refname : str
100 |         Filename. Examples::
101 | 
102 |             '012345678_bia.fits'
103 |             'jref$012345678_bia.fits'
104 |             '/path/to/012345678_bia.fits'
105 | 
106 |         Only a filename in ``dir$name`` format will proceed
107 |         to the download stage.
108 | 
109 |     verbose : bool
110 |         If `True`, print messages to screen.
111 |         This is useful for debugging.
112 | 
113 |     """
114 |     refdir = None
115 |     fname = refname
116 | 
117 |     # Expand IRAF-style dir shortcut.
118 |     if '$' in refname:
119 |         refdir, fname = refname.split('$')
120 |         if refdir in os.environ:
121 |             refname = os.path.join(os.environ[refdir], fname)
122 |         else:
123 |             refname = fname
124 | 
125 |     # CRDS file for given name never changes, so no need to re-download
126 |     # if already copied over prior or directly accessible on disk somewhere.
127 |     if os.path.isfile(refname):
128 |         if verbose:
129 |             print('{} already exists, skipping download'.format(refname))
130 |         return
131 | 
132 |     # Do not know where to download.
133 |     if refdir is None:
134 |         raise ValueError('Unknown HTTP destination for {}'.format(refname))
135 | 
136 |     # Need to ensure CRDS has been cached locally.
137 |     if 'CRDS_SERVER_URL' not in os.environ:
138 |         os.environ['CRDS_SERVER_URL'] = CRDS_SERVER_URL
139 |     os.environ['CRDS_PATH'] = '.' + os.sep
140 |     # Make sure expected output directory is present in local directory
141 |     tmpbase = os.path.join('references', 'hst')
142 |     tmpref = os.path.join(tmpbase, HST_INSTRUMENTS[0])
143 |     try:
144 |         if not os.path.exists(tmpref):
145 |             os.makedirs(tmpref)
146 |         for inst in HST_INSTRUMENTS[1:]:
147 |             tmppath = os.path.join(tmpbase, inst)
148 |             if not os.path.exists(tmppath):
149 |                 os.mkdir(tmppath)
150 | 
151 |         # run the command to sync this CRDS file with local directory
152 |         sync_cmd = crds.sync.SyncScript('sync --files ' + fname)
153 |         sync_cmd.sync_explicit_files()  # copies file into subdir
154 | 
155 |         # Move the sync'd reference file to locally defined directory now.
156 |         # We need to find it first, though, since we are not sure what
157 |         # instrument that reference file was for...
158 |         tmpfile = glob.glob(os.path.join(tmpbase, '*', '*.fits'))[0]
159 |         shutil.move(tmpfile, refname)
160 | 
161 |         if verbose:
162 |             print('Downloaded {} from {}'.format(refname, CRDS_SERVER_URL))
163 | 
164 |     except Exception:
165 |         print(f"Failed to download {fname}")
166 | 
167 |     finally:
168 |         # delete tmp CRDS directories now, if possible.
169 |         try:
170 |             shutil.rmtree('references')
171 |         except Exception:
172 |             pass
173 | 
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | #
4 | # ci_watson documentation build configuration file, created by
5 | # sphinx-quickstart on Tue Aug  7 16:43:10 2018.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 | 
16 | import subprocess
17 | from datetime import datetime
18 | 
19 | from docutils import nodes
20 | from sphinx.util.docutils import SphinxDirective
21 | 
22 | import ci_watson
23 | 
24 | # -- General configuration ------------------------------------------------
25 | 
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #
28 | # needs_sphinx = '1.0'
29 | 
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = ['sphinx.ext.autodoc',
34 |               'sphinx_automodapi.automodapi',
35 |               'numpydoc',
36 |               'sphinx.ext.intersphinx',
37 |               'sphinx.ext.imgmath',
38 |               'sphinx.ext.viewcode',
39 |               "sphinx_design",
40 |               "sphinx_copybutton"]
41 | numpydoc_show_class_members = False
42 | 
43 | # Add any paths that contain templates here, relative to this directory.
44 | templates_path = ['_templates']
45 | 
46 | # The suffix(es) of source filenames.
47 | # You can specify multiple suffixes as a list of strings:
48 | source_suffix = '.rst'
49 | 
50 | # The master toctree document.
51 | master_doc = 'index'
52 | 
53 | # General information about the project.
54 | project = 'ci_watson'
55 | copyright = f'{datetime.today().year}, STScI'
56 | author = 'STScI'
57 | 
58 | # The version info for the project you're documenting, acts as a replacement
59 | # for |version| and |release|, also used in various other places throughout
60 | # the built documents.
61 | #
62 | # The full version, including alpha/beta/rc tags.
63 | release = ci_watson.__version__
64 | # The short X.Y version.
65 | version = '.'.join(release.split('.')[:2])
66 | dev = "dev" in release
67 | 
68 | # List of patterns, relative to source directory, that match files and
69 | # directories to ignore when looking for source files.
70 | # These patterns also affect html_static_path and html_extra_path.
71 | exclude_patterns = []
72 | 
73 | # The name of the Pygments (syntax highlighting) style to use.
74 | pygments_style = 'sphinx'
75 | 
76 | # If true, `todo` and `todoList` produce output, else they produce nothing.
77 | todo_include_todos = False
78 | 
79 | # -- Options for HTML output ----------------------------------------------
80 | 
81 | # The theme to use for HTML and HTML Help pages. See the documentation for
82 | # a list of builtin themes.
83 | #
84 | html_theme = 'pydata_sphinx_theme'
85 | 
86 | # Theme options are theme-specific and customize the look and feel of a theme
87 | # further. For a list of options available for each theme, see the
88 | # documentation.
89 | #
90 | html_theme_options = {
91 |     "collapse_navigation": True,
92 |     "icon_links": [],
93 |     "navigation_depth": 2,
94 |     "show_nav_level": 2,
95 |     "use_edit_page_button": False,
96 |     "github_url": "https://github.com/spacetelescope/ci_watson",
97 |     "external_links": [
98 |         {"name": "Help Desk", "url": "http://jwsthelp.stsci.edu/"},
99 |     ],
100 |     # https://github.com/pydata/pydata-sphinx-theme/issues/1492
101 |     "navigation_with_keys": False,
102 | }
103 | 
104 | html_context = {
105 |     "default_mode": "light",
106 |     "to_be_indexed": ["stable", "latest"],
107 |     "is_development": dev,
108 |     "github_user": "spacetelescope",
109 |     "github_repo": "ci_watson",
110 |     "github_version": "main",
111 |     "doc_path": "docs",
112 | }
113 | 
114 | # Hide primary sidebar
115 | html_sidebars = {
116 |     "**": []
117 | }
118 | 
119 | html_copy_source = False
120 | 
121 | # Add any paths that contain custom static files (such as style sheets) here,
122 | # relative to this directory. They are copied after the builtin static files,
123 | # so a file named "default.css" will overwrite the builtin "default.css".
124 | html_static_path = ['_static']
125 | html_css_files = ["ci_watson.css"]
126 | html_logo = "_static/stsci_logo.png"
127 | 
128 | # Example configuration for intersphinx: refer to the Python standard library.
129 | intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
130 | 
131 | # Report broken refs as errors.
132 | nitpicky = True
133 | 
134 | # -- Options for HTMLHelp output ------------------------------------------
135 | 
136 | # Output file base name for HTML help builder.
137 | htmlhelp_basename = 'ci_watsondoc'
138 | 
139 | 
140 | # -- Custom directive -------------------------------------------
141 | 
142 | class CIWatsonCLIHelpDirective(SphinxDirective):
143 | 
144 |     def run(self):
145 |         help_text = subprocess.check_output(
146 |             ["okify_regtests", "--help"], encoding="utf-8")
147 |         paragraph_node = nodes.literal_block(text=help_text)
148 |         return [paragraph_node]
149 | 
150 | 
151 | def setup(app):
152 |     app.add_directive('okifyregtestsclihelp', CIWatsonCLIHelpDirective)
153 | 
154 | 
155 | # -- Options for manual page output ---------------------------------------
156 | 
157 | # One entry per manual page. List of tuples
158 | # (source start file, name, description, authors, manual section).
159 | man_pages = [
160 |     (master_doc, 'ci_watson', 'ci_watson Documentation',
161 |      [author], 1)
162 | ]
163 | 
--------------------------------------------------------------------------------
/tests/test_artifactory_helpers.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests requiring an internet connection are treated as if they are big data
3 | tests. We could use the pytest-remotedata plugin, but requiring another plugin
4 | to test a plugin package is a little too meta.
5 | """
6 | import json
7 | import os
8 | 
9 | import pytest
10 | 
11 | from ci_watson.artifactory_helpers import (
12 |     HAS_ASTROPY, BigdataError, get_bigdata_root, get_bigdata,
13 |     check_url, compare_outputs, generate_upload_params, generate_upload_schema)
14 | 
15 | 
16 | @pytest.mark.bigdata
17 | @pytest.mark.parametrize(
18 |     ('val', 'ans'),
19 |     [('/local/path', False),
20 |      ('https://google.com', True),
21 |      ('https://github.com/spacetelescopehstcalblahblah', False)])
22 | def test_check_url(val, ans):
23 |     assert check_url(val) is ans
24 | 
25 | 
26 | class TestBigdataRoot:
27 |     def setup_class(self):
28 |         self.key = 'FOOFOO'
29 | 
30 |     def teardown_class(self):
31 |         if self.key in os.environ:
32 |             del os.environ[self.key]
33 | 
34 |     def test_no_env(self):
35 |         if self.key in os.environ:
36 |             del os.environ[self.key]
37 |         with pytest.raises(BigdataError):
38 |             get_bigdata_root(envkey=self.key)
39 | 
40 |     @pytest.mark.bigdata
41 |     def test_has_env_url(self):
42 |         path = 'https://google.com'
43 |         os.environ[self.key] = path
44 |         assert get_bigdata_root(envkey=self.key) == path
45 | 
46 |     def test_has_env_local(self):
47 |         path = os.path.abspath(os.curdir)
48 |         os.environ[self.key] = path
49 |         assert get_bigdata_root(envkey=self.key) == path
50 | 
51 |     def test_no_path(self):
52 |         os.environ[self.key] = '/some/fake/path'
53 |         assert get_bigdata_root(envkey=self.key) is None
54 | 
55 | 
56 | @pytest.mark.bigdata
57 | class TestGetBigdata:
58 |     def setup_class(self):
59 |         self.root = get_bigdata_root()
60 | 
61 |     def test_nocopy(self, _jail, pytestconfig):
62 |         args = (pytestconfig.getini('inputs_root')[0],
63 |                 'dev',
64 |                 'input',
65 |                 'j6lq01010_asn.fits')
66 |         dest = get_bigdata(*args, docopy=False)
67 |         assert dest == os.path.abspath(os.path.join(self.root, *args))
68 |         assert len(os.listdir()) == 0
69 | 
70 |     @pytest.mark.parametrize('docopy', [True, False])
71 |     def test_no_data(self, docopy):
72 |         with pytest.raises(BigdataError):
73 |             get_bigdata('fake', 'path', 'somefile.txt', docopy=docopy)
74 | 
75 |     def test_get_data(self, _jail, pytestconfig):
76 |         """
77 |         This tests download when TEST_BIGDATA points to Artifactory,
78 |         and copy when it points to a local path.
79 |         """
80 |         args = (pytestconfig.getini('inputs_root')[0],
81 |                 'dev',
82 |                 'input',
83 |                 'j6lq01010_asn.fits')
84 |         dest = get_bigdata(*args)
85 |         assert dest == os.path.abspath(os.path.join(os.curdir, args[-1]))
86 | 
87 | 
88 | @pytest.mark.bigdata
89 | @pytest.mark.usefixtures('_jail')
90 | @pytest.mark.skipif(not HAS_ASTROPY, reason='requires astropy to run')
91 | class TestCompareOutputs:
92 |     """
93 |     Test a few common comparison scenarios.
94 | 
95 |     FITSDiff and HDUDiff are tested in Astropy, so here we simply
96 |     test if they report differences or not, but we do not check
97 |     the content too closely.
98 | 
99 |     .. note:: Upload schema functions are tested separately elsewhere.
100 | 101 | """ 102 | def setup_class(self): 103 | self.inpath = ('ci-watson', 'dev', 'input') 104 | 105 | if os.environ.get('TEST_BIGDATA').startswith('http'): 106 | self.copy = True 107 | else: 108 | self.copy = False 109 | 110 | def test_raise_error_fits(self): 111 | """Test mismatched extensions from the same file.""" 112 | get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True) 113 | outputs = [('j6lq01010_asn.fits[PRIMARY]', 'j6lq01010_asn.fits[asn]')] 114 | with pytest.raises(AssertionError) as exc: 115 | compare_outputs(outputs, input_path=self.inpath, 116 | docopy=self.copy, verbose=False) 117 | assert 'Headers contain differences' in str(exc) 118 | 119 | def test_difference_ascii(self): 120 | """ 121 | Test ASCII with differences but suppress error to inspect 122 | returned report. 123 | """ 124 | get_bigdata(*self.inpath, 'j6lq01010_asn_mod.txt', docopy=True) 125 | report = compare_outputs( 126 | [('j6lq01010_asn_mod.txt', 'j6lq01010_asn.txt')], 127 | input_path=self.inpath, docopy=self.copy, verbose=False, 128 | raise_error=False) 129 | s = report.split(os.linesep) 130 | assert s[2:] == ['@@ -1,4 +1,4 @@', 131 | ' # MEMNAME MEMTYPE MEMPRSNT', 132 | '-J6LQ01NAQ EXP-CRJ 2', 133 | '+J6LQ01NAQ EXP-CRJ 1', 134 | ' J6LQ01NDQ EXP-CRJ 1', 135 | '-J6LQ01013 PROD-RPT 1', 136 | '+J6LQ01011 PROD-CRJ 1', 137 | ''] 138 | 139 | @pytest.mark.parametrize( 140 | 'filename', ['j6lq01010_asn.fits', 'j6lq01010_asn.txt']) 141 | def test_all_okay(self, filename): 142 | """Same file has no difference.""" 143 | get_bigdata(*self.inpath, filename, docopy=True) 144 | report = compare_outputs( 145 | [(filename, filename)], input_path=self.inpath, 146 | docopy=self.copy, verbose=False) 147 | assert 'No differences found' in report 148 | 149 | @pytest.mark.parametrize('docopy', [False, True]) 150 | def test_truth_missing(self, docopy): 151 | get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True) 152 | with pytest.raises(AssertionError) as exc: 153 | compare_outputs( 154 | [('j6lq01010_asn.fits', 'doesnotexist.fits')], 155 | input_path=self.inpath, docopy=docopy, verbose=False) 156 | assert 'Cannot find doesnotexist.fits' in str(exc) 157 | 158 | @pytest.mark.parametrize( 159 | 'outputs', 160 | [[('j6lq01010_asn.fits[ASN]', 'j6lq01010_asn_mod.fits', ['image'])], 161 | [('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits[ASN]', ['image'])]]) 162 | def test_ambiguous_extlist(self, outputs): 163 | """Too many ways to do the same thing.""" 164 | get_bigdata(*self.inpath, 'j6lq01010_asn.fits', docopy=True) 165 | with pytest.raises(AssertionError) as exc: 166 | compare_outputs(outputs, input_path=self.inpath, docopy=self.copy, 167 | verbose=False) 168 | assert 'Ambiguous extension requirements' in str(exc) 169 | 170 | def test_mixed_bunch(self): 171 | """ 172 | Test different forms of acceptable ``outputs``. 173 | 174 | .. note:: Some other crazy combos are theoretically possible given 175 | the logic but they are not officially supported, hence 176 | not tested here. Add new combo as its support is added. 
177 | 178 | """ 179 | for filename in ('j6lq01010_asn.fits', 'j6lq01010_asn.txt'): 180 | get_bigdata(*self.inpath, filename, docopy=True) 181 | 182 | outputs = [('j6lq01010_asn.fits', 'j6lq01010_asn.fits'), 183 | ('j6lq01010_asn.fits[asn]', 'j6lq01010_asn.fits[ASN]'), 184 | {'files': ('j6lq01010_asn.fits[image]', 185 | 'j6lq01010_asn_mod.fits[IMAGE]'), 186 | 'pars': {'rtol': 1e-7, 'atol': 0.05}}, 187 | {'files': ('j6lq01010_asn.fits', 188 | 'j6lq01010_asn_mod.fits', 189 | ['image']), 190 | 'pars': {'rtol': 1e-7, 'atol': 0.05}}, 191 | {'files': ('j6lq01010_asn.txt', 'j6lq01010_asn.txt')}, 192 | ('j6lq01010_asn.fits', 'j6lq01010_asn_mod.fits', 193 | ['primary', 'IMAGE']), 194 | ('j6lq01010_asn.txt', 'j6lq01010_asn.txt')] 195 | report = compare_outputs( 196 | outputs, input_path=self.inpath, docopy=self.copy, 197 | verbose=False, raise_error=False) 198 | 199 | # There are 7 comparisons, and only 1 should show a difference 200 | assert report.count("No differences found") == 6 201 | assert report.count("different pixels found") == 1 202 | 203 | 204 | class TestGenerateUploadParams: 205 | def setup_class(self): 206 | self.old_envs = {} 207 | for key in ('BUILD_TAG', 'BUILD_MATRIX_SUFFIX'): 208 | self.old_envs[key] = os.environ.get(key) 209 | 210 | # Set up something reproducible 211 | os.environ['BUILD_TAG'] = 'tag0' 212 | os.environ['BUILD_MATRIX_SUFFIX'] = 'foo' 213 | 214 | def teardown_class(self): 215 | for key, val in self.old_envs.items(): 216 | if val is None: 217 | del os.environ[key] 218 | else: 219 | os.environ[key] = val 220 | 221 | def test_gen(self, _jail): 222 | # Dummy file to move. 223 | datafile = 'actual.txt' 224 | with open(datafile, 'w') as f: 225 | f.write('\n') 226 | 227 | updated_outputs = [(datafile, '/path/to/desired.txt')] 228 | schema_pattern, tree, testname = generate_upload_params( 229 | 'groot', updated_outputs, verbose=False) 230 | 231 | assert schema_pattern == ['*.log', os.path.abspath('desired.txt')] 232 | assert isinstance(testname, str) # Actual value non-deterministic 233 | 234 | # TODO: Use regex? 235 | split_tree = tree.split(os.sep) 236 | assert split_tree[0] == 'groot' 237 | assert split_tree[1].endswith('_tag0_foo') 238 | assert split_tree[3] == '' 239 | 240 | # Make sure file is moved properly. 241 | dirlist = os.listdir() 242 | assert dirlist == ['desired.txt'] 243 | 244 | 245 | def test_generate_upload_schema_multi(_jail): 246 | generate_upload_schema( 247 | ['*.log', 'desired.txt'], 'reponame/repopath', 'foo') 248 | # TODO: Better way to compare JSON? 249 | with open('foo_results.json') as f: 250 | j = json.load(f) 251 | assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [ 252 | '{', 253 | ' "files": [', 254 | ' {', 255 | ' "excludePatterns": [],', 256 | ' "explode": "false",', 257 | ' "flat": "true",', 258 | ' "pattern": "*.log",', 259 | ' "props": null,', 260 | ' "recursive": "false",', 261 | ' "regexp": "false",', 262 | ' "target": "reponame/repopath"', 263 | ' },', 264 | ' {', 265 | ' "excludePatterns": [],', 266 | ' "explode": "false",', 267 | ' "flat": "true",', 268 | ' "pattern": "desired.txt",', 269 | ' "props": null,', 270 | ' "recursive": "false",', 271 | ' "regexp": "false",', 272 | ' "target": "reponame/repopath"', 273 | ' }', 274 | ' ]', 275 | '}'] 276 | 277 | 278 | def test_generate_upload_schema_one(_jail): 279 | generate_upload_schema( 280 | 'desired.txt', 'reponame/repopath', 'foo', recursive=True) 281 | # TODO: Better way to compare JSON? 
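    # One possibility (untested sketch): parse both sides and compare
    # dicts instead of serialized lines, e.g.:
    #     with open('foo_results.json') as f:
    #         assert json.load(f) == expected_schema  # hypothetical literal
    # which would sidestep os.linesep handling entirely.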
282 | with open('foo_results.json') as f: 283 | j = json.load(f) 284 | assert json.dumps(j, indent=4, sort_keys=True).split(os.linesep) == [ 285 | '{', 286 | ' "files": [', 287 | ' {', 288 | ' "excludePatterns": [],', 289 | ' "explode": "false",', 290 | ' "flat": "true",', 291 | ' "pattern": "desired.txt",', 292 | ' "props": null,', 293 | ' "recursive": "true",', 294 | ' "regexp": "false",', 295 | ' "target": "reponame/repopath"', 296 | ' }', 297 | ' ]', 298 | '}'] 299 | -------------------------------------------------------------------------------- /ci_watson/scripts/okify_regtests.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import shutil 4 | import subprocess 5 | import tempfile 6 | from argparse import ArgumentParser 7 | from contextlib import contextmanager, nullcontext 8 | from enum import Enum 9 | from pathlib import Path 10 | 11 | import asdf 12 | import readchar 13 | from colorama import Fore 14 | 15 | import ci_watson 16 | 17 | __all__ = [] 18 | 19 | JSON_SPEC_FILE_SUFFIX = "_okify.json" 20 | ASDF_BREADCRUMB_FILE_SUFFIX = "_rtdata.asdf" 21 | TERMINAL_WIDTH = shutil.get_terminal_size((80, 20)).columns 22 | 23 | 24 | class Observatory(Enum): 25 | jwst = "jwst" 26 | roman = "roman" 27 | 28 | def __str__(self): 29 | return self.value 30 | 31 | @property 32 | def runs_directory(self) -> str: 33 | """Directory on Artifactory where run results are stored.""" 34 | if self == Observatory.jwst: 35 | return "jwst-pipeline-results/" 36 | elif self == Observatory.roman: 37 | return "roman-pipeline-results/regression-tests/runs/" 38 | else: 39 | raise NotImplementedError(f"runs directory not defined for '{self}'") 40 | 41 | 42 | def artifactory_copy( 43 | json_spec_file: os.PathLike, 44 | dry_run: bool = False, 45 | ): 46 | """ 47 | Copy files with ``jf rt cp`` based on instructions in the specfile. 48 | 49 | Parameters 50 | ---------- 51 | json_spec_file : Path 52 | JSON file indicating file transfer patterns and targets 53 | (see https://docs.jfrog-applications.jfrog.io/jfrog-applications/jfrog-cli/cli-for-jfrog-artifactory/using-file-specs). 54 | dry_run : bool 55 | Do nothing (passes ``--dry-run`` to JFrog CLI). 56 | 57 | Raises 58 | ------ 59 | CalledProcessError 60 | If JFrog command fails. 61 | """ 62 | 63 | jfrog_args = [] 64 | 65 | if dry_run: 66 | jfrog_args.append("--dry-run") 67 | 68 | subprocess.run( 69 | ["jfrog", "rt", "cp", *jfrog_args, f"--spec={Path(json_spec_file).absolute()}"], 70 | check=True, 71 | ) 72 | 73 | 74 | def artifactory_folder_replace_copy( 75 | json_spec_file: os.PathLike, 76 | dry_run: bool = False, 77 | ): 78 | """ 79 | Copy files with ``jf rt cp`` based on instructions in the specfile, 80 | deleting the destination folder first. 81 | 82 | Parameters 83 | ---------- 84 | json_spec_file : Path 85 | JSON file indicating file transfer patterns and targets 86 | (see https://docs.jfrog-applications.jfrog.io/jfrog-applications/jfrog-cli/cli-for-jfrog-artifactory/using-file-specs). 87 | dry_run : bool 88 | Do nothing (passes ``--dry-run`` to JFrog CLI). 89 | 90 | Raises 91 | ------ 92 | CalledProcessError 93 | If JFrog command fails. 94 | """ 95 | 96 | jfrog_args = ["--quiet=true"] 97 | if dry_run: 98 | jfrog_args.append("--dry-run") 99 | 100 | # Since two different jfrog operations are required, need to read in 101 | # the spec to perform the delete. 
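    # An illustrative spec shape (field names taken from the reads below;
    # the values are placeholders):
    #     {"files": [{"pattern": "results/run-1/test_x",
    #                 "target": "some-repo/truth/"}]}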
102 | with open(json_spec_file) as file_handle: 103 | spec = json.load(file_handle) 104 | 105 | folder_pattern = spec["files"][0]["pattern"] + "/" 106 | folder_target = spec["files"][0]["target"] 107 | 108 | # Remove the target 109 | subprocess.run( 110 | [ 111 | "jfrog", 112 | "rt", 113 | "del", 114 | *jfrog_args, 115 | f"{folder_target}{Path(folder_pattern).stem}", 116 | ], 117 | check=True, 118 | ) 119 | 120 | artifactory_copy(json_spec_file, dry_run) 121 | 122 | 123 | def artifactory_dispatch( 124 | json_spec_file: os.PathLike, 125 | replace_whole_folders: bool = False, 126 | dry_run: bool = False, 127 | ): 128 | """ 129 | Perform the indicated artifactory operation. 130 | 131 | Parameters 132 | ---------- 133 | json_spec_file : Path 134 | JSON file indicating file transfer patterns and targets 135 | (see https://docs.jfrog-applications.jfrog.io/jfrog-applications/jfrog-cli/cli-for-jfrog-artifactory/using-file-specs). 136 | replace_whole_folders : bool 137 | Delete entire folders before copying. 138 | dry_run : bool 139 | Do nothing (passes ``--dry-run`` to JFrog CLI). 140 | 141 | Raises 142 | ------ 143 | CalledProcessError 144 | If JFrog command fails. 145 | """ 146 | 147 | if not replace_whole_folders: 148 | artifactory_copy(json_spec_file, dry_run=dry_run) 149 | else: 150 | artifactory_folder_replace_copy(json_spec_file, dry_run=dry_run) 151 | 152 | 153 | def artifactory_download_run_files( 154 | runs_directory: os.PathLike | str, 155 | run_number: int, 156 | suffix: str, 157 | ) -> list[Path]: 158 | """ 159 | Download files with the given suffix from the given run. 160 | 161 | Parameters 162 | ---------- 163 | runs_directory : Path or str 164 | Repository path where run directories are stored, i.e., 165 | ``jwst-pipeline-results/`` or 166 | ``roman-pipeline-results/regression-tests/runs/``. 167 | run_number : int 168 | GitHub Actions job number of regression test run. 169 | suffix : str 170 | Filename suffix to search for. 171 | 172 | Returns 173 | ------- 174 | path_list : list 175 | Sorted list of downloaded files on the local file system. 176 | 177 | Raises 178 | ------ 179 | CalledProcessError 180 | If JFrog command fails. 181 | 182 | Examples 183 | -------- 184 | Some example searches would be: 185 | 186 | .. code-block:: shell 187 | 188 | jfrog rt search jwst-pipeline-results/*_GITHUB_CI_*-586/*_okify.json 189 | jfrog rt search roman-pipeline-results/*/*_okify.json --props='build.number=540;build.name=RT :: romancal' 190 | """ 191 | 192 | subprocess.run( 193 | [ 194 | "jfrog", 195 | "rt", 196 | "dl", 197 | str(Path(runs_directory) / f"*_GITHUB_CI_*-{run_number}" / f"*{suffix}"), 198 | ], 199 | check=True, 200 | capture_output=True, 201 | ) 202 | 203 | return sorted(Path().rglob(f'*{suffix}')) 204 | 205 | 206 | def artifactory_download_regtest_artifacts( 207 | observatory: Observatory, 208 | run_number: int, 209 | ) -> tuple[list[Path], list[Path]]: 210 | """ 211 | Download both JSON spec files and ASDF breadcrumb files from 212 | Artifactory associated with a regression test run 213 | (via a job number), and return a list of their downloaded 214 | locations on the local file system. 215 | 216 | Parameters 217 | ---------- 218 | observatory : `Observatory` 219 | Observatory to use. 220 | run_number : int 221 | GitHub Actions job number of regression test run. 222 | 223 | Returns 224 | ------- 225 | specfiles, asdffiles : list 226 | Two lists of downloaded files on the local file system; 227 | JSON specfiles, and ASDF breadcrumb files. 
228 | 229 | Raises 230 | ------ 231 | CalledProcessError 232 | If JFrog command fails. 233 | """ 234 | 235 | specfiles = artifactory_download_run_files( 236 | observatory.runs_directory, run_number, JSON_SPEC_FILE_SUFFIX 237 | ) 238 | asdffiles = artifactory_download_run_files( 239 | observatory.runs_directory, run_number, ASDF_BREADCRUMB_FILE_SUFFIX 240 | ) 241 | 242 | if len(specfiles) != len(asdffiles): 243 | raise RuntimeError("Different number of `_okify.json` and `_rtdata.asdf` files") 244 | 245 | for a, b in zip(specfiles, asdffiles): 246 | if str(a).replace(JSON_SPEC_FILE_SUFFIX, "") != str(b).replace( 247 | ASDF_BREADCRUMB_FILE_SUFFIX, "" 248 | ): 249 | raise RuntimeError( 250 | "The `_okify.json` and `_rtdata.asdf` files are not matched" 251 | ) 252 | 253 | return specfiles, asdffiles 254 | 255 | 256 | @contextmanager 257 | def pushd(newdir: os.PathLike | str): 258 | """Transient context that emulates ``pushd`` with ``chdir``.""" 259 | 260 | prevdir = os.getcwd() 261 | os.chdir(os.path.expanduser(newdir)) 262 | try: 263 | yield 264 | finally: 265 | os.chdir(prevdir) 266 | 267 | 268 | def main(): 269 | parser = ArgumentParser( 270 | description='"Okifies" a set of failing regression test results, by overwriting ' 271 | "truth files on Artifactory so that a set of failing regression test results becomes correct. " 272 | "Requires JFrog CLI (https://jfrog.com/getcli/) configured with credentials (jf login) " 273 | "and write access to the desired truth file repository (jwst-pipeline, roman-pipeline, etc.)." 274 | ) 275 | parser.add_argument( 276 | "observatory", 277 | type=Observatory, 278 | choices=list(Observatory), 279 | help="Observatory to overwrite truth files for on Artifactory.", 280 | ) 281 | parser.add_argument( 282 | "run_number", 283 | help=("GitHub Actions job number of regression test run (see " 284 | "https://github.com/spacetelescope/RegressionTests/actions)."), 285 | metavar="run-number", 286 | ) 287 | parser.add_argument( 288 | "--version", 289 | action="version", 290 | version=f"ci-watson {ci_watson.__version__}", 291 | help="Print package version and exit.", 292 | ) 293 | parser.add_argument( 294 | "--dry-run", 295 | action="store_true", 296 | help="Do nothing (passes the --dry-run flag to JFrog CLI).", 297 | ) 298 | parser.add_argument( 299 | "--output-dir", 300 | type=str, 301 | default="", 302 | help=("Store downloaded artifacts in the given path. " 303 | "Defaults to a temporary directory."), 304 | ) 305 | 306 | args = parser.parse_args() 307 | run = args.run_number 308 | observatory = args.observatory 309 | 310 | if args.output_dir == "": 311 | ctx = tempfile.TemporaryDirectory() 312 | else: 313 | ctx = nullcontext() 314 | 315 | # Create and chdir to a temporary directory to store specfiles 316 | with ctx as tmp_path: 317 | if tmp_path is None: 318 | tmp_path = args.output_dir 319 | if not os.path.exists(tmp_path): 320 | os.makedirs(tmp_path) 321 | 322 | print(f"Downloading test logs to {tmp_path}") 323 | 324 | with pushd(tmp_path): 325 | # Retrieve all the okify specfiles for failed tests. 
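            # Each failed test yields a matched pair of artifacts: a JSON
            # spec file (what to copy on Artifactory) and an ASDF breadcrumb
            # (traceback and path metadata shown in the prompt below).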
326 | json_spec_files, asdf_breadcrumb_files = ( 327 | artifactory_download_regtest_artifacts(observatory, run) 328 | ) 329 | 330 | number_failed_tests = len(json_spec_files) 331 | 332 | print(f"{number_failed_tests} failed tests to okify") 333 | 334 | for index, (json_spec_file, asdf_breadcrumb_file) in enumerate( 335 | zip(json_spec_files, asdf_breadcrumb_files) 336 | ): 337 | # Print traceback and OKify info for this test failure 338 | with asdf.open(asdf_breadcrumb_file) as asdf_breadcrumb: 339 | # okify_op only useful for JWST 340 | okify_op = ( 341 | asdf_breadcrumb.tree["okify_op"] 342 | if observatory == Observatory.jwst 343 | else "file_copy" 344 | ) 345 | traceback = asdf_breadcrumb.tree["traceback"] 346 | remote_results_path = Path( 347 | asdf_breadcrumb.tree["remote_results_path"] 348 | ) 349 | output = Path(asdf_breadcrumb.tree["output"]) 350 | truth_remote = asdf_breadcrumb.tree["truth_remote"] 351 | try: 352 | test_name = asdf_breadcrumb.tree["test_name"] 353 | except KeyError: 354 | test_name = "test_name" 355 | 356 | if okify_op == "sdp_pool_copy": 357 | ok_dst = os.path.dirname(truth_remote) 358 | replace_whole_folders = True 359 | else: 360 | ok_dst = truth_remote 361 | replace_whole_folders = False 362 | 363 | print( 364 | f"{Fore.RED}" 365 | + f" {test_name} ".center(TERMINAL_WIDTH, "—") 366 | + f"{Fore.RESET}" 367 | ) 368 | print(f"{traceback}\n" 369 | f"{Fore.RED}{'—' * TERMINAL_WIDTH}{Fore.RESET}\n" 370 | f"{Fore.GREEN}OK: {remote_results_path / output.name}\n" 371 | f"--> {ok_dst}{Fore.RESET}") 372 | print( 373 | f"{Fore.RED}" 374 | + f"[ test {index + 1} of {number_failed_tests} ]".center(TERMINAL_WIDTH, "—") 375 | + f"{Fore.RESET}" 376 | ) 377 | 378 | # Ask if user wants to okify this test 379 | commands = { 380 | "o": ("okify", Fore.GREEN), 381 | "s": ("skip", Fore.CYAN), 382 | "q": ("quit", Fore.MAGENTA), 383 | } 384 | while True: 385 | print( 386 | ", ".join( 387 | f"{color}'{command}' to {verb}{Fore.RESET}" 388 | for command, (verb, color) in commands.items() 389 | ) 390 | + ": " 391 | ) 392 | # Get the keyboard character input without pressing return 393 | result = readchar.readkey() 394 | if result not in commands: 395 | print(f"Unrecognized command '{result}', try again") 396 | else: 397 | break 398 | if result == "q": 399 | break 400 | elif result == "s": 401 | pass 402 | else: 403 | artifactory_dispatch( 404 | json_spec_file, 405 | replace_whole_folders=replace_whole_folders, 406 | dry_run=args.dry_run, 407 | ) 408 | print("") 409 | 410 | 411 | if __name__ == "__main__": 412 | main() 413 | -------------------------------------------------------------------------------- /ci_watson/artifactory_helpers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helpers for Artifactory or local big data handling. 
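
A typical local setup (illustrative; the path is a placeholder)::

    import os
    os.environ['TEST_BIGDATA'] = '/path/to/local/test_data'

after which :func:`get_bigdata` resolves file requests relative to
that root.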
3 | """ 4 | import copy 5 | from datetime import datetime 6 | import json 7 | import os 8 | import re 9 | import shutil 10 | import sys 11 | import time 12 | from difflib import unified_diff 13 | from io import StringIO 14 | 15 | try: 16 | from astropy.io import fits 17 | from astropy.io.fits import FITSDiff, HDUDiff 18 | HAS_ASTROPY = True 19 | except ImportError: 20 | HAS_ASTROPY = False 21 | 22 | __all__ = ['BigdataError', 'check_url', 'get_bigdata_root', 'get_bigdata', 23 | 'compare_outputs', 'generate_upload_params', 24 | 'generate_upload_schema'] 25 | 26 | RE_URL = re.compile(r"\w+://\S+") 27 | 28 | UPLOAD_SCHEMA = {"files": [ 29 | {"pattern": "", 30 | "target": "", 31 | "props": None, 32 | "recursive": "false", 33 | "flat": "true", 34 | "regexp": "false", 35 | "explode": "false", 36 | "excludePatterns": []}]} 37 | 38 | TODAYS_DATE = datetime.now().strftime("%Y-%m-%d") 39 | TIMEOUT = int(os.environ.get("TEST_BIGDATA_TIMEOUT", 30)) 40 | CHUNK_SIZE = int(os.environ.get("TEST_BIGDATA_CHUNK_SIZE", 16384)) 41 | RETRY_MAX = int(os.environ.get("TEST_BIGDATA_RETRY_MAX", 3)) 42 | RETRY_DELAY = int(os.environ.get("TEST_BIGDATA_RETRY_DELAY", 5)) 43 | 44 | # Negative value disables timeout (i.e. hang forever) 45 | if TIMEOUT < 0: 46 | TIMEOUT = None 47 | # Timeout length cannot be zero 48 | elif not TIMEOUT: 49 | TIMEOUT = 1 50 | 51 | # Prevent chunks from being smaller than the usual physical block size 52 | if CHUNK_SIZE < 512: 53 | CHUNK_SIZE = 512 54 | 55 | # Prevent infinite retry loops 56 | if RETRY_MAX < 0: 57 | RETRY_MAX = 0 58 | 59 | # Prevent infinite retry wait 60 | if RETRY_DELAY < 0: 61 | RETRY_DELAY = 0 62 | 63 | 64 | class BigdataError(Exception): 65 | """Exception related to big data access.""" 66 | pass 67 | 68 | 69 | def retry(retries=RETRY_MAX, delay=RETRY_DELAY, trap=(Exception,)): 70 | """Execute a function again on error 71 | 72 | Parameters 73 | ---------- 74 | retries: int 75 | Maximum number of attempts 76 | 77 | delay: int, float, None 78 | Maximum time to wait per attempt (seconds) 79 | 80 | trap: tuple of type Exception 81 | Type of exceptions to trap. Untrapped exceptions raise normally. 82 | Default: `Exception` (all exceptions) 83 | """ 84 | def decorator(fn): 85 | def wrapper(*args, **kwargs): 86 | retry = 0 87 | while retry < retries: 88 | try: 89 | return fn(*args, **kwargs) 90 | except trap as e: 91 | print("{}: {}: will try again in {} second(s) " 92 | "[attempt: {} of {}]".format( 93 | fn, e, delay, retry + 1, retries), file=sys.stderr) 94 | retry += 1 95 | time.sleep(delay) 96 | return fn(*args, **kwargs) 97 | return wrapper 98 | return decorator 99 | 100 | 101 | @retry() 102 | def check_url(url, timeout=TIMEOUT): 103 | """Determine if URL can be resolved without error.""" 104 | if RE_URL.match(url) is None: 105 | return False 106 | 107 | # Optional import: requests is not needed for local big data setup. 108 | import requests 109 | 110 | # requests.head does not work with Artifactory landing page. 111 | r = requests.get(url, allow_redirects=True, timeout=timeout) 112 | # TODO: Can we simply return r.ok here? 113 | if r.status_code >= 400: 114 | return False 115 | return True 116 | 117 | 118 | @retry() 119 | def _download(url, dest, timeout=TIMEOUT, chunk_size=CHUNK_SIZE): 120 | """Simple HTTP/HTTPS downloader.""" 121 | # Optional import: requests is not needed for local big data setup. 
122 |     import requests
123 | 
124 |     dest = os.path.abspath(dest)
125 | 
126 |     with requests.get(url, stream=True, timeout=timeout) as r:
127 |         with open(dest, 'w+b') as data:
128 |             for chunk in r.iter_content(chunk_size=chunk_size):
129 |                 data.write(chunk)
130 | 
131 |     return dest
132 | 
133 | 
134 | def get_bigdata_root(envkey='TEST_BIGDATA'):
135 |     """
136 |     Find and return the path to the root of the big data store.
137 | 
138 |     Parameters
139 |     ----------
140 |     envkey : str
141 |         Environment variable name. It must contain a string
142 |         defining the root Artifactory URL or path to local
143 |         big data storage.
144 | 
145 |     """
146 |     if envkey not in os.environ:
147 |         raise BigdataError(
148 |             'Environment variable {} is undefined'.format(envkey))
149 | 
150 |     path = os.environ[envkey]
151 | 
152 |     if os.path.exists(path) or check_url(path):
153 |         return path
154 | 
155 |     return None
156 | 
157 | 
158 | def get_bigdata(*args, docopy=True, timeout=TIMEOUT, chunk_size=CHUNK_SIZE):
159 |     """
160 |     Acquire the requested data from a managed resource, copying it
161 |     into the current directory by default.
162 | 
163 |     Parameters
164 |     ----------
165 |     args : tuple of str
166 |         Location of file relative to ``TEST_BIGDATA``.
167 | 
168 |     docopy : bool
169 |         Switch to control whether or not to copy a file
170 |         into the test output directory when running the test.
171 |         If you wish to open the file directly from its remote
172 |         location, or just to see the path to the source, set this
173 |         to `False`. Default: `True`
174 | 
175 |     Returns
176 |     -------
177 |     dest : str
178 |         Absolute path to local copy of data
179 |         (i.e., ``/path/to/example.fits``).
180 | 
181 |     Examples
182 |     --------
183 |     >>> import os
184 |     >>> print(os.getcwd())
185 |     /path/to
186 |     >>> from ci_watson.artifactory_helpers import get_bigdata
187 |     >>> filename = get_bigdata('abc', '123', 'example.fits')
188 |     >>> print(filename)
189 |     /path/to/example.fits
190 |     >>> get_bigdata('abc', '123', 'example.fits', docopy=False)
191 |     /remote/root/abc/123/example.fits
192 | 
193 |     """
194 |     src = os.path.join(get_bigdata_root(), *args)
195 |     src_exists = os.path.exists(src)
196 |     src_is_url = check_url(src)
197 | 
198 |     # No-op
199 |     if not docopy:
200 |         if src_exists or src_is_url:
201 |             return os.path.abspath(src)
202 |         else:
203 |             raise BigdataError('Failed to find data: {}'.format(src))
204 | 
205 |     filename = os.path.basename(src)
206 |     dest = os.path.abspath(os.path.join(os.curdir, filename))
207 | 
208 |     if src_exists:
209 |         # Found src file on locally accessible directory
210 |         if src == dest:  # pragma: no cover
211 |             raise BigdataError('Source and destination paths are identical: '
212 |                                '{}'.format(src))
213 |         shutil.copy2(src, dest)
214 | 
215 |     elif src_is_url:
216 |         _download(src, dest, timeout, chunk_size)
217 | 
218 |     else:
219 |         raise BigdataError('Failed to retrieve data: {}'.format(src))
220 | 
221 |     return dest
222 | 
223 | 
224 | def compare_outputs(outputs, raise_error=True, ignore_keywords=[],
225 |                     ignore_hdus=[], ignore_fields=[], rtol=0.0, atol=0.0,
226 |                     input_path=[], docopy=True, results_root=None,
227 |                     verbose=True):
228 |     """
229 |     Compare output with "truth" using the appropriate
230 |     diff routine; namely:
231 | 
232 |     * ``fitsdiff`` for FITS file comparisons.
233 |     * ``unified_diff`` for ASCII products.
234 | 
235 |     Only after all elements of ``outputs`` have been
236 |     processed will the function report success or failure;
237 |     failure of any one comparison does *not* prevent the
238 |     remaining comparisons from being performed.
239 | 
240 |     Parameters
241 |     ----------
242 |     outputs : list of tuple or dict
243 |         This list defines what outputs from running the test will be
244 |         compared. Three distinct types of values as list elements
245 |         are supported:
246 | 
247 |         * 2-tuple : ``(test output filename, truth filename)``
248 |         * 3-tuple : ``(test output filename, truth filename, HDU names)``
249 |         * dict : ``{'files': (output, truth), 'pars': {key: val}}``
250 | 
251 |         If a filename contains an extension such as ``[hdrtab]``,
252 |         it is interpreted as a comparison of just that HDU.
253 | 
254 |     raise_error : bool
255 |         Raise ``AssertionError`` if a difference is found.
256 | 
257 |     ignore_keywords : list of str
258 |         List of FITS header keywords to be ignored by
259 |         ``FITSDiff`` and ``HDUDiff``.
260 | 
261 |     ignore_hdus : list of str
262 |         List of FITS HDU names to be ignored by ``FITSDiff``.
263 |         This is only available for ``astropy>=3.1``.
264 | 
265 |     ignore_fields : list of str
266 |         List of FITS table column names to be ignored by
267 |         ``FITSDiff`` and ``HDUDiff``.
268 | 
269 |     rtol, atol : float
270 |         Relative and absolute tolerance to be used by
271 |         ``FITSDiff`` and ``HDUDiff``.
272 | 
273 |     input_path : list or tuple
274 |         A series of sub-directory names under :func:`get_bigdata_root`
275 |         that leads to the path of the 'truth' files to be compared
276 |         against. If not provided, 'truth' is assumed to be in the
277 |         working directory. For example, with :func:`get_bigdata_root`
278 |         pointing to ``/grp/test_data``, a file at::
279 | 
280 |             /grp/test_data/pipeline/dev/ins/test_1/test_a.py
281 | 
282 |         would require ``input_path`` of::
283 | 
284 |             ["pipeline", "dev", "ins", "test_1"]
285 | 
286 |     docopy : bool
287 |         If `True`, 'truth' will be copied to the output directory
288 |         before the comparison is done.
289 | 
290 |     results_root : str or `None`
291 |         If not `None`, for every failed comparison, the test output
292 |         is automatically renamed to the given 'truth' in the output
293 |         directory and :func:`generate_upload_schema` will be called
294 |         to generate a JSON schema for Artifactory upload.
295 |         If you do not need this functionality, use ``results_root=None``.
296 | 
297 |     verbose : bool
298 |         Print extra info to screen.
299 | 
300 |     Returns
301 |     -------
302 |     creature_report : str
303 |         Report from the FITS or ASCII comparator.
304 |         This is part of the error message if ``raise_error=True``.
305 | 
306 |     Examples
307 |     --------
308 |     There are multiple use cases for this function, depending on
309 |     how ``outputs`` is defined when calling it.
310 |     The specification of the ``outputs`` can be any combination of the
311 |     following patterns:
312 | 
313 |     1. 2-tuple inputs::
314 | 
315 |            outputs = [('file1.fits', 'file1_truth.fits')]
316 | 
317 |        This definition indicates that ``file1.fits`` should be compared
318 |        as a whole with ``file1_truth.fits``.
319 | 
320 |     2. 2-tuple inputs with extensions::
321 | 
322 |            outputs = [('file1.fits[hdrtab]', 'file1_truth.fits[hdrtab]')]
323 | 
324 |        This definition indicates that only the HDRTAB extension from
325 |        ``file1.fits`` will be compared to the HDRTAB extension from
326 |        ``file1_truth.fits``.
327 | 
328 |     3. 3-tuple inputs::
329 | 
330 |            outputs = [('file1.fits', 'file1_truth.fits', ['primary', 'sci'])]
331 | 
332 |        This definition indicates that only the PRIMARY and SCI extensions
333 |        should be compared between the two files. This creates a temporary
334 |        ``HDUList`` object comprising only the given extensions for comparison.
335 | 
336 |     4. Dictionary of inputs and parameters::
337 | 
338 |            outputs = [{'files': ('file1.fits', 'file1_truth.fits'),
339 |                        'pars': {'ignore_keywords': ['ROOTNAME']}}]
340 | 
341 |        This definition indicates that ROOTNAME will be ignored during
342 |        the comparison between the files specified in ``'files'``.
343 |        Any input parameter for ``FITSDiff`` or ``HDUDiff`` can be specified
344 |        as part of the ``'pars'`` dictionary.
345 |        In addition, the input files listed in ``'files'`` can also include
346 |        an extension specification, such as ``[hdrtab]``, to limit the
347 |        comparison to just that extension.
348 | 
349 |     This example from an actual test definition demonstrates
350 |     how multiple input definitions can be used at the same time::
351 | 
352 |         outputs = [
353 |             ('jw99999_nircam_f140m-maskbar_psfstack.fits',
354 |              'jw99999_nircam_f140m-maskbar_psfstack_ref.fits'
355 |             ),
356 |             ('jw9999947001_02102_00002_nrcb3_a3001_crfints.fits',
357 |              'jw9999947001_02102_00002_nrcb3_a3001_crfints_ref.fits'
358 |             ),
359 |             {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
360 |                        'jw99999_nircam_f140m-maskbar_i2d_ref.fits'),
361 |              'pars': {'ignore_hdus': ['HDRTAB']}},
362 |             {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits',
363 |                        'jw99999_nircam_f140m-maskbar_i2d_ref.fits',
364 |                        ['primary', 'sci', 'dq']),
365 |              'pars': {'rtol': 0.000001}
366 |             },
367 |             {'files': ('jw99999_nircam_f140m-maskbar_i2d.fits[hdrtab]',
368 |                        'jw99999_nircam_f140m-maskbar_i2d_ref.fits[hdrtab]'),
369 |              'pars': {'ignore_keywords': ['NAXIS1', 'TFORM*'],
370 |                       'ignore_fields': ['COL1', 'COL2']}
371 |             }]
372 | 
373 |     .. note:: Each ``outputs`` entry in the list gets interpreted and processed
374 |               separately.
375 | 
376 |     """
377 |     default_kwargs = {'rtol': rtol, 'atol': atol,
378 |                       'ignore_keywords': ignore_keywords,
379 |                       'ignore_fields': ignore_fields,
380 |                       'ignore_hdus': ignore_hdus}
381 | 
382 |     all_okay = True
383 |     creature_report = ''
384 |     updated_outputs = []  # To track outputs for Artifactory JSON schema
385 | 
386 |     for entry in outputs:
387 |         diff_kwargs = copy.deepcopy(default_kwargs)
388 |         extn_list = None
389 |         num_entries = len(entry)
390 | 
391 |         if isinstance(entry, dict):
392 |             entry_files = entry['files']
393 |             actual = entry_files[0]
394 |             desired = entry_files[1]
395 |             if len(entry_files) > 2:
396 |                 extn_list = entry_files[2]
397 |             diff_kwargs.update(entry.get('pars', {}))
398 |         elif num_entries == 2:
399 |             actual, desired = entry
400 |         elif num_entries == 3:
401 |             actual, desired, extn_list = entry
402 |         else:
403 |             all_okay = False
404 |             creature_report += '\nERROR: Cannot handle entry {}\n'.format(
405 |                 entry)
406 |             continue
407 | 
408 |         # TODO: Use regex?
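        # (A possible single-pass alternative, untested:
        #      m = re.match(r'(?P<name>.+)\[(?P<extn>[^\[\]]+)\]$', actual)
        #  would capture the filename and extension spec together.)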
409 | if actual.endswith(']'): 410 | if extn_list is not None: 411 | all_okay = False 412 | creature_report += ( 413 | '\nERROR: Ambiguous extension requirements ' 414 | 'for {} ({})\n'.format(actual, extn_list)) 415 | continue 416 | actual_name, actual_extn = actual.split('[') 417 | actual_extn = actual_extn.replace(']', '') 418 | else: 419 | actual_name = actual 420 | actual_extn = None 421 | 422 | if desired.endswith(']'): 423 | if extn_list is not None: 424 | all_okay = False 425 | creature_report += ( 426 | '\nERROR: Ambiguous extension requirements ' 427 | 'for {} ({})\n'.format(desired, extn_list)) 428 | continue 429 | desired_name, desired_extn = desired.split('[') 430 | desired_extn = desired_extn.replace(']', '') 431 | else: 432 | desired_name = desired 433 | desired_extn = None 434 | 435 | # Get "truth" image 436 | try: 437 | desired = get_bigdata(*input_path, desired_name, docopy=docopy) 438 | except BigdataError: 439 | all_okay = False 440 | creature_report += '\nERROR: Cannot find {} in {}\n'.format( 441 | desired_name, input_path) 442 | continue 443 | 444 | if desired_extn is not None: 445 | desired_name = desired 446 | desired = "{}[{}]".format(desired, desired_extn) 447 | 448 | if verbose: 449 | print("\nComparing:\n {} \nto\n {}".format(actual, desired)) 450 | 451 | if actual.endswith('.fits') and desired.endswith('.fits'): 452 | # Build HDULists for comparison based on user-specified extensions 453 | if extn_list is not None: 454 | with fits.open(actual) as f_act: 455 | with fits.open(desired) as f_des: 456 | actual_hdu = fits.HDUList( 457 | [f_act[extn] for extn in extn_list]) 458 | desired_hdu = fits.HDUList( 459 | [f_des[extn] for extn in extn_list]) 460 | fdiff = FITSDiff(actual_hdu, desired_hdu, 461 | **diff_kwargs) 462 | creature_report += '\na: {}\nb: {}\n'.format( 463 | actual, desired) # diff report only gives hash 464 | # Working with FITS files... 465 | else: 466 | fdiff = FITSDiff(actual, desired, **diff_kwargs) 467 | 468 | creature_report += fdiff.report() 469 | 470 | if not fdiff.identical: 471 | all_okay = False 472 | # Only keep track of failed results which need to 473 | # be used to replace the truth files (if OK). 474 | updated_outputs.append((actual, desired)) 475 | 476 | elif actual_extn is not None or desired_extn is not None: 477 | if 'ignore_hdus' in diff_kwargs: # pragma: no cover 478 | diff_kwargs.pop('ignore_hdus') # Not applicable 479 | 480 | # Specific element of FITS file specified 481 | with fits.open(actual_name) as f_act: 482 | with fits.open(desired_name) as f_des: 483 | actual_hdu = f_act[actual_extn] 484 | desired_hdu = f_des[desired_extn] 485 | fdiff = HDUDiff(actual_hdu, desired_hdu, **diff_kwargs) 486 | 487 | creature_report += '\na: {}\nb: {}\n'.format(actual, desired) 488 | creature_report += fdiff.report() 489 | 490 | if not fdiff.identical: 491 | all_okay = False 492 | # Only keep track of failed results which need to 493 | # be used to replace the truth files (if OK). 
494 |                 updated_outputs.append((actual_name, desired_name))
495 | 
496 |         else:
497 |             # ASCII-based diff
498 |             with open(actual) as afile:
499 |                 actual_lines = afile.readlines()
500 |             with open(desired) as dfile:
501 |                 desired_lines = dfile.readlines()
502 | 
503 |             udiff = unified_diff(actual_lines, desired_lines,
504 |                                  fromfile=actual, tofile=desired)
505 |             udiffIO = StringIO()
506 |             udiffIO.writelines(udiff)
507 |             udiff_report = udiffIO.getvalue()
508 |             udiffIO.close()
509 | 
510 |             if len(udiff_report) == 0:
511 |                 creature_report += ('\na: {}\nb: {}\nNo differences '
512 |                                     'found.\n'.format(actual, desired))
513 |             else:
514 |                 all_okay = False
515 |                 creature_report += udiff_report
516 |                 # Only keep track of failed results which need to
517 |                 # be used to replace the truth files (if OK).
518 |                 updated_outputs.append((actual, desired))
519 | 
520 |     if not all_okay and results_root is not None:  # pragma: no cover
521 |         schema_pattern, tree, testname = generate_upload_params(
522 |             results_root, updated_outputs, verbose=verbose)
523 |         generate_upload_schema(schema_pattern, tree, testname)
524 | 
525 |     if not all_okay and raise_error:
526 |         raise AssertionError(os.linesep + creature_report)
527 | 
528 |     return creature_report
529 | 
530 | 
531 | def generate_upload_params(results_root, updated_outputs, verbose=True):
532 |     """
533 |     Generate pattern, target, and test name for :func:`generate_upload_schema`.
534 | 
535 |     This uses ``BUILD_TAG`` and ``BUILD_MATRIX_SUFFIX`` on CI to create
536 |     a meaningful Artifactory target path. They are optional for local runs.
537 |     Other attributes like user, time stamp, and test name are also
538 |     automatically determined.
539 | 
540 |     In addition to renamed outputs, ``*.log`` is also inserted into the
541 |     ``schema_pattern``.
542 | 
543 |     Parameters
544 |     ----------
545 |     results_root : str
546 |         See :func:`compare_outputs` for more info.
547 | 
548 |     updated_outputs : list
549 |         List containing tuples of ``(actual, desired)`` of failed
550 |         test output comparisons to be processed.
551 | 
552 |     verbose : bool
553 |         Print extra info to screen.
554 | 
555 |     Returns
556 |     -------
557 |     schema_pattern, tree, testname
558 |         Analogous to ``pattern``, ``target``, and ``testname`` that are
559 |         passed into :func:`generate_upload_schema`, respectively.
560 | 
561 |     """
562 |     import getpass
563 | 
564 |     # Create instructions for uploading results to Artifactory for use
565 |     # as new comparison/truth files
566 |     testname = os.path.split(os.path.abspath(os.curdir))[1]
567 | 
568 |     # Meaningful test dir from build info.
569 |     # TODO: Organize results by day test was run. Could replace with git-hash
570 |     whoami = getpass.getuser() or 'nobody'
571 |     user_tag = 'NOT_CI_{}'.format(whoami)
572 |     build_tag = os.environ.get('BUILD_TAG', user_tag)
573 |     build_matrix_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', '0')
574 |     subdir = '{}_{}_{}'.format(TODAYS_DATE, build_tag, build_matrix_suffix)
575 |     tree = os.path.join(results_root, subdir, testname) + os.sep
576 |     schema_pattern = []
577 |     # Upload all log files
578 |     schema_pattern.append('*.log')
579 | 
580 |     # Write out JSON file to enable retention of different results.
581 |     # Also rename outputs as new truths.
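    # For example (hypothetical values), BUILD_TAG=tag0 and
    # BUILD_MATRIX_SUFFIX=foo under results_root='groot' make the
    # ``tree`` computed above look like 'groot/<date>_tag0_foo/<testname>/'.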
582 |     for test_result, truth in updated_outputs:
583 |         new_truth = os.path.basename(truth)
584 |         shutil.move(test_result, new_truth)
585 |         schema_pattern.append(os.path.abspath(new_truth))
586 |         if verbose:
587 |             print("Renamed {} as new 'truth' file: {}".format(
588 |                 os.path.abspath(test_result), os.path.abspath(new_truth)))
589 | 
590 |     return schema_pattern, tree, testname
591 | 
592 | 
593 | def generate_upload_schema(pattern, target, testname, recursive=False):
594 |     """
595 |     Write out a JSON file to upload results from a test to the
596 |     Artifactory storage area.
597 | 
598 |     This function relies on the JFROG JSON schema for uploading data into
599 |     Artifactory. Docs can be found at
600 |     https://www.jfrog.com/confluence/display/RTF/Using+File+Specs
601 | 
602 |     Parameters
603 |     ----------
604 |     pattern : str or list of str
605 |         Specifies the local file system path to test results which should be
606 |         uploaded to Artifactory. You can specify multiple artifacts by using
607 |         wildcards or a regular expression as designated by the regexp property.
608 | 
609 |     target : str
610 |         Specifies the target path in Artifactory in the following format::
611 | 
612 |             [repository_name]/[repository_path]
613 | 
614 |     testname : str
615 |         Name of the test that generated the results. This is used to create
616 |         the name of the JSON file that enables these results to be uploaded
617 |         to Artifactory.
618 | 
619 |     recursive : bool, optional
620 |         Specify whether or not to identify files listed in sub-directories
621 |         for uploading. Default: `False`
622 | 
623 |     """
624 |     jsonfile = "{}_results.json".format(testname)
625 |     recursive = repr(recursive).lower()
626 | 
627 |     if not isinstance(pattern, str):
628 |         # Populate schema for this test's data
629 |         upload_schema = {"files": []}
630 | 
631 |         for p in pattern:
632 |             temp_schema = copy.deepcopy(UPLOAD_SCHEMA["files"][0])
633 |             temp_schema.update({"pattern": p, "target": target,
634 |                                 "recursive": recursive})
635 |             upload_schema["files"].append(temp_schema)
636 | 
637 |     else:
638 |         # Populate schema for this test's data
639 |         upload_schema = copy.deepcopy(UPLOAD_SCHEMA)
640 |         upload_schema["files"][0].update({"pattern": pattern, "target": target,
641 |                                           "recursive": recursive})
642 | 
643 |     # Write out JSON file with description of test results
644 |     with open(jsonfile, 'w') as outfile:
645 |         json.dump(upload_schema, outfile, indent=2)
646 | 
--------------------------------------------------------------------------------