├── .github ├── dependabot.yml └── workflows │ ├── publish.yml │ └── test.yml ├── .gitmodules ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── light-curve-python ├── .gitignore ├── Cargo.toml ├── MANIFEST.in ├── README.md ├── pyproject.toml ├── setup.cfg └── setup.py └── light-curve ├── .gitignore ├── .readme ├── benchplot.png ├── benchplot_v2.png ├── multi_bench_v2.png └── nobs_bench_v2.png ├── Cargo.lock ├── Cargo.toml ├── README.md ├── light_curve ├── __init__.py ├── light_curve_ext.py └── light_curve_py │ ├── __init__.py │ ├── dataclass_field.py │ ├── features │ ├── __init__.py │ ├── _base.py │ ├── _base_meta.py │ ├── _lstsq.py │ ├── adnormal.py │ ├── amplitude.py │ ├── beyondnstd.py │ ├── bins.py │ ├── color_of_median.py │ ├── cusum.py │ ├── eta.py │ ├── etae.py │ ├── excvar.py │ ├── extractor.py │ ├── flux_n_not_det_before_fd.py │ ├── intpercrange.py │ ├── kurtosis.py │ ├── linfit.py │ ├── lintrend.py │ ├── magnitude_n_not_det_before_fd.py │ ├── magnpratio.py │ ├── maxslope.py │ ├── mean.py │ ├── meanvar.py │ ├── medabsdev.py │ ├── medbufrperc.py │ ├── median.py │ ├── otsusplit.py │ ├── pdiffmperc.py │ ├── percampl.py │ ├── ptp_var.py │ ├── rainbow.py │ ├── rainbow │ │ ├── __init__.py │ │ ├── _bands.py │ │ ├── _base.py │ │ ├── _parameters.py │ │ ├── _scaler.py │ │ ├── bolometric.py │ │ ├── generic.py │ │ └── temperature.py │ ├── redchi2.py │ ├── roms.py │ ├── skew.py │ ├── stdev.py │ ├── stetsonk.py │ └── weightmean.py │ ├── minuit_lsq.py │ ├── minuit_ml.py │ └── warnings.py ├── pyproject.toml ├── src ├── check.rs ├── cont_array.rs ├── dmdt.rs ├── errors.rs ├── features.rs ├── lib.rs ├── ln_prior.rs ├── np_array.rs └── transform.rs └── tests ├── __init__.py ├── conftest.py ├── light_curve_ext ├── __init__.py ├── test_dmdt.py ├── test_feature.py ├── test_ln_prior.py └── test_periodogram.py ├── light_curve_py ├── __init__.py ├── features │ ├── __init__.py │ ├── test_adnormal.py │ ├── test_amplitude.py │ ├── test_beyondnstd.py │ ├── test_bins.py │ ├── test_color_of_median.py │ ├── test_cusum.py │ ├── test_eta.py │ ├── test_etae.py │ ├── test_excvar.py │ ├── test_extractor.py │ ├── test_intpercrange.py │ ├── test_kurtosis.py │ ├── test_linfit.py │ ├── test_lintrend.py │ ├── test_magnpratio.py │ ├── test_maxslope.py │ ├── test_mean.py │ ├── test_meanvar.py │ ├── test_medabsdev.py │ ├── test_medbufrperc.py │ ├── test_median.py │ ├── test_n_not_det_before_fd.py │ ├── test_otsusplit.py │ ├── test_pdiffmperc.py │ ├── test_percampl.py │ ├── test_ptp_var.py │ ├── test_rainbow.py │ ├── test_redchi2.py │ ├── test_roms.py │ ├── test_skew.py │ ├── test_stdev.py │ ├── test_stetsonk.py │ └── test_weightmean.py ├── test_call.py └── test_single_band.py └── test_w_bench.py /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: "monthly" 12 | - package-ecosystem: "cargo" 13 | directory: "/light-curve" 14 | schedule: 15 | interval: "monthly" 16 | - package-ecosystem: "pip" 17 | directory: "/light-curve" 18 | schedule: 19 | interval: "monthly" 20 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python packages 2 | 3 | on: 4 | push: 5 | tags: 6 | push: 7 | pull_request: 8 | repository_dispatch: 9 | workflow_dispatch: 10 | 11 | jobs: 12 | check-version: 13 | name: Check the tag corresponds to the crate version 14 | runs-on: ubuntu-24.04 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Check Cargo.toml version vs Git tag 19 | if: startsWith(github.ref, 'refs/tags/v') 20 | working-directory: ./light-curve 21 | run: | 22 | cargo read-manifest | jq -r '.version' > /tmp/.crate_version 23 | echo '${{ github.ref_name }}' | sed 's/^v//' > /tmp/.tag 24 | diff /tmp/.crate_version /tmp/.tag 25 | 26 | cibuildwheel: 27 | name: Build ${{ matrix.cibw_build }} 28 | runs-on: ${{ matrix.os }} 29 | 30 | needs: check-version 31 | 32 | defaults: 33 | run: 34 | working-directory: ./light-curve 35 | 36 | strategy: 37 | fail-fast: false 38 | matrix: 39 | # CIBW_BUILD identifiers from https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip 40 | # We use the same order as in the table 41 | # * For Windows we support amd64 only 42 | # * For Linux we support ARM64 and x86_64 43 | # * For macOS we support x86_64 (macos-13 runner) and ARM64 (macos-14 runner) 44 | # * We build wheels for CPython only, one per platform, compatible with ABI3.9 45 | include: 46 | # CPython 3.9 47 | - os: macos-13 48 | cibw_build: cp39-macosx_x86_64 49 | - os: macos-14 50 | cibw_build: cp39-macosx_arm64 51 | - os: windows-2019 52 | cibw_build: cp39-win_amd64 53 | - os: ubuntu-24.04 54 | cibw_build: cp39-manylinux_x86_64 55 | - os: ubuntu-24.04 56 | cibw_build: cp39-musllinux_x86_64 57 | - os: ubuntu-24.04-arm 58 | cibw_build: cp39-manylinux_aarch64 59 | - os: ubuntu-24.04-arm 60 | cibw_build: cp39-musllinux_aarch64 61 | 62 | steps: 63 | - uses: actions/checkout@v4 64 | with: 65 | submodules: true 66 | 67 | # ARM macOS runner misses some peaces 68 | - name: Set up Homebrew paths on ARM macOS 69 | if: ${{ matrix.os == 'macos-14' }} 70 | run: | 71 | echo "CPATH=$(brew --prefix)/include:$(brew --prefix)/include/eigen3:${CPATH}" >> $GITHUB_ENV 72 | echo "LIBRARY_PATH=$(brew --prefix)/lib:$(brew --prefix)/lib64:${LIBRARY_PATH}" >> $GITHUB_ENV 73 | 74 | - name: Set MACOSX_DEPLOYMENT_TARGET to the current macOS version 75 | if: ${{ runner.os == 'macOS' }} 76 | run: | 77 | export MACOSX_DEPLOYMENT_TARGET=$(sw_vers -productVersion | awk -F '.' '{print $1"."0}') 78 | echo "MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}" >> $GITHUB_ENV 79 | 80 | # We only support AMD64 architecture for Windows, so we hard-code it here. 81 | - name: Set CIBW envs on Windows 82 | if: ${{ runner.os == 'Windows' }} 83 | run: | 84 | "CIBW_BUILD=${{ matrix.cibw_build }}" >> $env:GITHUB_ENV 85 | "CIBW_ARCHS=AMD64" >> $env:GITHUB_ENV 86 | 87 | - name: Set CIBW envs on Linux or macOS 88 | if: ${{ runner.os != 'Windows' }} 89 | run: | 90 | echo "CIBW_BUILD=${{ matrix.cibw_build }}" >> $GITHUB_ENV 91 | CIBW_ARCHS=$(echo ${{ matrix.cibw_build }} | cut -d'_' -f2,3) 92 | echo "CIBW_ARCHS=${CIBW_ARCHS}" >> $GITHUB_ENV 93 | 94 | - name: Build wheels 95 | uses: pypa/cibuildwheel@v2.23.3 96 | with: 97 | package-dir: ./light-curve 98 | env: 99 | CIBW_BUILD_VERBOSITY: "3" 100 | 101 | - name: Upload wheels as artifacts 102 | uses: actions/upload-artifact@v4 103 | with: 104 | path: ./wheelhouse/*.whl 105 | if-no-files-found: error 106 | name: artifact_${{ matrix.cibw_build }} 107 | 108 | sdist: 109 | name: Build source distribution 110 | runs-on: ubuntu-24.04 111 | 112 | defaults: 113 | run: 114 | working-directory: ./light-curve 115 | 116 | steps: 117 | - uses: actions/checkout@v4 118 | 119 | - name: Setup Python 120 | uses: actions/setup-python@v5 121 | with: 122 | python-version: '3.x' 123 | 124 | - name: Install maturin 125 | run: pip install 'maturin>=1.0,<2.0' 126 | 127 | - name: Build sdist 128 | run: maturin sdist 129 | 130 | - name: Upload sdist as an artifact 131 | uses: actions/upload-artifact@v4 132 | with: 133 | path: ./light-curve/target/wheels/*.tar.gz 134 | if-no-files-found: error 135 | name: artifact_sdist 136 | 137 | publish: 138 | needs: [ cibuildwheel, sdist ] 139 | 140 | name: Publish light-curve 141 | runs-on: ubuntu-latest 142 | 143 | steps: 144 | - uses: actions/download-artifact@v4 145 | with: 146 | pattern: artifact_* 147 | merge-multiple: true 148 | path: artifact 149 | 150 | - name: Setup Python 151 | uses: actions/setup-python@v5 152 | with: 153 | python-version: '3.x' 154 | 155 | - name: Install twine 156 | run: pip install twine 157 | 158 | - name: Publish light-curve for a new version tag 159 | if: startsWith(github.ref, 'refs/tags/v') 160 | working-directory: artifact 161 | run: twine upload *whl *tar.gz -u __token__ -p ${{ secrets.PYPI_TOKEN_LIGHT_CURVE }} --verbose 162 | 163 | publish-light-curve-python: 164 | needs: publish 165 | 166 | name: Publish light-curve-python 167 | runs-on: ubuntu-24.04 168 | 169 | defaults: 170 | run: 171 | working-directory: light-curve-python 172 | 173 | steps: 174 | - uses: actions/checkout@v4 175 | 176 | - name: Set up Python 177 | uses: actions/setup-python@v5 178 | with: 179 | python-version: '3.x' 180 | 181 | - name: Install deps 182 | run: python3 -mpip install setuptools toml twine 183 | 184 | - name: Publish light-curve-python for a new version tag 185 | if: startsWith(github.ref, 'refs/tags/v') 186 | run: | 187 | python3 setup.py sdist 188 | twine check --strict dist/* 189 | twine upload dist/* -u __token__ -p ${{ secrets.PYPI_TOKEN_LIGHT_CURVE_PYTHON }} --verbose 190 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: [ push, pull_request, workflow_dispatch ] 4 | 5 | jobs: 6 | py_build_deps: 7 | outputs: 8 | output: ${{ steps.extract_from_toml.outputs.output }} 9 | 10 | runs-on: ubuntu-latest 11 | 12 | defaults: 13 | run: 14 | working-directory: light-curve 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | submodules: true 20 | - name: Set up Python 3.13 21 | uses: actions/setup-python@v5 22 | with: 23 | python-version: "3.13" 24 | - name: Extract build deps from pyproject.toml 25 | id: extract_from_toml 26 | run: | 27 | echo "output="$(python -c 'import tomllib; print(" ".join(tomllib.load(open("pyproject.toml", "rb"))["build-system"]["requires"]))') >> "$GITHUB_OUTPUT" 28 | 29 | 30 | test: 31 | runs-on: ${{ matrix.os }} 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | python_minor: [ '9', '10', '11', '12', '13', '13t' ] 37 | os: [ ubuntu-latest ] 38 | # Just a single ARM worker to be sure that it works 39 | include: 40 | - python_minor: '13' 41 | os: ubuntu-24.04-arm 42 | 43 | defaults: 44 | run: 45 | working-directory: light-curve 46 | 47 | steps: 48 | - uses: actions/checkout@v4 49 | with: 50 | submodules: true 51 | - name: Set up Rust toolchain 52 | uses: dtolnay/rust-toolchain@master 53 | with: 54 | toolchain: stable 55 | - name: Set up Python 3.${{ matrix.python_minor }} 56 | uses: actions/setup-python@v5 57 | with: 58 | python-version: "3.${{ matrix.python_minor }}" 59 | - uses: Swatinem/rust-cache@v2 60 | with: 61 | shared-key: "${{ runner.os }}_stable-rust_tox-py3${{ matrix.python_minor }}" 62 | workspaces: "light-curve" 63 | - name: Install tox 64 | run: pip install tox 65 | - name: Install system dependencies 66 | run: | 67 | sudo apt-get update 68 | sudo apt-get install -y libgsl-dev 69 | - name: Run Python tests 70 | run: tox -e py3${{ matrix.python_minor }}-base,py3${{ matrix.python_minor }}-test 71 | 72 | cargo-fmt: 73 | runs-on: ubuntu-latest 74 | 75 | steps: 76 | - uses: actions/checkout@v4 77 | - name: Set up Rust toolchain 78 | uses: dtolnay/rust-toolchain@master 79 | with: 80 | toolchain: stable 81 | - run: cargo fmt --manifest-path=light-curve/Cargo.toml -- --check 82 | 83 | cargo-clippy: 84 | runs-on: ubuntu-latest 85 | 86 | steps: 87 | - uses: actions/checkout@v4 88 | - name: Set up Rust toolchain 89 | uses: dtolnay/rust-toolchain@master 90 | with: 91 | toolchain: stable 92 | - uses: Swatinem/rust-cache@v2 93 | with: 94 | shared-key: "${{ runner.os }}_stable-rust_cargo-clippy" 95 | workspaces: "light-curve" 96 | - name: Install system dependencies 97 | run: | 98 | sudo apt-get update 99 | sudo apt-get install -y libgsl-dev 100 | - run: cargo clippy --manifest-path=light-curve/Cargo.toml --all-targets -- -D warnings 101 | 102 | coverage: 103 | runs-on: ubuntu-latest 104 | 105 | needs: [ py_build_deps ] 106 | 107 | defaults: 108 | run: 109 | working-directory: light-curve 110 | 111 | steps: 112 | - uses: actions/checkout@v4 113 | with: 114 | submodules: true 115 | - uses: dtolnay/rust-toolchain@master 116 | with: 117 | toolchain: stable 118 | - name: Install cargo-llvm-cov 119 | uses: taiki-e/install-action@cargo-llvm-cov 120 | - uses: Swatinem/rust-cache@v2 121 | with: 122 | shared-key: "${{ runner.os }}_stable-rust_maturin-develop" 123 | workspaces: "light-curve" 124 | - name: Set up Python 125 | uses: actions/setup-python@v5 126 | with: 127 | python-version: "3.12" 128 | - name: Install build deps 129 | run: pip install "${{ needs.py_build_deps.outputs.output }}" 130 | - name: Install system dependencies 131 | run: | 132 | sudo apt-get update 133 | sudo apt-get install -y libgsl-dev 134 | - name: Generate code coverage 135 | run: | 136 | source <(cargo llvm-cov show-env --export-prefix) 137 | python -m venv venv 138 | source venv/bin/activate 139 | maturin develop --extras=test 140 | python -m pytest 141 | cargo llvm-cov report --lcov --output-path lcov.info 142 | - name: Upload coverage to Codecov 143 | uses: codecov/codecov-action@v5 144 | with: 145 | files: lcov.info 146 | fail_ci_if_error: true 147 | env: 148 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 149 | 150 | benchmarks: 151 | # latest (24.04) is not supported as of 2024-10-09 152 | runs-on: ubuntu-22.04 153 | 154 | needs: [ py_build_deps ] 155 | 156 | steps: 157 | - uses: actions/checkout@v4 158 | - uses: dtolnay/rust-toolchain@master 159 | with: 160 | toolchain: stable 161 | - name: Set up Python 162 | uses: actions/setup-python@v5 163 | with: 164 | python-version: "3.13" 165 | - uses: Swatinem/rust-cache@v2 166 | with: 167 | shared-key: "${{ runner.os }}_stable-rust_maturin-develop-release" 168 | workspaces: "light-curve" 169 | - name: Run benchmarks 170 | uses: CodSpeedHQ/action@v3 171 | with: 172 | token: ${{ secrets.CODSPEED_TOKEN }} 173 | run: | 174 | cd light-curve 175 | python3 -m venv venv 176 | . venv/bin/activate 177 | pip install "${{ needs.py_build_deps.outputs.output }}" pytest-codspeed 178 | maturin develop --extras=test --release 179 | python3 -mpytest -m "not (nobs or multi)" --codspeed tests/test_w_bench.py 180 | 181 | msrv-build: 182 | runs-on: ubuntu-latest 183 | 184 | needs: [ py_build_deps ] 185 | 186 | defaults: 187 | run: 188 | working-directory: light-curve 189 | 190 | steps: 191 | - uses: actions/checkout@v4 192 | - name: Set up Python 3.9 193 | uses: actions/setup-python@v5 194 | with: 195 | python-version: '3.9' 196 | - name: Set up Python 3.13t 197 | uses: actions/setup-python@v5 198 | with: 199 | python-version: '3.13t' 200 | - name: Get minimum supported Rust version 201 | run: echo "::set-output name=msrv::$(grep '^rust-version = ' Cargo.toml | grep -o '[0-9.]\+')" 202 | id: get_msrv 203 | - uses: dtolnay/rust-toolchain@master 204 | with: 205 | toolchain: ${{ steps.get_msrv.outputs.msrv }} 206 | - uses: Swatinem/rust-cache@v2 207 | with: 208 | shared-key: "${{ runner.os }}_msrv-rust_maturin-build" 209 | workspaces: "light-curve" 210 | - name: Install build_deps 211 | run: pip install "${{ needs.py_build_deps.outputs.output }}" 212 | - name: Install system dependencies 213 | run: | 214 | sudo apt-get update 215 | sudo apt-get install -y libgsl-dev 216 | - name: Build 217 | run: | 218 | rustup default ${{ steps.get_msrv.outputs.msrv }} 219 | maturin build -i python3.9 220 | maturin build -i python3.13t 221 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "test-data"] 2 | path = light-curve/tests/light-curve-test-data 3 | url = https://github.com/light-curve/test-data.git 4 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # See https://pre-commit.com for more information 2 | # See https://pre-commit.com/hooks.html for more hooks 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v5.0.0 6 | hooks: 7 | - id: trailing-whitespace # trims trailing whitespace 8 | - id: end-of-file-fixer # ensures that a file is either empty, or ends with one newline 9 | - id: check-yaml 10 | - id: check-toml 11 | - id: check-added-large-files 12 | - id: mixed-line-ending # replaces or checks mixed line ending 13 | - id: check-symlinks # checks for symlinks which do not point to anything 14 | - id: check-case-conflict # checks for files that would conflict in case-insensitive filesystems 15 | # Git 16 | - id: check-merge-conflict # checks for files that contain merge conflict strings 17 | # Python 18 | - id: check-docstring-first # checks a common error of defining a docstring after code 19 | - id: debug-statements # checks for debugger imports and py37+ `breakpoint()` calls in python source 20 | # Ruff should go before black, because its output is not formatted 21 | - repo: https://github.com/astral-sh/ruff-pre-commit 22 | rev: 'v0.11.13' 23 | hooks: 24 | - id: ruff 25 | args: [--fix, --exit-non-zero-on-fix] 26 | - repo: https://github.com/psf/black 27 | rev: 25.1.0 28 | hooks: 29 | - id: black 30 | name: black 31 | args: ['--config=./light-curve/pyproject.toml', './light-curve'] 32 | - repo: local 33 | hooks: 34 | - id: cargo-fmt 35 | name: cargo fmt 36 | language: rust 37 | entry: cargo fmt --manifest-path=light-curve/Cargo.toml 38 | files: \.rs 39 | pass_filenames: false 40 | - id: cargo-clippy-fix 41 | name: cargo clippy fix 42 | language: rust 43 | entry: cargo clippy --manifest-path=light-curve/Cargo.toml --all-targets --fix --allow-dirty --allow-staged 44 | files: \.rs 45 | pass_filenames: false 46 | - id: cargo-clippy-check 47 | name: cargo clippy check 48 | language: rust 49 | entry: cargo clippy --manifest-path=light-curve/Cargo.toml --all-targets -- -D warnings 50 | files: \.rs 51 | pass_filenames: false 52 | 53 | # pre-commit.ci settings 54 | # Skip clippy, because it tries to download dependencies which doesn't work at CI 55 | ci: 56 | skip: [cargo-clippy-fix, cargo-clippy-check] 57 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | light-curve/README.md -------------------------------------------------------------------------------- /light-curve-python/.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | dist/ 3 | *.egg-info/ 4 | -------------------------------------------------------------------------------- /light-curve-python/Cargo.toml: -------------------------------------------------------------------------------- 1 | ../light-curve/Cargo.toml -------------------------------------------------------------------------------- /light-curve-python/MANIFEST.in: -------------------------------------------------------------------------------- 1 | include Cargo.toml 2 | -------------------------------------------------------------------------------- /light-curve-python/README.md: -------------------------------------------------------------------------------- 1 | # `light-curve-python` 2 | 3 | `light-curve-python` had been a previous name of the [`light-curve`](https://github.com/light-curve/light-curve-python) package. 4 | Now this is an empty package which depends on `light-curve` only. 5 | -------------------------------------------------------------------------------- /light-curve-python/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "toml", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /light-curve-python/setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = light-curve-python 3 | author = Konstantin Malanchev 4 | author_email = hombit@gmail.com 5 | description = An alias to light-curve package 6 | long_description = file: README.md 7 | long_description_content_type = text/markdown 8 | url = http://github.com/hombit/light-curve 9 | license = MIT 10 | keywords = science, astrophysics 11 | classifiers = 12 | Intended Audience :: Science/Research 13 | License :: OSI Approved :: MIT License 14 | Topic :: Scientific/Engineering :: Astronomy 15 | Programming Language :: Python :: 3 16 | # See full list on https://pypi.org/classifiers/ 17 | -------------------------------------------------------------------------------- /light-curve-python/setup.py: -------------------------------------------------------------------------------- 1 | import toml 2 | from setuptools import setup 3 | 4 | 5 | def get_light_curve_version(): 6 | with open("Cargo.toml") as fh: 7 | cargo_toml = toml.load(fh) 8 | package = cargo_toml["package"] 9 | version = package["version"] 10 | return version 11 | 12 | 13 | def main(): 14 | version = get_light_curve_version() 15 | requirement = f"light-curve[full]=={version}" 16 | 17 | setup( 18 | version=version, 19 | install_requires=[requirement], 20 | ) 21 | 22 | 23 | if __name__ == "__main__": 24 | main() 25 | -------------------------------------------------------------------------------- /light-curve/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.so 3 | build/ 4 | dist/ 5 | *.egg-info/ 6 | .benchmarks/ 7 | .pytest_cache/ 8 | .tox/ 9 | *.pyc 10 | target/ 11 | -------------------------------------------------------------------------------- /light-curve/.readme/benchplot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/.readme/benchplot.png -------------------------------------------------------------------------------- /light-curve/.readme/benchplot_v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/.readme/benchplot_v2.png -------------------------------------------------------------------------------- /light-curve/.readme/multi_bench_v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/.readme/multi_bench_v2.png -------------------------------------------------------------------------------- /light-curve/.readme/nobs_bench_v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/.readme/nobs_bench_v2.png -------------------------------------------------------------------------------- /light-curve/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "light-curve-python" 3 | version = "0.10.3" 4 | authors = [ 5 | "Konstantin Malanchev ", 6 | "Anastasia Lavrukhina ", 7 | "Sergey Karpov", 8 | "Etienne Russeil", 9 | ] 10 | description = "Feature extractor from noisy time series" 11 | readme = "README.md" 12 | repository = "https://github.com/light-curve/light-curve-python" 13 | license = "GPL-3.0-or-later" 14 | edition = "2024" 15 | rust-version = "1.85" 16 | 17 | [lib] 18 | name = "light_curve" 19 | crate-type = ["cdylib"] 20 | 21 | [profile.release] 22 | lto = true 23 | codegen-units = 1 24 | 25 | [features] 26 | default = ["abi3", "ceres-source", "fftw-source", "gsl", "mimalloc"] 27 | abi3 = ["pyo3/abi3-py39"] 28 | ceres-source = ["light-curve-feature/ceres-source"] 29 | ceres-system = ["light-curve-feature/ceres-system"] 30 | fftw-source = ["light-curve-feature/fftw-source"] 31 | fftw-system = ["light-curve-feature/fftw-system"] 32 | fftw-mkl = ["light-curve-feature/fftw-mkl"] 33 | gsl = ["light-curve-feature/gsl"] 34 | mimalloc = ["dep:mimalloc"] 35 | 36 | [dependencies] 37 | const_format = "0.2.34" 38 | conv = "0.3.3" 39 | enum-iterator = "2.1.0" 40 | enumflags2 = { version = "0.7.11", features = ["serde"] } 41 | itertools = "0.14.0" 42 | macro_const = "0.1.0" 43 | mimalloc = { version = "0.1.42", features = [ 44 | "local_dynamic_tls", 45 | ], optional = true } 46 | ndarray = { version = "0.16.1", features = ["rayon"] } 47 | numpy = "0.25.0" 48 | num_cpus = "1.17.0" 49 | num-traits = "0.2" 50 | once_cell = "1" 51 | pyo3 = { version = "0.25.0", features = [ 52 | "extension-module", 53 | "multiple-pymethods", 54 | ] } 55 | rand = "0.9.0" 56 | rand_xoshiro = "0.7.0" 57 | rayon = "1.10.0" 58 | serde = { version = "1", features = ["derive"] } 59 | serde-pickle = "1" 60 | serde_json = "1" 61 | thiserror = "2" 62 | unarray = "0.1.4" 63 | unzip3 = "1.0.0" 64 | 65 | [dependencies.light-curve-dmdt] 66 | version = "0.8.0" 67 | features = ["serde"] 68 | 69 | [dependencies.light-curve-feature] 70 | version = "0.10.0" 71 | default-features = false 72 | -------------------------------------------------------------------------------- /light-curve/light_curve/__init__.py: -------------------------------------------------------------------------------- 1 | # Import all Python features 2 | from .light_curve_py import * 3 | 4 | # Hide Python features with Rust equivalents 5 | from .light_curve_ext import * 6 | 7 | # Hide Rust Extractor with universal Python Extractor 8 | from .light_curve_py import Extractor 9 | 10 | from .light_curve_ext import __version__ 11 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_ext.py: -------------------------------------------------------------------------------- 1 | from .light_curve import * 2 | 3 | 4 | # A hack to bypass the lack of package support in PyO3 5 | # https://github.com/PyO3/pyo3/issues/1517#issuecomment-808664021 6 | def __register_submodules(): 7 | import sys 8 | 9 | sys.modules["light_curve.light_curve_ext.ln_prior"] = ln_prior 10 | 11 | 12 | __register_submodules() 13 | 14 | 15 | def feature_from_json(s: str) -> JSONDeserializedFeature: 16 | """Deserialize a JSON string into a Feature object.""" 17 | return JSONDeserializedFeature(s) 18 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/__init__.py: -------------------------------------------------------------------------------- 1 | from .features.adnormal import * 2 | from .features.amplitude import * 3 | from .features.beyondnstd import * 4 | from .features.bins import * 5 | from .features.color_of_median import * 6 | from .features.cusum import * 7 | from .features.eta import * 8 | from .features.etae import * 9 | from .features.excvar import * 10 | from .features.extractor import * 11 | from .features.flux_n_not_det_before_fd import * 12 | from .features.intpercrange import * 13 | from .features.kurtosis import * 14 | from .features.linfit import * 15 | from .features.lintrend import * 16 | from .features.magnitude_n_not_det_before_fd import * 17 | from .features.magnpratio import * 18 | from .features.maxslope import * 19 | from .features.mean import * 20 | from .features.meanvar import * 21 | from .features.medabsdev import * 22 | from .features.medbufrperc import * 23 | from .features.median import * 24 | from .features.otsusplit import * 25 | from .features.pdiffmperc import * 26 | from .features.percampl import * 27 | from .features.ptp_var import * 28 | from .features.rainbow import * 29 | from .features.redchi2 import * 30 | from .features.roms import * 31 | from .features.skew import * 32 | from .features.stdev import * 33 | from .features.stetsonk import * 34 | from .features.weightmean import * 35 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/dataclass_field.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if sys.version_info >= (3, 10): 4 | from dataclasses import field as dataclass_field 5 | else: 6 | from dataclasses import field as _field 7 | 8 | def dataclass_field(*, kw_only, **kwargs): 9 | return _field(**kwargs) 10 | 11 | 12 | __all__ = ["dataclass_field"] 13 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/light_curve/light_curve_py/features/__init__.py -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/_base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from dataclasses import dataclass 3 | from typing import Optional, Sequence 4 | 5 | import numpy as np 6 | 7 | from light_curve.light_curve_py.dataclass_field import dataclass_field 8 | from light_curve.light_curve_py.warnings import mark_experimental 9 | 10 | 11 | @dataclass 12 | class BaseMultiBandFeature(ABC): 13 | @property 14 | @abstractmethod 15 | def size(self) -> int: 16 | pass 17 | 18 | @property 19 | @abstractmethod 20 | def is_band_required(self) -> bool: 21 | pass 22 | 23 | @property 24 | @abstractmethod 25 | def is_multiband_supported(self) -> bool: 26 | pass 27 | 28 | def _eval(self, *, t, m, sigma, band): 29 | """It could be missed if _eval_and_fill is re-implemented""" 30 | raise NotImplementedError("_eval is missed") 31 | 32 | @abstractmethod 33 | def _eval_and_fill(self, *, t, m, sigma, band, fill_value): 34 | """It has a default implementation, but it requires _eval to be implemented""" 35 | try: 36 | a = self._eval(t=t, m=m, sigma=sigma, band=band) 37 | if np.any(~np.isfinite(a)): 38 | raise ValueError 39 | return a 40 | except (ValueError, ZeroDivisionError, RuntimeError) as e: 41 | if fill_value is not None: 42 | return np.full(self.size, fill_value) 43 | raise e 44 | 45 | @mark_experimental 46 | def __post_init__(self) -> None: 47 | pass 48 | 49 | def _normalize_input(self, *, t, m, sigma, band, sorted, check): 50 | t = np.asarray(t) 51 | m = np.asarray(m) 52 | 53 | if sigma is not None: 54 | sigma = np.asarray(sigma) 55 | 56 | if band is None and self.is_band_required: 57 | raise ValueError("band is required") 58 | if band is not None: 59 | if not self.is_multiband_supported: 60 | raise ValueError( 61 | "(band != None) is not supported by this feature instance, consider to pass a band array " 62 | "or recreate the feature instance with different parameters" 63 | ) 64 | band = np.asarray(band) 65 | if band.ndim != 1: 66 | raise ValueError("band must be None or 1D array-like") 67 | 68 | if check: 69 | if np.any(~np.isfinite(t)): 70 | raise ValueError("t values must be finite") 71 | if np.any(~np.isfinite(m)): 72 | raise ValueError("m values must be finite") 73 | if sigma is not None and np.any(np.isnan(sigma)): 74 | raise ValueError("sigma must have no NaNs") 75 | 76 | if sorted is None: 77 | diff = np.diff(t) 78 | if np.any(diff == 0): 79 | raise ValueError("t must be unique") 80 | if np.any(diff < 0): 81 | raise ValueError("t must be sorted") 82 | elif not sorted: 83 | idx = np.argsort(t) 84 | t = t[idx] 85 | m = m[idx] 86 | if sigma is not None: 87 | sigma = sigma[idx] 88 | if band is not None: 89 | band = band[idx] 90 | 91 | return t, m, sigma, band 92 | 93 | def __call__(self, t, m, sigma=None, band=None, *, sorted=None, check=True, fill_value=None): 94 | t, m, sigma, band = self._normalize_input(t=t, m=m, sigma=sigma, band=band, sorted=sorted, check=check) 95 | return self._eval_and_fill(t=t, m=m, sigma=sigma, band=band, fill_value=fill_value) 96 | 97 | def many(self, lcs, *, sorted=None, check=True, fill_value=None, n_jobs=-1): 98 | """Extract features in bulk 99 | 100 | This exists for computability only and doesn't support parallel 101 | execution, that's why `n_jobs=1` must be used 102 | """ 103 | if n_jobs != 1: 104 | raise NotImplementedError("Parallel execution is not supported by this feature, use n_jobs=1") 105 | return np.stack([self(*lc, sorted=sorted, check=check, fill_value=fill_value) for lc in lcs]) 106 | 107 | 108 | @dataclass 109 | class BaseSingleBandFeature(BaseMultiBandFeature): 110 | bands: Optional[Sequence[str]] = dataclass_field(default=None, kw_only=True) 111 | 112 | @property 113 | @abstractmethod 114 | def size_single_band(self) -> int: 115 | pass 116 | 117 | @abstractmethod 118 | def _eval_single_band(self, *, t, m, sigma): 119 | pass 120 | 121 | @property 122 | def n_bands(self) -> int: 123 | if self.bands is None: 124 | return 1 125 | return len(self.bands) 126 | 127 | @property 128 | def size(self) -> int: 129 | return self.n_bands * self.size_single_band 130 | 131 | @property 132 | def is_band_required(self) -> bool: 133 | return self.bands is not None 134 | 135 | @property 136 | def is_multiband_supported(self) -> bool: 137 | return self.bands is not None 138 | 139 | def _eval_and_fill_single_band(self, *, t, m, sigma, fill_value): 140 | try: 141 | a = self._eval_single_band(t=t, m=m, sigma=sigma) 142 | if np.any(~np.isfinite(a)): 143 | raise ValueError 144 | return a 145 | except (ValueError, ZeroDivisionError, RuntimeError) as e: 146 | if fill_value is not None: 147 | return np.full(self.size_single_band, fill_value) 148 | raise e 149 | 150 | def _eval_and_fill(self, *, t, m, sigma, band, fill_value): 151 | if self.bands is None: 152 | return self._eval_and_fill_single_band(t=t, m=m, sigma=sigma, fill_value=fill_value) 153 | 154 | values = [] 155 | for band_to_calc in self.bands: 156 | band_mask = band == band_to_calc 157 | t_band = t[band_mask] 158 | m_band = m[band_mask] 159 | if sigma is None: 160 | sigma_band = None 161 | else: 162 | sigma_band = sigma[band_mask] 163 | v = self._eval_and_fill_single_band(t=t_band, m=m_band, sigma=sigma_band, fill_value=fill_value) 164 | values.append(np.atleast_1d(v)) 165 | 166 | return np.concatenate(values) 167 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/_base_meta.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from dataclasses import dataclass, field 3 | from typing import Collection, Union 4 | 5 | from light_curve.light_curve_ext import Extractor as _RustExtractor 6 | from light_curve.light_curve_ext import _FeatureEvaluator as _RustBaseFeature 7 | 8 | from ..dataclass_field import dataclass_field 9 | from ._base import BaseSingleBandFeature 10 | from .extractor import Extractor, _PyExtractor 11 | 12 | 13 | @dataclass 14 | class BaseMetaSingleBandFeature(BaseSingleBandFeature): 15 | features: Collection[Union[BaseSingleBandFeature, _RustBaseFeature]] = dataclass_field( 16 | default_factory=list, kw_only=True 17 | ) 18 | extractor: Union[_RustExtractor, _PyExtractor] = field(init=False) 19 | 20 | def __post_init__(self): 21 | super().__post_init__() 22 | self.extractor = Extractor(*self.features) 23 | 24 | @abstractmethod 25 | def transform(self, *, t, m, sigma): 26 | """Must return temporarily sorted arrays (t, m, sigma)""" 27 | pass 28 | 29 | def _eval_single_band(self, t, m, sigma=None): 30 | raise NotImplementedError("_eval_single_band is missed for BaseMetaFeature") 31 | 32 | def _eval_and_fill_single_band(self, *, t, m, sigma, fill_value): 33 | t, m, sigma = self.transform(t=t, m=m, sigma=sigma) 34 | return self.extractor._eval_and_fill_single_band(t=t, m=m, sigma=sigma, fill_value=fill_value) 35 | 36 | @property 37 | def size_single_band(self): 38 | if isinstance(self.extractor, _RustExtractor): 39 | return self.extractor.size 40 | return self.extractor.size_single_band 41 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/_lstsq.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def least_squares(t, m, sigma): 5 | if t.size < 3: 6 | raise ValueError("Time series must have at least 3 points") 7 | 8 | A = np.vstack([t, np.ones(len(t))]).T 9 | 10 | if sigma is not None: 11 | w = np.diag(1 / sigma) 12 | A = np.dot(w, A) 13 | m = np.dot(w, m.T) 14 | 15 | (slope, _), residuals, *_ = np.linalg.lstsq(A, m, rcond=None) 16 | return slope, residuals[0] 17 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/adnormal.py: -------------------------------------------------------------------------------- 1 | from ._base import BaseSingleBandFeature 2 | 3 | 4 | class AndersonDarlingNormal(BaseSingleBandFeature): 5 | def _eval_single_band(self, t, m, sigma=None): 6 | try: 7 | from scipy.stats import anderson 8 | except ImportError: 9 | raise ImportError("scipy is required for AndersonDarlingNormal feature, please install it") 10 | 11 | n = len(m) 12 | return anderson(m).statistic * (1 + 4 / n - 25 / n**2) 13 | 14 | @property 15 | def size_single_band(self): 16 | return 1 17 | 18 | 19 | __all__ = ("AndersonDarlingNormal",) 20 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/amplitude.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Amplitude(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return 0.5 * np.ptp(m) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("Amplitude",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/beyondnstd.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | import numpy as np 4 | 5 | from ._base import BaseSingleBandFeature 6 | 7 | 8 | @dataclass() 9 | class BeyondNStd(BaseSingleBandFeature): 10 | nstd: float = 1.0 11 | 12 | def _eval_single_band(self, t, m, sigma=None): 13 | mean = np.mean(m) 14 | std = np.std(m, ddof=1) 15 | return np.count_nonzero(np.abs(m - mean) > self.nstd * std) / len(m) 16 | 17 | @property 18 | def size_single_band(self): 19 | return 1 20 | 21 | 22 | __all__ = ("BeyondNStd",) 23 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/bins.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | import numpy as np 4 | 5 | from ..dataclass_field import dataclass_field 6 | from ._base_meta import BaseMetaSingleBandFeature 7 | 8 | 9 | @dataclass() 10 | class Bins(BaseMetaSingleBandFeature): 11 | window: float = dataclass_field(default=1.0, kw_only=True) 12 | offset: float = dataclass_field(default=0.0, kw_only=True) 13 | 14 | def transform(self, t, m, sigma=None, *, sorted=None, fill_value=None): 15 | try: 16 | from scipy import ndimage 17 | except ImportError: 18 | raise ImportError("scipy is required for Bins feature, please install it") 19 | 20 | assert self.window > 0, "Window should be a positive number." 21 | n = np.ceil((t[-1] - t[0]) / self.window) + 1 22 | j = np.arange(0, n) 23 | bins = j * self.window 24 | 25 | delta = self.window * np.floor((t[0] - self.offset) / self.window) 26 | time = t - self.offset - delta 27 | 28 | idx = np.digitize(time, bins) 29 | uniq_idx, nums = np.unique(idx, return_counts=True) 30 | 31 | new_time = uniq_idx * self.window + self.offset - self.window / 2 + delta 32 | 33 | weights = np.power(sigma, -2) 34 | s = ndimage.sum(weights, labels=idx, index=uniq_idx) 35 | new_magn = ndimage.sum(m * weights, labels=idx, index=uniq_idx) / s 36 | new_sigma = np.sqrt(nums / s) 37 | 38 | return new_time, new_magn, new_sigma 39 | 40 | 41 | __all__ = ("Bins",) 42 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/color_of_median.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ._base import BaseMultiBandFeature 4 | from .median import Median 5 | 6 | 7 | @dataclass() 8 | class ColorOfMedian(BaseMultiBandFeature): 9 | """Difference of median magnitudes in two bands.""" 10 | 11 | blue_band: str 12 | red_band: str 13 | 14 | @property 15 | def is_band_required(self) -> bool: 16 | return True 17 | 18 | @property 19 | def is_multiband_supported(self) -> bool: 20 | return True 21 | 22 | @property 23 | def size(self) -> int: 24 | return 1 25 | 26 | def __post_init__(self) -> None: 27 | super().__post_init__() 28 | self.median_feature = Median(bands=[self.blue_band, self.red_band]) 29 | 30 | def _eval_and_fill(self, *, t, m, sigma, band, fill_value): 31 | median = self.median_feature._eval_and_fill(t=t, m=m, sigma=sigma, band=band, fill_value=fill_value) 32 | return median[0] - median[1] 33 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/cusum.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Cusum(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | m_mean = np.mean(m) 9 | m_std = np.std(m, ddof=1) 10 | m_new = np.cumsum(m - m_mean) 11 | result = m_new / (len(m) * m_std) 12 | return np.ptp(result) 13 | 14 | @property 15 | def size_single_band(self): 16 | return 1 17 | 18 | 19 | __all__ = ("Cusum",) 20 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/eta.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Eta(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | n = len(m) 9 | m_std = np.var(m, ddof=1) 10 | m_sum = np.sum((m[1:] - m[:-1]) ** 2) 11 | return m_sum / ((n - 1) * m_std) 12 | 13 | @property 14 | def size_single_band(self): 15 | return 2 16 | 17 | 18 | __all__ = ("Eta",) 19 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/etae.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class EtaE(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | n = len(m) 9 | m_std = np.var(m, ddof=1) 10 | m_sum = np.sum(((m[1:] - m[:-1]) / (t[1:] - t[:-1])) ** 2) 11 | return m_sum * (t[n - 1] - t[0]) ** 2 / ((n - 1) ** 3 * m_std) 12 | 13 | @property 14 | def size_single_band(self): 15 | return 1 16 | 17 | 18 | __all__ = ("EtaE",) 19 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/excvar.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class ExcessVariance(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | m_mean = np.mean(m) 9 | d_mean = np.mean(np.power(sigma, 2)) 10 | m_std = np.std(m, ddof=1) 11 | return (m_std**2 - d_mean) / m_mean**2 12 | 13 | @property 14 | def size_single_band(self): 15 | return 1 16 | 17 | 18 | __all__ = ("ExcessVariance",) 19 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/extractor.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Collection, Union 3 | 4 | import numpy as np 5 | 6 | from light_curve.light_curve_ext import Extractor as _RustExtractor 7 | from light_curve.light_curve_ext import _FeatureEvaluator as _RustBaseFeature 8 | 9 | from ..dataclass_field import dataclass_field 10 | from ._base import BaseSingleBandFeature 11 | 12 | 13 | @dataclass() 14 | class _PyExtractor(BaseSingleBandFeature): 15 | features: Collection[Union[BaseSingleBandFeature, _RustBaseFeature]] = dataclass_field( 16 | default_factory=list, kw_only=True 17 | ) 18 | 19 | def _eval_single_band(self, t, m, sigma=None): 20 | raise NotImplementedError("_eval_single_band is missed for _PyExtractor") 21 | 22 | def _eval_and_fill_single_band(self, *, t, m, sigma, fill_value): 23 | return np.concatenate([np.atleast_1d(feature(t, m, sigma, fill_value=fill_value)) for feature in self.features]) 24 | 25 | @property 26 | def size_single_band(self): 27 | return sum( 28 | feature.size if isinstance(feature, _RustBaseFeature) else feature.size_single_band 29 | for feature in self.features 30 | ) 31 | 32 | 33 | class Extractor: 34 | def __new__(cls, *args: Collection[Union[BaseSingleBandFeature, _RustBaseFeature]]): 35 | if len(args) > 0 and all(isinstance(feature, _RustBaseFeature) for feature in args): 36 | return _RustExtractor(*args) 37 | else: 38 | return _PyExtractor(features=args) 39 | 40 | 41 | __all__ = ("Extractor",) 42 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/flux_n_not_det_before_fd.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Tuple 3 | 4 | import numpy as np 5 | 6 | from ..dataclass_field import dataclass_field 7 | from ._base import BaseSingleBandFeature 8 | 9 | 10 | @dataclass() 11 | class FluxNNotDetBeforeFd(BaseSingleBandFeature): 12 | """Number of non-detections before the first detection for measurements of the flux. 13 | 14 | Feature use a user-defined signal to noise ratio to define non-detections and count their number before 15 | the first detection. strictly_fainter flag allows counting non-detections with a strictly smaller upper limit 16 | than the first detection flux (there is no such feature in the original article). 17 | 18 | - Depends on: **flux** 19 | - Minimum number of observations: **2** 20 | - Number of features: **1** 21 | 22 | Attributes 23 | ---------- 24 | signal_to_noise : float 25 | Signal to noise ratio. 26 | strictly_fainter : bool 27 | Flag to determine if to find non-detections with strictly smaller upper limit than the first detection flux. 28 | 29 | P. Sánchez-Sáez et al 2021, [DOI:10.3847/1538-3881/abd5c1](https://doi.org/10.3847/1538-3881/abd5c1) 30 | """ 31 | 32 | signal_to_noise: float = dataclass_field(default=5.0, kw_only=True) 33 | strictly_fainter: bool = dataclass_field(default=False, kw_only=True) 34 | 35 | def _eval_single_band(self, t, m, sigma=None): 36 | detections = np.argwhere(m > self.signal_to_noise * sigma).flatten() 37 | 38 | if len(detections) == len(m): 39 | raise ValueError("There is no any non-detections") 40 | 41 | first_detection_idx = detections[0] 42 | 43 | if self.strictly_fainter: 44 | detection_m = m[first_detection_idx] 45 | upper_limits = sigma[:first_detection_idx] * self.signal_to_noise 46 | non_detection_less = np.count_nonzero(upper_limits < detection_m) 47 | return non_detection_less 48 | 49 | return first_detection_idx 50 | 51 | @property 52 | def names(self) -> Tuple[str]: 53 | return ("flux_n_non_detections_before_fd",) 54 | 55 | @property 56 | def descriptions(self) -> Tuple[str]: 57 | return ("number of non detections before the first detection for fluxes",) 58 | 59 | @property 60 | def size_single_band(self) -> int: 61 | return 1 62 | 63 | 64 | __all__ = ("FluxNNotDetBeforeFd",) 65 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/intpercrange.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ..dataclass_field import dataclass_field 4 | from ._base import BaseSingleBandFeature 5 | 6 | 7 | @dataclass() 8 | class InterPercentileRange(BaseSingleBandFeature): 9 | quantile: float = dataclass_field(default=0.25, kw_only=True) 10 | 11 | def _eval_single_band(self, t, m, sigma=None): 12 | try: 13 | from scipy.stats.mstats import mquantiles 14 | except ImportError: 15 | raise ImportError("scipy is required for InterPercentileRange feature, please install it") 16 | 17 | q1, q2 = mquantiles(m, [self.quantile, 1 - self.quantile], alphap=0.5, betap=0.5) 18 | return q2 - q1 19 | 20 | @property 21 | def size_single_band(self): 22 | return 1 23 | 24 | 25 | __all__ = ("InterPercentileRange",) 26 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/kurtosis.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Kurtosis(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | n = len(m) 9 | m_mean = np.mean(m) 10 | m_st = np.std(m, ddof=1) ** 4 11 | m_sum = sum(np.power(m - m_mean, 4)) 12 | return (n * (n + 1) * m_sum) / ((n - 1) * (n - 2) * (n - 3) * m_st) - 3 * np.power((n - 1), 2) / ( 13 | (n - 2) * (n - 3) 14 | ) 15 | 16 | @property 17 | def size_single_band(self): 18 | return 1 19 | 20 | 21 | __all__ = ("Kurtosis",) 22 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/linfit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | from ._lstsq import least_squares 5 | 6 | 7 | class LinearFit(BaseSingleBandFeature): 8 | def _eval_single_band(self, t, m, sigma=None): 9 | n = len(t) 10 | 11 | slope, chi2 = least_squares(t, m, sigma) 12 | 13 | red_chi2 = chi2 / (n - 2) 14 | 15 | weighted_t2 = np.average(t**2, weights=np.power(sigma, -2)) 16 | weighted_t = np.average(t, weights=np.power(sigma, -2)) ** 2 17 | 18 | sigma_sum = np.sum(1 / sigma**2) 19 | 20 | return np.array([slope, np.sqrt(1 / ((weighted_t2 - weighted_t) * sigma_sum)), red_chi2]) 21 | 22 | @property 23 | def size_single_band(self): 24 | return 3 25 | 26 | 27 | __all__ = ("LinearFit",) 28 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/lintrend.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | from ._lstsq import least_squares 5 | 6 | 7 | class LinearTrend(BaseSingleBandFeature): 8 | def _eval_single_band(self, t, m, sigma=None): 9 | n = len(t) 10 | 11 | slope, chi2 = least_squares(t, m, None) 12 | 13 | red_chi2 = chi2 / (n - 2) 14 | sxx = np.var(t, ddof=n - 1) 15 | return np.array([slope, np.sqrt(red_chi2 / sxx), np.sqrt(red_chi2)]) 16 | 17 | @property 18 | def size_single_band(self): 19 | return 3 20 | 21 | 22 | __all__ = ("LinearTrend",) 23 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/magnitude_n_not_det_before_fd.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Tuple 3 | 4 | import numpy as np 5 | 6 | from ..dataclass_field import dataclass_field 7 | from ._base import BaseSingleBandFeature 8 | 9 | 10 | @dataclass() 11 | class MagnitudeNNotDetBeforeFd(BaseSingleBandFeature): 12 | """Number of non detections before the first detection for measurements of the magnitude. 13 | 14 | Feature use a user-defined value to mark non-detections: measurements with sigma equal to this value 15 | considered as non detections. strictly_fainter flag allows counting non-detections with a strictly larger 16 | upper limit than the first detection magnitude (there is no such feature in the original article). 17 | 18 | - Depends on: **magnitude** 19 | - Minimum number of observations: **2** 20 | - Number of features: **1** 21 | 22 | Attributes 23 | ---------- 24 | sigma_non_detection : float 25 | Sigma value to mark the non detections values, may not be NaN. 26 | strictly_fainter : bool 27 | Flag to determine if to find non-detections with strictly larger upper limit than the first detection magnitude. 28 | 29 | P. Sánchez-Sáez et al 2021, [DOI:10.3847/1538-3881/abd5c1](https://doi.org/10.3847/1538-3881/abd5c1) 30 | """ 31 | 32 | sigma_non_detection: float = dataclass_field(default=np.inf, kw_only=True) 33 | strictly_fainter: bool = dataclass_field(default=False, kw_only=True) 34 | 35 | def _eval_single_band(self, t, m, sigma=None): 36 | detections = np.argwhere(sigma != self.sigma_non_detection).flatten() 37 | 38 | if len(detections) == len(m): 39 | raise ValueError("There is no any non-detections") 40 | 41 | first_detection_idx = detections[0] 42 | 43 | if self.strictly_fainter: 44 | detection_m = m[first_detection_idx] 45 | # magnitude upper limits should be larger than the first detection 46 | non_detection_less = np.count_nonzero(m[:first_detection_idx] > detection_m) 47 | return non_detection_less 48 | 49 | return first_detection_idx 50 | 51 | @property 52 | def names(self) -> Tuple[str]: 53 | return ("magn_n_non_detections_before_fd",) 54 | 55 | @property 56 | def descriptions(self) -> Tuple[str]: 57 | return ("number of non detections before the first detection for magnitudes",) 58 | 59 | @property 60 | def size_single_band(self): 61 | return 1 62 | 63 | 64 | __all__ = ("MagnitudeNNotDetBeforeFd",) 65 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/magnpratio.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from ..dataclass_field import dataclass_field 4 | from ._base import BaseSingleBandFeature 5 | 6 | 7 | @dataclass() 8 | class MagnitudePercentageRatio(BaseSingleBandFeature): 9 | quantile_numerator: float = dataclass_field(default=0.4, kw_only=True) 10 | quantile_denominator: float = dataclass_field(default=0.05, kw_only=True) 11 | 12 | def _eval_single_band(self, t, m, sigma=None): 13 | try: 14 | from scipy.stats.mstats import mquantiles 15 | except ImportError: 16 | raise ImportError("scipy is required for MagnitudePercentageRatio feature, please install it") 17 | 18 | n1, n2 = mquantiles(m, [self.quantile_numerator, 1 - self.quantile_numerator], alphap=0.5, betap=0.5) 19 | d1, d2 = mquantiles(m, [self.quantile_denominator, 1 - self.quantile_denominator], alphap=0.5, betap=0.5) 20 | return (n2 - n1) / (d2 - d1) 21 | 22 | @property 23 | def size_single_band(self): 24 | return 1 25 | 26 | 27 | __all__ = ("MagnitudePercentageRatio",) 28 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/maxslope.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class MaximumSlope(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | m_span = np.subtract(m[1:], m[:-1]) 9 | t_span = np.subtract(t[1:], t[:-1]) 10 | div = np.abs(np.divide(m_span, t_span)) 11 | return np.amax(div) 12 | 13 | @property 14 | def size_single_band(self): 15 | return 1 16 | 17 | 18 | __all__ = ("MaximumSlope",) 19 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/mean.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Mean(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return np.mean(m) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("Mean",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/meanvar.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class MeanVariance(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return np.std(m, ddof=1) / np.mean(m) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("MeanVariance",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/medabsdev.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class MedianAbsoluteDeviation(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return np.median((np.abs(m - np.median(m)))) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("MedianAbsoluteDeviation",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/medbufrperc.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | import numpy as np 4 | 5 | from ..dataclass_field import dataclass_field 6 | from ._base import BaseSingleBandFeature 7 | 8 | 9 | @dataclass() 10 | class MedianBufferRangePercentage(BaseSingleBandFeature): 11 | quantile: float = dataclass_field(default=0.1, kw_only=True) 12 | 13 | def _eval_single_band(self, t, m, sigma=None): 14 | median = np.median(m) 15 | return np.count_nonzero(np.abs(median - m) < self.quantile * (np.max(m) - np.min(m)) / 2) / len(m) 16 | 17 | @property 18 | def size_single_band(self): 19 | return 1 20 | 21 | 22 | __all__ = ("MedianBufferRangePercentage",) 23 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/median.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Median(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return np.median(m) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("Median",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/otsusplit.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class OtsuSplit(BaseSingleBandFeature): 7 | """Otsu threshholding algorithm 8 | 9 | Difference of subset means, standard deviation of the lower subset, standard deviation 10 | of the upper subset and lower-to-all observation count ratio for two subsets of magnitudes 11 | obtained by Otsu's method split. 12 | 13 | Otsu's method is used to perform automatic thresholding. The algorithm returns a single 14 | threshold that separate values into two classes. This threshold is determined by minimizing 15 | intra-class intensity variance, or equivalently, by maximizing inter-class variance. 16 | 17 | - Depends on: **magnitude** 18 | - Minimum number of observations: **2** 19 | - Number of features: **4** 20 | 21 | Otsu, Nobuyuki 1979. [DOI:10.1109/tsmc.1979.4310076](https://doi.org/10.1109/tsmc.1979.4310076) 22 | """ 23 | 24 | def _eval_single_band(self, t, m, sigma=None): 25 | n = len(m) 26 | m = np.sort(m) 27 | arg, mean0, mean1 = self._threshold_arg(m) 28 | 29 | std_lower = np.std(m[: arg + 1], ddof=1) 30 | std_upper = np.std(m[arg + 1 :], ddof=1) 31 | 32 | if len(m[: arg + 1]) == 1: 33 | std_lower = 0 34 | if len(m[arg + 1 :]) == 1: 35 | std_upper = 0 36 | 37 | lower_to_all_ratio = (arg + 1) / n 38 | 39 | return mean1[arg] - mean0[arg], std_lower, std_upper, lower_to_all_ratio 40 | 41 | @staticmethod 42 | def _threshold_arg(sorted_m): 43 | n = len(sorted_m) 44 | amounts = np.arange(1, n) 45 | 46 | w0 = amounts / n 47 | w1 = 1 - w0 48 | 49 | cumsum0 = np.cumsum(sorted_m)[:-1] 50 | cumsum1 = np.cumsum(sorted_m[::-1])[:-1][::-1] 51 | mean0 = cumsum0 / amounts 52 | mean1 = cumsum1 / amounts[::-1] 53 | 54 | inter_class_variance = w0 * w1 * (mean0 - mean1) ** 2 55 | arg = np.argmax(inter_class_variance) 56 | return arg, mean0, mean1 57 | 58 | @staticmethod 59 | def threshold(m): 60 | """The Otsu threshold method.""" 61 | m = np.sort(m) 62 | arg, _, _ = OtsuSplit._threshold_arg(m) 63 | return m[arg + 1] 64 | 65 | @property 66 | def names(self): 67 | return "otsu_mean_diff", "otsu_std_lower", "otsu_std_upper", "otsu_lower_to_all_ratio" 68 | 69 | @property 70 | def descriptions(self): 71 | return ( 72 | "difference between mean values of Otsu split subsets", 73 | "standard deviation for observations below the threshold given by Otsu method", 74 | "standard deviation for observations above the threshold given by Otsu method", 75 | "ratio of quantity of observations bellow the threshold given by Otsu method to quantity of all observations", # noqa E501 76 | ) 77 | 78 | @property 79 | def size_single_band(self): 80 | return 4 81 | 82 | 83 | __all__ = ("OtsuSplit",) 84 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/pdiffmperc.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | import numpy as np 4 | 5 | from ..dataclass_field import dataclass_field 6 | from ._base import BaseSingleBandFeature 7 | 8 | 9 | @dataclass() 10 | class PercentDifferenceMagnitudePercentile(BaseSingleBandFeature): 11 | quantile: float = dataclass_field(default=0.25, kw_only=True) 12 | 13 | def _eval_single_band(self, t, m, sigma=None): 14 | try: 15 | from scipy.stats.mstats import mquantiles 16 | except ImportError: 17 | raise ImportError("scipy is required for PercentDifferenceMagnitudePercentile feature, please install it") 18 | 19 | median = np.median(m) 20 | q1, q2 = mquantiles(m, [self.quantile, 1 - self.quantile], alphap=0.5, betap=0.5) 21 | return (q2 - q1) / median 22 | 23 | @property 24 | def size_single_band(self): 25 | return 1 26 | 27 | 28 | __all__ = ("PercentDifferenceMagnitudePercentile",) 29 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/percampl.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class PercentAmplitude(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | median = np.median(m) 9 | return np.max((np.max(m) - median, median - np.min(m))) 10 | 11 | @property 12 | def size_single_band(self): 13 | return 1 14 | 15 | 16 | __all__ = ("PercentAmplitude",) 17 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/ptp_var.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class PeakToPeakVar(BaseSingleBandFeature): 7 | r"""Peak-to-peak variation 8 | 9 | $$ 10 | \frac{(m_i - \sigma_i)_\text{max} - (m_i + \sigma_i)_\text{min}} 11 | {(m_i - \sigma_i)_\text{max} + (m_i + \sigma_i)_\text{min}} 12 | $$ 13 | Input m must be non-negative (e.g. non-differential) flux density. 14 | This feature is a variability detector, higher values correspond to more variable sources. 15 | 16 | - Depends on: **flux density**, **errors** 17 | - Minimum number of observations: **2** 18 | - Number of features: **1** 19 | 20 | Aller M.F., Aller H.D., Hughes P.A. 1992. [DOI:10.1086/171898](https://www.doi.org/10.1086/171898) 21 | """ 22 | 23 | nstd: float = 1.0 24 | 25 | def _eval_single_band(self, t, m, sigma=None): 26 | if np.any(m < 0): 27 | raise ValueError("m must be non-negative") 28 | a = np.max(m - self.nstd * sigma) 29 | b = np.min(m + self.nstd * sigma) 30 | return (a - b) / (a + b) 31 | 32 | @property 33 | def size_single_band(self): 34 | return 1 35 | 36 | 37 | __all__ = ("PeakToPeakVar",) 38 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/light_curve/light_curve_py/features/rainbow.py -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/__init__.py: -------------------------------------------------------------------------------- 1 | from .generic import * 2 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/_bands.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | import numpy as np 4 | from numpy.typing import ArrayLike, NDArray 5 | 6 | 7 | class Bands: 8 | """Monochromatic passbands for Rainbow fit.""" 9 | 10 | @property 11 | def names(self) -> NDArray: 12 | """Names of the bands""" 13 | return self._names.copy() 14 | 15 | @property 16 | def index(self) -> NDArray[int]: 17 | """Index array for the bands""" 18 | return np.arange(len(self._names)) 19 | 20 | def wave_cm(self, band) -> float: 21 | """Wavelength of the band in cm""" 22 | return self._names_to_wave_cm[band] 23 | 24 | @property 25 | def mean_wave_cm(self) -> float: 26 | """Mean wavelength of the bands in cm""" 27 | return np.mean(self._wave_cm) 28 | 29 | def index_to_wave_cm(self, index: NDArray[int]) -> NDArray[float]: 30 | """Wavelength of the band in cm""" 31 | return self._wave_cm[index] 32 | 33 | def __init__(self, names: ArrayLike, wave_cm: ArrayLike): 34 | self._input_validation(names, wave_cm) 35 | 36 | self._names = np.asarray(names) 37 | self._wave_cm = np.asarray(wave_cm) 38 | 39 | self._names_to_wave_cm = dict(zip(self._names, self._wave_cm)) 40 | 41 | self._name_to_index = dict(zip(self._names, range(len(self._names)))) 42 | self.get_index = np.vectorize(self._name_to_index.get) 43 | 44 | @classmethod 45 | def from_dict(cls, band_wave_cm: Dict[Any, float]) -> "Bands": 46 | """Create Bands from a dictionary""" 47 | names, wave_cm = zip(*band_wave_cm.items()) 48 | return cls(names, wave_cm) 49 | 50 | @staticmethod 51 | def _input_validation(names: ArrayLike, wave_cm: ArrayLike): 52 | if len(names) != len(wave_cm): 53 | raise ValueError("names and wave_cm must have the same length") 54 | 55 | if len(names) == 0: 56 | raise ValueError("At least one band must be specified.") 57 | 58 | if len(set(names)) != len(names): 59 | raise ValueError("names must be unique") 60 | 61 | if any(lmbd <= 0 for lmbd in wave_cm): 62 | raise ValueError("wave_cm must be positive") 63 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/_parameters.py: -------------------------------------------------------------------------------- 1 | from enum import IntEnum 2 | from typing import Iterable, List 3 | 4 | import numpy as np 5 | 6 | from light_curve.light_curve_py.features.rainbow._bands import Bands 7 | 8 | __all__ = ["create_parameters_class"] 9 | 10 | 11 | def baseline_parameter_name(band: str) -> str: 12 | return f"baseline_{band}" 13 | 14 | 15 | def baseline_band_name(name: str) -> str: 16 | if name.startswith("baseline_"): 17 | return name[len("baseline_") :] 18 | 19 | return None 20 | 21 | 22 | def create_int_enum(cls_name: str, attributes: Iterable[str]): 23 | return IntEnum(cls_name, {attr: i for i, attr in enumerate(attributes)}) 24 | 25 | 26 | def create_parameters_class( 27 | cls_name: str, 28 | *, 29 | common: List[str], 30 | bol: List[str], 31 | temp: List[str], 32 | bands: Bands, 33 | with_baseline: bool, 34 | ): 35 | """Create an IntEnum class for RainbowFit parameters 36 | 37 | Parameters 38 | ---------- 39 | cls_name : str 40 | Name of the class to create 41 | common : list of str 42 | Common parameters for both bolometric and temperature models 43 | bol : list of str 44 | Bolometric model parameters, without common parameters 45 | temp : list of str 46 | Temperature model parameters, without common parameters 47 | bands : list of str 48 | Unique list of bands in the dataset. It is used to generate baseline 49 | parameters when `with_baseline` is True. 50 | with_baseline : bool 51 | Whether to include baseline parameters, one per band in `bands`. 52 | """ 53 | attributes = common + bol + temp 54 | if with_baseline: 55 | baseline = list(map(baseline_parameter_name, bands.names)) 56 | attributes += baseline 57 | 58 | enum = create_int_enum(cls_name, attributes) 59 | 60 | enum.all_common = common 61 | enum.common_idx = np.array([enum[attr] for attr in common]) 62 | 63 | enum.bol = bol 64 | enum.bol_idx = np.array([enum[attr] for attr in enum.bol]) 65 | enum.all_bol = common + bol 66 | enum.all_bol_idx = np.array([enum[attr] for attr in enum.all_bol]) 67 | 68 | enum.temp = temp 69 | enum.temp_idx = np.array([enum[attr] for attr in enum.temp]) 70 | enum.all_temp = common + temp 71 | enum.all_temp_idx = np.array([enum[attr] for attr in enum.all_temp]) 72 | 73 | enum.with_baseline = with_baseline 74 | if with_baseline: 75 | enum.all_baseline = baseline 76 | enum.baseline_idx = np.array([enum[attr] for attr in enum.all_baseline]) 77 | enum.baseline_parameter_name = staticmethod(baseline_parameter_name) 78 | enum.baseline_band_name = staticmethod(baseline_band_name) 79 | 80 | band_idx_to_baseline_idx = { 81 | band_idx: enum[baseline_parameter_name(band_name)] for band_idx, band_name in zip(bands.index, bands.names) 82 | } 83 | enum.lookup_baseline_idx_with_band_idx = np.vectorize(band_idx_to_baseline_idx.get, otypes=[int]) 84 | 85 | return enum 86 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/_scaler.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Dict, Union 3 | 4 | import numpy as np 5 | from numpy.typing import NDArray 6 | 7 | __all__ = ["Scaler", "MultiBandScaler"] 8 | 9 | 10 | @dataclass() 11 | class Scaler: 12 | """Shift and scale arrays""" 13 | 14 | shift: Union[float, NDArray] 15 | """Scale to apply to the array 16 | 17 | Either a single value or an array of the same shape as the input array 18 | """ 19 | 20 | scale: Union[float, NDArray] 21 | """Scale to apply to the array 22 | 23 | Either a single value or an array of the same shape as the input array 24 | """ 25 | 26 | def __eq__(self, other): 27 | if not isinstance(other, Scaler): 28 | return False 29 | return np.array_equal(self.shift, other.shift) and np.array_equal(self.scale, other.scale) 30 | 31 | @classmethod 32 | def from_time(cls, t) -> "Scaler": 33 | """Create a Scaler from a time array 34 | 35 | It just computes the mean and standard deviation of the array. 36 | """ 37 | shift = np.mean(t) 38 | scale = np.std(t) 39 | if scale == 0.0: 40 | scale = 1.0 41 | return cls(shift=shift, scale=scale) 42 | 43 | def do_shift_scale(self, x): 44 | return (x - self.shift) / self.scale 45 | 46 | def undo_shift_scale(self, x): 47 | return x * self.scale + self.shift 48 | 49 | def do_scale(self, x): 50 | return x / self.scale 51 | 52 | def undo_scale(self, x): 53 | return x * self.scale 54 | 55 | 56 | @dataclass() 57 | class MultiBandScaler(Scaler): 58 | """Shift and scale arrays, optionally per band""" 59 | 60 | per_band_shift: Dict[str, float] 61 | """Shift to apply to each band""" 62 | 63 | def __eq__(self, other): 64 | if not isinstance(other, MultiBandScaler): 65 | return False 66 | return super().__eq__(other) and self.per_band_shift == other.per_band_shift 67 | 68 | @classmethod 69 | def from_flux(cls, flux, band, *, with_baseline: bool) -> "MultiBandScaler": 70 | """Create a Scaler from a flux array. 71 | 72 | It uses standard deviation for the scale. For the shift, it is either 73 | zero (`with_baseline=False`) or the mean of each band otherwise. 74 | """ 75 | flux = np.asarray(flux) 76 | band = np.asarray(band) 77 | 78 | uniq_bands = np.unique(band) 79 | per_band_shift = dict.fromkeys(uniq_bands, 0.0) 80 | shift_array = np.zeros(len(flux)) 81 | 82 | if with_baseline: 83 | for b in uniq_bands: 84 | idx = band == b 85 | shift_array[idx] = per_band_shift[b] = np.mean(flux[idx]) 86 | 87 | scale = np.std(flux) 88 | if scale == 0.0: 89 | scale = 1.0 90 | 91 | return cls(shift=shift_array, scale=scale, per_band_shift=per_band_shift) 92 | 93 | def undo_shift_scale_band(self, x, band): 94 | return x * self.scale + self.per_band_shift.get(band, 0) 95 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/bolometric.py: -------------------------------------------------------------------------------- 1 | import math 2 | from abc import abstractmethod 3 | from dataclasses import dataclass 4 | from typing import Dict, List, Union 5 | 6 | import numpy as np 7 | 8 | __all__ = [ 9 | "bolometric_terms", 10 | "BaseBolometricTerm", 11 | "SigmoidBolometricTerm", 12 | "BazinBolometricTerm", 13 | "LinexpBolometricTerm", 14 | "DoublexpBolometricTerm", 15 | ] 16 | 17 | 18 | @dataclass() 19 | class BaseBolometricTerm: 20 | """Bolometric term for the Rainbow""" 21 | 22 | @staticmethod 23 | @abstractmethod 24 | def parameter_names() -> List[str]: 25 | return NotImplementedError 26 | 27 | @staticmethod 28 | @abstractmethod 29 | def parameter_scalings() -> List[Union[str, None]]: 30 | """Describes how to unscale the parameters. 31 | 32 | Should be the list the same shape as returned by `parameter_names()`, and describes 33 | how the parameters should be un-scaled from the fit done in scaled coordinates. 34 | 35 | List items should be either None or one of the following strings: 36 | - time - the parameter is scaled and shifted like measurement times 37 | - timescale - the parameter is scaled like measurement times, but not shifted, thus 38 | behaving like a difference between two measurement times 39 | - flux - the parameter is scaled like the flux points, without additional shifts 40 | applied to them. Suitable for amplitude-like parameters. 41 | - None - the parameter is kept as is, without any additional scaling or shifting 42 | """ 43 | return NotImplementedError 44 | 45 | @staticmethod 46 | @abstractmethod 47 | def value(self, t, *params) -> float: 48 | return NotImplementedError 49 | 50 | @staticmethod 51 | @abstractmethod 52 | def initial_guesses(t, m, sigma, band) -> Dict[str, float]: 53 | return NotImplementedError 54 | 55 | @staticmethod 56 | @abstractmethod 57 | def limits(t, m, sigma, band) -> Dict[str, float]: 58 | return NotImplementedError 59 | 60 | @staticmethod 61 | @abstractmethod 62 | def peak_time(*params) -> float: 63 | return NotImplementedError 64 | 65 | 66 | @dataclass() 67 | class SigmoidBolometricTerm(BaseBolometricTerm): 68 | """Sigmoid""" 69 | 70 | @staticmethod 71 | def parameter_names(): 72 | return ["reference_time", "amplitude", "rise_time"] 73 | 74 | @staticmethod 75 | def parameter_scalings(): 76 | return ["time", "flux", "timescale"] 77 | 78 | @staticmethod 79 | def value(t, t0, amplitude, rise_time): 80 | dt = t - t0 81 | 82 | result = np.zeros(len(dt)) 83 | # To avoid numerical overflows, let's only compute the exponents not too far from t0 84 | idx = dt > -100 * rise_time 85 | result[idx] = amplitude / (np.exp(-dt[idx] / rise_time) + 1) 86 | 87 | return result 88 | 89 | @staticmethod 90 | def initial_guesses(t, m, sigma, band): 91 | A = np.ptp(m) 92 | 93 | initial = {} 94 | initial["reference_time"] = t[np.argmax(m)] 95 | initial["amplitude"] = A 96 | initial["rise_time"] = 1.0 97 | 98 | return initial 99 | 100 | @staticmethod 101 | def limits(t, m, sigma, band): 102 | t_amplitude = np.ptp(t) 103 | m_amplitude = np.ptp(m) 104 | 105 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 106 | 107 | limits = {} 108 | limits["reference_time"] = (np.min(t) - 10 * t_amplitude, np.max(t) + 10 * t_amplitude) 109 | limits["amplitude"] = (0.0, 20 * m_amplitude) 110 | limits["rise_time"] = (dt / 100, 10 * t_amplitude) 111 | 112 | return limits 113 | 114 | @staticmethod 115 | def peak_time(t0, amplitude, rise_time): 116 | """Peak time is not defined for the sigmoid, so it returns mid-time of the rise instead""" 117 | return t0 118 | 119 | 120 | @dataclass() 121 | class BazinBolometricTerm(BaseBolometricTerm): 122 | """Bazin function, symmetric form""" 123 | 124 | @staticmethod 125 | def parameter_names(): 126 | return ["reference_time", "amplitude", "rise_time", "fall_time"] 127 | 128 | @staticmethod 129 | def parameter_scalings(): 130 | return ["time", "flux", "timescale", "timescale"] 131 | 132 | @staticmethod 133 | def value(t, t0, amplitude, rise_time, fall_time): 134 | dt = t - t0 135 | 136 | # Coefficient to make peak amplitude equal to unity 137 | scale = (fall_time / rise_time) ** (rise_time / (fall_time + rise_time)) + (fall_time / rise_time) ** ( 138 | -fall_time / (fall_time + rise_time) 139 | ) 140 | 141 | result = np.zeros(len(dt)) 142 | # To avoid numerical overflows, let's only compute the exponents not too far from t0 143 | idx = (dt > -100 * rise_time) & (dt < 100 * fall_time) 144 | result[idx] = amplitude * scale / (np.exp(-dt[idx] / rise_time) + np.exp(dt[idx] / fall_time)) 145 | 146 | return result 147 | 148 | @staticmethod 149 | def initial_guesses(t, m, sigma, band): 150 | A = 1.5 * max(np.max(m), np.ptp(m)) 151 | 152 | t0, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 153 | 154 | # Empirical conversion of sigma to rise/fall times 155 | rise_time = dt 156 | fall_time = dt 157 | 158 | initial = {} 159 | initial["reference_time"] = t0 160 | initial["amplitude"] = A 161 | initial["rise_time"] = rise_time 162 | initial["fall_time"] = fall_time 163 | 164 | return initial 165 | 166 | @staticmethod 167 | def limits(t, m, sigma, band): 168 | t_amplitude = np.ptp(t) 169 | m_amplitude = np.ptp(m) 170 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 171 | 172 | limits = {} 173 | limits["reference_time"] = (np.min(t) - 10 * t_amplitude, np.max(t) + 10 * t_amplitude) 174 | limits["amplitude"] = (0.0, 20 * m_amplitude) 175 | limits["rise_time"] = (dt / 100, 10 * t_amplitude) 176 | limits["fall_time"] = (dt / 100, 10 * t_amplitude) 177 | 178 | return limits 179 | 180 | @staticmethod 181 | def peak_time(t0, amplitude, rise_time, fall_time): 182 | return t0 + np.log(fall_time / rise_time) * rise_time * fall_time / (rise_time + fall_time) 183 | 184 | 185 | @dataclass() 186 | class LinexpBolometricTerm(BaseBolometricTerm): 187 | """Linexp function, symmetric form. Generated using a prototype version of Multi-view 188 | Symbolic Regression (Russeil et al. 2024, https://arxiv.org/abs/2402.04298) on 189 | a SLSN ZTF light curve (https://ztf.snad.space/dr17/view/821207100004043). Careful not very stable guesses/limits""" 190 | 191 | @staticmethod 192 | def parameter_names(): 193 | return ["reference_time", "amplitude", "rise_time"] 194 | 195 | @staticmethod 196 | def parameter_scalings(): 197 | return ["time", "flux", "timescale"] 198 | 199 | @staticmethod 200 | def value(t, t0, amplitude, rise_time): 201 | dt = t0 - t 202 | protected_rise = math.copysign(max(1e-5, abs(rise_time)), rise_time) 203 | 204 | # Coefficient to make peak amplitude equal to unity 205 | scale = 1 / (protected_rise * np.exp(-1)) 206 | 207 | power = -dt / protected_rise 208 | power = np.where(power > 100, 100, power) 209 | result = amplitude * scale * dt * np.exp(power) 210 | 211 | return np.where(result > 0, result, 0) 212 | 213 | @staticmethod 214 | def initial_guesses(t, m, sigma, band): 215 | A = np.ptp(m) 216 | med_dt = median_dt(t, band) 217 | t0, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 218 | 219 | # Compute points after or before maximum 220 | peak_time = t[np.argmax(m)] 221 | after = t[-1] - peak_time 222 | before = peak_time - t[0] 223 | 224 | rise_time = 100 * med_dt 225 | rise_time = rise_time if before >= after else -rise_time 226 | 227 | initial = {} 228 | # Reference of linexp correspond to the moment where flux == 0 229 | initial["reference_time"] = peak_time + rise_time 230 | initial["amplitude"] = A 231 | initial["rise_time"] = rise_time 232 | 233 | return initial 234 | 235 | @staticmethod 236 | def limits(t, m, sigma, band): 237 | t_amplitude = np.ptp(t) 238 | m_amplitude = np.ptp(m) 239 | 240 | limits = {} 241 | limits["reference_time"] = (np.min(t) - 10 * t_amplitude, np.max(t) + 10 * t_amplitude) 242 | limits["amplitude"] = (0, 10 * m_amplitude) 243 | limits["rise_time"] = (-10 * t_amplitude, 10 * t_amplitude) 244 | 245 | return limits 246 | 247 | @staticmethod 248 | def peak_time(t0, amplitude, rise_time): 249 | return t0 - rise_time 250 | 251 | 252 | @dataclass() 253 | class DoublexpBolometricTerm(BaseBolometricTerm): 254 | """Doublexp function generated using Multi-view Symbolic Regression on ZTF SNIa light curves 255 | Russeil et al. 2024, https://arxiv.org/abs/2402.04298""" 256 | 257 | @staticmethod 258 | def parameter_names(): 259 | return ["reference_time", "amplitude", "time1", "time2", "p"] 260 | 261 | @staticmethod 262 | def parameter_scalings(): 263 | return ["time", "flux", "timescale", "timescale", "None"] 264 | 265 | @staticmethod 266 | def value(t, t0, amplitude, time1, time2, p): 267 | dt = t - t0 268 | result = np.zeros_like(dt) 269 | 270 | # To avoid numerical overflows 271 | maxp = 20 272 | A = -(dt / time1) * (p - np.exp(-(dt / time2))) 273 | A = np.where(A > maxp, maxp, A) 274 | 275 | result = amplitude * np.exp(A) 276 | 277 | return result 278 | 279 | @staticmethod 280 | def initial_guesses(t, m, sigma, band): 281 | A = max(np.max(m), np.ptp(m)) 282 | t0, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 283 | 284 | # Empirical conversion of sigma to times 285 | time1 = 2 * dt 286 | time2 = 2 * dt 287 | 288 | initial = {} 289 | initial["reference_time"] = t0 290 | initial["amplitude"] = A 291 | initial["time1"] = time1 292 | initial["time2"] = time2 293 | initial["p"] = 1 294 | 295 | return initial 296 | 297 | @staticmethod 298 | def limits(t, m, sigma, band): 299 | t_amplitude = np.ptp(t) 300 | m_amplitude = np.ptp(m) 301 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 302 | 303 | limits = {} 304 | limits["reference_time"] = (np.min(t) - 10 * t_amplitude, np.max(t) + 10 * t_amplitude) 305 | limits["amplitude"] = (0.0, 10 * m_amplitude) 306 | limits["time1"] = (dt / 10, 2 * t_amplitude) 307 | limits["time2"] = (dt / 10, 2 * t_amplitude) 308 | limits["p"] = (1e-2, 100) 309 | 310 | return limits 311 | 312 | @staticmethod 313 | def peak_time(t0, p): 314 | try: 315 | from scipy.special import lambertw 316 | except ImportError: 317 | raise ImportError("scipy is required for DoublexpBolometricTerm.peak_time, please install it") 318 | 319 | return t0 + np.real(-lambertw(p * np.exp(1)) + 1) 320 | 321 | 322 | def median_dt(t, band): 323 | # Compute the median distance between points in each band 324 | # Caution when using this method as it might be strongly biaised because of ZTF high cadence a given day. 325 | dt = [] 326 | for b in np.unique(band): 327 | dt += list(t[band == b][1:] - t[band == b][:-1]) 328 | med_dt = np.median(dt) 329 | return med_dt 330 | 331 | 332 | def t0_and_weighted_centroid_sigma(t, m, sigma): 333 | # To avoid crashing on all-negative data 334 | mc = m - np.min(m) 335 | 336 | # Peak position as weighted centroid of everything above median 337 | idx = m > np.median(m) 338 | t0 = np.sum(t[idx] * m[idx] / sigma[idx]) / np.sum(m[idx] / sigma[idx]) 339 | 340 | # Weighted centroid sigma 341 | dt = np.sqrt(np.sum((t[idx] - t0) ** 2 * (mc[idx]) / sigma[idx]) / np.sum(mc[idx] / sigma[idx])) 342 | return t0, dt 343 | 344 | 345 | bolometric_terms = { 346 | "sigmoid": SigmoidBolometricTerm, 347 | "bazin": BazinBolometricTerm, 348 | "linexp": LinexpBolometricTerm, 349 | "doublexp": DoublexpBolometricTerm, 350 | } 351 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/generic.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from typing import Dict, List, Tuple, Union 3 | 4 | from light_curve.light_curve_py.dataclass_field import dataclass_field 5 | from light_curve.light_curve_py.features.rainbow._base import BaseRainbowFit 6 | 7 | from .bolometric import BaseBolometricTerm, bolometric_terms 8 | from .temperature import BaseTemperatureTerm, temperature_terms 9 | 10 | __all__ = ["RainbowFit"] 11 | 12 | # CODATA 2018, grab from astropy 13 | planck_constant = 6.62607004e-27 # erg s 14 | speed_of_light = 2.99792458e10 # cm/s 15 | boltzman_constant = 1.380649e-16 # erg/K 16 | sigma_sb = 5.6703744191844314e-05 # erg/(cm^2 s K^4) 17 | 18 | 19 | @dataclass() 20 | class RainbowFit(BaseRainbowFit): 21 | """Multiband blackbody fit to the light curve using functions to be chosen by the user 22 | Note, that `m` and corresponded `sigma` are assumed to be flux densities. 23 | Based on Russeil et al. 2023, arXiv:2310.02916. 24 | 25 | Parameters 26 | ---------- 27 | band_wave_cm : dict 28 | Dictionary of band names and their effective wavelengths in cm. 29 | with_baseline : bool, optional 30 | Whether to include an offset in the fit, individual for each band. 31 | If it is true, one more fit paramter per passband is added - 32 | the additive constant with the same units as input flux. 33 | bolometric : str or BaseBolometricTerm subclass, optional 34 | The shape of bolometric term. Default is 'bazin'. 35 | Other options are: 'sigmoid' 36 | temperature : str or BaseTemperatureTerm subclass, optional 37 | The shape of temperature term. Default is 'sigmoid'. 38 | Other options are: 'constant', 'delayed_sigmoid' 39 | 40 | Methods 41 | ------- 42 | __call__(t, m, sigma, band, *, sorted=False, check=True, fill_value=None) 43 | Evaluate the feature. Positional arguments are numpy arrays of the same length, 44 | `band` must consist of the same strings as keys in `band_wave_cm`. If `sorted` is True, 45 | `t` must be sorted in ascending order. If `check` is True, the input is checked for 46 | NaNs and Infs. If `fill_value` is not None, it is used to fill the output array if 47 | the feature cannot be evaluated. 48 | model(t, band, *params) 49 | Evaluate Rainbow model on the given arrays of times and bands. `*params` are 50 | fit parameters, basically the output of `__call__` method but without the last 51 | parameter (reduced Chi^2 of the fit). See parameter names in the `.name` attribute. 52 | peak_time(*params) 53 | Return bolometric peak time for given set of parameters 54 | fit_and_get_errors(t, m, sigma, band, sorted=False, check=True, print_level=None, get_initial=False) 55 | The same as `__call__` but also returns the parameter errors. Optionally sets the `print_level` 56 | (verbosity) for Minuit fitter. If `get_initial` is True, returns the initial parameters instead 57 | of fitted ones (useful for debugging) 58 | """ 59 | 60 | bolometric: Union[str, BaseBolometricTerm] = dataclass_field(default="bazin", kw_only=True) 61 | """Which parametric bolometric term to use""" 62 | temperature: Union[str, BaseTemperatureTerm] = dataclass_field(default="sigmoid", kw_only=True) 63 | """Which parametric temperature term to use""" 64 | 65 | def __post_init__(self): 66 | if not isinstance(self.bolometric, BaseBolometricTerm): 67 | self.bolometric = bolometric_terms[self.bolometric] 68 | 69 | if not isinstance(self.temperature, BaseTemperatureTerm): 70 | self.temperature = temperature_terms[self.temperature] 71 | 72 | super().__post_init__() 73 | 74 | def _common_parameter_names(self) -> List[str]: 75 | bolometric_parameters = self.bolometric.parameter_names() 76 | temperature_parameters = self.temperature.parameter_names() 77 | return [j for j in bolometric_parameters if j in temperature_parameters] 78 | 79 | def _bolometric_parameter_names(self) -> List[str]: 80 | bolometric_parameters = self.bolometric.parameter_names() 81 | return [i for i in bolometric_parameters if i not in self._common_parameter_names()] 82 | 83 | def _temperature_parameter_names(self) -> List[str]: 84 | temperature_parameters = self.temperature.parameter_names() 85 | return [i for i in temperature_parameters if i not in self._common_parameter_names()] 86 | 87 | def bol_func(self, t, params): 88 | return self.bolometric.value(t, *params[self.p.all_bol_idx]) 89 | 90 | def temp_func(self, t, params): 91 | return self.temperature.value(t, *params[self.p.all_temp_idx]) 92 | 93 | def _parameter_scalings(self) -> Dict[str, str]: 94 | rules = super()._parameter_scalings() 95 | 96 | for term in [self.bolometric, self.temperature]: 97 | for name, scaling in zip(term.parameter_names(), term.parameter_scalings()): 98 | rules[name] = scaling 99 | 100 | return rules 101 | 102 | def _initial_guesses(self, t, m, sigma, band) -> Dict[str, float]: 103 | initial = self.bolometric.initial_guesses(t, m, sigma, band) 104 | initial.update(self.temperature.initial_guesses(t, m, sigma, band)) 105 | 106 | return initial 107 | 108 | def _limits(self, t, m, sigma, band) -> Dict[str, Tuple[float, float]]: 109 | limits = self.bolometric.limits(t, m, sigma, band) 110 | limits.update(self.temperature.limits(t, m, sigma, band)) 111 | 112 | return limits 113 | 114 | def peak_time(self, params) -> float: 115 | """Returns true bolometric peak position for given parameters""" 116 | return self.bolometric.peak_time(*params[self.p.all_bol_idx]) 117 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/rainbow/temperature.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from dataclasses import dataclass 3 | from typing import Dict, List, Union 4 | 5 | import numpy as np 6 | 7 | __all__ = ["temperature_terms", "BaseTemperatureTerm", "ConstantTemperatureTerm", "SigmoidTemperatureTerm"] 8 | 9 | 10 | @dataclass() 11 | class BaseTemperatureTerm: 12 | """Bolometric term for the Rainbow""" 13 | 14 | @staticmethod 15 | @abstractmethod 16 | def parameter_names() -> List[str]: 17 | return NotImplementedError 18 | 19 | @staticmethod 20 | @abstractmethod 21 | def parameter_scalings() -> List[Union[str, None]]: 22 | """Describes how to unscale the parameters. 23 | 24 | Should be the list the same shape as returned by `parameter_names()`, and describes 25 | how the parameters should be un-scaled from the fit done in scaled coordinates. 26 | 27 | List items should be either None or one of the following strings: 28 | - time - the parameter is scaled and shifted like measurement times 29 | - timescale - the parameter is scaled like measurement times, but not shifted, thus 30 | behaving like a difference between two measurement times 31 | - flux - the parameter is scaled like the flux points, without additional shifts 32 | applied to them. Suitable for amplitude-like parameters. 33 | - None - the parameter is kept as is, without any additional scaling or shifting 34 | """ 35 | return NotImplementedError 36 | 37 | @staticmethod 38 | @abstractmethod 39 | def value(self, t, *params) -> float: 40 | return NotImplementedError 41 | 42 | @staticmethod 43 | @abstractmethod 44 | def initial_guesses(t, m, sigma, band) -> Dict[str, float]: 45 | return NotImplementedError 46 | 47 | @staticmethod 48 | @abstractmethod 49 | def limits(t, m, sigma, band) -> Dict[str, float]: 50 | return NotImplementedError 51 | 52 | 53 | @dataclass 54 | class ConstantTemperatureTerm(BaseTemperatureTerm): 55 | """Constant temperature""" 56 | 57 | @staticmethod 58 | def parameter_names(): 59 | return ["T"] 60 | 61 | @staticmethod 62 | def parameter_scalings(): 63 | return [None] 64 | 65 | @staticmethod 66 | def value(t, temp): 67 | return np.full_like(t, temp) 68 | 69 | @staticmethod 70 | def initial_guesses(t, m, sigma, band): 71 | initial = {} 72 | initial["T"] = 8000.0 73 | 74 | return initial 75 | 76 | @staticmethod 77 | def limits(t, m, sigma, band): 78 | limits = {} 79 | limits["T"] = (1e3, 2e6) # K 80 | 81 | return limits 82 | 83 | 84 | @dataclass 85 | class SigmoidTemperatureTerm(BaseTemperatureTerm): 86 | """Sigmoid temperature""" 87 | 88 | @staticmethod 89 | def parameter_names(): 90 | return ["reference_time", "Tmin", "Tmax", "t_color"] 91 | 92 | @staticmethod 93 | def parameter_scalings(): 94 | return ["time", None, None, "timescale"] 95 | 96 | @staticmethod 97 | def value(t, t0, temp_min, temp_max, t_color): 98 | dt = t - t0 99 | 100 | # To avoid numerical overflows, let's only compute the exponent not too far from t0 101 | idx1 = dt <= -100 * t_color 102 | idx2 = (dt > -100 * t_color) & (dt < 100 * t_color) 103 | idx3 = dt >= 100 * t_color 104 | 105 | result = np.zeros(len(dt)) 106 | result[idx1] = temp_max 107 | result[idx2] = temp_min + (temp_max - temp_min) / (1.0 + np.exp(dt[idx2] / t_color)) 108 | result[idx3] = temp_min 109 | 110 | return result 111 | 112 | @staticmethod 113 | def initial_guesses(t, m, sigma, band): 114 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 115 | 116 | initial = {} 117 | initial["Tmin"] = 7000.0 118 | initial["Tmax"] = 10000.0 119 | initial["t_color"] = 2 * dt 120 | 121 | return initial 122 | 123 | @staticmethod 124 | def limits(t, m, sigma, band): 125 | t_amplitude = np.ptp(t) 126 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 127 | 128 | limits = {} 129 | limits["Tmin"] = (1e3, 2e6) # K 130 | limits["Tmax"] = (1e3, 2e6) # K 131 | limits["t_color"] = (dt / 3, 10 * t_amplitude) 132 | 133 | return limits 134 | 135 | 136 | @dataclass 137 | class DelayedSigmoidTemperatureTerm(BaseTemperatureTerm): 138 | """Sigmoid temperature with delay w.r.t. bolometric peak""" 139 | 140 | @staticmethod 141 | def parameter_names(): 142 | return ["reference_time", "Tmin", "Tmax", "t_color", "t_delay"] 143 | 144 | @staticmethod 145 | def parameter_scalings(): 146 | return ["time", None, None, "timescale", "timescale"] 147 | 148 | @staticmethod 149 | def value(t, t0, Tmin, Tmax, t_color, t_delay): 150 | dt = t - t0 - t_delay 151 | 152 | # To avoid numerical overflows, let's only compute the exponent not too far from t0 153 | idx1 = dt <= -100 * t_color 154 | idx2 = (dt > -100 * t_color) & (dt < 100 * t_color) 155 | idx3 = dt >= 100 * t_color 156 | 157 | result = np.zeros(len(dt)) 158 | result[idx1] = Tmax 159 | result[idx2] = Tmin + (Tmax - Tmin) / (1.0 + np.exp(dt[idx2] / t_color)) 160 | result[idx3] = Tmin 161 | 162 | return result 163 | 164 | @staticmethod 165 | def initial_guesses(t, m, sigma, band): 166 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 167 | 168 | initial = {} 169 | initial["Tmin"] = 7000.0 170 | initial["Tmax"] = 10000.0 171 | initial["t_color"] = 2 * dt 172 | initial["t_delay"] = 0.0 173 | 174 | return initial 175 | 176 | @staticmethod 177 | def limits(t, m, sigma, band): 178 | t_amplitude = np.ptp(t) 179 | _, dt = t0_and_weighted_centroid_sigma(t, m, sigma) 180 | 181 | limits = {} 182 | limits["Tmin"] = (1e3, 2e6) # K 183 | limits["Tmax"] = (1e3, 2e6) # K 184 | limits["t_color"] = (dt / 3, 10 * t_amplitude) 185 | limits["t_delay"] = (-t_amplitude, t_amplitude) 186 | 187 | return limits 188 | 189 | 190 | def median_dt(t, band): 191 | # Compute the median distance between points in each band 192 | dt = [] 193 | for b in np.unique(band): 194 | dt += list(t[band == b][1:] - t[band == b][:-1]) 195 | med_dt = np.median(dt) 196 | return med_dt 197 | 198 | 199 | def t0_and_weighted_centroid_sigma(t, m, sigma): 200 | # To avoid crashing on all-negative data 201 | mc = m - np.min(m) 202 | 203 | # Peak position as weighted centroid of everything above median 204 | idx = m > np.median(m) 205 | t0 = np.sum(t[idx] * m[idx] / sigma[idx]) / np.sum(m[idx] / sigma[idx]) 206 | 207 | # Weighted centroid sigma 208 | dt = np.sqrt(np.sum((t[idx] - t0) ** 2 * (mc[idx]) / sigma[idx]) / np.sum(mc[idx] / sigma[idx])) 209 | return t0, dt 210 | 211 | 212 | temperature_terms = { 213 | "constant": ConstantTemperatureTerm, 214 | "sigmoid": SigmoidTemperatureTerm, 215 | "delayed_sigmoid": DelayedSigmoidTemperatureTerm, 216 | } 217 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/redchi2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class ReducedChi2(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | n = len(m) 9 | weights = sigma**-2 10 | m_wmean = np.average(m, weights=weights) 11 | s = (m - m_wmean) ** 2 * weights 12 | return np.sum(s) / (n - 1) 13 | 14 | @property 15 | def size_single_band(self): 16 | return 1 17 | 18 | 19 | __all__ = ("ReducedChi2",) 20 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/roms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class Roms(BaseSingleBandFeature): 7 | r"""Robust median statistic 8 | 9 | $$ 10 | \frac1{N-1} \sum_{i=0}^{N-1} \frac{|m_i - \mathrm{median}(m_i)|}{\sigma_i} 11 | $$ 12 | For non-variable data, it should be less than one. 13 | 14 | - Depends on: **magnitude**, **errors** 15 | - Minimum number of observations: **2** 16 | - Number of features: **1** 17 | 18 | Enoch, Brown, Burgasser 2003. [DOI:10.1086/376598](https://www.doi.org/10.1086/376598) 19 | """ 20 | 21 | def _eval_single_band(self, t, m, sigma=None): 22 | n = len(m) 23 | median = np.median(m) 24 | return np.sum(np.abs(m - median) / sigma) / (n - 1) 25 | 26 | @property 27 | def size_single_band(self): 28 | return 1 29 | 30 | 31 | __all__ = ("Roms",) 32 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/skew.py: -------------------------------------------------------------------------------- 1 | from ._base import BaseSingleBandFeature 2 | 3 | 4 | class Skew(BaseSingleBandFeature): 5 | def _eval_single_band(self, t, m, sigma=None): 6 | try: 7 | from scipy.stats import skew 8 | except ImportError: 9 | raise ImportError("scipy is required for Skew feature, please install it") 10 | 11 | return skew(m, bias=False) 12 | 13 | @property 14 | def size_single_band(self): 15 | return 1 16 | 17 | 18 | __all__ = ("Skew",) 19 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/stdev.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class StandardDeviation(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return np.std(m, ddof=1) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("StandardDeviation",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/stetsonk.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class StetsonK(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | m_mean = np.average(m, weights=np.power(sigma, -2)) 9 | numerator = np.sum(np.abs((m - m_mean) / sigma)) 10 | chisq = np.sum(((m - m_mean) / sigma) ** 2) 11 | return numerator / np.sqrt(len(m) * chisq) 12 | 13 | @property 14 | def size_single_band(self): 15 | return 1 16 | 17 | 18 | __all__ = ("StetsonK",) 19 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/features/weightmean.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from ._base import BaseSingleBandFeature 4 | 5 | 6 | class WeightedMean(BaseSingleBandFeature): 7 | def _eval_single_band(self, t, m, sigma=None): 8 | return np.average(m, weights=np.power(sigma, -2)) 9 | 10 | @property 11 | def size_single_band(self): 12 | return 1 13 | 14 | 15 | __all__ = ("WeightedMean",) 16 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/minuit_lsq.py: -------------------------------------------------------------------------------- 1 | """Re-implementation of iminuit.cost.LeastSquares with an arbitrary data format""" 2 | 3 | from typing import Callable, Dict, Tuple 4 | 5 | import numpy as np 6 | 7 | try: 8 | from iminuit import Minuit 9 | except ImportError: 10 | LeastSquares = None 11 | else: 12 | # Following https://iminuit.readthedocs.io/en/stable/notebooks/generic_least_squares.html 13 | class LeastSquares: 14 | errordef = Minuit.LEAST_SQUARES 15 | 16 | def __init__(self, model: Callable, parameters: Dict[str, Tuple[float, float]], *, x, y, yerror): 17 | self.model = model 18 | self.x = x 19 | self.y = y 20 | self.yerror = yerror 21 | self._parameters = parameters 22 | 23 | @property 24 | def ndata(self): 25 | return len(self.y) 26 | 27 | def __call__(self, *par): 28 | ym = self.model(self.x, *par) 29 | return np.sum(np.square((self.y - ym) / self.yerror)) 30 | 31 | 32 | __all__ = ["LeastSquares"] 33 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/minuit_ml.py: -------------------------------------------------------------------------------- 1 | """Maximum-likelihood based cost function""" 2 | 3 | from typing import Callable, Dict, Tuple 4 | 5 | import numpy as np 6 | 7 | try: 8 | from iminuit import Minuit 9 | from scipy.special import erf 10 | except ImportError: 11 | MaximumLikelihood = None 12 | else: 13 | 14 | class MaximumLikelihood: 15 | errordef = Minuit.LIKELIHOOD 16 | 17 | def __init__( 18 | self, model: Callable, parameters: Dict[str, Tuple[float, float]], upper_mask=None, *, x, y, yerror 19 | ): 20 | self.model = model 21 | self.x = x 22 | self.y = y 23 | self.yerror = yerror 24 | self.upper_mask = upper_mask 25 | 26 | self._parameters = parameters 27 | # FIXME: here we assume the order of dict keys is the same as in parameters array 28 | self.limits = np.array([_ for _ in parameters.values()]) 29 | self.limits0 = self.limits[:, 0] 30 | self.limits1 = self.limits[:, 1] 31 | self.limits_scale = self.limits[:, 1] - self.limits[:, 0] 32 | 33 | @property 34 | def ndata(self): 35 | return len(self.y) 36 | 37 | def __call__(self, *par): 38 | ym = self.model(self.x, *par) 39 | 40 | if self.upper_mask is None: 41 | result = -np.sum(self.logpdf(self.y, ym, self.yerror)) 42 | else: 43 | # Measurements 44 | result = -np.sum( 45 | self.logpdf(self.y[~self.upper_mask], ym[~self.upper_mask], self.yerror[~self.upper_mask]) 46 | ) 47 | # Upper limits, Tobit model 48 | # https://stats.stackexchange.com/questions/49443/how-to-model-this-odd-shaped-distribution-almost-a-reverse-j 49 | result += -np.sum( 50 | self.logcdf((self.y[self.upper_mask] - ym[self.upper_mask]) / self.yerror[self.upper_mask]) 51 | ) 52 | 53 | # Barriers around parameter ranges 54 | # Scale is selected so that for the most of the range it is much smaller 55 | # than 0.5 which corresponds to 1-sigma errors 56 | result += 0.0001 * np.sum(self.barrier((par - self.limits0) / self.limits_scale)) 57 | result += 0.0001 * np.sum(self.barrier((self.limits1 - par) / self.limits_scale)) 58 | 59 | return result 60 | 61 | @staticmethod 62 | def logpdf(x, mu, sigma): 63 | # We do not need the second term as it does not depend on parameters 64 | return -(((x - mu) / sigma) ** 2) / 2 # - np.log(np.sqrt(2*np.pi) * sigma) 65 | 66 | @staticmethod 67 | def barrier(x): 68 | res = np.where(x > 0, 1 / x, np.inf) # FIXME: naive barrier function 69 | 70 | return res 71 | 72 | @staticmethod 73 | def logcdf(x): 74 | # TODO: faster (maybe not so accurate, as we do not need it) implementation 75 | # return norm.logcdf(x) 76 | 77 | result = np.zeros(len(x)) 78 | 79 | idx = x < -5 80 | result[idx] = -x[idx] ** 2 / 2 - 1 / x[idx] ** 2 - 0.9189385336 - np.log(-x[idx]) 81 | result[~idx] = np.log(0.5) + np.log1p(erf(x[~idx] / np.sqrt(2))) 82 | 83 | return result 84 | 85 | 86 | __all__ = ["MaximumLikelihood"] 87 | -------------------------------------------------------------------------------- /light-curve/light_curve/light_curve_py/warnings.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | 4 | class ExperimentalWarning(UserWarning): 5 | pass 6 | 7 | 8 | def warn_experimental(msg): 9 | warnings.warn(msg, category=ExperimentalWarning, stacklevel=2) 10 | 11 | 12 | def mark_experimental(f, msg=None): 13 | def inner(f): 14 | message = msg 15 | if message is None: 16 | full_name = f"{f.__module__}.{f.__class__.__name__}" 17 | message = f"Function {full_name} is experimental and may cause any kind of troubles" 18 | 19 | warn_experimental(message) 20 | return f 21 | 22 | return inner 23 | 24 | 25 | __all__ = ( 26 | "ExperimentalWarning", 27 | "warn_experimental", 28 | "mark_experimental", 29 | ) 30 | -------------------------------------------------------------------------------- /light-curve/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=1.0,<2.0"] 3 | build-backend = "maturin" 4 | 5 | [project] 6 | name = "light-curve" 7 | dependencies = ["numpy"] 8 | requires-python = ">=3.9" 9 | classifiers = [ 10 | "Intended Audience :: Science/Research", 11 | "License :: OSI Approved :: MIT License", 12 | "Programming Language :: Python", 13 | "Programming Language :: Python :: 3 :: Only", 14 | "Programming Language :: Python :: 3.9", 15 | "Programming Language :: Python :: 3.10", 16 | "Programming Language :: Python :: 3.11", 17 | "Programming Language :: Python :: 3.12", 18 | "Programming Language :: Python :: 3.13", 19 | "Programming Language :: Rust", 20 | "Topic :: Scientific/Engineering :: Astronomy", 21 | ] 22 | # We load these from Cargo.toml 23 | dynamic = [ 24 | "authors", 25 | "description", 26 | "license", 27 | "readme", 28 | "version", 29 | ] 30 | 31 | [project.optional-dependencies] 32 | # Packages required by some experimental features 33 | full = [ 34 | "iminuit>=2.21,<3", 35 | "scipy<2", 36 | ] 37 | 38 | # Testing environment 39 | test = [ 40 | "pytest", 41 | "markdown-pytest", 42 | "pytest-benchmark", 43 | "pytest-subtests>=0.10", 44 | "iminuit>=2.21,<3", 45 | "numpy", 46 | "scipy", 47 | "cesium", 48 | "joblib", 49 | "pandas", 50 | ] 51 | dev = [ 52 | "pytest", 53 | "markdown-pytest", 54 | "pytest-benchmark", 55 | "pytest-subtests>=0.10", 56 | "iminuit>=2.21,<3", 57 | "numpy", 58 | "scipy", 59 | "cesium", 60 | "joblib", 61 | "pandas", 62 | "black", 63 | "ruff", 64 | ] 65 | # cesium and iminuit don't support free-threading yet 66 | dev-free-threading = [ 67 | "pytest", 68 | "markdown-pytest", 69 | "pytest-benchmark", 70 | "pytest-subtests>=0.10", 71 | "numpy", 72 | "scipy", 73 | "joblib", 74 | "pandas", 75 | "black", 76 | "ruff", 77 | ] 78 | 79 | [tool.maturin] 80 | # It asks to use Cargo.lock to make the build reproducible 81 | locked = true 82 | 83 | [tool.maturin.target.aarch64-apple-darwin] 84 | # I belive Rust requires it 85 | macos-deployment-target = "11.0" 86 | 87 | [tool.maturin.target.x86_64-apple-darwin] 88 | # Default is 10.7, but we need 10.9 for Ceres 89 | macos-deployment-target = "10.9" 90 | 91 | [tool.black] 92 | line-length = 120 93 | target-version = ["py39"] 94 | include = '\.py$' 95 | exclude = ''' 96 | /( 97 | docs 98 | | dist 99 | | target 100 | | tests/light-curve-test-data 101 | | wheelhouse 102 | | \.benchmarks 103 | | \.idea 104 | | \.mypy_cache 105 | | \.pytest_cache 106 | | \.tox 107 | | _build 108 | )/ 109 | ''' 110 | 111 | [tool.ruff] 112 | line-length = 120 113 | exclude = [ 114 | "docs", 115 | "dist", 116 | "target", 117 | "tests/light-curve-test-data", 118 | "wheelhouse", 119 | ".benchmarks", 120 | ".idea", 121 | ".mypy_cache", 122 | ".pytest_cache", 123 | ".tox", 124 | "_build", 125 | ] 126 | target-version = "py39" 127 | 128 | [tool.ruff.lint] 129 | select = [ 130 | # Pyflakes 131 | "F", 132 | # Pycodestyle 133 | "E", 134 | "W", 135 | # isort 136 | "I001", 137 | # Numpy v2.0 compatibility 138 | "NPY201", 139 | ] 140 | 141 | [tool.ruff.lint.per-file-ignores] 142 | # Unused and star imports 143 | "light_curve/__init__.py" = ["F401", "F403", "I001"] 144 | "light_curve/light_curve_ext.py" = ["F403", "F405"] 145 | "light_curve/light_curve_py/__init__.py" = ["F403"] 146 | "light_curve/light_curve_py/features/rainbow/__init__.py" = ["F403"] 147 | 148 | [tool.pytest.ini_options] 149 | minversion = "6.0" 150 | # requires pytest-benchmark 151 | addopts = "-ra --import-mode=append --benchmark-min-time=0.1 --benchmark-max-time=5.0 --benchmark-sort=mean --benchmark-disable" 152 | testpaths = [ 153 | "tests/", 154 | "README.md", # requires markdown-pytest 155 | ] 156 | markers = [ 157 | "nobs: marks benchmarks for different numbers of observations (deselect with '-m \"not nobs\"')", 158 | "multi: marks multiprocessing benchmarks (deselect with '-m \"not multi\"')", 159 | ] 160 | 161 | [tool.tox] 162 | legacy_tox_ini = """ 163 | [tox] 164 | envlist = py{39,310,311,312,313,313t}-{base,test} 165 | isolated_build = True 166 | 167 | [testenv:py{39,310,311,312,313,313t}-base] 168 | change_dir = {envtmpdir} 169 | extras = 170 | commands = 171 | python -c 'import light_curve' 172 | set_env = 173 | CARGO_TARGET_DIR = {tox_root}/target 174 | 175 | [testenv:py{39,310,311,312,313}-test] 176 | extras = dev 177 | commands = 178 | pytest README.md tests/ light_curve/ 179 | ruff check . 180 | set_env = 181 | CARGO_TARGET_DIR = {tox_root}/target 182 | 183 | [testenv:py313t-test] 184 | extras = dev-free-threading 185 | commands = 186 | pytest README.md tests/ light_curve/ \ 187 | --ignore tests/test_w_bench.py \ 188 | --ignore=tests/light_curve_py/features/test_rainbow.py \ 189 | --deselect=README.md::test_rainbow_fit_example 190 | ruff check . 191 | set_env = 192 | CARGO_TARGET_DIR = {tox_root}/target 193 | """ 194 | 195 | 196 | [tool.cibuildwheel] 197 | # Default is "pip", but it is recommended to use "build" 198 | build-frontend = "build" 199 | 200 | # - Set PATH to include Rust. 201 | # - Set maturin build options, including LICU_ADDITIONAL_FEATURES which may be passed through CIBW_ENVIRONMENT_PASS_LINUX 202 | environment = { "PATH" = "$PATH:$HOME/.cargo/bin", "MATURIN_PEP517_ARGS" = "--locked --no-default-features --features=abi3,ceres-system,fftw-system,gsl,mimalloc" } 203 | 204 | # We use our own images which include Rust, GSL and platform-optimised FFTW 205 | # Manylinux CPython 206 | manylinux-aarch64-image = "ghcr.io/light-curve/base-docker-images/manylinux2014_aarch64" 207 | manylinux-x86_64-image = "ghcr.io/light-curve/base-docker-images/manylinux2014_x86_64" 208 | # Manylinux PyPy 209 | manylinux-pypy_aarch64-image = "ghcr.io/light-curve/base-docker-images/manylinux2014_aarch64" 210 | manylinux-pypy_x86_64-image = "ghcr.io/light-curve/base-docker-images/manylinux2014_x86_64" 211 | # Musllinux 212 | musllinux-aarch64-image = "ghcr.io/light-curve/base-docker-images/musllinux_1_2_aarch64" 213 | musllinux-x86_64-image = "ghcr.io/light-curve/base-docker-images/musllinux_1_2_x86_64" 214 | 215 | [tool.cibuildwheel.macos] 216 | before-all = [ 217 | # Install Rust 218 | "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y", 219 | # Install FFTW and GSL 220 | "brew install ceres-solver fftw gsl", 221 | ] 222 | 223 | # We miss Rust installation here because it is not so simple on Windows 224 | [tool.cibuildwheel.windows] 225 | # fftw-src downloads FFTW DLLs, so fftw-sys doesn't link FFTW statically. We need to repair the wheel with these DLLs. 226 | before-all = [ 227 | "curl -o %USERPROFILE%\\Downloads\\fftw-dll64.zip https://fftw.org/pub/fftw/fftw-3.3.5-dll64.zip", 228 | "powershell -command \"Expand-Archive -Path %USERPROFILE%\\Downloads\\fftw-dll64.zip -DestinationPath %USERPROFILE%\\Downloads\\fftw-dll64\"", 229 | ] 230 | before-build = ["pip install delvewheel"] 231 | repair-wheel-command = "delvewheel repair --add-path=%USERPROFILE%\\Downloads\\fftw-dll64 -w {dest_dir} {wheel}" 232 | # We do not support Ceres and GSL on Windows 233 | environment = { "PATH" = "$PATH:$HOME/.cargo/bin", "MATURIN_PEP517_ARGS" = "--locked --no-default-features --features=abi3,fftw-source,mimalloc" } 234 | 235 | # Build with Intel MKL on Linux x86_64 236 | [[tool.cibuildwheel.overrides]] 237 | select = "*linux_x86_64" 238 | # We'd like to use MKL for x86_64 239 | environment = { "PATH" = "$PATH:$HOME/.cargo/bin", "MATURIN_PEP517_ARGS" = "--locked --no-default-features --features=abi3,ceres-system,fftw-mkl,gsl,mimalloc" } 240 | 241 | # Test 242 | # We use platforms natively available on GitHub Actions and skip Windows because it doesn't support all the features 243 | [[tool.cibuildwheel.overrides]] 244 | select = "cp*-manylinux_x86_64 cp*-macosx*" 245 | test-command = "pytest {package}/README.md {package}/light_curve/ {package}/tests/" 246 | test-extras = ["test"] 247 | -------------------------------------------------------------------------------- /light-curve/src/check.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::{Exception, Res}; 2 | 3 | use itertools::Itertools; 4 | use light_curve_feature::Float; 5 | use ndarray::{ArrayView1, Zip}; 6 | 7 | pub(crate) fn is_sorted(a: &[T]) -> bool 8 | where 9 | T: PartialOrd, 10 | { 11 | a.iter().tuple_windows().all(|(a, b)| a < b) 12 | } 13 | 14 | pub(crate) fn check_sorted(a: &[T], sorted: Option) -> Res<()> 15 | where 16 | T: PartialOrd, 17 | { 18 | match sorted { 19 | Some(true) => Ok(()), 20 | Some(false) => Err(Exception::NotImplementedError(String::from( 21 | "sorting is not implemented, please provide time-sorted arrays", 22 | ))), 23 | None => { 24 | if is_sorted(a) { 25 | Ok(()) 26 | } else { 27 | Err(Exception::ValueError(String::from( 28 | "t must be in ascending order", 29 | ))) 30 | } 31 | } 32 | } 33 | } 34 | 35 | pub(crate) fn check_finite(a: ArrayView1<'_, T>) -> Res<()> 36 | where 37 | T: Float, 38 | { 39 | if Zip::from(a).all(|x| x.is_finite()) { 40 | Ok(()) 41 | } else { 42 | Err(Exception::ValueError(String::from( 43 | "t and m values must be finite", 44 | ))) 45 | } 46 | } 47 | 48 | pub(crate) fn check_no_nans(a: ArrayView1<'_, T>) -> Res<()> 49 | where 50 | T: Float, 51 | { 52 | // There are no Zip::any 53 | if Zip::from(a).all(|x| !x.is_nan()) { 54 | Ok(()) 55 | } else { 56 | Err(Exception::ValueError(String::from( 57 | "input arrays must not contain any NaNs", 58 | ))) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /light-curve/src/cont_array.rs: -------------------------------------------------------------------------------- 1 | use ndarray::{Array1, ArrayBase, ArrayView1, CowRepr, Ix1, OwnedRepr, RawData, ViewRepr}; 2 | use num_traits::Zero; 3 | use numpy::Element; 4 | 5 | pub struct ContArrayBase(pub ArrayBase); 6 | 7 | pub type ContArray = ContArrayBase>; 8 | pub type ContArrayView<'a, T> = ContArrayBase>; 9 | pub type ContCowArray<'a, T> = ContArrayBase>; 10 | 11 | impl ContArrayBase 12 | where 13 | S: ndarray::Data, 14 | S::Elem: Clone, 15 | { 16 | pub fn as_slice(&self) -> &[S::Elem] { 17 | self.0.as_slice().unwrap() 18 | } 19 | 20 | pub fn into_owned(self) -> ContArray { 21 | ContArrayBase::>(self.0.into_owned()) 22 | } 23 | } 24 | 25 | impl<'a, T> ContCowArray<'a, T> 26 | where 27 | T: Element + Zero + Copy, 28 | { 29 | pub fn from_view(a: ArrayView1<'a, T>, required: bool) -> Self { 30 | if required || a.is_standard_layout() { 31 | a.into() 32 | } else { 33 | // TODO: Use the same broadcast trick as in light-curve-feature to speed-up this 34 | Self(Array1::zeros(a.len()).into()) 35 | } 36 | } 37 | } 38 | 39 | impl From> for ContArray 40 | where 41 | T: Element + Copy, 42 | { 43 | fn from(a: Array1) -> Self { 44 | if a.is_standard_layout() { 45 | Self(a) 46 | } else { 47 | let owned = a.iter().copied().collect::>(); 48 | Self(Array1::from_vec(owned)) 49 | } 50 | } 51 | } 52 | 53 | impl<'a, T> From> for ContArray 54 | where 55 | T: Element + Copy, 56 | { 57 | fn from(a: ArrayView1<'a, T>) -> Self { 58 | let cow: ContCowArray<_> = a.into(); 59 | cow.into_owned() 60 | } 61 | } 62 | 63 | impl<'a, T> From> for ContCowArray<'a, T> 64 | where 65 | T: Element + Copy, 66 | { 67 | fn from(a: ArrayView1<'a, T>) -> Self { 68 | if a.is_standard_layout() { 69 | Self(a.into()) 70 | } else { 71 | let owned_vec = a.iter().copied().collect::>(); 72 | let array = Array1::from_vec(owned_vec); 73 | let cow = array.into(); 74 | Self(cow) 75 | } 76 | } 77 | } 78 | 79 | impl From> for ContCowArray<'_, T> { 80 | fn from(a: ContArray) -> Self { 81 | Self(a.0.into()) 82 | } 83 | } 84 | 85 | impl<'a, T> From> for ContCowArray<'a, T> { 86 | fn from(a: ContArrayView<'a, T>) -> Self { 87 | Self(a.0.into()) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /light-curve/src/errors.rs: -------------------------------------------------------------------------------- 1 | use pyo3::PyErr; 2 | use pyo3::exceptions::{ 3 | PyIndexError, PyNotImplementedError, PyRuntimeError, PyTypeError, PyValueError, 4 | }; 5 | use pyo3::import_exception; 6 | use std::fmt::Debug; 7 | use std::result::Result; 8 | use thiserror::Error; 9 | 10 | import_exception!(pickle, PicklingError); 11 | import_exception!(pickle, UnpicklingError); 12 | 13 | #[allow(clippy::enum_variant_names)] 14 | #[derive(Error, Debug)] 15 | #[error("{0}")] 16 | pub(crate) enum Exception { 17 | // builtins 18 | IndexError(String), 19 | NotImplementedError(String), 20 | RuntimeError(String), 21 | TypeError(String), 22 | ValueError(String), 23 | // pickle 24 | PicklingError(String), 25 | UnpicklingError(String), 26 | // some exception from pyo3 which we need to handle 27 | PyO3(#[from] PyErr), 28 | } 29 | 30 | impl From for PyErr { 31 | fn from(err: Exception) -> PyErr { 32 | match err { 33 | // builtins 34 | Exception::IndexError(err) => PyIndexError::new_err(err), 35 | Exception::NotImplementedError(err) => PyNotImplementedError::new_err(err), 36 | Exception::RuntimeError(err) => PyRuntimeError::new_err(err), 37 | Exception::TypeError(err) => PyTypeError::new_err(err), 38 | Exception::ValueError(err) => PyValueError::new_err(err), 39 | // pickle 40 | Exception::PicklingError(err) => PicklingError::new_err(err), 41 | Exception::UnpicklingError(err) => UnpicklingError::new_err(err), 42 | // pyo3 43 | Exception::PyO3(err) => err, 44 | } 45 | } 46 | } 47 | 48 | impl From for Exception { 49 | fn from(err: light_curve_feature::EvaluatorError) -> Self { 50 | Exception::RuntimeError(err.to_string()) 51 | } 52 | } 53 | 54 | pub(crate) type Res = Result; 55 | -------------------------------------------------------------------------------- /light-curve/src/lib.rs: -------------------------------------------------------------------------------- 1 | use dmdt::DmDt; 2 | use features as f; 3 | use ln_prior::*; 4 | #[cfg(feature = "mimalloc")] 5 | use mimalloc::MiMalloc; 6 | use pyo3::prelude::*; 7 | 8 | #[macro_use] 9 | mod np_array; 10 | mod check; 11 | mod cont_array; 12 | mod dmdt; 13 | mod errors; 14 | mod features; 15 | mod ln_prior; 16 | mod transform; 17 | 18 | #[cfg(feature = "mimalloc")] 19 | #[global_allocator] 20 | static GLOBAL_ALLOCATOR: MiMalloc = MiMalloc; 21 | 22 | /// High-performance time-series feature extractor 23 | /// 24 | /// The module provides a collection of features to be extracted from unevenly separated 25 | /// time-series. This module if based on Rust crates `light-curve-feature` & `light-curve-dmdt`. 26 | /// 27 | /// dm-lg(dt) maps generator is represented by `DmDt` class, while all other classes are 28 | /// feature extractors 29 | #[pymodule(gil_used = false)] 30 | fn light_curve(py: Python, m: Bound) -> PyResult<()> { 31 | m.add("__version__", env!("CARGO_PKG_VERSION"))?; 32 | 33 | m.add( 34 | "_built_with_ceres", 35 | cfg!(any(feature = "ceres-source", feature = "ceres-system")), 36 | )?; 37 | m.add("_built_with_gsl", cfg!(feature = "gsl"))?; 38 | m.add("_fft_backend", { 39 | #[cfg(feature = "fftw-mkl")] 40 | { 41 | "Intel MKL linked statically for FFTW" 42 | } 43 | #[cfg(all(not(feature = "fftw-mkl"), feature = "fftw-system"))] 44 | { 45 | "FFTW linked from system, may or may not be bundled into the package" 46 | } 47 | #[cfg(all( 48 | not(feature = "fftw-mkl"), 49 | not(feature = "fftw-system"), 50 | feature = "fftw-source" 51 | ))] 52 | { 53 | "FFTW built from source by fftw-src crate and statically linked into the module" 54 | } 55 | #[cfg(not(any(feature = "fftw-mkl", feature = "fftw-system", feature = "fftw-source")))] 56 | { 57 | compile_error!("One of fftw-mkl, fftw-system or fftw-source features is required"); 58 | } 59 | })?; 60 | 61 | m.add_class::()?; 62 | 63 | m.add_class::()?; 64 | 65 | m.add_class::()?; 66 | 67 | m.add_class::()?; 68 | m.add_class::()?; 69 | m.add_class::()?; 70 | m.add_class::()?; 71 | m.add_class::()?; 72 | m.add_class::()?; 73 | m.add_class::()?; 74 | m.add_class::()?; 75 | m.add_class::()?; 76 | m.add_class::()?; 77 | m.add_class::()?; 78 | m.add_class::()?; 79 | m.add_class::()?; 80 | m.add_class::()?; 81 | m.add_class::()?; 82 | m.add_class::()?; 83 | m.add_class::()?; 84 | m.add_class::()?; 85 | m.add_class::()?; 86 | m.add_class::()?; 87 | m.add_class::()?; 88 | m.add_class::()?; 89 | m.add_class::()?; 90 | m.add_class::()?; 91 | m.add_class::()?; 92 | m.add_class::()?; 93 | m.add_class::()?; 94 | m.add_class::()?; 95 | m.add_class::()?; 96 | m.add_class::()?; 97 | m.add_class::()?; 98 | m.add_class::()?; 99 | m.add_class::()?; 100 | m.add_class::()?; 101 | m.add_class::()?; 102 | m.add_class::()?; 103 | m.add_class::()?; 104 | m.add_class::()?; 105 | m.add_class::()?; 106 | 107 | register_ln_prior_submodule(py, m)?; 108 | 109 | Ok(()) 110 | } 111 | -------------------------------------------------------------------------------- /light-curve/src/ln_prior.rs: -------------------------------------------------------------------------------- 1 | /// Prior classes and constructors for *Fit feature evaluators 2 | use crate::errors::{Exception, Res}; 3 | 4 | use light_curve_feature as lcf; 5 | use pyo3::prelude::*; 6 | use pyo3::types::PyBytes; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | /// Logarithm of prior for *Fit feature extractors 10 | /// 11 | /// Construct instances of this class using stand-alone functions. The constructor of this class 12 | /// always returns `none` variant (see `ln_prior.none()`). 13 | #[pyclass(module = "light_curve.light_curve_ext.ln_prior")] 14 | #[derive(Clone, Serialize, Deserialize)] 15 | pub struct LnPrior1D(pub lcf::LnPrior1D); 16 | 17 | #[pymethods] 18 | impl LnPrior1D { 19 | #[new] 20 | fn __new__() -> Self { 21 | Self(lcf::LnPrior1D::none()) 22 | } 23 | 24 | /// Used by pickle.load / pickle.loads 25 | fn __setstate__(&mut self, state: Bound) -> Res<()> { 26 | *self = serde_pickle::from_slice(state.as_bytes(), serde_pickle::DeOptions::new()) 27 | .map_err(|err| { 28 | Exception::UnpicklingError(format!( 29 | r#"Error happened on the Rust side when deserializing LnPrior1D: "{err}""# 30 | )) 31 | })?; 32 | Ok(()) 33 | } 34 | 35 | /// Used by pickle.dump / pickle.dumps 36 | fn __getstate__<'py>(&self, py: Python<'py>) -> Res> { 37 | let vec_bytes = 38 | serde_pickle::to_vec(&self, serde_pickle::SerOptions::new()).map_err(|err| { 39 | Exception::PicklingError(format!( 40 | r#"Error happened on the Rust side when serializing LnPrior1D: "{err}""# 41 | )) 42 | })?; 43 | Ok(PyBytes::new(py, &vec_bytes)) 44 | } 45 | 46 | /// Used by copy.copy 47 | fn __copy__(&self) -> Self { 48 | self.clone() 49 | } 50 | 51 | /// Used by copy.deepcopy 52 | fn __deepcopy__(&self, _memo: Bound) -> Self { 53 | self.clone() 54 | } 55 | } 56 | 57 | /// "None" prior, its logarithm is zero 58 | /// 59 | /// Returns 60 | /// ------- 61 | /// LnPrior1D 62 | #[pyfunction] 63 | fn none() -> LnPrior1D { 64 | LnPrior1D(lcf::LnPrior1D::none()) 65 | } 66 | 67 | /// Log-normal prior 68 | /// 69 | /// Parameters 70 | /// ---------- 71 | /// mu : float 72 | /// sigma : float 73 | /// 74 | /// Returns 75 | /// ------- 76 | /// LnPrior1D 77 | /// 78 | /// https://en.wikipedia.org/wiki/Log-normal_distribution 79 | #[pyfunction] 80 | fn log_normal(mu: f64, sigma: f64) -> LnPrior1D { 81 | LnPrior1D(lcf::LnPrior1D::log_normal(mu, sigma)) 82 | } 83 | 84 | /// Log-uniform prior 85 | /// 86 | /// Parameters 87 | /// ---------- 88 | /// left : float 89 | /// Left border of the distribution (value, not logarithm) 90 | /// right : float 91 | /// Right border of the distribution 92 | /// 93 | /// Returns 94 | /// ------- 95 | /// LnPrior1D 96 | #[pyfunction] 97 | fn log_uniform(left: f64, right: f64) -> LnPrior1D { 98 | LnPrior1D(lcf::LnPrior1D::log_uniform(left, right)) 99 | } 100 | 101 | /// Normal prior 102 | /// 103 | /// Parameters 104 | /// ---------- 105 | /// mu : float 106 | /// sigma : float 107 | /// 108 | /// Returns 109 | /// ------- 110 | /// LnPrior1D 111 | #[pyfunction] 112 | fn normal(mu: f64, sigma: f64) -> LnPrior1D { 113 | LnPrior1D(lcf::LnPrior1D::normal(mu, sigma)) 114 | } 115 | 116 | /// Uniform prior 117 | /// 118 | /// Parameters 119 | /// ---------- 120 | /// left : float 121 | /// Left border of the distribution 122 | /// right : float 123 | /// Right border of the distribution 124 | /// 125 | /// Returns 126 | /// ------- 127 | /// LnPrior1D 128 | #[pyfunction] 129 | fn uniform(left: f64, right: f64) -> LnPrior1D { 130 | LnPrior1D(lcf::LnPrior1D::uniform(left, right)) 131 | } 132 | 133 | /// Prior as a mixed distribution 134 | /// 135 | /// Parameters 136 | /// ---------- 137 | /// mix : list of (float, LnPrior1D) tuples 138 | /// A mixed distribution represented as a list of (weight, LnPrior1D), the 139 | /// mixed logarithm of prior is 140 | /// ln(sum(norm_weight_i * exp(ln_prior_i(x)))) 141 | /// where norm_weight_i = weight_i / sum(weight_j) 142 | #[pyfunction] 143 | fn mix(mix: Vec<(f64, LnPrior1D)>) -> LnPrior1D { 144 | let priors = mix 145 | .into_iter() 146 | .map(|(weight, py_ln_prior)| (weight, py_ln_prior.0)) 147 | .collect(); 148 | LnPrior1D(lcf::LnPrior1D::mix(priors)) 149 | } 150 | 151 | pub fn register_ln_prior_submodule(py: Python, parent_module: Bound) -> PyResult<()> { 152 | let m = PyModule::new(py, "ln_prior")?; 153 | m.add_class::()?; 154 | m.add_function(wrap_pyfunction!(none, &m)?)?; 155 | m.add_function(wrap_pyfunction!(log_normal, &m)?)?; 156 | m.add_function(wrap_pyfunction!(log_uniform, &m)?)?; 157 | m.add_function(wrap_pyfunction!(normal, &m)?)?; 158 | m.add_function(wrap_pyfunction!(uniform, &m)?)?; 159 | m.add_function(wrap_pyfunction!(mix, &m)?)?; 160 | parent_module.add_submodule(&m)?; 161 | Ok(()) 162 | } 163 | -------------------------------------------------------------------------------- /light-curve/src/np_array.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::{Exception, Res}; 2 | 3 | use numpy::prelude::*; 4 | use numpy::{ 5 | AllowTypeChange, PyArray1, PyArrayLike1, PyReadonlyArray1, PyUntypedArray, 6 | PyUntypedArrayMethods, 7 | }; 8 | use pyo3::prelude::*; 9 | use unarray::UnarrayArrayExt; 10 | 11 | pub(crate) type Arr<'a, T> = PyReadonlyArray1<'a, T>; 12 | 13 | pub(crate) trait DType { 14 | fn dtype_name() -> &'static str; 15 | } 16 | 17 | impl DType for f32 { 18 | fn dtype_name() -> &'static str { 19 | "float32" 20 | } 21 | } 22 | 23 | impl DType for f64 { 24 | fn dtype_name() -> &'static str { 25 | "float64" 26 | } 27 | } 28 | 29 | pub(crate) fn unknown_type_exception(name: &str, obj: &Bound) -> Exception { 30 | let message = if let Ok(arr) = obj.downcast::() { 31 | let ndim = arr.ndim(); 32 | if ndim != 1 { 33 | format!("'{name}' is a {ndim}-d array, only 1-d arrays are supported.") 34 | } else { 35 | let dtype = match arr.dtype().str() { 36 | Ok(s) => s, 37 | Err(err) => return err.into(), 38 | }; 39 | format!("'{name}' has dtype {dtype}, but only float32 and float64 are supported.") 40 | } 41 | } else { 42 | let tp = match obj.get_type().name() { 43 | Ok(s) => s, 44 | Err(err) => return err.into(), 45 | }; 46 | format!( 47 | "'{name}' has type '{tp}', float32 or float64 1-d numpy array was supported. Try to cast with np.asarray." 48 | ) 49 | }; 50 | Exception::TypeError(message) 51 | } 52 | 53 | fn cast_fail_reason( 54 | idx: usize, 55 | names: &'static [&'static str; N], 56 | objects: &[&Bound; N], 57 | cast: bool, 58 | ) -> Exception { 59 | let first_name = names.first().expect("Empty names slice"); 60 | let fist_obj = objects.first().expect("Empty objects slice"); 61 | 62 | // If the very first argument downcast 63 | if idx == 0 { 64 | return unknown_type_exception(first_name, fist_obj); 65 | } 66 | 67 | let maybe_first_f32_array = try_downcast_to_f32_array(objects[0]); 68 | let maybe_first_f64_array = try_downcast_to_f64_array(objects[0], cast); 69 | let (first_arr, first_dtype_name) = if let Some(f32_array) = maybe_first_f32_array.as_ref() { 70 | (f32_array.as_untyped(), f32::dtype_name()) 71 | } else if let Some(f64_array) = maybe_first_f64_array.as_ref() { 72 | (f64_array.as_untyped(), f64::dtype_name()) 73 | } else { 74 | return unknown_type_exception(first_name, fist_obj); 75 | }; 76 | 77 | let fail_name = names.get(idx).expect("idx is out of bounds of names slice"); 78 | let fail_obj = objects 79 | .get(idx) 80 | .expect("idx is out of bounds of objects slice"); 81 | 82 | let error_message = if let Ok(fail_arr) = fail_obj.downcast::() { 83 | if fail_arr.ndim() != 1 { 84 | format!( 85 | "'{}' is a {}-d array, only 1-d arrays are supported.", 86 | fail_name, 87 | fail_arr.ndim() 88 | ) 89 | } else { 90 | let first_arr_dtype_str = match first_arr.dtype().str() { 91 | Ok(s) => s, 92 | Err(err) => return err.into(), 93 | }; 94 | let fail_arr_dtype_str = match fail_arr.dtype().str() { 95 | Ok(s) => s, 96 | Err(err) => return err.into(), 97 | }; 98 | format!( 99 | "Mismatched dtypes: '{first_name}': {first_arr_dtype_str}, '{fail_name}': {fail_arr_dtype_str}", 100 | ) 101 | } 102 | } else { 103 | let fail_obj_type_name = match fail_obj.get_type().name() { 104 | Ok(s) => s, 105 | Err(err) => return err.into(), 106 | }; 107 | format!( 108 | "'{fail_name}' must be a numpy array of the same shape and dtype as '{first_name}', '{first_name}' has type 'np.ndarray[{first_dtype_name}]', '{fail_name}' has type '{fail_obj_type_name}')", 109 | ) 110 | }; 111 | Exception::TypeError(error_message) 112 | } 113 | 114 | pub(crate) enum GenericPyReadonlyArrays<'py, const N: usize> { 115 | F32([PyReadonlyArray1<'py, f32>; N]), 116 | F64([PyReadonlyArray1<'py, f64>; N]), 117 | } 118 | 119 | impl GenericPyReadonlyArrays<'_, N> { 120 | fn array_len(&self, i: usize) -> usize { 121 | match self { 122 | Self::F32(v) => v[i].len(), 123 | Self::F64(v) => v[i].len(), 124 | } 125 | } 126 | } 127 | 128 | fn try_downcast_objects_to_f32_arrays<'py, const N: usize>( 129 | objects: &[&Bound<'py, PyAny>; N], 130 | ) -> [Option>; N] { 131 | let mut arrays = [const { None }; N]; 132 | for (&obj, arr) in objects.iter().zip(arrays.iter_mut()) { 133 | *arr = try_downcast_to_f32_array(obj); 134 | // If we cannot cast an array, we stop trying for future arguments 135 | if arr.is_none() { 136 | break; 137 | } 138 | } 139 | arrays 140 | } 141 | 142 | fn try_downcast_to_f32_array<'py>(obj: &Bound<'py, PyAny>) -> Option> { 143 | let py_array = obj.downcast::>().ok()?; 144 | Some(py_array.readonly()) 145 | } 146 | 147 | fn try_downcast_to_f64_array<'py>( 148 | obj: &Bound<'py, PyAny>, 149 | cast: bool, 150 | ) -> Option> { 151 | match (obj.downcast::>(), cast) { 152 | (Ok(py_array), _) => Some(py_array.readonly()), 153 | (Err(_), true) => match PyArrayLike1::::extract_bound(obj) { 154 | Ok(py_array) => Some(py_array.readonly()), 155 | Err(_) => None, 156 | }, 157 | (Err(_), false) => None, 158 | } 159 | } 160 | 161 | const fn index() -> [usize; N] { 162 | let mut arr = [0; N]; 163 | let mut i = 0; 164 | while i < N { 165 | arr[i] = i; 166 | i += 1; 167 | } 168 | arr 169 | } 170 | 171 | fn downcast_objects_cast<'py, const N: usize>( 172 | names: &'static [&'static str; N], 173 | objects: &[&Bound<'py, PyAny>; N], 174 | ) -> Res> { 175 | let f32_arrays = try_downcast_objects_to_f32_arrays(objects); 176 | 177 | if f32_arrays.iter().all(|arr| arr.is_some()) { 178 | Ok(GenericPyReadonlyArrays::F32( 179 | f32_arrays.map(|arr| arr.unwrap()), 180 | )) 181 | } else { 182 | let result = index::().map_result::<_, Exception>(|i| { 183 | let f64_arr = if let Some(f32_arr) = &f32_arrays[i] { 184 | f32_arr.cast::(false)?.readonly() 185 | } else { 186 | try_downcast_to_f64_array(objects[i], true) 187 | .ok_or_else(|| cast_fail_reason(i, names, objects, true))? 188 | }; 189 | Ok(f64_arr) 190 | })?; 191 | 192 | Ok(GenericPyReadonlyArrays::F64(result)) 193 | } 194 | } 195 | 196 | fn downcast_objects_no_cast<'py, const N: usize>( 197 | names: &'static [&'static str; N], 198 | objects: &[&Bound<'py, PyAny>; N], 199 | ) -> Res> { 200 | let f32_arrays = try_downcast_objects_to_f32_arrays(objects); 201 | 202 | if f32_arrays.iter().all(|arr| arr.is_some()) { 203 | Ok(GenericPyReadonlyArrays::F32( 204 | f32_arrays.map(|arr| arr.unwrap()), 205 | )) 206 | } else { 207 | let mut valid_f64_count = 0; 208 | let f64_arrays = objects 209 | .map_option(|obj| { 210 | valid_f64_count += 1; 211 | try_downcast_to_f64_array(obj, false) 212 | }) 213 | .ok_or_else(|| { 214 | let valid_f32_count = f32_arrays.iter().filter(|arr| arr.is_some()).count(); 215 | let max_count = usize::max(valid_f32_count, valid_f64_count); 216 | if max_count == 0 { 217 | unknown_type_exception(names[0], objects[0]) 218 | } else { 219 | let idx = max_count - 1; 220 | cast_fail_reason(idx, names, objects, false) 221 | } 222 | })?; 223 | Ok(GenericPyReadonlyArrays::F64(f64_arrays)) 224 | } 225 | } 226 | 227 | pub(crate) fn downcast_and_validate<'py, const N: usize>( 228 | names: &'static [&'static str; N], 229 | objects: &[&Bound<'py, PyAny>; N], 230 | check_size: &[bool; N], 231 | cast: bool, 232 | ) -> Res> { 233 | assert_eq!(names.len(), objects.len()); 234 | 235 | let arrays = if cast { 236 | downcast_objects_cast(names, objects)? 237 | } else { 238 | downcast_objects_no_cast(names, objects)? 239 | }; 240 | let mut first_array_len = None; 241 | // We checked that 1) names size matches objects size, 2) objects is not empty 242 | let first_name = names[0]; 243 | 244 | for i in 1..N { 245 | if check_size[i] { 246 | let length0 = if let Some(length0) = first_array_len { 247 | length0 248 | } else { 249 | let length0 = arrays.array_len(0); 250 | first_array_len = Some(length0); 251 | length0 252 | }; 253 | let length = arrays.array_len(i); 254 | if length != length0 { 255 | return Err(Exception::ValueError(format!( 256 | "Mismatched lengths: '{}': {}, '{}': {}", 257 | first_name, length0, names[i], length, 258 | ))); 259 | } 260 | } 261 | } 262 | Ok(arrays) 263 | } 264 | 265 | macro_rules! _distinguish_eq_symbol { 266 | (=) => { 267 | true 268 | }; 269 | (!=) => { 270 | false 271 | }; 272 | } 273 | 274 | macro_rules! dtype_dispatch { 275 | // @call variants are for the internal usage only 276 | (@call $func:ident, $arrays:ident, $_arg1:expr,) => {{ 277 | let [x1] = $arrays; 278 | $func(x1) 279 | }}; 280 | (@call $func:ident, $arrays:ident, $_arg1:expr, $_arg2:expr,) => {{ 281 | let [x1, x2] = $arrays; 282 | $func(x1, x2) 283 | }}; 284 | (@call $func:ident, $arrays:ident, $_arg1:expr, $_arg2:expr, $_arg3:expr,) => {{ 285 | let [x1, x2, x3] = $arrays; 286 | $func(x1, x2, x3) 287 | }}; 288 | ($func:tt ($first_arg:expr $(,$eq:tt $arg:expr)* $(,)?)) => { 289 | dtype_dispatch!($func, $func, $first_arg $(,$eq $arg)*) 290 | }; 291 | ($func:tt ($first_arg:expr $(,$eq:tt $arg:expr)*; cast=$cast:expr $(,)?)) => { 292 | dtype_dispatch!($func, $func, $first_arg $(,$eq $arg)*; cast=$cast) 293 | }; 294 | ($f32:expr, $f64:expr, $first_arg:expr $(,$eq:tt $arg:expr)* $(,)?) => { 295 | dtype_dispatch!($f32, $f64, $first_arg $(,$eq $arg)*; cast=false) 296 | }; 297 | ($f32:expr, $f64:expr, $first_arg:expr $(,$eq:tt $arg:expr)*; cast=$cast:expr) => {{ 298 | let names = &[stringify!($first_arg), $(stringify!($arg), )*]; 299 | let objects = &[&$first_arg, $(&$arg, )*]; 300 | let check_size = &[ false, $(_distinguish_eq_symbol!($eq), )* ]; 301 | 302 | let generic_arrays = crate::np_array::downcast_and_validate(names, objects, check_size, $cast)?; 303 | match generic_arrays { 304 | crate::np_array::GenericPyReadonlyArrays::F32(arrays) => { 305 | let func = $f32; 306 | dtype_dispatch!(@call func, arrays, $first_arg, $($arg,)*) 307 | } 308 | crate::np_array::GenericPyReadonlyArrays::F64(arrays) => { 309 | let func = $f64; 310 | dtype_dispatch!(@call func, arrays, $first_arg, $($arg,)*) 311 | } 312 | } 313 | }}; 314 | } 315 | -------------------------------------------------------------------------------- /light-curve/src/transform.rs: -------------------------------------------------------------------------------- 1 | use crate::errors::{Exception, Res}; 2 | 3 | use enum_iterator::Sequence; 4 | use light_curve_feature::transformers::{ 5 | Transformer, arcsinh::ArcsinhTransformer, clipped_lg::ClippedLgTransformer, 6 | identity::IdentityTransformer, lg::LgTransformer, ln1p::Ln1pTransformer, sqrt::SqrtTransformer, 7 | }; 8 | use pyo3::prelude::*; 9 | use pyo3::types::{PyBool, PyString}; 10 | 11 | #[derive(Clone, Copy, Sequence)] 12 | pub(crate) enum StockTransformer { 13 | Arcsinh, 14 | ClippedLg, 15 | Identity, 16 | Lg, 17 | Ln1p, 18 | Sqrt, 19 | } 20 | 21 | impl StockTransformer { 22 | pub(crate) fn all_variants() -> impl Iterator { 23 | enum_iterator::all::() 24 | } 25 | 26 | pub(crate) fn all_names() -> impl Iterator { 27 | Self::all_variants().map(|variant| variant.into()) 28 | } 29 | 30 | pub(crate) fn doc(&self) -> &'static str { 31 | match self { 32 | Self::Arcsinh => ArcsinhTransformer::doc(), 33 | Self::ClippedLg => ClippedLgTransformer::::doc(), 34 | Self::Identity => IdentityTransformer::doc(), 35 | Self::Lg => LgTransformer::doc(), 36 | Self::Ln1p => Ln1pTransformer::doc(), 37 | Self::Sqrt => SqrtTransformer::doc(), 38 | } 39 | } 40 | } 41 | 42 | impl TryFrom<&str> for StockTransformer { 43 | type Error = Exception; 44 | 45 | fn try_from(s: &str) -> Res { 46 | Ok(match s { 47 | "arcsinh" => Self::Arcsinh, 48 | "clipped_lg" => Self::ClippedLg, 49 | "identity" => Self::Identity, 50 | "lg" => Self::Lg, 51 | "ln1p" => Self::Ln1p, 52 | "sqrt" => Self::Sqrt, 53 | _ => { 54 | return Err(Exception::ValueError(format!( 55 | "Unknown stock transformer: {}", 56 | s 57 | ))); 58 | } 59 | }) 60 | } 61 | } 62 | 63 | impl From for &'static str { 64 | fn from(val: StockTransformer) -> Self { 65 | match val { 66 | StockTransformer::Arcsinh => "arcsinh", 67 | StockTransformer::ClippedLg => "clipped_lg", 68 | StockTransformer::Identity => "identity", 69 | StockTransformer::Lg => "lg", 70 | StockTransformer::Ln1p => "ln1p", 71 | StockTransformer::Sqrt => "sqrt", 72 | } 73 | } 74 | } 75 | 76 | impl From for (Transformer, Transformer) { 77 | fn from(val: StockTransformer) -> Self { 78 | match val { 79 | StockTransformer::Arcsinh => ( 80 | ArcsinhTransformer::default().into(), 81 | ArcsinhTransformer::default().into(), 82 | ), 83 | StockTransformer::ClippedLg => ( 84 | ClippedLgTransformer::default().into(), 85 | ClippedLgTransformer::default().into(), 86 | ), 87 | StockTransformer::Identity => ( 88 | IdentityTransformer::default().into(), 89 | IdentityTransformer::default().into(), 90 | ), 91 | StockTransformer::Lg => ( 92 | LgTransformer::default().into(), 93 | LgTransformer::default().into(), 94 | ), 95 | StockTransformer::Ln1p => ( 96 | Ln1pTransformer::default().into(), 97 | Ln1pTransformer::default().into(), 98 | ), 99 | StockTransformer::Sqrt => ( 100 | SqrtTransformer::default().into(), 101 | SqrtTransformer::default().into(), 102 | ), 103 | } 104 | } 105 | } 106 | 107 | pub(crate) fn parse_transform( 108 | option: Option>, 109 | default: StockTransformer, 110 | ) -> Res> { 111 | match option { 112 | None => Ok(None), 113 | Some(py_any) => { 114 | if let Ok(py_bool) = py_any.downcast::() { 115 | if py_bool.is_true() { 116 | Ok(Some(default)) 117 | } else { 118 | Ok(None) 119 | } 120 | } else if let Ok(py_str) = py_any.downcast::() { 121 | // py_str.to_str() is Python 3.10+ only 122 | let cow_string = py_str.to_cow()?; 123 | let s = cow_string.as_ref(); 124 | match s.try_into() { 125 | Ok(stock_transformer) => Ok(Some(stock_transformer)), 126 | _ => { 127 | if s == "default" { 128 | Ok(Some(default)) 129 | } else { 130 | Err(Exception::ValueError(format!( 131 | "Unknown transformation: {}", 132 | s 133 | ))) 134 | } 135 | } 136 | } 137 | } else { 138 | Err(Exception::ValueError(format!( 139 | "transform must be None, a bool or a str, not {}", 140 | py_any.get_type().qualname()? 141 | ))) 142 | } 143 | } 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /light-curve/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/tests/__init__.py -------------------------------------------------------------------------------- /light-curve/tests/conftest.py: -------------------------------------------------------------------------------- 1 | def patch_astropy_for_feets(): 2 | """Feets is incompatible with astropy v6.0 because of backward incompatible 3 | changes in the subpackage structure. This function monkey patches astropy 4 | to make it compatible with feets. 5 | """ 6 | import importlib 7 | import sys 8 | from importlib.metadata import version 9 | 10 | try: 11 | astropy_version = version("astropy") 12 | except ImportError: 13 | # astropy is not installed 14 | return 15 | if int(astropy_version.split(".")[0]) < 6: 16 | # astropy is older than v6.0 17 | return 18 | 19 | lombscargle = importlib.import_module("astropy.timeseries.periodograms.lombscargle") 20 | sys.modules["astropy.stats.lombscargle"] = lombscargle 21 | 22 | 23 | patch_astropy_for_feets() 24 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_ext/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/light-curve/light-curve-python/a1dc57264f84b42fe16e29d97b1cd48379a4e66a/light-curve/tests/light_curve_ext/__init__.py -------------------------------------------------------------------------------- /light-curve/tests/light_curve_ext/test_dmdt.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import pickle 3 | from itertools import product 4 | 5 | try: 6 | from contextlib import nullcontext 7 | except ImportError: 8 | from contextlib import contextmanager 9 | 10 | @contextmanager 11 | def nullcontext(enter_result=None): 12 | yield enter_result 13 | 14 | 15 | import numpy as np 16 | import pytest 17 | from numpy.testing import assert_allclose, assert_array_equal 18 | 19 | from light_curve.light_curve_ext import DmDt 20 | 21 | DM_DT = [ 22 | DmDt.from_borders(min_lgdt=0.0, max_lgdt=np.log10(3), max_abs_dm=1.0, lgdt_size=16, dm_size=32, norm=[]), 23 | DmDt(dt=np.logspace(0.0, np.log10(3.0), 17), dm=np.linspace(-1.0, 1.0, 33), dt_type="auto", dm_type="auto"), 24 | DmDt(dt=np.logspace(0.0, np.log10(3.0), 17), dm=np.linspace(-1.0, 1.0, 33), dt_type="asis", dm_type="asis"), 25 | DmDt(dt=np.logspace(0.0, np.log10(3.0), 17), dm=np.linspace(-1.0, 1.0, 33), dt_type="log", dm_type="linear"), 26 | DmDt.from_borders(min_lgdt=-1.0, max_lgdt=1.0, max_abs_dm=2.0, lgdt_size=32, dm_size=32, norm=["dt"]), 27 | DmDt.from_borders(min_lgdt=-1.0, max_lgdt=1.0, max_abs_dm=2.0, lgdt_size=32, dm_size=32, norm=["max"]), 28 | DmDt.from_borders(min_lgdt=-1.0, max_lgdt=1.0, max_abs_dm=2.0, lgdt_size=32, dm_size=32, norm=["dt", "max"]), 29 | ] 30 | 31 | 32 | def random_lc(n, sigma=True, rng=None, dtype=np.float64): 33 | rng = np.random.default_rng(rng) 34 | t = np.sort(np.asarray(rng.uniform(0, 10, n), dtype=dtype)) 35 | m = np.asarray(rng.normal(0, 1, n), dtype=dtype) 36 | lc = (t, m) 37 | if sigma: 38 | sigma = np.asarray(rng.uniform(0.01, 0.1, n), dtype=dtype) 39 | lc += (sigma,) 40 | return lc 41 | 42 | 43 | def sine_lc(n, sigma=True, dtype=np.float64): 44 | t = np.asarray(np.linspace(0, 10, n), dtype=dtype) 45 | m = np.sin(t) 46 | lc = (t, m) 47 | if sigma: 48 | sigma = np.full_like(t, 0.1) 49 | lc += (sigma,) 50 | return lc 51 | 52 | 53 | def sliced(a, step=2): 54 | """Mix with random data and slice to original data""" 55 | assert step > 0 56 | n = a.size 57 | rng = np.random.default_rng() 58 | random_data = np.asarray(rng.normal(0, 1, (step - 1, n)), dtype=a.dtype) 59 | mixed = np.vstack([a[::-1], random_data]).T.reshape(-1).copy() 60 | s = mixed[-step::-step] 61 | assert not s.flags.owndata 62 | assert_array_equal(s, a) 63 | return s 64 | 65 | 66 | def test_dmdt_count_dt_three_obs(): 67 | dmdt = DmDt.from_borders(min_lgdt=0, max_lgdt=np.log10(3), max_abs_dm=1, lgdt_size=2, dm_size=32, norm=[]) 68 | 69 | t = np.array([0, 1, 2], dtype=np.float32) 70 | 71 | desired = np.array([2, 1]) 72 | actual = dmdt.count_dt(t) 73 | 74 | assert_array_equal(actual, desired) 75 | 76 | 77 | def test_log_linear_grids(): 78 | lc = random_lc(101) 79 | 80 | min_lgdt = -1 81 | max_lgdt = 1 82 | lgdt_size = 32 83 | max_abs_dm = 2 84 | dm_size = 32 85 | 86 | min_dt = 10**min_lgdt 87 | max_dt = 10**max_lgdt 88 | 89 | dt_grid = np.logspace(min_lgdt, max_lgdt, lgdt_size + 1) 90 | dm_grid = np.linspace(-max_abs_dm, max_abs_dm, dm_size + 1) 91 | 92 | dmdt_from_borders = DmDt.from_borders( 93 | min_lgdt=min_lgdt, max_lgdt=max_lgdt, max_abs_dm=max_abs_dm, lgdt_size=lgdt_size, dm_size=dm_size 94 | ) 95 | dmdt_auto = DmDt(dt=dt_grid, dm=dm_grid, dt_type="auto", dm_type="auto") 96 | dmdt_log_linear = DmDt(dt=dt_grid, dm=dm_grid, dt_type="log", dm_type="linear") 97 | dmdt_asis = DmDt(dt=dt_grid, dm=dm_grid, dt_type="asis", dm_type="asis") 98 | 99 | for dmdt in ( 100 | dmdt_from_borders, 101 | dmdt_auto, 102 | dmdt_log_linear, 103 | dmdt_asis, 104 | ): 105 | assert_allclose(dmdt.min_dt, min_dt) 106 | assert_allclose(dmdt.max_dt, max_dt) 107 | assert_allclose(dmdt.min_dm, -max_abs_dm) 108 | assert_allclose(dmdt.max_dm, max_abs_dm) 109 | assert_allclose(dmdt.dt_grid, dt_grid) 110 | assert_allclose(dmdt.dm_grid, dm_grid) 111 | 112 | points = dmdt_from_borders.points(lc[0], lc[1]) 113 | gausses = dmdt_from_borders.gausses(*lc) 114 | for dmdt in ( 115 | dmdt_auto, 116 | dmdt_log_linear, 117 | dmdt_asis, 118 | ): 119 | assert_allclose(dmdt.points(lc[0], lc[1]), points) 120 | assert_allclose(dmdt.gausses(*lc), gausses) 121 | 122 | 123 | @pytest.mark.parametrize("lc", [sine_lc(101), sine_lc(101, dtype=np.float32)]) 124 | def test_dmdt_count_dt_contiguous_non(lc): 125 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=[]) 126 | desired = dmdt.count_dt(lc[0]) 127 | actual = dmdt.count_dt(sliced(lc[0])) 128 | assert_array_equal(actual, desired) 129 | 130 | 131 | @pytest.mark.parametrize("lc", [sine_lc(101), random_lc(101), sine_lc(101, dtype=np.float32)]) 132 | def test_dmdt_count_dt_many_one(lc): 133 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=[]) 134 | 135 | desired = dmdt.count_dt(lc[0]) 136 | assert np.any(desired != 0) 137 | actual = dmdt.count_dt_many([lc[0]]) 138 | 139 | assert actual.shape[0] == 1 140 | assert_array_equal(actual[0], desired) 141 | 142 | 143 | @pytest.mark.parametrize( 144 | "lcs", 145 | [ 146 | [sine_lc(101), sine_lc(11)], 147 | [random_lc(101), random_lc(101), random_lc(11)], 148 | [sine_lc(101, dtype=np.float32), sine_lc(11, dtype=np.float32)], 149 | ], 150 | ) 151 | def test_dmdt_count_dt_many(lcs): 152 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=[]) 153 | 154 | desired = [dmdt.count_dt(t) for t, *_ in lcs] 155 | actual = dmdt.count_dt_many([t for t, *_ in lcs]) 156 | 157 | assert_array_equal(actual, desired) 158 | 159 | 160 | def test_dmdt_points_three_obs(): 161 | dmdt = DmDt.from_borders(min_lgdt=0, max_lgdt=np.log10(3), max_abs_dm=3, lgdt_size=2, dm_size=4, norm=[]) 162 | 163 | t = np.array([0, 1, 2], dtype=np.float32) 164 | m = np.array([0, 1, 2], dtype=np.float32) 165 | 166 | desired = np.array( 167 | [ 168 | [0, 0, 2, 0], 169 | [0, 0, 0, 1], 170 | ] 171 | ) 172 | actual = dmdt.points(t, m) 173 | 174 | assert_array_equal(actual, desired) 175 | 176 | 177 | @pytest.mark.parametrize("lc", [sine_lc(101, False), sine_lc(101, False, dtype=np.float32)]) 178 | def test_dmdt_points_contiguous_non(lc): 179 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=[]) 180 | desired = dmdt.points(*lc) 181 | t, m = lc 182 | t = sliced(t) 183 | m = sliced(m) 184 | actual = dmdt.points(t, m) 185 | assert_array_equal(actual, desired) 186 | 187 | 188 | @pytest.mark.parametrize("lc", [sine_lc(101, False), random_lc(101, False), sine_lc(101, False, dtype=np.float32)]) 189 | @pytest.mark.parametrize("norm", [[], ["dt"], ["max"], ["dt", "max"]]) 190 | def test_dmdt_points_many_one(lc, norm): 191 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=norm) 192 | 193 | desired = dmdt.points(*lc) 194 | assert np.any(desired != 0) 195 | actual = dmdt.points_many([lc]) 196 | 197 | assert actual.shape[0] == 1 198 | assert_array_equal(actual[0], desired) 199 | 200 | 201 | @pytest.mark.parametrize( 202 | "lcs", 203 | [ 204 | [sine_lc(101, False), sine_lc(11, False)], 205 | [random_lc(101, False), random_lc(101, False), random_lc(11, False)], 206 | [sine_lc(101, False, dtype=np.float32), sine_lc(11, False, dtype=np.float32)], 207 | ], 208 | ) 209 | @pytest.mark.parametrize("norm", [[], ["dt"], ["max"], ["dt", "max"]]) 210 | def test_dmdt_points_many(lcs, norm): 211 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=norm) 212 | 213 | desired = [dmdt.points(*lc) for lc in lcs] 214 | actual = dmdt.points_many(lcs) 215 | 216 | assert_array_equal(actual, desired) 217 | 218 | 219 | @pytest.mark.parametrize("lc", [sine_lc(101), sine_lc(101, dtype=np.float32)]) 220 | def test_dmdt_gausses_contiguous_non(lc): 221 | dmdt = DmDt.from_borders(min_lgdt=-1, max_lgdt=1, max_abs_dm=2, lgdt_size=32, dm_size=32, norm=[]) 222 | desired = dmdt.gausses(*lc) 223 | t, m, sigma = lc 224 | t = sliced(t) 225 | m = sliced(m) 226 | sigma = sliced(sigma) 227 | actual = dmdt.gausses(t, m, sigma) 228 | assert_array_equal(actual, desired) 229 | 230 | 231 | @pytest.mark.parametrize("lc", [sine_lc(101), random_lc(101), sine_lc(101, dtype=np.float32)]) 232 | @pytest.mark.parametrize("norm", [[], ["dt"], ["max"], ["dt", "max"]]) 233 | @pytest.mark.parametrize("approx_erf", [True, False]) 234 | def test_dmdt_gausses_many_one(lc, norm, approx_erf): 235 | dmdt = DmDt.from_borders( 236 | min_lgdt=-1, 237 | max_lgdt=1, 238 | max_abs_dm=2, 239 | lgdt_size=32, 240 | dm_size=32, 241 | norm=norm, 242 | approx_erf=approx_erf, 243 | ) 244 | 245 | desired = dmdt.gausses(*lc) 246 | assert np.any(desired != 0) 247 | actual = dmdt.gausses_many([lc]) 248 | 249 | assert actual.shape[0] == 1 250 | assert_array_equal(actual[0], desired) 251 | 252 | 253 | @pytest.mark.parametrize( 254 | "lcs", 255 | [ 256 | [sine_lc(101), sine_lc(11)], 257 | [random_lc(101), random_lc(101), random_lc(11)], 258 | [sine_lc(101, dtype=np.float32), sine_lc(11, dtype=np.float32)], 259 | ], 260 | ) 261 | @pytest.mark.parametrize("norm", [[], ["dt"], ["max"], ["dt", "max"]]) 262 | @pytest.mark.parametrize("approx_erf", [True, False]) 263 | def test_dmdt_gausses_many(lcs, norm, approx_erf): 264 | dmdt = DmDt.from_borders( 265 | min_lgdt=-1, 266 | max_lgdt=1, 267 | max_abs_dm=2, 268 | lgdt_size=32, 269 | dm_size=32, 270 | norm=norm, 271 | approx_erf=approx_erf, 272 | ) 273 | 274 | desired = [dmdt.gausses(*lc) for lc in lcs] 275 | actual = dmdt.gausses_many(lcs) 276 | 277 | assert_array_equal(actual, desired) 278 | 279 | 280 | @pytest.mark.parametrize("t_dtype,m_dtype", product(*[[np.float32, np.float64]] * 2)) 281 | def test_dmdt_points_dtype(t_dtype, m_dtype): 282 | t = np.linspace(0, 1, 11, dtype=t_dtype) 283 | m = np.asarray(t, dtype=m_dtype) 284 | dmdt = DmDt.from_borders(min_lgdt=0, max_lgdt=1, max_abs_dm=1, lgdt_size=2, dm_size=2, norm=[]) 285 | 286 | if t_dtype is m_dtype: 287 | context = nullcontext() 288 | else: 289 | context = pytest.raises(TypeError) 290 | with context: 291 | dmdt.points(t, m, cast=False) 292 | values = dmdt.points(t, m, cast=True) 293 | assert values.dtype == np.result_type(t, m) 294 | 295 | 296 | @pytest.mark.parametrize("t_dtype,m_dtype,sigma_dtype", product(*[[np.float32, np.float64]] * 3)) 297 | def test_dmdt_gausses_dtype(t_dtype, m_dtype, sigma_dtype): 298 | t = np.linspace(1, 2, 11, dtype=t_dtype) 299 | m = np.asarray(t, dtype=m_dtype) 300 | sigma = np.asarray(t, dtype=sigma_dtype) 301 | dmdt = DmDt.from_borders(min_lgdt=0, max_lgdt=1, max_abs_dm=1, lgdt_size=2, dm_size=2, norm=[]) 302 | 303 | if t_dtype is m_dtype is sigma_dtype: 304 | context = nullcontext() 305 | else: 306 | context = pytest.raises(TypeError) 307 | with context: 308 | dmdt.gausses(t, m, sigma, cast=False) 309 | values = dmdt.gausses(t, m, sigma, cast=True) 310 | assert values.dtype == np.result_type(t, m, sigma) 311 | 312 | 313 | @pytest.mark.parametrize("t1_dtype,m1_dtype,t2_dtype,m2_dtype", product(*[[np.float32, np.float64]] * 4)) 314 | def test_dmdt_points_many_dtype(t1_dtype, m1_dtype, t2_dtype, m2_dtype): 315 | t1 = np.linspace(1, 2, 11, dtype=t1_dtype) 316 | m1 = np.asarray(t1, dtype=m1_dtype) 317 | t2 = np.asarray(t1, dtype=t2_dtype) 318 | m2 = np.asarray(t1, dtype=m2_dtype) 319 | lcs = [(t1, m1), (t2, m2)] 320 | dmdt = DmDt.from_borders(min_lgdt=0, max_lgdt=1, max_abs_dm=1, lgdt_size=2, dm_size=2, norm=[]) 321 | if t1_dtype is m1_dtype is t2_dtype is m2_dtype: 322 | context = nullcontext() 323 | else: 324 | context = pytest.raises(TypeError) 325 | with context: 326 | dmdt.points_many(lcs) 327 | dmdt.points_batches(lcs) 328 | 329 | 330 | @pytest.mark.parametrize( 331 | "t1_dtype,m1_dtype,sigma1_dtype,t2_dtype,m2_dtype,sigma2_dtype", product(*[[np.float32, np.float64]] * 6) 332 | ) 333 | def test_dmdt_gausses_many_dtype(t1_dtype, m1_dtype, sigma1_dtype, t2_dtype, m2_dtype, sigma2_dtype): 334 | t1 = np.linspace(1, 2, 11, dtype=t1_dtype) 335 | m1 = np.asarray(t1, dtype=m1_dtype) 336 | sigma1 = np.asarray(t1, dtype=sigma1_dtype) 337 | t2 = np.asarray(t1, dtype=t2_dtype) 338 | m2 = np.asarray(t1, dtype=m2_dtype) 339 | sigma2 = np.asarray(t1, dtype=sigma2_dtype) 340 | lcs = [(t1, m1, sigma1), (t2, m2, sigma2)] 341 | dmdt = DmDt.from_borders(min_lgdt=0, max_lgdt=1, max_abs_dm=1, lgdt_size=2, dm_size=2, norm=[]) 342 | if t1_dtype is m1_dtype is sigma1_dtype is t2_dtype is m2_dtype is sigma2_dtype: 343 | context = nullcontext() 344 | else: 345 | context = pytest.raises(TypeError) 346 | with context: 347 | dmdt.gausses_many(lcs) 348 | dmdt.gausses_batches(lcs) 349 | 350 | 351 | @pytest.mark.parametrize("dmdt", DM_DT) 352 | @pytest.mark.parametrize("pickle_protocol", tuple(range(2, pickle.HIGHEST_PROTOCOL + 1))) 353 | def test_pickle(dmdt, pickle_protocol): 354 | data = random_lc(51) 355 | values = dmdt.gausses(*data) 356 | 357 | b = pickle.dumps(dmdt, protocol=pickle_protocol) 358 | new_dmt = pickle.loads(b) 359 | new_values = new_dmt.gausses(*data) 360 | 361 | assert_array_equal(values, new_values) 362 | 363 | 364 | @pytest.mark.parametrize("dmdt", DM_DT) 365 | def test_copy_deepcopy(dmdt): 366 | data = random_lc(51) 367 | values = dmdt.gausses(*data) 368 | 369 | dmdt_copy = copy.copy(dmdt) 370 | copy_values = dmdt_copy.gausses(*data) 371 | assert_array_equal(values, copy_values) 372 | 373 | dmdt_deepcopy = copy.deepcopy(dmdt) 374 | deepcopy_values = dmdt_deepcopy.gausses(*data) 375 | assert_array_equal(values, deepcopy_values) 376 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_ext/test_feature.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import inspect 3 | import pickle 4 | from concurrent.futures import ThreadPoolExecutor 5 | 6 | import numpy as np 7 | import pytest 8 | from numpy.testing import assert_allclose, assert_array_equal 9 | 10 | import light_curve.light_curve_ext as lc 11 | 12 | 13 | def _feature_classes(module, *, exclude_parametric=True): 14 | for name, member in inspect.getmembers(module): 15 | if name.startswith("_"): 16 | continue 17 | if inspect.ismodule(member): 18 | yield from _feature_classes(member) 19 | if not inspect.isclass(member): 20 | continue 21 | if not issubclass(member, lc._FeatureEvaluator): 22 | continue 23 | if member is lc.JSONDeserializedFeature: 24 | continue 25 | # Skip classes with non-trivial constructors 26 | if exclude_parametric: 27 | try: 28 | member() 29 | except TypeError: 30 | continue 31 | yield member 32 | 33 | 34 | non_param_feature_classes = frozenset(_feature_classes(lc, exclude_parametric=True)) 35 | assert len(non_param_feature_classes) > 0 36 | 37 | all_feature_classes = frozenset(_feature_classes(lc, exclude_parametric=False)) 38 | assert len(all_feature_classes) > 0 39 | 40 | 41 | def get_new_args_kwargs(cls): 42 | if hasattr(cls, "__getnewargs_ex__"): 43 | return cls.__getnewargs_ex__() 44 | if hasattr(cls, "__getnewargs__"): 45 | args = cls.__getnewargs__() 46 | return args, {} 47 | return (), {} 48 | 49 | 50 | def new_default(cls, **kwargs): 51 | args, kwargs_ = get_new_args_kwargs(cls) 52 | kwargs = dict(kwargs_, **kwargs) 53 | return cls(*args, **kwargs) 54 | 55 | 56 | def construct_example_objects(cls, *, parametric_variants=1, rng=None): 57 | # Extractor is special 58 | if cls is lc.Extractor: 59 | return [cls(lc.BeyondNStd(1.5), lc.LinearFit())] 60 | 61 | # No mandatory arguments 62 | if not hasattr(cls, "__getnewargs__"): 63 | return [cls()] 64 | 65 | # default mandatory arguments 66 | args, kwargs = get_new_args_kwargs(cls) 67 | 68 | # Add Mean feature for metafeatures 69 | args = [[lc.Mean()] if arg == () else arg for arg in args] 70 | 71 | objects = [cls(*args, **kwargs)] 72 | # Nothing to mutate 73 | if not any(isinstance(arg, float) for arg in args + list(kwargs.values())): 74 | return objects 75 | 76 | # Mutate floats 77 | rng = np.random.default_rng(rng) 78 | 79 | def mutation(value): 80 | if not isinstance(value, float): 81 | return value 82 | return value * rng.uniform(0.9, 1.1) + rng.uniform(0.0, 1e-3) 83 | 84 | for _ in range(1, parametric_variants): 85 | mutated_args = list(map(mutation, args)) 86 | mutated_kwargs = {name: mutation(value) for name, value in kwargs.items()} 87 | objects.append(cls(*mutated_args, **mutated_kwargs)) 88 | return objects 89 | 90 | 91 | def gen_feature_evaluators(*, parametric_variants=0, rng=None): 92 | if parametric_variants == 0: 93 | for cls in non_param_feature_classes: 94 | yield cls() 95 | return 96 | rng = np.random.default_rng(rng) 97 | for cls in all_feature_classes: 98 | yield from construct_example_objects(cls, parametric_variants=parametric_variants, rng=rng) 99 | 100 | 101 | def gen_lc(n, rng=None): 102 | rng = np.random.default_rng(rng) 103 | 104 | t = np.sort(rng.normal(0, 1, n)) 105 | m = t.copy() 106 | sigma = np.full_like(t, 0.1) 107 | 108 | return t, m, sigma 109 | 110 | 111 | @pytest.mark.parametrize("cls", list(all_feature_classes)) 112 | def test_available_transforms(cls): 113 | # All available features should consume transform=None 114 | none = new_default(cls, transform=None) 115 | 116 | # If transform consumes False it 117 | # 1) should give the same feature as transform=None 118 | # 2) should be able to consume transform=True 119 | try: 120 | false = new_default(cls, transform=False) 121 | except NotImplementedError: 122 | return 123 | # It would be better to compare objects themselves, but __eq__ is not implemented yet 124 | # https://github.com/light-curve/light-curve-python/issues/148 125 | assert false.names == none.names 126 | true = new_default(cls, transform=True) 127 | # Check if transform=True is not the same as transform=False 128 | default_transform = getattr(cls, "default_transform", None) 129 | if default_transform != "identity": 130 | assert true.names != false.names 131 | 132 | # Both attributes should be present or absent 133 | assert hasattr(cls, "supported_transforms") == hasattr(cls, "default_transform") 134 | 135 | if not hasattr(cls, "supported_transforms"): 136 | return 137 | 138 | assert cls.default_transform in cls.supported_transforms 139 | 140 | for transform in cls.supported_transforms + ["default"]: 141 | new_default(cls, transform=transform) 142 | 143 | 144 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=2)) 145 | def test_negative_strides(feature): 146 | t = np.linspace(1, 0, 20)[::-2] 147 | m = np.exp(t)[:] 148 | err = np.random.uniform(0.1, 0.2, t.shape) 149 | feature(t, m, err) 150 | 151 | 152 | # We don't want *Fit features here: not precise 153 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=0)) 154 | def test_float32_vs_float64(feature): 155 | rng = np.random.default_rng(0) 156 | n = 128 157 | 158 | t, m, sigma = gen_lc(n, rng=rng) 159 | 160 | results = [ 161 | feature(t.astype(dtype), m.astype(dtype), sigma.astype(dtype), sorted=True) 162 | for dtype in [np.float32, np.float64] 163 | ] 164 | assert_allclose(*results, rtol=1e-5, atol=1e-5) 165 | 166 | 167 | # We don't want *Fit features here: too slow 168 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=0)) 169 | def test_many_vs_call(feature): 170 | rng = np.random.default_rng(0) 171 | n_obs = 128 172 | n_lc = 128 173 | 174 | lcs = [gen_lc(n_obs, rng=rng) for _ in range(n_lc)] 175 | 176 | call = np.stack([feature(*lc, sorted=True) for lc in lcs]) 177 | many = feature.many(lcs, sorted=True, n_jobs=2) 178 | assert_array_equal(call, many) 179 | 180 | # Test with Python threads to ensure we have no problems on the free-threading CPython 181 | with ThreadPoolExecutor(2) as pool: 182 | futures = [pool.submit(feature, *lc, sorted=True) for lc in lcs] 183 | call_threads = np.stack([f.result() for f in futures]) 184 | del futures 185 | assert_array_equal(call, call_threads) 186 | 187 | n_lcs_per_job = 4 188 | with ThreadPoolExecutor(2) as pool: 189 | futures = [ 190 | pool.submit(feature.many, lcs[i : i + n_lcs_per_job], sorted=True, n_jobs=2) 191 | for i in range(0, n_lc, n_lcs_per_job) 192 | ] 193 | many_threads = np.concatenate([f.result() for f in futures]) 194 | del futures 195 | assert_array_equal(call, many_threads) 196 | 197 | 198 | def test_fill_value_not_enough_observations(): 199 | n = 1 200 | t = np.linspace(0.0, 1.0, n) 201 | m = t.copy() 202 | fill_value = -100.0 203 | sigma = np.ones_like(t) 204 | feature = lc.Kurtosis() 205 | with pytest.raises(ValueError): 206 | feature(t, m, sigma, fill_value=None) 207 | assert_array_equal(feature(t, m, sigma, fill_value=fill_value), fill_value) 208 | 209 | 210 | @pytest.mark.parametrize("cls", all_feature_classes) 211 | def test_nonempty_docstring(cls): 212 | assert len(cls.__doc__) > 10 213 | 214 | 215 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=2)) 216 | def test_check_t(feature): 217 | n_obs = 128 218 | t, m, sigma = gen_lc(n_obs) 219 | t[0] = np.nan 220 | with pytest.raises(ValueError): 221 | feature(t, m, sigma, check=True) 222 | t[0] = np.inf 223 | with pytest.raises(ValueError): 224 | feature(t, m, sigma, check=True) 225 | 226 | 227 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=2)) 228 | def test_check_m(feature): 229 | n_obs = 128 230 | t, m, sigma = gen_lc(n_obs) 231 | m[0] = np.nan 232 | with pytest.raises(ValueError): 233 | feature(t, m, sigma, check=True) 234 | m[0] = np.inf 235 | with pytest.raises(ValueError): 236 | feature(t, m, sigma, check=True) 237 | 238 | 239 | # We need evaluators which use sigma 240 | @pytest.mark.parametrize("cls", (lc.ExcessVariance, lc.LinearFit, lc.ReducedChi2, lc.StetsonK, lc.WeightedMean)) 241 | def test_check_sigma(cls): 242 | n_obs = 128 243 | t, m, sigma = gen_lc(n_obs) 244 | sigma[0] = np.nan 245 | feature = cls() 246 | with pytest.raises(ValueError): 247 | feature(t, m, sigma, check=True) 248 | # infinite values are allowed for sigma 249 | sigma[0] = np.inf 250 | feature(t, m, sigma, check=True) 251 | 252 | 253 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=5, rng=None)) 254 | @pytest.mark.parametrize("pickle_protocol", tuple(range(2, pickle.HIGHEST_PROTOCOL + 1))) 255 | def test_pickling(feature, pickle_protocol): 256 | n_obs = 128 257 | data = gen_lc(n_obs) 258 | values = feature(*data) 259 | 260 | b = pickle.dumps(feature, protocol=pickle_protocol) 261 | new_feature = pickle.loads(b) 262 | 263 | new_values = new_feature(*data) 264 | assert_array_equal(values, new_values) 265 | 266 | 267 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=5, rng=None)) 268 | def test_copy_deepcopy(feature): 269 | n_obs = 128 270 | data = gen_lc(n_obs) 271 | values = feature(*data) 272 | 273 | copied = copy.copy(feature) 274 | values_copied = copied(*data) 275 | assert_array_equal(values, values_copied) 276 | 277 | deepcopied = copy.deepcopy(feature) 278 | values_deepcopied = deepcopied(*data) 279 | assert_array_equal(values, values_deepcopied) 280 | 281 | 282 | PICKLE_BENCHMARK_FEATURES = [ 283 | lc.Amplitude(), # no parameters 284 | lc.BeyondNStd(1.5), # parametric 285 | lc.Extractor( # large 286 | lc.Amplitude(), 287 | lc.BeyondNStd(2.0), 288 | lc.Bins( 289 | [lc.Kurtosis(), lc.LinearTrend(), lc.WeightedMean()], 290 | window=2.0, 291 | offset=59500.5, 292 | ), 293 | lc.Periodogram(features=[lc.InterPercentileRange(0.01)], peaks=5, max_freq_factor=12.0), 294 | ), 295 | ] 296 | 297 | 298 | @pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES) 299 | def test_benchmark_pickle_loads(feature, benchmark): 300 | b = pickle.dumps(feature, protocol=pickle.HIGHEST_PROTOCOL) 301 | benchmark(pickle.loads, b) 302 | 303 | 304 | @pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES) 305 | def test_benchmark_pickle_dumps(feature, benchmark): 306 | benchmark(pickle.dumps, feature, protocol=pickle.HIGHEST_PROTOCOL) 307 | 308 | 309 | @pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES) 310 | def test_benchmark_copy(feature, benchmark): 311 | benchmark(copy.copy, feature) 312 | 313 | 314 | @pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES) 315 | def test_benchmark_deepcopy(feature, benchmark): 316 | benchmark(copy.deepcopy, feature) 317 | 318 | 319 | # We do not check pure MCMC because it requires a lot of iterations and would be too slow 320 | @pytest.mark.parametrize("algo", ("ceres", "mcmc-ceres", "lmsder", "mcmc-lmsder")) 321 | def test_bazin_fit_precise(algo): 322 | bazin = lc.BazinFit(algo) 323 | 324 | true_params = np.array([10.0, -2.0, 10.0, 10.0, 25.0]) 325 | t = np.linspace(-50.0, 120.0, 1000) 326 | flux = bazin.model(t, true_params) 327 | fluxerr = np.ones_like(t) 328 | 329 | *params, reduced_chi2 = bazin(t, flux, fluxerr) 330 | assert_allclose(true_params, params, rtol=1e-4) # tolerance set to underlying algorithms 331 | 332 | 333 | @pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=5, rng=None)) 334 | def test_json_serialization(feature): 335 | n_obs = 128 336 | data = gen_lc(n_obs) 337 | values = feature(*data) 338 | 339 | from_to_json = lc.feature_from_json(feature.to_json()) 340 | values_from_to_json = from_to_json(*data) 341 | assert_array_equal(values, values_from_to_json) 342 | 343 | 344 | def test_json_deserialization(): 345 | json = """ 346 | {"FeatureExtractor":{"features":[{"Transformed":{"feature":{"AndersonDarlingNormal":{}},"transformer":{"Ln1p":{}}}}, 347 | {"Transformed":{"feature":{"BazinFit":{"algorithm":{"Ceres":{"loss_factor":null,"niterations":20}},"inits_bounds": 348 | {"OptionArrays":{"init":[null,null,null,null,null],"lower":[0.0036307805477010066,null,null,0.0001,0.0001],"upper": 349 | [3630780547.7010174,null,null,30000.0,30000.0]}},"ln_prior":{"Fixed":{"None":{}}}}},"transformer":{"BazinFit": 350 | {"mag_zp":23.899999618530273}}}},{"ExcessVariance":{}}]}} 351 | """ 352 | from_json = lc.feature_from_json(json) 353 | assert isinstance(from_json, lc._FeatureEvaluator) 354 | from_json(*gen_lc(128)) 355 | 356 | 357 | def test_raises_for_wrong_inputs(): 358 | fe = lc.Amplitude() 359 | 360 | # First argument 361 | with pytest.raises(TypeError, match="'t' has type 'int'"): 362 | fe(5, [1.0, 2.0, 3.0]) 363 | with pytest.raises(TypeError, match="'t' is a 2-d array"): 364 | fe(np.array([[1.0, 2.0, 3.0]]), np.array([1.0, 2.0, 3.0])) 365 | with pytest.raises(TypeError, match="'t' has dtype 1e-3, flux, 1e-3) 99 | 100 | # S/N = 10 for minimum flux, scale for Poisson noise 101 | flux_err = np.sqrt(protected_flux * np.min(protected_flux)) / 10.0 102 | flux += rng.normal(0.0, flux_err) 103 | 104 | actual = feature(t, flux, sigma=flux_err, band=band) 105 | 106 | # import matplotlib.pyplot as plt 107 | # plt.figure() 108 | # plt.scatter(t, flux, s=5, label="data") 109 | # plt.errorbar(t, flux, yerr=flux_err, ls="none", capsize=1) 110 | # plt.plot(t, feature.model(t, band, *expected), "x", label="expected") 111 | # plt.plot(t, feature.model(t, band, *actual), "*", label="actual") 112 | # plt.ylim(-.05, flux.max()+0.1) 113 | # plt.legend() 114 | # plt.show() 115 | 116 | # The first test might be too rigid. The second test allow for good local minima to be accepted 117 | np.testing.assert_allclose(actual[:-1], expected[:-1], rtol=0.1) 118 | 119 | # If either the absolute or the relative test passes, it is accepted. 120 | # It prevents linexp, which include a flat exactly 0 baseline to not pass the test because 121 | # of very minor parameter differences that lead to a major relative difference. 122 | np.testing.assert_allclose( 123 | feature.model(t, band, *expected), feature.model(t, band, *actual), rtol=0.1, atol=0.1, strict=False 124 | ) 125 | 126 | 127 | def test_scaler_from_flux_list_input(): 128 | "https://github.com/light-curve/light-curve-python/issues/492" 129 | # Was failing 130 | scaler1 = MultiBandScaler.from_flux( 131 | flux=[1.0, 2.0, 3.0, 4.0], band=np.array(["g", "r", "g", "r"]), with_baseline=True 132 | ) 133 | # Was not failing, but was wrong 134 | scaler2 = MultiBandScaler.from_flux(flux=[1.0, 2.0, 3.0, 4.0], band=["g", "r", "g", "r"], with_baseline=True) 135 | assert scaler1 == scaler2 136 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/features/test_redchi2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | 4 | from light_curve.light_curve_py import ReducedChi2 5 | 6 | 7 | def test_redchi2_equal_sigma(): 8 | m = np.array([1.0, 1.0, 2.0, 3.0, 4.0, 5.0]) 9 | sigma = np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5]) 10 | feature = ReducedChi2() 11 | desired = feature(np.linspace(0, 1, len(m)), m, sigma) 12 | actual = 10.666667 13 | assert_allclose(actual, desired) 14 | 15 | 16 | def test_redchi2_different_sigma(): 17 | m = np.arange(6) 18 | sigma = np.array([0.5, 1.0, 0.5, 1.0, 0.5, 1.0]) 19 | feature = ReducedChi2() 20 | desired = feature(np.linspace(0, 1, len(m)), m, sigma) 21 | actual = 8.48 22 | assert_allclose(actual, desired) 23 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/features/test_roms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | 4 | from light_curve.light_curve_py import Roms 5 | 6 | 7 | def test_roms_const_data(): 8 | feature = Roms() 9 | n = 100 10 | t = np.arange(n) 11 | m = np.ones_like(t) 12 | sigma = np.ones_like(t) 13 | actual = feature(t, m, sigma) 14 | desired = 0.0 15 | assert_allclose(actual, desired) 16 | 17 | 18 | def test_roms_periodic_data(): 19 | feature = Roms() 20 | n = 100 21 | t = np.linspace(0, 2 * np.pi, n) 22 | m = 2 * np.sin(t) 23 | sigma = np.ones_like(t) 24 | actual = feature(t, m, sigma) 25 | desired = 4 / np.pi 26 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 27 | 28 | 29 | def test_roms_norm_data_1(): 30 | rng = np.random.default_rng(0) 31 | n = 100 32 | t = np.linspace(0, 1, n) 33 | m = rng.normal(0, 1, n) 34 | sigma = np.ones_like(t) 35 | feature = Roms() 36 | actual = feature(t, m, sigma) 37 | desired = 2 / np.sqrt((2 * np.pi)) 38 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 39 | 40 | 41 | def test_roms_norm_data_2(): 42 | rng = np.random.default_rng(0) 43 | n = 10000 44 | t = np.linspace(0, 1, n) 45 | m = rng.normal(0, 1, n) 46 | sigma = np.ones_like(t) 47 | feature = Roms() 48 | actual = feature(t, m, sigma) 49 | desired = 2 / np.sqrt((2 * np.pi)) 50 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 51 | 52 | 53 | def test_roms_expon_data_1(): 54 | rng = np.random.default_rng(0) 55 | n = 100 56 | t = np.linspace(0, 1, n) 57 | m = rng.exponential(2, n) 58 | sigma = np.ones_like(t) 59 | feature = Roms() 60 | actual = feature(t, m, sigma) 61 | desired = 2 * np.log(2) 62 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 63 | 64 | 65 | def test_roms_expon_data_2(): 66 | rng = np.random.default_rng(0) 67 | n = 10000 68 | t = np.linspace(0, 1, n) 69 | m = rng.exponential(2, n) 70 | sigma = np.ones_like(t) 71 | feature = Roms() 72 | actual = feature(t, m, sigma) 73 | desired = 2 * np.log(2) 74 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 75 | 76 | 77 | def test_roms_gamma_data_1(): 78 | rng = np.random.default_rng(0) 79 | n = 100 80 | t = np.linspace(0, 1, n) 81 | m = rng.gamma(2, 1, n) 82 | sigma = np.ones_like(t) 83 | feature = Roms() 84 | actual = feature(t, m, sigma) 85 | desired = 1.0518265193 86 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 87 | 88 | 89 | def test_roms_gamma_data_2(): 90 | rng = np.random.default_rng(0) 91 | n = 10000 92 | t = np.linspace(0, 1, n) 93 | m = rng.gamma(2, 1, n) 94 | sigma = np.ones_like(t) 95 | feature = Roms() 96 | actual = feature(t, m, sigma) 97 | desired = 1.0518265193 98 | assert_allclose(actual, desired, rtol=3 / np.sqrt(n)) 99 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/features/test_skew.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | 4 | from light_curve.light_curve_py import Skew 5 | 6 | 7 | def test_skew(): 8 | m = [1.0, 2.0, 3.0, 50.0, 25.0] 9 | feature = Skew() 10 | actual = feature(np.linspace(0, 1, len(m)), m, None) 11 | desired = 1.307253786 12 | assert_allclose(actual, desired) 13 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/features/test_stdev.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | 4 | from light_curve.light_curve_py import StandardDeviation 5 | 6 | 7 | def test_stdev(): 8 | m = np.arange(10) 9 | feature = StandardDeviation() 10 | actual = feature(m, m, None) 11 | m_sum = sum((m - np.mean(m)) ** 2) 12 | desired = np.sqrt(m_sum / (len(m) - 1)) 13 | assert_allclose(actual, desired) 14 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/features/test_stetsonk.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | from scipy import signal 4 | 5 | from light_curve.light_curve_py import StetsonK 6 | 7 | 8 | def test_stetsonk_1(): 9 | feature = StetsonK() 10 | x = np.linspace(0.0, 2 * np.pi, 2000) 11 | sin = np.sin(x) 12 | error = np.ones(2000) 13 | actual = feature(x, sin, error) 14 | desired = np.sqrt(8) / np.pi 15 | assert_allclose(actual, desired, rtol=1e-03) 16 | 17 | 18 | def test_stetsonk_2(): 19 | feature = StetsonK() 20 | x = np.linspace(0.0, 1.0, 1000) 21 | sawtooth = signal.sawtooth(2 * np.pi * 5 * x) 22 | error = np.ones(1000) 23 | actual = feature(x, sawtooth, error) 24 | desired = np.sqrt(12) / 4 25 | assert_allclose(actual, desired, rtol=1e-03) 26 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/features/test_weightmean.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.testing import assert_allclose 3 | 4 | from light_curve.light_curve_py import WeightedMean 5 | 6 | 7 | def test_weightmean(): 8 | a = [2.0, 3.0, 1.0, 9.0, 5.0] 9 | b = [0.3, 0.4, 0.5, 1, 1] 10 | feature = WeightedMean() 11 | actual = feature(np.linspace(0, 1, len(a)), a, b) 12 | desired = 2.52437574316 13 | assert_allclose(actual, desired) 14 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/test_call.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from light_curve.light_curve_py import MaximumSlope 4 | 5 | 6 | def test_normalize(): 7 | t = [1, 4, 3, 8] 8 | m = [2, 15, 4, 3] 9 | feature = MaximumSlope() 10 | actual = feature(t, m, None, sorted=False) 11 | desired = 11.0 12 | assert actual == desired 13 | 14 | 15 | def test_fill_zero_division(): 16 | t = [1, 1, 3, 4] 17 | feature = MaximumSlope() 18 | actual = feature(t, t, None, sorted=False, fill_value=1.0) 19 | desired = 1.0 20 | assert actual == desired 21 | 22 | 23 | def test_fill_inf_values(): 24 | t = [0, 1e-300] 25 | m = [0, 1e300] 26 | feature = MaximumSlope() 27 | actual = feature(t, m, None, sorted=True, fill_value=1.0) 28 | desired = 1.0 29 | assert actual == desired 30 | 31 | 32 | def test_non_unique_values(): 33 | t = [1, 1, 3, 4] 34 | feature = MaximumSlope() 35 | with pytest.raises(ValueError): 36 | feature(t, t, None, sorted=False) 37 | 38 | 39 | def test_non_sorted_values(): 40 | t = [2, 1, 3, 4] 41 | feature = MaximumSlope() 42 | with pytest.raises(ValueError): 43 | feature(t, t, None) 44 | -------------------------------------------------------------------------------- /light-curve/tests/light_curve_py/test_single_band.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from light_curve.light_curve_py import LinearFit 5 | 6 | 7 | def test_no_init_bands_no_input_bands(): 8 | """Announce no bands, use no bands""" 9 | t = [1.0, 2.0, 3.0, 4.0, 5.0] 10 | m = [1.0, 2.0, 3.0, 4.0, 5.0] 11 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0] 12 | feature = LinearFit(bands=None) 13 | _values = feature(t, m, sigma) 14 | 15 | 16 | def test_init_bands_no_input_bands(): 17 | """Announce bands, use no bands""" 18 | t = [1.0, 2.0, 3.0, 4.0, 5.0] 19 | m = [1.0, 2.0, 3.0, 4.0, 5.0] 20 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0] 21 | feature = LinearFit(bands=["g"]) 22 | with pytest.raises(ValueError): 23 | _values = feature(t, m, sigma) 24 | 25 | 26 | def test_no_init_bands_input_bands(): 27 | """Announce no bands, use input bands""" 28 | t = [1.0, 2.0, 3.0, 4.0, 5.0] 29 | m = [1.0, 2.0, 3.0, 4.0, 5.0] 30 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0] 31 | bands = ["g", "g", "g", "r", "r"] 32 | feature = LinearFit(bands=None) 33 | with pytest.raises(ValueError): 34 | _values = feature(t, m, sigma, bands) 35 | 36 | 37 | def test_init_bands_eq_input_bands(): 38 | """Announce no bands, use input bands""" 39 | t = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 40 | m = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 41 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 42 | bands = ["g", "g", "g", "r", "r", "r"] 43 | feature = LinearFit(bands=["g", "r"]) 44 | values = feature(t, m, sigma, bands) 45 | assert 2 == feature.n_bands 46 | assert values.size == feature.size 47 | assert values.size == feature.size_single_band * feature.n_bands 48 | 49 | 50 | def test_init_bands_less_input_bands(): 51 | t = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 52 | m = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 53 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 54 | bands = ["g", "g", "g", "r", "r", "r"] 55 | feature = LinearFit(bands=["g"]) 56 | values = feature(t, m, sigma, bands) 57 | assert 1 == feature.n_bands 58 | assert values.size == feature.size 59 | assert values.size == feature.size_single_band 60 | 61 | 62 | def test_init_bands_more_input_bands(): 63 | t = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 64 | m = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 65 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 66 | bands = ["g"] * 6 67 | feature = LinearFit(bands=["g", "r"]) 68 | # "r" band is virtually existing, but having zero observations 69 | with pytest.raises(ValueError): 70 | _values = feature(t, m, sigma, bands, fill_value=None) 71 | # ... so it should be filled with the fill_value 72 | values = feature(t, m, sigma, bands, fill_value=np.nan) 73 | assert 2 == feature.n_bands 74 | assert values.size == feature.size 75 | assert values.size == feature.size_single_band * feature.n_bands 76 | assert np.all(np.isfinite(values[: feature.size_single_band])) 77 | assert np.all(np.isnan(values[feature.size_single_band :])) 78 | 79 | 80 | def test_init_bands_different_input_bands(): 81 | t = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 82 | m = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0] 83 | sigma = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] 84 | bands = ["g"] * 6 85 | feature = LinearFit(bands=["r"]) 86 | # "r" band is virtually existing, but having zero observations 87 | with pytest.raises(ValueError): 88 | _values = feature(t, m, sigma, bands, fill_value=None) 89 | # ... so it should be filled with the fill_value 90 | values = feature(t, m, sigma, bands, fill_value=-999.0) 91 | assert 1 == feature.n_bands 92 | assert values.size == feature.size == feature.size_single_band 93 | np.testing.assert_array_equal(values, -999.0) 94 | --------------------------------------------------------------------------------