├── .devcontainer └── devcontainer.json ├── .github └── workflows │ ├── BasicTerm_ME_python.yml │ └── github-runners-benchmarks.yml ├── .gitignore ├── LICENSE ├── README.md ├── containers └── BasicTerm_ME_python │ ├── .devcontainer │ └── devcontainer.json │ ├── BasicTerm_ME │ ├── Projection │ │ └── __init__.py │ ├── __init__.py │ ├── _data │ │ └── data.pickle │ ├── _system.json │ ├── disc_rate_ann.xlsx │ ├── model_point_table.xlsx │ ├── mort_table.xlsx │ └── premium_table.xlsx │ ├── Dockerfile │ ├── main.py │ ├── notes.md │ ├── requirements.txt │ ├── term_me_iterative_jax.py │ └── term_me_recursive_pytorch.py ├── github-runners-benchmarks ├── Julia │ ├── CondaPkg.toml │ ├── Project.toml │ ├── README.md │ ├── analysis │ │ ├── CondaPkg.toml │ │ ├── Project.toml │ │ ├── README.md │ │ ├── analysis.jl │ │ ├── images │ │ │ ├── memory_complexity_static_duration_basic_life.png │ │ │ ├── memory_complexity_static_duration_universal_life.png │ │ │ ├── memory_complexity_variable_duration_basic_life.png │ │ │ ├── memory_complexity_variable_duration_universal_life.png │ │ │ ├── time_complexity_basic_life.png │ │ │ └── time_complexity_universal_life.png │ │ ├── large_run.txt │ │ ├── memory_complexity.jl │ │ └── time_complexity.jl │ ├── basic_term.jl │ ├── basic_term_array.jl │ ├── benchmark_results.yaml │ ├── exposures.jl │ ├── main.jl │ ├── mortality.jl │ ├── read_model.jl │ └── savings.jl ├── Python │ ├── BasicTerm_M │ │ ├── Projection │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── _data │ │ │ └── data.pickle │ │ ├── _system.json │ │ ├── disc_rate_ann.csv │ │ ├── disc_rate_ann.xlsx │ │ ├── model_point_table.csv │ │ ├── model_point_table.xlsx │ │ ├── mort_table.csv │ │ └── mort_table.xlsx │ ├── BasicTerm_ME │ │ ├── Projection │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── _data │ │ │ └── data.pickle │ │ ├── _system.json │ │ ├── disc_rate_ann.xlsx │ │ ├── model_point_table.xlsx │ │ ├── mort_table.xlsx │ │ └── premium_table.xlsx │ ├── CashValue_ME_EX4 │ │ ├── Projection │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── _data │ │ │ └── data.pickle │ │ ├── _system.json │ │ ├── disc_rate_ann.xlsx │ │ ├── model_point_1.xlsx │ │ ├── model_point_moneyness.xlsx │ │ ├── model_point_table_10K.csv │ │ ├── mort_table.xlsx │ │ ├── product_spec_table.xlsx │ │ └── surr_charge_table.xlsx │ ├── basicterm_m.py │ ├── basicterm_m_array_numpy.py │ ├── basicterm_m_array_pytorch.py │ ├── basicterm_m_lifelib.py │ ├── basicterm_m_recursive_numpy.py │ ├── basicterm_m_recursive_pytorch.py │ ├── basicterm_me.py │ ├── basicterm_me_heavylight_numpy.py │ ├── basicterm_me_lifelib.py │ ├── basicterm_me_recursive_numpy.py │ ├── benchmark_results.yaml │ ├── main.py │ ├── mortality.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── savings_me.py │ ├── savings_me_lifelib.py │ └── savings_me_recursive_numpy.py ├── R │ ├── .Rprofile │ ├── R.Rproj │ ├── benchmark_results.yaml │ ├── exposures.R │ ├── main.R │ ├── renv.lock │ └── renv │ │ ├── .gitignore │ │ ├── activate.R │ │ └── settings.dcf ├── README.md ├── data │ └── census_dat.csv ├── devnotes.md ├── generate_readme.py ├── julia-memory-analysis.md ├── readme_template.md └── requirements.txt └── joss ├── Makefile ├── paper.bib └── paper.md /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "image": "mcr.microsoft.com/devcontainers/universal:2", 3 | "features": { 4 | "ghcr.io/rocker-org/devcontainer-features/r-apt:0": {}, 5 | "ghcr.io/julialang/devcontainer-features/julia:1": { 6 | "channel": "1.9.3" 7 | }, 8 | "ghcr.io/dhoeric/features/act:1": {}, 9 | "ghcr.io/rocker-org/devcontainer-features/r-packages:1": {} 10 | } 11 | } -------------------------------------------------------------------------------- /.github/workflows/BasicTerm_ME_python.yml: -------------------------------------------------------------------------------- 1 | name: basicterm_me_python 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - 'containers/BasicTerm_ME_python/**' 8 | 9 | jobs: 10 | docker: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Delete huge unnecessary tools folder # https://github.com/orgs/community/discussions/25678 14 | run: rm -rf /opt/hostedtoolcache 15 | - 16 | name: Set up QEMU 17 | uses: docker/setup-qemu-action@v3 18 | - 19 | name: Set up Docker Buildx 20 | uses: docker/setup-buildx-action@v3 21 | - 22 | name: Login to Docker Hub 23 | uses: docker/login-action@v3 24 | with: 25 | username: actuarial 26 | password: ${{ secrets.DOCKERHUB_TOKEN }} 27 | - 28 | name: Build and push 29 | uses: docker/build-push-action@v5 30 | with: 31 | context: "{{defaultContext}}:containers/BasicTerm_ME_python" 32 | push: true 33 | tags: actuarial/basicterm_me_python:latest -------------------------------------------------------------------------------- /.github/workflows/github-runners-benchmarks.yml: -------------------------------------------------------------------------------- 1 | name: github-runners-benchmarks 2 | on: 3 | workflow_dispatch: 4 | push: 5 | paths: 6 | - 'github-runners-benchmarks/**' 7 | pull_request: 8 | paths: 9 | - 'github-runners-benchmarks/**' 10 | jobs: 11 | bench-R: 12 | runs-on: ubuntu-latest 13 | env: 14 | RENV_PATHS_ROOT: ~/.local/share/renv 15 | defaults: 16 | run: 17 | working-directory: github-runners-benchmarks/R 18 | steps: 19 | - uses: actions/checkout@v3 #now we need to install R 20 | - uses: r-lib/actions/setup-r@v2 21 | with: 22 | r-version: '4.2.2' 23 | # we need to manually cache the packages 24 | - name: Cache R packages 25 | uses: actions/cache@v2 26 | with: 27 | path: ${{ env.RENV_PATHS_ROOT }} 28 | key: ${{ runner.os }}-renv-${{ hashFiles('**/renv.lock') }} 29 | restore-keys: | 30 | ${{ runner.os }}-renv- 31 | - name: Restore packages 32 | shell: Rscript {0} 33 | run: | 34 | if (!requireNamespace("renv", quietly = TRUE)) install.packages("renv") 35 | renv::restore() 36 | - name: Benchmark 37 | run: Rscript -e 'source("main.R")' 38 | - run: ls 39 | - name: upload R benchmark 40 | uses: actions/upload-artifact@v3 41 | with: 42 | name: R_benchmark 43 | path: github-runners-benchmarks/R/benchmark_results.yaml 44 | bench-Julia: 45 | runs-on: ubuntu-latest 46 | defaults: 47 | run: 48 | working-directory: github-runners-benchmarks/Julia 49 | steps: 50 | - uses: actions/checkout@v3 51 | - uses: julia-actions/setup-julia@v1 52 | with: 53 | version: '1.9.3' 54 | - name: Benchmark 55 | run: julia --project -e 'using Pkg; Pkg.instantiate(); include("main.jl")' 56 | - run: ls 57 | - name: upload Julia benchmark 58 | uses: actions/upload-artifact@v3 59 | with: 60 | name: Julia_benchmark 61 | path: github-runners-benchmarks/Julia/benchmark_results.yaml 62 | bench-Python: 63 | runs-on: ubuntu-latest 64 | defaults: 65 | run: 66 | working-directory: github-runners-benchmarks/Python 67 | steps: 68 | - uses: actions/checkout@v3 69 | - uses: actions/setup-python@v2 70 | with: 71 | python-version: '3.11' 72 | - run: pip install -r requirements.txt 73 | # cache the python pip installed packages 74 | - name: Cache pip packages 75 | uses: actions/cache@v2 76 | with: 77 | path: ~/.cache/pip 78 | key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} 79 | restore-keys: | 80 | ${{ runner.os }}-pip- 81 | - name: Benchmark 82 | run: python main.py 83 | - run: ls 84 | - name: upload Python benchmark 85 | uses: actions/upload-artifact@v3 86 | with: 87 | name: Python_benchmark 88 | path: github-runners-benchmarks/Python/benchmark_results.yaml 89 | create-README: 90 | defaults: 91 | run: 92 | working-directory: github-runners-benchmarks 93 | runs-on: ubuntu-latest 94 | needs: [bench-R, bench-Julia, bench-Python] 95 | steps: 96 | - uses: actions/checkout@v3 97 | - name: Download R benchmark 98 | uses: actions/download-artifact@v2 99 | with: 100 | name: R_benchmark 101 | path: github-runners-benchmarks/R 102 | - name: Download Julia benchmark 103 | uses: actions/download-artifact@v2 104 | with: 105 | name: Julia_benchmark 106 | path: github-runners-benchmarks/Julia 107 | - name: Download Python benchmark 108 | uses: actions/download-artifact@v2 109 | with: 110 | name: Python_benchmark 111 | path: github-runners-benchmarks/Python 112 | # SETUP python and install dependencies 113 | - uses: actions/setup-python@v2 114 | with: 115 | python-version: '3.11' 116 | - run: pip install -r requirements.txt 117 | - run: python generate_readme.py 118 | - run: cat README.md 119 | # commit and push the README.md 120 | - name: Commit and push 121 | run: | 122 | git config --local user.email "github-actions[bot]@users.noreply.github.com" 123 | git config --local user.name "github-actions[bot]" 124 | git add . 125 | git commit -m "Bench and update README.md" 126 | git push 127 | if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request' 128 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.DS_STORE 2 | .Rproj.user 3 | **__pycache__ 4 | /.vscode 5 | .CondaPkg 6 | *.code-workspace 7 | Manifest.toml 8 | joss/jats* 9 | joss/paper.pdf -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Actuarial Open Source Community 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Benchmarking 2 | 3 | We provide benchmarks to encourage collaboration and competition in making actuarial software run faster than ever before. 4 | 5 | ## Containerized benchmarks 6 | 7 | We currently only have 1 benchmark, we are working to expand the benchmarks. Open a discussion in this repository if you have any thoughts to share. 8 | 9 | Time measurement is currently the best of 3 runs. 10 | 11 | | benchmark | classification | container |A100-SXM4-40GB | H100-SXM5-80GB | 12 | |---------------|-|-|----------------|----------------| 13 | | BasicTerm_ME 100 Million | recursive PyTorch | [link](https://hub.docker.com/repository/docker/actuarial/basicterm_me_python/general) | 15.8284s | 7.205s | 14 | | BasicTerm_ME 100 Million | compiled iterative JAX | [link](https://hub.docker.com/repository/docker/actuarial/basicterm_me_python/general) | 3.448s | 1.551s | 15 | 16 | 17 | ### Notes 18 | 19 | * BasicTerm_ME 100 Million 20 | * You can find lifelib's modelpoint file with 10,000 modelpoints as `model_point_table.xlsx`. We use these modelpoints, but repeat them 10,000 times for 100,000,000 modelpoints. 21 | 22 | 23 | ## GitHub-hosted runners 24 | 25 | These benchmarks run in GitHub-hosted runners in GitHub actions. Used for benchmarks that are not computationally intensive. 26 | 27 | Benchmarks in this repository: 28 | 29 | * `basic_term_benchmark`: Replicate the cashflows of the [LifeLib BasicTerm model](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 30 | * Python [LifeLib BasicTerm_M](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 31 | * Julia [using LifeSimulator](https://github.com/JuliaActuary/LifeSimulator.jl) 32 | * Python using recursive formulas with [PyTorch](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_recursive_pytorch.py) and [NumPy](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_recursive_numpy.py) 33 | * Python using matrix operations (no recursion) on [PyTorch arrays](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_array_pytorch.py) and [NumPy arrays](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_array_numpy.py) 34 | * `exposures`: Create date partitions for experience studies 35 | * Julia [ExperienceAnalysis](https://github.com/JuliaActuary/ExperienceAnalysis.jl) 36 | * R [actxps](https://github.com/mattheaphy/actxps) 37 | * `mortality`: Read SOA mortality tables and use them in a simple calculation 38 | * Julia [MortalityTables](https://github.com/JuliaActuary/MortalityTables.jl) 39 | * Python [Pymort](https://github.com/actuarialopensource/pymort) 40 | 41 | The below results are generated by the benchmarking scripts in the folders for each language. These scripts are run automatically by GitHub Actions and populate the results below. 42 | ```yaml 43 | basic_term_benchmark: 44 | - Julia array basic_term: 45 | minimum time: TrialEstimate(30.279 ms) 46 | result: 1.4489630534602132e7 47 | Julia recursive basic_term: 48 | minimum time: TrialEstimate(84.812 ms) 49 | result: 1.4489630534602132e7 50 | - Python array numpy basic_term_m: 51 | minimum time: 83.45314299992879 milliseconds 52 | result: 14489630.534603368 53 | Python array pytorch basic_term_m: 54 | minimum time: 51.54931000004126 milliseconds 55 | result: 14489630.534603368 56 | Python lifelib basic_term_m: 57 | minimum time: 618.0503439999256 milliseconds 58 | result: 14489630.534601536 59 | Python recursive numpy basic_term_m: 60 | minimum time: 63.08064000006652 milliseconds 61 | result: 14489630.534603368 62 | Python recursive pytorch basic_term_m: 63 | minimum time: 75.6699999999455 milliseconds 64 | result: 14489630.53460337 65 | basic_term_me_benchmark: 66 | - Python heavylight numpy basic_term_me: 67 | minimum time: 354.6492100000478 milliseconds 68 | result: 215146132.0684811 69 | Python lifelib basic_term_me: 70 | minimum time: 1191.792300999964 milliseconds 71 | result: 215146132.06848112 72 | Python recursive numpy basic_term_me: 73 | minimum time: 309.30894900006933 milliseconds 74 | result: 215146132.0684814 75 | exposures: 76 | - Julia ExperienceAnalysis.jl: 77 | minimum time: TrialEstimate(29.659 ms) 78 | num_rows: 141281 79 | - R actxps: 80 | min: 486.248656 ms 81 | num_rows: 141281 82 | mortality: 83 | - Julia MortalityTables.jl: 84 | minimum time: TrialEstimate(229.507 μs) 85 | result: 1904.4865526636793 86 | - Python PyMort: 87 | minimum time: 9.425531999909253 milliseconds 88 | result: 1904.4865526636793 89 | savings_benchmark: 90 | - Julia Benchmarks savings: 91 | minimum time: TrialEstimate(119.226 ms) 92 | result: 3.507113709040273e12 93 | - Python lifelib cashvalue_me_ex4: 94 | minimum time: 596.2715760000492 milliseconds 95 | result: 3507113709040.141 96 | Python recursive numpy cashvalue_me_ex4: 97 | minimum time: 543.3022800000344 milliseconds 98 | result: 3507113709040.124 99 | ``` 100 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile 3 | { 4 | "name": "Existing Dockerfile", 5 | "build": { 6 | // Sets the run context to one level up instead of the .devcontainer folder. 7 | "context": "..", 8 | // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. 9 | "dockerfile": "../Dockerfile" 10 | }, 11 | "customizations": { 12 | "vscode": { 13 | "extensions": [ 14 | "ms-toolsai.jupyter", 15 | "ms-python.python" 16 | ] 17 | } 18 | }, 19 | "runArgs": [ 20 | "--gpus", 21 | "all" 22 | ] 23 | 24 | // Features to add to the dev container. More info: https://containers.dev/features. 25 | // "features": {}, 26 | 27 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 28 | // "forwardPorts": [], 29 | 30 | // Uncomment the next line to run commands after the container is created. 31 | // "postCreateCommand": "cat /etc/os-release", 32 | 33 | // Configure tool-specific properties. 34 | // "customizations": {}, 35 | 36 | // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root. 37 | // "remoteUser": "devcontainer" 38 | } 39 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "BasicTerm_ME" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/_data/data.pickle -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 17, 0], "serializer_version": 4} -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/model_point_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/model_point_table.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/mort_table.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/premium_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/premium_table.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the PyTorch image with CUDA and cuDNN support 2 | FROM pytorch/pytorch:2.2.2-cuda11.8-cudnn8-runtime 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | RUN pip install --upgrade pip 8 | RUN pip install --upgrade "jax[cuda12]" 9 | RUN pip install \ 10 | pandas \ 11 | openpyxl \ 12 | equinox \ 13 | heavylight==1.0.6 14 | 15 | # Copy the rest of the application 16 | COPY . /app/ 17 | 18 | # Environment variable (optional but might help with CUDA memory management) 19 | ENV PYTORCH_CUDA_ALLOC_CONF="garbage_collection_threshold:0.8" 20 | 21 | # Set the entrypoint and provide the script name as default command 22 | ENTRYPOINT ["python", "main.py"] 23 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def main(): 4 | parser = argparse.ArgumentParser(description="Term ME model runner") 5 | parser.add_argument("--multiplier", type=int, default=100, help="Multiplier for model points") 6 | # add an argument that must be either "torch_recursive" or "jax_iterative" 7 | parser.add_argument("--model", type=str, default="jax_iterative", choices=["torch_recursive", "jax_iterative"], help="Model to run") 8 | args = parser.parse_args() 9 | 10 | multiplier = args.multiplier 11 | 12 | if args.model == "torch_recursive": 13 | from term_me_recursive_pytorch import time_recursive_PyTorch # having both imports at top level gave a jax error? 14 | time_recursive_PyTorch(multiplier) 15 | elif args.model == "jax_iterative": 16 | from term_me_iterative_jax import time_iterative_jax 17 | time_iterative_jax(multiplier) 18 | else: 19 | raise ValueError("Invalid model") 20 | 21 | if __name__ == "__main__": 22 | main() -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/notes.md: -------------------------------------------------------------------------------- 1 | docker build . -t lol 2 | docker run lol # no gpu 3 | docker run --gpus all lol 4 | 5 | act -j build -s "CODECOV_TOKEN=your-codecov-token-abc555-5555" -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | openpyxl 3 | heavylight==1.0.6 -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/term_me_iterative_jax.py: -------------------------------------------------------------------------------- 1 | import jax 2 | import pandas as pd 3 | import numpy as np 4 | import timeit 5 | import jax.numpy as jnp 6 | import equinox as eqx 7 | jax.config.update("jax_enable_x64", True) 8 | 9 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 10 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 11 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 12 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 13 | 14 | class ModelPointsEqx(eqx.Module): 15 | premium_pp: jnp.ndarray 16 | duration_mth: jnp.ndarray 17 | age_at_entry: jnp.ndarray 18 | sum_assured: jnp.ndarray 19 | policy_count: jnp.ndarray 20 | policy_term: jnp.ndarray 21 | max_proj_len: jnp.ndarray 22 | 23 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame, size_multiplier: int = 1): 24 | table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 25 | table.sort_values(by="policy_id", inplace=True) 26 | self.premium_pp = jnp.round(jnp.array(np.tile(table["sum_assured"].to_numpy() * table["premium_rate"].to_numpy(), size_multiplier)),decimals=2) 27 | self.duration_mth = jnp.array(jnp.tile(table["duration_mth"].to_numpy(), size_multiplier)) 28 | self.age_at_entry = jnp.array(jnp.tile(table["age_at_entry"].to_numpy(), size_multiplier)) 29 | self.sum_assured = jnp.array(jnp.tile(table["sum_assured"].to_numpy(), size_multiplier)) 30 | self.policy_count = jnp.array(jnp.tile(table["policy_count"].to_numpy(), size_multiplier)) 31 | self.policy_term = jnp.array(jnp.tile(table["policy_term"].to_numpy(), size_multiplier)) 32 | self.max_proj_len = jnp.max(12 * self.policy_term - self.duration_mth) + 1 33 | 34 | class AssumptionsEqx(eqx.Module): 35 | disc_rate_ann: jnp.ndarray 36 | mort_table: jnp.ndarray 37 | expense_acq: jnp.ndarray 38 | expense_maint: jnp.ndarray 39 | 40 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 41 | self.disc_rate_ann = jnp.array(disc_rate_ann["zero_spot"].to_numpy()) 42 | self.mort_table = jnp.array(mort_table.to_numpy()) 43 | self.expense_acq = jnp.array(300) 44 | self.expense_maint = jnp.array(60) 45 | 46 | class LoopState(eqx.Module): 47 | t: jnp.ndarray 48 | tot: jnp.ndarray 49 | pols_lapse_prev: jnp.ndarray 50 | pols_death_prev: jnp.ndarray 51 | pols_if_at_BEF_DECR_prev: jnp.ndarray 52 | 53 | class TermME(eqx.Module): 54 | mp: ModelPointsEqx 55 | assume: AssumptionsEqx 56 | init_ls: LoopState 57 | 58 | def __init__(self, mp: ModelPointsEqx, assume: AssumptionsEqx): 59 | self.mp = mp 60 | self.assume = assume 61 | self.init_ls = LoopState( 62 | t=jnp.array(0), 63 | tot = jnp.array(0), 64 | pols_lapse_prev=jnp.zeros_like(self.mp.duration_mth, dtype=jnp.float64), 65 | pols_death_prev=jnp.zeros_like(self.mp.duration_mth, dtype=jnp.float64), 66 | pols_if_at_BEF_DECR_prev=jnp.where(self.mp.duration_mth > 0, self.mp.policy_count, 0.) 67 | ) 68 | 69 | def __call__(self): 70 | def iterative_core(ls: LoopState, _): 71 | duration_month_t = self.mp.duration_mth + ls.t 72 | duration_t = duration_month_t // 12 73 | age_t = self.mp.age_at_entry + duration_t 74 | pols_if_init = ls.pols_if_at_BEF_DECR_prev - ls.pols_lapse_prev - ls.pols_death_prev 75 | pols_if_at_BEF_MAT = pols_if_init 76 | pols_maturity = (duration_month_t == self.mp.policy_term * 12) * pols_if_at_BEF_MAT 77 | pols_if_at_BEF_NB = pols_if_at_BEF_MAT - pols_maturity 78 | pols_new_biz = jnp.where(duration_month_t == 0, self.mp.policy_count, 0) 79 | pols_if_at_BEF_DECR = pols_if_at_BEF_NB + pols_new_biz 80 | mort_rate = self.assume.mort_table[age_t-18, jnp.clip(duration_t, a_max=5)] 81 | mort_rate_mth = 1 - (1 - mort_rate) ** (1/12) 82 | pols_death = pols_if_at_BEF_DECR * mort_rate_mth 83 | claims = self.mp.sum_assured * pols_death 84 | premiums = self.mp.premium_pp * pols_if_at_BEF_DECR 85 | commissions = (duration_t == 0) * premiums 86 | discount = (1 + self.assume.disc_rate_ann[ls.t//12]) ** (-ls.t/12) 87 | inflation_factor = (1 + 0.01) ** (ls.t/12) 88 | expenses = self.assume.expense_acq * pols_new_biz + pols_if_at_BEF_DECR * self.assume.expense_maint/12 * inflation_factor 89 | lapse_rate = jnp.clip(0.1 - 0.02 * duration_t, a_min=0.02) 90 | net_cf = premiums - claims - expenses - commissions 91 | discounted_net_cf = jnp.sum(net_cf) * discount 92 | nxt_ls = LoopState( 93 | t=ls.t+1, 94 | tot = ls.tot + discounted_net_cf, 95 | pols_lapse_prev=(pols_if_at_BEF_DECR - pols_death) * (1 - (1 - lapse_rate) ** (1/12)), 96 | pols_death_prev=pols_death, 97 | pols_if_at_BEF_DECR_prev=pols_if_at_BEF_DECR 98 | ) 99 | return nxt_ls, None 100 | return jax.lax.scan(iterative_core, self.init_ls, xs=None, length=277)[0].tot 101 | 102 | 103 | def run_jax_term_ME(term_me: TermME): 104 | return term_me() 105 | 106 | run_jax_term_ME_opt = jax.jit(run_jax_term_ME) 107 | 108 | def time_jax_func(mp, assume, func): 109 | term_me = TermME(mp, assume) 110 | result = func(term_me).block_until_ready() 111 | start = timeit.default_timer() 112 | result = func(term_me).block_until_ready() 113 | end = timeit.default_timer() 114 | elapsed_time = end - start # Time in seconds 115 | return float(result), elapsed_time 116 | 117 | def time_iterative_jax(multiplier: int): 118 | mp = ModelPointsEqx(model_point_table, premium_table, size_multiplier=multiplier) 119 | assume = AssumptionsEqx(disc_rate_ann, mort_table) 120 | result, time_in_seconds = time_jax_func(mp, assume, run_jax_term_ME_opt) 121 | print("JAX iterative model") 122 | print(f"number modelpoints={len(mp.duration_mth):,}") 123 | print(f"{result=:,}") 124 | print(f"{time_in_seconds=}") 125 | 126 | if __name__ == "__main__": 127 | time_iterative_jax(100) -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/term_me_recursive_pytorch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | import torch 6 | from heavylight import LightModel, agg 7 | import timeit 8 | 9 | print(f"{torch.cuda.is_available()=}") 10 | # set 64 bit precision 11 | torch.set_default_dtype(torch.float64) 12 | print(f"{torch.get_default_dtype()=}") 13 | 14 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 15 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 16 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 17 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 18 | 19 | class ModelPoints: 20 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame, size_multiplier: int = 1): 21 | self.table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 22 | self.table.sort_values(by="policy_id", inplace=True) 23 | self.premium_pp = torch.round(torch.tensor(np.tile(self.table["sum_assured"].to_numpy() * self.table["premium_rate"].to_numpy(), size_multiplier)),decimals=2) 24 | self.duration_mth = torch.tensor(np.tile(self.table["duration_mth"].to_numpy(), size_multiplier)) 25 | self.age_at_entry = torch.tensor(np.tile(self.table["age_at_entry"].to_numpy(), size_multiplier)) 26 | self.sum_assured = torch.tensor(np.tile(self.table["sum_assured"].to_numpy(), size_multiplier)) 27 | self.policy_count = torch.tensor(np.tile(self.table["policy_count"].to_numpy(), size_multiplier)) 28 | self.policy_term = torch.tensor(np.tile(self.table["policy_term"].to_numpy(), size_multiplier)) 29 | self.max_proj_len: int = int(torch.max(12 * self.policy_term - self.duration_mth) + 1) 30 | 31 | class Assumptions: 32 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 33 | self.disc_rate_ann = torch.tensor(disc_rate_ann["zero_spot"].to_numpy()) 34 | self.mort_table = torch.tensor(mort_table.to_numpy()) 35 | 36 | def get_mortality(self, age, duration): 37 | return self.mort_table[age-18, torch.clamp(duration, max=5)] 38 | 39 | agg_func = lambda x: float(torch.sum(x)) 40 | 41 | class TermME(LightModel): 42 | def __init__(self, mp: ModelPoints, assume: Assumptions): 43 | super().__init__(agg_function=None) 44 | self.mp = mp 45 | self.assume = assume 46 | 47 | def age(self, t): 48 | return self.mp.age_at_entry + self.duration(t) 49 | 50 | def claim_pp(self, t): 51 | return self.mp.sum_assured 52 | 53 | def claims(self, t): 54 | return self.claim_pp(t) * self.pols_death(t) 55 | 56 | def commissions(self, t): 57 | return (self.duration(t) == 0) * self.premiums(t) 58 | 59 | def disc_factors(self): 60 | return torch.tensor(list((1 + self.disc_rate_mth()[t])**(-t) for t in range(self.mp.max_proj_len))) 61 | 62 | def discount(self, t: int): 63 | return (1 + self.assume.disc_rate_ann[t//12]) ** (-t/12) 64 | 65 | def disc_rate_mth(self): 66 | return torch.tensor(list((1 + self.assume.disc_rate_ann[t//12])**(1/12) - 1 for t in range(self.mp.max_proj_len))) 67 | 68 | def duration(self, t): 69 | return self.duration_mth(t) // 12 70 | 71 | def duration_mth(self, t): 72 | if t == 0: 73 | return self.mp.duration_mth 74 | else: 75 | return self.duration_mth(t-1) + 1 76 | 77 | def expense_acq(self): 78 | return 300 79 | 80 | def expense_maint(self): 81 | return 60 82 | 83 | def expenses(self, t): 84 | return self.expense_acq() * self.pols_new_biz(t) \ 85 | + self.pols_if_at(t, "BEF_DECR") * self.expense_maint()/12 * self.inflation_factor(t) 86 | 87 | def inflation_factor(self, t): 88 | return (1 + self.inflation_rate())**(t/12) 89 | 90 | def inflation_rate(self): 91 | return 0.01 92 | 93 | def lapse_rate(self, t): 94 | return torch.clamp(0.1 - 0.02 * self.duration(t), min=0.02) 95 | 96 | def loading_prem(self): 97 | return 0.5 98 | 99 | def mort_rate(self, t): 100 | return self.assume.get_mortality(self.age(t), self.duration(t)) 101 | 102 | def mort_rate_mth(self, t): 103 | return 1-(1- self.mort_rate(t))**(1/12) 104 | 105 | def net_cf(self, t): 106 | return self.premiums(t) - self.claims(t) - self.expenses(t) - self.commissions(t) 107 | 108 | def pols_death(self, t): 109 | return self.pols_if_at(t, "BEF_DECR") * self.mort_rate_mth(t) 110 | 111 | @agg(agg_func) 112 | def discounted_net_cf(self, t): 113 | return torch.sum(self.net_cf(t)) * self.discount(t) 114 | 115 | def pols_if_at(self, t, timing): 116 | if timing == "BEF_MAT": 117 | if t == 0: 118 | return self.pols_if_init() 119 | else: 120 | return self.pols_if_at(t-1, "BEF_DECR") - self.pols_lapse(t-1) - self.pols_death(t-1) 121 | elif timing == "BEF_NB": 122 | return self.pols_if_at(t, "BEF_MAT") - self.pols_maturity(t) 123 | elif timing == "BEF_DECR": 124 | return self.pols_if_at(t, "BEF_NB") + self.pols_new_biz(t) 125 | else: 126 | raise ValueError("invalid timing") 127 | 128 | def pols_if_init(self): 129 | return torch.where(self.duration_mth(0) > 0, self.mp.policy_count, 0) 130 | 131 | def pols_lapse(self, t): 132 | return (self.pols_if_at(t, "BEF_DECR") - self.pols_death(t)) * (1-(1 - self.lapse_rate(t))**(1/12)) 133 | 134 | def pols_maturity(self, t): 135 | return (self.duration_mth(t) == self.mp.policy_term * 12) * self.pols_if_at(t, "BEF_MAT") 136 | 137 | def pols_new_biz(self, t): 138 | return torch.where(self.duration_mth(t) == 0, self.mp.policy_count, 0) 139 | 140 | def premiums(self, t): 141 | return self.mp.premium_pp * self.pols_if_at(t, "BEF_DECR") 142 | 143 | 144 | def run_recursive_model(model: TermME): 145 | model.cache_graph._caches = defaultdict(dict) 146 | model.cache_graph._caches_agg = defaultdict(dict) 147 | model.RunModel(model.mp.max_proj_len) 148 | return float(sum(model.cache_agg['discounted_net_cf'].values())) 149 | 150 | 151 | def time_recursive_GPU(model: TermME): 152 | model.OptimizeMemoryAndReset() 153 | start = torch.cuda.Event(enable_timing=True) 154 | end = torch.cuda.Event(enable_timing=True) 155 | start.record() 156 | result = run_recursive_model(model) 157 | end.record() 158 | torch.cuda.synchronize() 159 | return result, start.elapsed_time(end) / 1000 160 | 161 | def time_recursive_CPU(model: TermME): 162 | model.OptimizeMemoryAndReset() 163 | start = timeit.default_timer() 164 | result = run_recursive_model(model) 165 | end = timeit.default_timer() 166 | return result, end - start 167 | 168 | def time_recursive_PyTorch(multiplier: int): 169 | if torch.cuda.is_available(): 170 | device = torch.device('cuda') 171 | else: 172 | device = torch.device('cpu') 173 | print(f"{device=}") 174 | 175 | with device: 176 | mp = ModelPoints(model_point_table, premium_table) 177 | mp_multiplied = ModelPoints(model_point_table, premium_table, multiplier) 178 | assume = Assumptions(disc_rate_ann, mort_table) 179 | model = TermME(mp, assume) 180 | 181 | if device.type == 'cuda': 182 | time_recursive = time_recursive_GPU 183 | else: 184 | time_recursive = time_recursive_CPU 185 | run_recursive_model(model) # warm up, generate dependency graph 186 | model.mp = mp_multiplied 187 | result, time_in_seconds = time_recursive(model) 188 | # report results 189 | print("PyTorch recursive model") 190 | print(f"number modelpoints={len(mp_multiplied.duration_mth):,}") 191 | print(f"{result=:,}") 192 | print(f"{time_in_seconds=}") -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/CondaPkg.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | pandas = "1.5.3" 3 | jax = "0.4.8" 4 | jaxlib = "0.4.7" 5 | numpy = "1.24.2" 6 | jaxtyping = "0.2.15" 7 | pyyaml = "6.0" 8 | openpyxl = "3.1.2" 9 | modelx = "0.21.0" 10 | 11 | [pip.deps] 12 | lifelib = "" 13 | pymort = "" 14 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/Project.toml: -------------------------------------------------------------------------------- 1 | name = "Benchmarks" 2 | 3 | [deps] 4 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 5 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" 6 | CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab" 7 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" 8 | DayCounts = "44e31299-2c53-5a9b-9141-82aa45d7972f" 9 | ExperienceAnalysis = "51cd30ab-a913-41ff-9b6f-9b78880a2ac2" 10 | LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890" 11 | MortalityTables = "4780e19d-04b9-53dc-86c2-9e9aa59b5a12" 12 | PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" 13 | Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" 14 | YAML = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" 15 | 16 | [compat] 17 | julia = "1.9" 18 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/README.md: -------------------------------------------------------------------------------- 1 | # Julia benchmarks 2 | 3 | These benchmarks produce a `benchmark_results.yaml` file, obtained by running `main.jl`. 4 | They are run as part of the `bench-Julia` CI job under `.github/workflows/bench.yml`. 5 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/CondaPkg.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | pandas = "1.5.3" 3 | jax = "0.4.8" 4 | jaxlib = "0.4.7" 5 | numpy = "1.24.2" 6 | jaxtyping = "0.2.15" 7 | pyyaml = "6.0" 8 | openpyxl = "3.1.2" 9 | modelx = "0.21.0" 10 | 11 | [pip.deps] 12 | lifelib = "" 13 | pymort = "" 14 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" 3 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 4 | CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" 5 | Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" 6 | LifeSimulator = "73783465-395e-4165-b528-1c694332812b" 7 | PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" 8 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/README.md: -------------------------------------------------------------------------------- 1 | # Analysis 2 | 3 | To reproduce the images found under `./images` (currently shown in the README), simply run 4 | 5 | ```bash 6 | /path/to/benchmarks/Julia/analysis$ julia --color=yes --project analysis.jl 7 | ``` 8 | 9 | Running the various benchmarks and timings will take at least a few minutes. 10 | 11 | You will likely a machine with at least 16 GB of RAM. 32 GB of RAM is recommended for running the model with 10,000,000 points (last stage of the analysis to stress-test and evaluate the performance at large scale). 12 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/analysis.jl: -------------------------------------------------------------------------------- 1 | using LifeSimulator, CairoMakie, BenchmarkTools, PythonCall, Accessors, Dates 2 | 3 | images_folder() = joinpath(@__DIR__, "images") 4 | 5 | model_string(::LifelibBasiclife) = "basic_life" 6 | model_string(::LifelibSavings) = "universal_life" 7 | model_title(::LifelibBasiclife) = "Term life" 8 | model_title(::LifelibSavings) = "Universal life" 9 | language_used_memoized(::LifelibBasiclife) = "Julia" 10 | language_used_memoized(::LifelibSavings) = "Python" 11 | 12 | include("../read_model.jl") 13 | !@isdefined(proj) && (proj = read_savings_model()) 14 | 15 | # Store results into a dictionary to avoid having to recompute benchmark data every time. 16 | # Empty these dictionaries if you want to regenerate the results. 17 | const TIME_RESULTS = Dict{Model,NamedTuple}() 18 | const MEMORY_RESULTS = Dict{Model,NamedTuple}() 19 | const term_life_model = Ref(LifelibBasiclife(commission_rate = 1.0)) 20 | const universal_life_model = Ref(LifelibSavings()) 21 | 22 | include("time_complexity.jl") 23 | include("memory_complexity.jl") 24 | 25 | @info "Running simulation with 10,000,000 model points" 26 | policies = rand(PolicySet, 10_000_000) 27 | CashFlow(universal_life_model[], rand(PolicySet, 1_000), 5) # JIT compilation 28 | # @with SHOW_PROGRESS => true @time CashFlow(universal_life_model[], policies, 150) 29 | open(joinpath(@__DIR__, "large_run.txt"), "w+") do io 30 | ex = :(CashFlow(universal_life_model[], policies, 150)) 31 | println(io, "julia> ", ex) 32 | redirect_stdout(io) do 33 | @eval @time $ex 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_basic_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_basic_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_universal_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_universal_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_basic_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_basic_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_universal_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_universal_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/time_complexity_basic_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/time_complexity_basic_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/time_complexity_universal_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/time_complexity_universal_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/large_run.txt: -------------------------------------------------------------------------------- 1 | julia> CashFlow(universal_life_model[], policies, 150) 2 | 92.053248 seconds (84 allocations: 8.473 GiB, 0.05% gc time) 3 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/memory_complexity.jl: -------------------------------------------------------------------------------- 1 | function generate_memory_complexity_data(model::Model) 2 | @info "Generating memory complexity benchmarks for model $(nameof(typeof(model)))" 3 | sizes = [9, 100, 1_000, 10_000, 100_000] 4 | files = "savings/" .* ["model_point_table_9.csv", "model_point_table_100.csv", "model_point_table_1K.csv", "model_point_table_10K.csv", "model_point_table_100K.csv"] 5 | ts = 50:50:150 6 | allocations = zeros(length(files), length(ts)) 7 | for (i, file) in enumerate(files) 8 | for (j, n) in enumerate(ts) 9 | policies = policies_from_csv(file) 10 | allocations[i, j] = (@benchmark CashFlow($model, isa($model, $LifelibBasiclife) ? estimate_premiums($model, $policies, $n) : $policies, $n)).memory / 1e6 11 | end 12 | end 13 | MEMORY_RESULTS[model] = (; ts, files, sizes, allocations) 14 | end 15 | 16 | function plot_memory_complexity_results(model; folder = images_folder()) 17 | (; ts, sizes, files, allocations) = MEMORY_RESULTS[model] 18 | colors = Makie.wong_colors() 19 | 20 | fig = Figure(; resolution = (1000, 300)) 21 | ax = Axis(fig[1, 1]; title = "Memory allocations - $(model_title(model)) model", xlabel = "Number of time steps", ylabel = "Allocations (MB)", yscale = log10, xticks = ts) 22 | ls = [lines!(ax, ts, allocations[i, :]; color = colors[i]) for i in eachindex(sizes)] 23 | ss = [scatter!(ax, ts, allocations[i, :], color = colors[i]; marker = :x) for i in eachindex(sizes)] 24 | Legend(fig[1, 2], reverse(collect(collect.(zip(ls, ss)))), "n = " .* reverse(string.(sizes))) 25 | file = joinpath(folder, "memory_complexity_variable_duration_$(model_string(model)).png") 26 | @info "Saving plot at $file" 27 | save(file, fig) 28 | 29 | fig = Figure(; resolution = (1000, 300)) 30 | ax = Axis(fig[1, 1]; title = "Memory allocations - $(model_title(model)) model ($(maximum(ts)) timesteps)", xlabel = "Model size", ylabel = "Allocations (MB)", xscale = log10, yscale = log10) 31 | lines!(ax, sizes, allocations[:, end]; color = colors[1]) 32 | scatter!(ax, sizes, allocations[:, end]; color = colors[1], marker = :x) 33 | file = joinpath(folder, "memory_complexity_static_duration_$(model_string(model)).png") 34 | @info "Saving plot at $file" 35 | save(file, fig) 36 | end 37 | 38 | function memory_complexity_benchmarks(model::Model; folder = images_folder()) 39 | !haskey(MEMORY_RESULTS, model) && generate_memory_complexity_data(model) 40 | plot_memory_complexity_results(model; folder) 41 | end 42 | 43 | memory_complexity_benchmarks(term_life_model[]) 44 | memory_complexity_benchmarks(universal_life_model[]) 45 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/time_complexity.jl: -------------------------------------------------------------------------------- 1 | function generate_time_complexity_data(model::LifelibBasiclife) 2 | @info "Generating time complexity benchmarks for model $(nameof(typeof(model)))" 3 | sizes = [10, 100, 1_000, 10_000] 4 | files = "basic_term/" .* ["model_point_table_10.csv", "model_point_table_100.csv", "model_point_table_1K.csv", "model_point_table_10K.csv"] 5 | iterative_timings = Float64[] 6 | memoized_timings = Float64[] 7 | for file in files 8 | policies = policies_from_csv(file) 9 | 10 | n = BasicTermMemoized.final_timestep[] 11 | push!(iterative_timings, minimum(@benchmark CashFlow($model, estimate_premiums($model, $policies, $n), $n)).time * 1e-9) 12 | 13 | set_basic_term_policies!(policies) 14 | push!(memoized_timings, minimum(@benchmark begin 15 | empty_memoization_caches!() 16 | sum(LifeSimulator.pv_net_cf()) 17 | end).time * 1e-9) 18 | end 19 | TIME_RESULTS[model] = (; sizes, files, iterative_timings, memoized_timings) 20 | end 21 | 22 | function generate_time_complexity_data(model::LifelibSavings) 23 | @info "Generating time complexity benchmarks for model $(nameof(typeof(model)))" 24 | proj.scen_size = 1 25 | sizes = [9, 100, 1_000, 10_000, 100_000] 26 | files = "savings/" .* ["model_point_table_9.csv", "model_point_table_100.csv", "model_point_table_1K.csv", "model_point_table_10K.csv", "model_point_table_100K.csv"] 27 | julia_timings = Float64[] 28 | python_timings = Float64[] 29 | timeit = pyimport("timeit") 30 | for (i, file) in enumerate(files) 31 | trials = Int(min(50, 3e5 ÷ sizes[i])) 32 | policies = policies_from_csv(file) 33 | 34 | # lifelib will always simulate until the largest policy term, so we make sure we have no policies beyond 35 | # a desired simulation end (e.g. 30 years) and at least one policy reaching such a term. 36 | # In this way, timesteps are consistent across evaluations with different numbers of policies. 37 | policies .= map(set -> @set(set.policy.issued_at = Month(0)), policies) 38 | policies .= map(set -> @set(set.policy.term = min(set.policy.term, Year(20))), policies) 39 | set = policies[1]; policies[1] = @set set.policy.term = Year(20) 40 | 41 | use_policies!(proj, policies) 42 | @assert ntimesteps(proj) == 241 43 | 44 | push!(julia_timings, minimum(@benchmark CashFlow(sim, n) setup = begin 45 | policies = policies_from_csv(proj) 46 | n = ntimesteps(proj) 47 | model = LifelibSavings(investment_rates = investment_rate(proj)) 48 | sim = Simulation(model, policies) 49 | end).time * 1e-9) 50 | 51 | push!(python_timings, minimum(pyconvert(Array, timeit.repeat("proj.clear_cache = 1; proj.pv_net_cf().sum()"; globals = pydict(; proj), number = 1, repeat = trials)))) 52 | end 53 | TIME_RESULTS[model] = (; sizes, files, iterative_timings = julia_timings, memoized_timings = python_timings) 54 | end 55 | 56 | function plot_time_complexity_results(model; folder = images_folder()) 57 | (; sizes, files, iterative_timings, memoized_timings) = TIME_RESULTS[model] 58 | colors = Makie.wong_colors() 59 | fig = Figure(; resolution = (1000, 300)) 60 | ax = Axis(fig[1, 1]; title = "Time performance - $(model_title(model)) models", xlabel = "Number of policy sets", ylabel = "Time (s)", xscale = log10, yscale = log10) 61 | l1 = lines!(ax, sizes, iterative_timings; color = colors[1]) 62 | l2 = lines!(ax, sizes, memoized_timings; color = colors[2]) 63 | s1 = scatter!(ax, sizes, iterative_timings; color = colors[1], marker = :x) 64 | s2 = scatter!(ax, sizes, memoized_timings; color = colors[2], marker = :x) 65 | Legend(fig[1, 2], [[l1, s1], [l2, s2]], ["Iterative (Julia)", "Memoized ($(language_used_memoized(model)))"]) 66 | file = joinpath(folder, "time_complexity_$(model_string(model)).png") 67 | @info "Saving plot at $file" 68 | save(file, fig) 69 | end 70 | 71 | function time_complexity_benchmarks(model::Model; folder = images_folder()) 72 | !haskey(TIME_RESULTS, model) && generate_time_complexity_data(model) 73 | plot_time_complexity_results(model; folder) 74 | end 75 | 76 | time_complexity_benchmarks(term_life_model[]) 77 | time_complexity_benchmarks(universal_life_model[]) 78 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/basic_term.jl: -------------------------------------------------------------------------------- 1 | using LifeSimulator: empty_memoization_caches!, pv_net_cf 2 | 3 | function cf1() 4 | empty_memoization_caches!() 5 | sum(pv_net_cf()) 6 | end 7 | 8 | function run_basic_term_benchmark() 9 | cf1_benchmark = @benchmark cf1() 10 | result = cf1() 11 | return Dict( 12 | "minimum time" => string(minimum(cf1_benchmark)), 13 | "result" => string(result), 14 | ) 15 | end 16 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/basic_term_array.jl: -------------------------------------------------------------------------------- 1 | using CSV 2 | using DataFrames 3 | using Tables 4 | using LoopVectorization 5 | 6 | # Random uniform distribution in PyTorch 7 | 8 | read_csv(file) = CSV.read(data_file(file), DataFrame) 9 | data_file(file) = joinpath(dirname(@__DIR__), "Python", "BasicTerm_M", file) 10 | 11 | 12 | function project(max_proj_len, disc_rate, sum_assured, policy_term, age_at_entry, mort, loading_prem, expense_acq, expense_maint, inflation_rate) 13 | time_axis = 0:(max_proj_len-1) 14 | duration = time_axis .÷ 12 15 | discount_factors = @. (1 + disc_rate[duration+1])^(-time_axis / 12) 16 | inflation_factor = @. (1 + inflation_rate)^(time_axis / 12) 17 | lapse_rate = @. max(0.1 - 0.02 * duration, 0.02) 18 | lapse_rate_monthly = @. 1 - (1 - lapse_rate)^(1 / 12) 19 | 20 | monthly_mortality = [mort[ia+d-17, min(d + 1, 6)] for ia in age_at_entry, d in duration] 21 | monthly_mortality .= @turbo @. 1 - (1 - monthly_mortality)^(1 / 12) 22 | 23 | pols_if = let 24 | m = similar(monthly_mortality) 25 | for I in CartesianIndices(m) 26 | i, j = Tuple(I) 27 | m[i, j] = if j == 1 28 | 1.0 29 | elseif policy_term[i] * 12 < j 30 | 0.0 31 | else 32 | m[i, j-1] * (1 - lapse_rate_monthly[j-1]) * (1 - monthly_mortality[i, j-1]) 33 | end 34 | end 35 | m 36 | end 37 | 38 | claims = @. monthly_mortality * pols_if * sum_assured 39 | pv_claims = claims * discount_factors 40 | pv_pols_if = pols_if * discount_factors 41 | net_premium = pv_claims ./ pv_pols_if 42 | premium_pp = @. round((1 + loading_prem) * net_premium, digits=2) 43 | premiums = premium_pp .* pols_if 44 | commissions = (duration .== 0)' .* premiums 45 | expenses = @. (expense_maint / 12 * inflation_factor)' * pols_if 46 | expenses[:, 1] .+= expense_acq 47 | pv_premiums = premiums * discount_factors 48 | pv_expenses = expenses * discount_factors 49 | pv_commissions = commissions * discount_factors 50 | pv_net_cf = @. pv_premiums - pv_claims - pv_expenses - pv_commissions 51 | sum(pv_net_cf) 52 | end 53 | 54 | function run_basicterm_array_benchmark() 55 | # parameters 56 | max_proj_len = 12 * 20 + 1 57 | loading_prem = 0.5 58 | expense_acq = 300.0 59 | expense_maint = 60.0 60 | inflation_rate = 0.01 61 | 62 | mp = read_csv("model_point_table.csv") 63 | disc_rate = read_csv("disc_rate_ann.csv").zero_spot 64 | sum_assured = mp.sum_assured 65 | policy_term = mp.policy_term 66 | age_at_entry = mp.age_at_entry 67 | mort = CSV.read(data_file("mort_table.csv"), Tables.matrix; drop=[1]) 68 | 69 | result = project( 70 | max_proj_len, 71 | disc_rate, 72 | sum_assured, 73 | policy_term, 74 | age_at_entry, 75 | mort, 76 | loading_prem, 77 | expense_acq, 78 | expense_maint, 79 | inflation_rate, 80 | ) 81 | 82 | b1 = @benchmark return project( 83 | $max_proj_len, 84 | $disc_rate, 85 | $sum_assured, 86 | $policy_term, 87 | $age_at_entry, 88 | $mort, 89 | $loading_prem, 90 | $expense_acq, 91 | $expense_maint, 92 | $inflation_rate, 93 | ) 94 | 95 | return Dict( 96 | "result" => result, 97 | "minimum time" => string(minimum(b1)), 98 | ) 99 | end -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/benchmark_results.yaml: -------------------------------------------------------------------------------- 1 | basic_term_benchmark: 2 | Julia array basic_term: 3 | minimum time: "TrialEstimate(29.152 ms)" 4 | result: 1.4489630534602132e7 5 | Julia recursive basic_term: 6 | minimum time: "TrialEstimate(81.070 ms)" 7 | result: "1.4489630534602132e7" 8 | mortality: 9 | Julia MortalityTables.jl: 10 | minimum time: "TrialEstimate(239.946 μs)" 11 | result: 1904.4865526636793 12 | savings_benchmark: 13 | Julia Benchmarks savings: 14 | minimum time: "TrialEstimate(118.603 ms)" 15 | result: 3.507113709040273e12 16 | exposures: 17 | Julia ExperienceAnalysis.jl: 18 | num_rows: 141281 19 | minimum time: "TrialEstimate(29.284 ms)" 20 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/exposures.jl: -------------------------------------------------------------------------------- 1 | using DataFrames 2 | using CSV 3 | using Dates 4 | using BenchmarkTools 5 | using ExperienceAnalysis 6 | using DayCounts 7 | 8 | function expsoures_ExperienceAnalysis( 9 | df_yearly::DataFrame, 10 | study_start::Date, 11 | study_end::Date, 12 | ) 13 | continue_exposure = df_yearly.status .== "Surrender" 14 | df_yearly.exposure = 15 | ExperienceAnalysis.exposure.( 16 | ExperienceAnalysis.Anniversary(Year(1)), # The basis for our exposures 17 | df_yearly.issue_date, # The `from` date 18 | df_yearly.term_date, # the `to` date array we created above 19 | continue_exposure; 20 | study_start=study_start, 21 | study_end = study_end, 22 | left_partials=false 23 | ) 24 | df_yearly = flatten(df_yearly, :exposure) 25 | df_yearly.exposure_fraction = 26 | map(e -> yearfrac(e.from, e.to, DayCounts.Thirty360()), df_yearly.exposure) 27 | return df_yearly 28 | end 29 | 30 | 31 | 32 | function run_exposure_benchmarks() 33 | df = CSV.read(joinpath(dirname(@__DIR__), "data", "census_dat.csv"), DataFrame) 34 | df.term_date = [d == "NA" ? nothing : Date(d, "yyyy-mm-dd") for d in df.term_date] 35 | study_end = Date(2020, 2, 29) 36 | study_start = Date(2006, 6, 15) 37 | df_yearly_exp = copy(df) 38 | result_exp = expsoures_ExperienceAnalysis(copy(df), study_start, study_end) 39 | b_exp = @benchmark expsoures_ExperienceAnalysis($df_yearly_exp, $study_start, $study_end) 40 | 41 | return Dict( 42 | "Julia ExperienceAnalysis.jl" => Dict( 43 | "num_rows" => size(result_exp, 1), 44 | "minimum time" => string(minimum(b_exp)), 45 | ) 46 | ) 47 | end 48 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/main.jl: -------------------------------------------------------------------------------- 1 | using Pkg 2 | Pkg.add(url="https://github.com/JuliaActuary/LifeSimulator.jl") 3 | 4 | include("mortality.jl") 5 | include("exposures.jl") 6 | include("basic_term.jl") 7 | include("basic_term_array.jl") 8 | include("savings.jl") 9 | import YAML 10 | 11 | 12 | function run_benchmarks() 13 | return Dict( 14 | "mortality" => run_mortality_benchmarks(), 15 | "exposures" => run_exposure_benchmarks(), 16 | "basic_term_benchmark" => Dict( 17 | "Julia recursive basic_term" => run_basic_term_benchmark(), 18 | "Julia array basic_term" => run_basicterm_array_benchmark(), 19 | ), 20 | "savings_benchmark" => run_savings_benchmark(), 21 | ) 22 | end 23 | 24 | YAML.write_file("benchmark_results.yaml", run_benchmarks()) 25 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/mortality.jl: -------------------------------------------------------------------------------- 1 | using MortalityTables 2 | using BenchmarkTools 3 | 4 | @inline function npv(qs, r, term = length(qs)) 5 | inforce, result = 1.0, 0.0 6 | v = 1 / (1 + r) 7 | v_t = v 8 | @inbounds @simd for t = 1:min(term, length(qs)) 9 | q = qs[t] 10 | result += inforce * q * v_t 11 | inforce = inforce * (1 - q) 12 | v_t *= v 13 | end 14 | return result 15 | end 16 | 17 | function mortality1(tbls = MortalityTables.table.(3299:3308)) 18 | issue_ages = 18:50 19 | durations = 1:25 20 | term = 29 21 | total = 0.0 22 | @inbounds for i in eachindex(tbls), ia in issue_ages, dur in durations 23 | start_age = ia + dur - 1 24 | total += @views npv(tbls[i].select[ia][start_age:start_age+term], 0.02) 25 | end 26 | return total 27 | end 28 | 29 | function run_mortality_benchmarks() 30 | tbls = MortalityTables.table.(3299:3308) 31 | mort1_result = mortality1(tbls) 32 | b1 = @benchmark mortality1($tbls) 33 | return Dict( 34 | "Julia MortalityTables.jl" => Dict( 35 | "result" => mort1_result, 36 | "minimum time" => string(minimum(b1)), 37 | ), 38 | ) 39 | end 40 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/read_model.jl: -------------------------------------------------------------------------------- 1 | using PythonCall: pyimport 2 | 3 | python_directory() = joinpath(dirname(@__DIR__), "Python") 4 | 5 | "Read a specific `savings` model, such as `SE_EX4` or `ME_EX4`." 6 | function read_savings_model(model = "ME_EX4"; dir = python_directory()) 7 | mx = pyimport("modelx") 8 | mx.read_model(joinpath(dir, "CashValue_$model")).Projection 9 | end 10 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/savings.jl: -------------------------------------------------------------------------------- 1 | using LifeSimulator 2 | using BenchmarkTools 3 | 4 | include("read_model.jl") 5 | 6 | function run_savings_benchmark() 7 | proj = read_savings_model() 8 | proj.scen_size = 1 9 | policies = policies_from_csv("savings/model_point_table_10K.csv") 10 | use_policies!(proj, policies) 11 | model = LifelibSavings(investment_rates = investment_rate(proj)) 12 | n = ntimesteps(proj) 13 | savings_benchmark = @benchmark CashFlow($model, $policies, $n).discounted 14 | savings = CashFlow(model, policies, n).discounted 15 | Dict( 16 | "Julia Benchmarks savings" => Dict( 17 | "minimum time" => string(minimum(savings_benchmark)), 18 | "result" => savings, 19 | ) 20 | ) 21 | end 22 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "BasicTerm_M" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/_data/data.pickle -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 16, 1], "serializer_version": 4} -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/disc_rate_ann.csv: -------------------------------------------------------------------------------- 1 | year,zero_spot 2 | 0,0 3 | 1,0.00555 4 | 2,0.006840000000000001 5 | 3,0.00788 6 | 4,0.00866 7 | 5,0.00937 8 | 6,0.00997 9 | 7,0.0105 10 | 8,0.01098 11 | 9,0.01144 12 | 10,0.01188 13 | 11,0.01226 14 | 12,0.01259 15 | 13,0.01285 16 | 14,0.01308 17 | 15,0.0133 18 | 16,0.01345 19 | 17,0.01358 20 | 18,0.01368 21 | 19,0.01375 22 | 20,0.01378 23 | 21,0.01379 24 | 22,0.01376 25 | 23,0.01373 26 | 24,0.01369 27 | 25,0.01365 28 | 26,0.01361 29 | 27,0.01356 30 | 28,0.01351 31 | 29,0.01346 32 | 30,0.0134 33 | 31,0.01333 34 | 32,0.01325 35 | 33,0.01316 36 | 34,0.01306 37 | 35,0.01295 38 | 36,0.01283 39 | 37,0.01271 40 | 38,0.0126 41 | 39,0.0125 42 | 40,0.01241 43 | 41,0.01235 44 | 42,0.01229 45 | 43,0.01222 46 | 44,0.01214 47 | 45,0.01203 48 | 46,0.0119 49 | 47,0.01178 50 | 48,0.01168 51 | 49,0.01164 52 | 50,0.01166 53 | 51,0.01177 54 | 52,0.01193 55 | 53,0.01215 56 | 54,0.01241 57 | 55,0.0127 58 | 56,0.01301 59 | 57,0.01333 60 | 58,0.01367 61 | 59,0.01402 62 | 60,0.01437 63 | 61,0.01473 64 | 62,0.01508 65 | 63,0.01543 66 | 64,0.01579 67 | 65,0.01613 68 | 66,0.01648 69 | 67,0.01682 70 | 68,0.01715 71 | 69,0.01748 72 | 70,0.0178 73 | 71,0.01812 74 | 72,0.01843 75 | 73,0.01874 76 | 74,0.01903 77 | 75,0.01933 78 | 76,0.01961 79 | 77,0.01989 80 | 78,0.02016 81 | 79,0.02043 82 | 80,0.02069 83 | 81,0.02095 84 | 82,0.0212 85 | 83,0.02144 86 | 84,0.02168 87 | 85,0.02192 88 | 86,0.02215 89 | 87,0.02237 90 | 88,0.02259 91 | 89,0.0228 92 | 90,0.02301 93 | 91,0.02322 94 | 92,0.02342 95 | 93,0.02362 96 | 94,0.02381 97 | 95,0.024 98 | 96,0.02419 99 | 97,0.02437 100 | 98,0.02455 101 | 99,0.02472 102 | 100,0.02489 103 | 101,0.02506 104 | 102,0.02522 105 | 103,0.02539 106 | 104,0.02554 107 | 105,0.0257 108 | 106,0.02585 109 | 107,0.026 110 | 108,0.02615 111 | 109,0.02629 112 | 110,0.02643 113 | 111,0.02657 114 | 112,0.02671 115 | 113,0.02684 116 | 114,0.02698 117 | 115,0.02711 118 | 116,0.02723 119 | 117,0.02736 120 | 118,0.02748 121 | 119,0.0276 122 | 120,0.02772 123 | 121,0.02784 124 | 122,0.02795 125 | 123,0.02807 126 | 124,0.02818 127 | 125,0.02829 128 | 126,0.0284 129 | 127,0.0285 130 | 128,0.02861000000000001 131 | 129,0.02871000000000001 132 | 130,0.02881 133 | 131,0.02891 134 | 132,0.02901 135 | 133,0.02911000000000001 136 | 134,0.0292 137 | 135,0.0293 138 | 136,0.02939 139 | 137,0.02948000000000001 140 | 138,0.02957 141 | 139,0.02966 142 | 140,0.02975 143 | 141,0.02984 144 | 142,0.02992 145 | 143,0.03001 146 | 144,0.03009 147 | 145,0.03017 148 | 146,0.03025 149 | 147,0.03033000000000001 150 | 148,0.03041000000000001 151 | 149,0.03049 152 | 150,0.03056000000000001 153 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/model_point_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/model_point_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/mort_table.csv: -------------------------------------------------------------------------------- 1 | Age,0,1,2,3,4,5 2 | 18,0.0002310671048780701,0.0002541738153658771,0.0002795911969024648,0.0003075503165927113,0.0003383053482519825,0.0003721358830771808 3 | 19,0.0002353200126583699,0.000258852013924207,0.0002847372153166277,0.0003132109368482905,0.0003445320305331195,0.0003789852335864315 4 | 20,0.0002399363756770145,0.0002639300132447159,0.0002903230145691875,0.0003193553160261063,0.000351290847628717,0.0003864199323915887 5 | 21,0.000244937423152777,0.0002694311654680547,0.0002963742820148602,0.0003260117102163462,0.0003586128812379809,0.000394474169361779 6 | 22,0.0002503462740334557,0.0002753809014368013,0.0003029189915804814,0.0003332108907385296,0.0003665319798123826,0.0004031851777936208 7 | 23,0.0002561881183155183,0.0002818069301470701,0.0003099876231617771,0.0003409863854779549,0.0003750850240257504,0.0004125935264283255 8 | 24,0.0002624904157144116,0.0002887394572858528,0.0003176134030144381,0.0003493747433158819,0.0003843122176474701,0.0004227434394122172 9 | 25,0.0002692831139799757,0.0002962114253779732,0.0003258325679157706,0.0003584158247073477,0.0003942574071780824,0.0004336831478958907 10 | 26,0.0002765988893708184,0.0003042587783079002,0.0003346846561386903,0.0003681531217525593,0.0004049684339278153,0.0004454652773205969 11 | 27,0.0002844734120632121,0.0003129207532695333,0.0003442128285964867,0.0003786341114561354,0.000416497522601749,0.0004581472748619239 12 | 28,0.0002929456395766907,0.0003222402035343598,0.0003544642238877958,0.0003899106462765754,0.000428901710904233,0.0004717918819946564 13 | 29,0.0003020581416539583,0.0003322639558193541,0.0003654903514012896,0.0004020393865414186,0.0004422433251955605,0.0004864676577151166 14 | 30,0.0003118574604420908,0.0003430432064863,0.00037734752713493,0.000415082279848423,0.0004565905078332653,0.0005022495586165919 15 | 31,0.0003223945102916596,0.0003546339613208256,0.0003900973574529082,0.0004291070931981991,0.000472017802518019,0.000519219582769821 16 | 32,0.0003337250220279336,0.000367097524230727,0.0004038072766537997,0.0004441880043191797,0.0004886068047510977,0.0005374674852262075 17 | 33,0.0003459100371627477,0.0003805010408790224,0.0004185511449669247,0.0004604062594636172,0.000506446885409979,0.0005570915739509769 18 | 34,0.0003590164582175236,0.000394918104039276,0.0004344099144432036,0.0004778509058875241,0.0005256359964762765,0.0005781995961239042 19 | 35,0.0003731176621295971,0.0004104294283425568,0.0004514723711768126,0.0004966196082944939,0.0005462815691239433,0.0006009097260363377 20 | 36,0.0003882941846297993,0.0004271236030927793,0.0004698359634020572,0.000516819559742263,0.0005685015157164894,0.0006253516672881384 21 | 37,0.0004046344845257716,0.0004450979329783488,0.0004896077262761837,0.0005385684989038021,0.0005924253487941823,0.0006516678836736005 22 | 38,0.0004222357980220614,0.0004644593778242676,0.0005109053156066944,0.0005619958471673638,0.0006181954318841003,0.0006800149750725104 23 | 39,0.0004412050945770665,0.0004853256040347732,0.0005338581644382506,0.0005872439808820756,0.0006459683789702832,0.0007105652168673116 24 | 40,0.0004616601473642979,0.0005078261621007278,0.0005586087783108006,0.0006144696561418807,0.0006759166217560689,0.0007435082839316758 25 | 41,0.0004837307332014202,0.0005321038065215623,0.0005853141871737187,0.0006438456058910905,0.0007082301664801996,0.0007790531831282197 26 | 42,0.0005075599788701063,0.0005583159767571169,0.0006141475744328287,0.0006755623318761115,0.0007431185650637228,0.0008174304215700951 27 | 43,0.0005333058731135947,0.0005866364604249543,0.0006453001064674498,0.0007098301171141948,0.0007808131288256143,0.0008588944417081758 28 | 44,0.0005611429663144072,0.000617257262945848,0.0006789829892404329,0.0007468812881644762,0.0008215694169809239,0.0009037263586790164 29 | 45,0.0005912642829769909,0.00065039071127469,0.0007154297824021591,0.0007869727606423751,0.0008656700367066127,0.0009522370403772741 30 | 46,0.0006238834757334444,0.0006862718233067889,0.0007548990056374678,0.0008303889062012146,0.0009134277968213361,0.00100477057650347 31 | 47,0.0006592372537298736,0.000725160979102861,0.0007976770770131472,0.0008774447847144619,0.0009651892631859082,0.001061708189504499 32 | 48,0.0006975881230237241,0.0007673469353260965,0.0008440816288587063,0.000928489791744577,0.001021338770919035,0.001123472648010938 33 | 49,0.0007392274821309431,0.0008131502303440375,0.0008944652533784413,0.0009839117787162855,0.001082302956587914,0.001190533252246706 34 | 50,0.0007844791222255886,0.0008629270344481476,0.0009492197378929624,0.001044141711682259,0.001148555882850485,0.001263411471135533 35 | 51,0.0008337031888533204,0.0009170735077386526,0.001008780858512518,0.00110965894436377,0.001220624838800147,0.001342687322680162 36 | 52,0.0008873006705383532,0.0009760307375921886,0.001073633811351408,0.001180997192486548,0.001299096911735203,0.001429006602908724 37 | 53,0.0009457184895330499,0.001040290338486355,0.001144319372334991,0.00125875130956849,0.001384626440525339,0.001523089084577873 38 | 54,0.001009455281406362,0.001110400809546999,0.001221440890501699,0.001343584979551869,0.001477943477507056,0.001625737825257762 39 | 55,0.00107906796345734,0.001186974759803075,0.001305672235783382,0.00143623945936172,0.001579863405297893,0.001737849745827682 40 | 56,0.001155179207384984,0.001270697128123482,0.00139776684093583,0.001537543525029414,0.001691297877532355,0.001860427665285591 41 | 57,0.00123848594961408,0.001362334544575489,0.001498567999033038,0.001648424798936341,0.001813267278829976,0.001994594006712974 42 | 58,0.001329769093601377,0.001462746002961515,0.001609020603257667,0.001769922663583434,0.001946914929941777,0.002141606422935955 43 | 59,0.001429904582839669,0.001572895041123636,0.001730184545236,0.0019032029997596,0.00209352329973556,0.002302875629709116 44 | 60,0.001539876051743219,0.001693863656917541,0.001863250022609295,0.002049575024870224,0.002254532527357247,0.002479985780092972 45 | 61,0.00166078929485154,0.001826868224336694,0.002009555046770363,0.0022105105514474,0.00243156160659214,0.002674717767251354 46 | 62,0.001793888833675047,0.001973277717042552,0.002170605488746808,0.002387666037621488,0.002626432641383637,0.002889075905522001 47 | 63,0.001940576906029566,0.002134634596632523,0.002348098056295775,0.002582907861925353,0.002841198648117888,0.003125318512929678 48 | 64,0.002102435256054419,0.002312678781659861,0.002543946659825847,0.002798341325808432,0.003078175458389276,0.003385993004228204 49 | 65,0.002281250165694602,0.002509375182264062,0.002760312700490468,0.003036343970539515,0.003339978367593467,0.003673976204352814 50 | 66,0.002479041241928696,0.002726945366121566,0.002999639902733722,0.003299603893007094,0.003629564282307804,0.003992520710538585 51 | 67,0.002698094560439211,0.002967904016483132,0.003264694418131445,0.00359116385994459,0.003950280245939049,0.004345308270532955 52 | 68,0.002941000868128997,0.003235100954941897,0.003558611050436087,0.003914472155479696,0.004305919371027666,0.004736511308130433 53 | 69,0.003210699666725728,0.003531769633398301,0.003884946596738132,0.004273441256411946,0.004700785382053141,0.005170863920258455 54 | 70,0.003510530141071987,0.003861583155179186,0.004247741470697105,0.004672515617766815,0.005139767179543497,0.005653743897497847 55 | 71,0.003844290062620606,0.004228719068882667,0.004651590975770933,0.005116750073348027,0.00562842508068283,0.006191267588751114 56 | 72,0.004216303995988054,0.004637934395586859,0.005101727835145545,0.0056119006186601,0.00617309068052611,0.006790399748578721 57 | 73,0.004631502369963116,0.005094652606959428,0.005604117867655371,0.006164529654420908,0.006780982619863,0.007459080881849301 58 | 74,0.005095513251082814,0.005605064576191096,0.006165571033810205,0.006782128137191226,0.00746034095091035,0.008206375046001385 59 | 75,0.00561476898612035,0.006176245884732386,0.006793870473205625,0.007473257520526189,0.008220583272578809,0.00904264159983669 60 | 76,0.006196630269622466,0.006816293296584713,0.007497922626243185,0.008247714888867504,0.009072486377754254,0.00997973501552968 61 | 77,0.006849530656051229,0.007534483721656353,0.008287932093821988,0.009116725303204188,0.01002839783352461,0.01103123761687707 62 | 78,0.0075831450876836,0.008341459596451961,0.009175605556097158,0.01009316611170687,0.01110248272287756,0.01221273099516532 63 | 79,0.008408586666725113,0.009249445333397626,0.01017438986673739,0.01119182885341113,0.01231101173875224,0.01354211291262747 64 | 80,0.009338636684281934,0.01027250035271013,0.01129975038798114,0.01242972542677926,0.01367269796945719,0.01503996776640291 65 | 81,0.01038801385549308,0.01142681524104239,0.01256949676514663,0.01382644644166129,0.01520909108582742,0.01673000019441017 66 | 82,0.01157368983020542,0.01273105881322597,0.01400416469454857,0.01540458116400342,0.01694503928040377,0.01863954320844415 67 | 83,0.01291525938959877,0.01420678532855865,0.01562746386141452,0.01719021024755597,0.01890923127231157,0.02080015439954273 68 | 84,0.01443537534660213,0.01587891288126235,0.01746680416938859,0.01921348458632744,0.02113483304496019,0.02324831634945621 69 | 85,0.01616026009703804,0.01777628610674184,0.01955391471741603,0.02150930618915763,0.0236602368080734,0.02602626048888074 70 | 86,0.01812030808631169,0.01993233889494286,0.02192557278443714,0.02411813006288086,0.02652994306916895,0.02918293737608585 71 | 87,0.02035079624486606,0.02238587586935267,0.02462446345628793,0.02708690980191673,0.0297956007821084,0.03277516086031924 72 | 88,0.02289272280409473,0.0251819950845042,0.02770019459295462,0.03047021405225009,0.0335172354574751,0.03686895900322261 73 | 89,0.02579379895447039,0.02837317884991743,0.03121049673490918,0.0343315464084001,0.03776470104924012,0.04154117115416413 74 | 90,0.0291096226976437,0.03202058496740807,0.03522264346414888,0.03874490781056377,0.04261939859162015,0.04688133845078217 75 | 91,0.03290507015568574,0.03619557717125432,0.03981513488837975,0.04379664837721773,0.04817631321493951,0.05299394453643346 76 | 92,0.03725594675567852,0.04098154143124638,0.04507969557437102,0.04958766513180812,0.05454643164498894,0.06000107480948783 77 | 93,0.04225094937882418,0.0464760443167066,0.05112364874837726,0.05623601362321499,0.0618596149855365,0.06804557648409015 78 | 94,0.04799400108452031,0.05279340119297234,0.05807274131226958,0.06388001544349654,0.07026801698784621,0.07729481868663084 79 | 95,0.05460703280218726,0.06006773608240599,0.0660745096906466,0.07268196065971126,0.0799501567256824,0.08794517239825064 80 | 96,0.06223330193255692,0.06845663212581261,0.07530229533839389,0.08283252487223328,0.09111577735945661,0.1002273550954023 81 | 97,0.07104135673834143,0.07814549241217558,0.08596004165339315,0.09455604581873246,0.1040116504006057,0.1144128154406663 82 | 98,0.08122977849975568,0.08935275634973126,0.0982880319847044,0.1081168351831748,0.1189285187014923,0.1308213705716416 83 | 99,0.09303286161187924,0.1023361477730672,0.1125697625503739,0.1238267388054113,0.1362094126859524,0.1498303539545477 84 | 100,0.1067274262812674,0.1174001689093942,0.1291401858003336,0.142054204380367,0.1562596248184037,0.1718855873002441 85 | 101,0.1226410006907388,0.1349051007598127,0.1483956108357939,0.1632351719193733,0.1795586891113107,0.1975145580224417 86 | 102,0.1411616612443195,0.1552778273687514,0.1708056101056266,0.1878861711161893,0.2066747882278082,0.227342267050589 87 | 103,0.1627498830132505,0.1790248713145755,0.1969273584460331,0.2166200942906364,0.2382821037197001,0.2621103140916701 88 | 104,0.1879528305562259,0.2067481136118486,0.2274229249730334,0.2501652174703368,0.2751817392173705,0.3026999131391076 89 | 105,0.2174216153384464,0.2391637768722911,0.2630801545595202,0.2893881700154723,0.3183269870170196,0.3501596857187215 90 | 106,0.2519321643307321,0.2771253807638053,0.3048379188401859,0.3353217107242045,0.3688538817966249,0.4057392699762875 91 | 107,0.2924104904045808,0.3216515394450389,0.3538166933895428,0.3891983627284971,0.4281181990013468,0.4709300189014816 92 | 108,0.3399633355628309,0.373959669119114,0.4113556360310255,0.4524911996341281,0.4977403195975409,0.5475143515572951 93 | 109,0.3959153812539035,0.4355069193792939,0.4790576113172234,0.5269633724489458,0.5796597096938404,0.6376256806632244 94 | 110,0.4618544965251583,0.5080399461776742,0.5588439407954418,0.614728334874986,0.6762011683624847,0.7438212851987333 95 | 111,0.5396868377647932,0.5936555215412725,0.6530210736953999,0.7183231810649399,0.7901554991714339,0.8691710490885773 96 | 112,0.6317040398225142,0.6948744438047657,0.7643618881852423,0.8407980770037666,0.9248778847041433,1 97 | 113,0.7406652682100285,0.8147317950310314,0.8962049745341346,0.9858254719875481,1,1 98 | 114,0.8698975620835765,0.9568873182919342,1,1,1,1 99 | 115,1,1,1,1,1,1 100 | 116,1,1,1,1,1,1 101 | 117,1,1,1,1,1,1 102 | 118,1,1,1,1,1,1 103 | 119,1,1,1,1,1,1 104 | 120,1,1,1,1,1,1 105 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/mort_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "BasicTerm_ME" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/_data/data.pickle -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 17, 0], "serializer_version": 4} -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/model_point_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/model_point_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/mort_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/premium_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/premium_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "CashValue_ME_EX4" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/_data/data.pickle -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 17, 0], "serializer_version": 4} -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_1.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_1.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_moneyness.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_moneyness.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/mort_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/product_spec_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/product_spec_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/surr_charge_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/surr_charge_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import timeit 3 | from basicterm_m_lifelib import basicterm_m_lifelib 4 | from basicterm_m_recursive_pytorch import basicterm_recursive_pytorch 5 | from basicterm_m_recursive_numpy import basicterm_recursive_numpy 6 | from basicterm_m_array_pytorch import basicterm_array_pytorch 7 | from basicterm_m_array_numpy import basicterm_array_numpy 8 | from pprint import pprint 9 | 10 | 11 | def run_basic_term_benchmarks(): 12 | trials = 20 13 | modelx_time = timeit.repeat(stmt="basicterm_m_lifelib()", setup="from basicterm_m_lifelib import basicterm_m_lifelib", number=1, repeat=trials) 14 | modelx_result = basicterm_m_lifelib() 15 | recursive_pytorch_time = timeit.repeat(stmt="basicterm_recursive_pytorch()", setup="from basicterm_m_recursive_pytorch import basicterm_recursive_pytorch", number=1, repeat=trials) 16 | recursive_pytorch_result = basicterm_recursive_pytorch() 17 | recursive_numpy_time = timeit.repeat(stmt="basicterm_recursive_numpy()", setup="from basicterm_m_recursive_numpy import basicterm_recursive_numpy", number=1, repeat=trials) 18 | recursive_numpy_result = basicterm_recursive_numpy() 19 | array_pytorch_time = timeit.repeat(stmt="basicterm_array_pytorch()", setup="from basicterm_m_array_pytorch import basicterm_array_pytorch", number=1, repeat=trials) 20 | array_pytorch_result = basicterm_array_pytorch() 21 | array_numpy_time = timeit.repeat(stmt="basicterm_array_numpy()", setup="from basicterm_m_array_numpy import basicterm_array_numpy", number=1, repeat=trials) 22 | array_numpy_result = basicterm_array_numpy() 23 | return { 24 | "Python lifelib basic_term_m": { 25 | "minimum time": f"{np.min(modelx_time)*1000} milliseconds", 26 | "result": modelx_result, 27 | }, 28 | "Python recursive pytorch basic_term_m": { 29 | "minimum time": f"{np.min(recursive_pytorch_time)*1000} milliseconds", 30 | "result": recursive_pytorch_result, 31 | }, 32 | "Python recursive numpy basic_term_m": { 33 | "minimum time": f"{np.min(recursive_numpy_time)*1000} milliseconds", 34 | "result": recursive_numpy_result, 35 | }, 36 | "Python array pytorch basic_term_m": { 37 | "minimum time": f"{np.min(array_pytorch_time)*1000} milliseconds", 38 | "result": array_pytorch_result, 39 | }, 40 | "Python array numpy basic_term_m": { 41 | "minimum time": f"{np.min(array_numpy_time)*1000} milliseconds", 42 | "result": array_numpy_result, 43 | } 44 | } 45 | 46 | if __name__ == "__main__": 47 | results = run_basic_term_benchmarks() 48 | pprint(results) 49 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_array_numpy.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | # Read data using pandas 5 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 6 | disc_rate = np.array(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")["zero_spot"].values, dtype=np.float64) 7 | sum_assured = np.array(mp["sum_assured"].values, dtype=np.float64) 8 | policy_term = np.array(mp["policy_term"].values, dtype=np.int64) 9 | age_at_entry = np.array(mp["age_at_entry"].values, dtype=np.int64) 10 | mort = np.array(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values, dtype=np.float64) 11 | 12 | def run(max_proj_len, disc_rate, sum_assured, policy_term, age_at_entry, mort, loading_prem, expense_acq, expense_maint, inflation_rate): 13 | time_axis = np.arange(max_proj_len)[:, None] 14 | duration = time_axis // 12 15 | discount_factors = np.power(1 + disc_rate[duration], -time_axis / 12) 16 | inflation_factor = np.power(1 + inflation_rate, time_axis / 12) 17 | lapse_rate = np.maximum(0.1 - 0.02 * duration, 0.02) 18 | lapse_rate_monthly = 1 - np.power(1 - lapse_rate, 1 / 12) 19 | attained_age = age_at_entry + duration 20 | annual_mortality = mort[attained_age - 18, np.minimum(duration, 5)] 21 | monthly_mortality = 1 - np.power(1 - annual_mortality, 1 / 12) 22 | pre_pols_if = np.vstack([ 23 | np.ones((1, monthly_mortality.shape[1])), 24 | np.cumprod((1 - lapse_rate_monthly) * (1 - monthly_mortality), axis=0)[:-1], 25 | ]) 26 | pols_if = (time_axis < (policy_term * 12)) * pre_pols_if 27 | pols_death = pols_if * monthly_mortality 28 | claims = sum_assured * pols_death 29 | pv_claims = np.sum(claims * discount_factors, axis=0) 30 | pv_pols_if = np.sum(pols_if * discount_factors, axis=0) 31 | net_premium = pv_claims / pv_pols_if 32 | premium_pp = np.round((1 + loading_prem) * net_premium, decimals=2) 33 | premiums = premium_pp * pols_if 34 | commissions = (duration == 0) * premiums 35 | expenses = (time_axis == 0) * expense_acq * pols_if + pols_if * expense_maint / 12 * inflation_factor 36 | pv_premiums = np.sum(premiums * discount_factors, axis=0) 37 | pv_expenses = np.sum(expenses * discount_factors, axis=0) 38 | pv_commissions = np.sum(commissions * discount_factors, axis=0) 39 | pv_net_cf = pv_premiums - pv_claims - pv_expenses - pv_commissions 40 | return float(pv_net_cf.sum()) 41 | 42 | def basicterm_array_numpy(): 43 | # parameters 44 | max_proj_len = 12 * 20 + 1 45 | loading_prem = 0.5 46 | expense_acq = 300.0 47 | expense_maint = 60.0 48 | inflation_rate = 0.01 49 | 50 | return run( 51 | max_proj_len, 52 | disc_rate, 53 | sum_assured, 54 | policy_term, 55 | age_at_entry, 56 | mort, 57 | loading_prem, 58 | expense_acq, 59 | expense_maint, 60 | inflation_rate, 61 | ) 62 | 63 | if __name__ == "__main__": 64 | print(basicterm_array_numpy()) 65 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_array_pytorch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pandas as pd 3 | 4 | # Ensure PyTorch uses double precision (64-bit) by default, similar to JAX configuration 5 | torch.set_default_dtype(torch.float64) 6 | 7 | # Random uniform distribution in PyTorch 8 | x = torch.rand(1000, dtype=torch.float64) 9 | print(f"{x.dtype=}") # --> dtype('torch.float64') 10 | 11 | 12 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 13 | disc_rate = torch.tensor(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")["zero_spot"].values) 14 | sum_assured = torch.tensor(mp["sum_assured"].values) 15 | policy_term = torch.tensor(mp["policy_term"].values) 16 | age_at_entry = torch.tensor(mp["age_at_entry"].values) 17 | mort = torch.tensor(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values) 18 | 19 | def run(max_proj_len, disc_rate, sum_assured, policy_term, age_at_entry, mort, loading_prem, expense_acq, expense_maint, inflation_rate): 20 | time_axis = torch.arange(max_proj_len)[:, None] 21 | duration = time_axis // 12 22 | discount_factors = (1 + disc_rate[duration]) ** (-time_axis / 12) 23 | inflation_factor = (1 + inflation_rate) ** (time_axis / 12) 24 | lapse_rate = torch.maximum(0.1 - 0.02 * duration, torch.tensor(0.02)) 25 | lapse_rate_monthly = 1 - (1 - lapse_rate) ** (1 / 12) 26 | attained_age = age_at_entry + duration 27 | annual_mortality = mort[attained_age - 18, torch.minimum(duration, torch.tensor(5, dtype=torch.int64))] 28 | monthly_mortality = 1 - (1 - annual_mortality) ** (1 / 12) 29 | pre_pols_if = torch.cat([ 30 | torch.ones((1, monthly_mortality.shape[1])), 31 | torch.cumprod((1 - lapse_rate_monthly) * (1 - monthly_mortality), dim=0)[:-1], 32 | ]) 33 | pols_if = (time_axis < (policy_term * 12)) * pre_pols_if 34 | pols_death = pols_if * monthly_mortality 35 | claims = sum_assured * pols_death 36 | pv_claims = torch.sum(claims * discount_factors, dim=0) 37 | pv_pols_if = torch.sum(pols_if * discount_factors, dim=0) 38 | net_premium = pv_claims / pv_pols_if 39 | premium_pp = torch.round((1 + loading_prem) * net_premium, decimals=2) 40 | premiums = premium_pp * pols_if 41 | commissions = (duration == 0) * premiums 42 | expenses = (time_axis == 0) * expense_acq * pols_if + pols_if * expense_maint / 12 * inflation_factor 43 | pv_premiums = torch.sum(premiums * discount_factors, dim=0) 44 | pv_expenses = torch.sum(expenses * discount_factors, dim=0) 45 | pv_commissions = torch.sum(commissions * discount_factors, dim=0) 46 | pv_net_cf = pv_premiums - pv_claims - pv_expenses - pv_commissions 47 | return float(pv_net_cf.sum()) 48 | 49 | def basicterm_array_pytorch(): 50 | # parameters 51 | max_proj_len = 12 * 20 + 1 52 | loading_prem = torch.tensor(0.5) 53 | expense_acq = torch.tensor(300.0) 54 | expense_maint = torch.tensor(60.0) 55 | inflation_rate = torch.tensor(0.01) 56 | 57 | return run( 58 | max_proj_len, 59 | disc_rate, 60 | sum_assured, 61 | policy_term, 62 | age_at_entry, 63 | mort, 64 | loading_prem, 65 | expense_acq, 66 | expense_maint, 67 | inflation_rate, 68 | ) 69 | 70 | if __name__ == "__main__": 71 | print(basicterm_array_pytorch()) 72 | 73 | 74 | # e2e test 75 | # assert results["net_cf_agg"][100].item() == 97661.8046875 76 | # # integration tests from development 77 | # assert premium_agg[-2].item() == 174528.421875 78 | # assert expenses_agg[-2].item() == 10686.298828125 79 | # assert commissions_agg[11].item() == 751268.375 80 | # assert commissions_agg[-20].item() == 0 81 | # assert claims_agg[-2].item() == 253439.921875 82 | # # unit tests from development 83 | # assert mort_jnp[attained_age - 18, duration][0][0].item() == 0.0006592372665181756 84 | # assert annual_mortality[-1][0].item() == 0.004345308057963848 85 | # assert pols_death[-2][1].item() == 5.005334969609976e-05 86 | # assert pols_death[-1][1].item() == 0 87 | # assert pols_lapse[0][0].item() == 0.008741136640310287 88 | # assert pols_lapse[-2][1].item() == 0.0008985198801383376 89 | # assert pols_lapse[-1][1].item() == 0 90 | # assert lapse_rate[12].item() == 0.07999999821186066 91 | # assert pre_pols_if[-1][1].item() == 0.5332472920417786 92 | # assert pols_if[-1][1].item() == 0 93 | # assert pols_if[-2][1].item() == 0.5341958403587341 94 | # assert pols_maturity[-1][1].item() == 0.5332472920417786 95 | # assert claims[0][0].item() == 34.18231201171875 96 | # assert claims[-2][1].item() == 37.64011764526367 97 | # assert jnp.sum(claims[-1]).item() == 0 98 | # assert discount_factors[11][0].item() == 1 99 | # assert discount_factors[30][0].item() == 0.9831026196479797 100 | # assert pv_claims[0].item() == 5501.505859375 101 | # assert net_premium[0].item() == 63.22805404663086 102 | # assert premiums[-2][1].item() == 32.66129684448242 103 | # assert commissions[1][0].item() == 94.0078353881836 104 | # assert expenses[0][0].item() == 305.0 105 | # assert expenses[-2][1].item() == 3.2564003467559814 106 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_lifelib.py: -------------------------------------------------------------------------------- 1 | import modelx as mx 2 | import numpy as np 3 | 4 | m = mx.read_model("BasicTerm_M") 5 | 6 | def basicterm_m_lifelib(): 7 | m.Projection.clear_cache = 1 8 | return float(np.sum(m.Projection.pv_net_cf())) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_recursive_numpy.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | 6 | # constants 7 | max_proj_len = 12 * 20 + 1 8 | 9 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 10 | disc_rate = np.array(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")['zero_spot'].values, dtype=np.float64) 11 | mort_np = np.array(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values, dtype=np.float64) 12 | sum_assured = np.array(mp["sum_assured"].values, dtype=np.float64) 13 | issue_age = np.array(mp["age_at_entry"].values, dtype=np.int32) 14 | policy_term = np.array(mp["policy_term"].values, dtype=np.int32) 15 | 16 | # classes 17 | class Cash: 18 | def __init__(self): 19 | self.reset() 20 | 21 | def reset(self): 22 | self.caches = defaultdict(dict) 23 | 24 | def __call__(self, func): 25 | @wraps(func) 26 | def wrapper(*args, **kwargs): 27 | key = (args, frozenset(kwargs.items())) 28 | if key not in self.caches[func.__name__]: 29 | self.caches[func.__name__][key] = func(*args, **kwargs) 30 | return self.caches[func.__name__][key] 31 | 32 | return wrapper 33 | 34 | cash = Cash() 35 | 36 | @cash 37 | def get_annual_rate(duration: int): 38 | return mort_np[issue_age + duration - 18, np.minimum(duration, 5)] 39 | @cash 40 | def get_monthly_rate(duration: int): 41 | return 1 - np.power((1 - get_annual_rate(duration)), 1/12) 42 | @cash 43 | def duration(t: int): 44 | return t // 12 45 | @cash 46 | def pols_death(t: int): 47 | return pols_if(t) * get_monthly_rate(duration(t)) 48 | @cash 49 | def pols_if(t: int): 50 | if t == 0: 51 | return 1 52 | return pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1) - pols_maturity(t) 53 | 54 | @cash 55 | def lapse_rate(t: int): 56 | return np.maximum(0.1 - 0.02 * duration(t), 0.02) 57 | @cash 58 | def pols_lapse(t: int): 59 | return (pols_if(t) - pols_death(t)) * (1 - np.power((1 - lapse_rate(t)), 1/12)) 60 | @cash 61 | def pols_maturity(t: int): 62 | if t == 0: 63 | return 0 64 | return (t == 12 * policy_term) * (pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1)) 65 | 66 | @cash 67 | def discount(t: int): 68 | return np.power((1 + disc_rate[duration(t)]), (-t/12)) 69 | @cash 70 | def claims(t: int): 71 | return pols_death(t) * sum_assured 72 | @cash 73 | def inflation_rate(): 74 | return 0.01 75 | @cash 76 | def inflation_factor(t): 77 | return np.power((1 + inflation_rate()), (t/12)) 78 | @cash 79 | def expense_acq(): 80 | return 300 81 | @cash 82 | def expense_maint(): 83 | return 60 84 | @cash 85 | def pv_pols_if(): 86 | return sum(pols_if(t) * discount(t) for t in range(max_proj_len)) 87 | @cash 88 | def pv_claims(): 89 | return sum(claims(t) * discount(t) for t in range(max_proj_len)) 90 | @cash 91 | def net_premium_pp(): 92 | return pv_claims() / pv_pols_if() 93 | @cash 94 | def loading_prem(): 95 | return 0.5 96 | @cash 97 | def expenses(t): 98 | return (t == 0) * expense_acq() * pols_if(t) \ 99 | + pols_if(t) * expense_maint()/12 * inflation_factor(t) 100 | @cash 101 | def premium_pp(): 102 | return np.round((1 + loading_prem()) * net_premium_pp(), decimals=2) 103 | @cash 104 | def premiums(t): 105 | return premium_pp() * pols_if(t) 106 | @cash 107 | def pv_premiums(): 108 | return sum(premiums(t) * discount(t) for t in range(max_proj_len)) 109 | @cash 110 | def pv_expenses(): 111 | return sum(expenses(t) * discount(t) for t in range(max_proj_len)) 112 | 113 | @cash 114 | def commissions(t): 115 | return (duration(t) == 0) * premiums(t) 116 | 117 | @cash 118 | def pv_commissions(): 119 | return sum(commissions(t) * discount(t) for t in range(max_proj_len)) 120 | 121 | @cash 122 | def net_cf(t): 123 | return premiums(t) - claims(t) - expenses(t) - commissions(t) 124 | 125 | @cash 126 | def pv_net_cf(): 127 | return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions() 128 | 129 | @cash 130 | def result_cf(): 131 | t_len = range(max_proj_len) 132 | 133 | data = { 134 | "Premiums": [np.sum(premiums(t)) for t in t_len], 135 | "Claims": [np.sum(claims(t)) for t in t_len], 136 | "Expenses": [np.sum(expenses(t)) for t in t_len], 137 | "Commissions": [np.sum(commissions(t)) for t in t_len], 138 | "Net Cashflow": [np.sum(net_cf(t)) for t in t_len] 139 | } 140 | return pd.DataFrame(data, index=t_len) 141 | 142 | def basicterm_recursive_numpy(): 143 | cash.reset() # Ensure the cache is clear before running calculations 144 | return float(np.sum(pv_net_cf())) 145 | 146 | if __name__ == "__main__": 147 | print(basicterm_recursive_numpy()) 148 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_recursive_pytorch.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import torch 5 | 6 | torch.set_default_dtype(torch.float64) 7 | 8 | # constants 9 | max_proj_len = 12 * 20 + 1 10 | 11 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 12 | disc_rate = torch.tensor(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")['zero_spot'].values) 13 | mort_np = torch.tensor(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values) 14 | sum_assured = torch.tensor(mp["sum_assured"].values) 15 | issue_age = torch.tensor(mp["age_at_entry"].values) 16 | policy_term = torch.tensor(mp["policy_term"].values) 17 | 18 | # classes 19 | class Cash: 20 | def __init__(self): 21 | self.reset() 22 | 23 | def reset(self): 24 | self.caches = defaultdict(dict) 25 | 26 | def __call__(self, func): 27 | @wraps(func) 28 | def wrapper(*args, **kwargs): 29 | key = (args, frozenset(kwargs.items())) 30 | if key not in self.caches[func.__name__]: 31 | self.caches[func.__name__][key] = func(*args, **kwargs) 32 | return self.caches[func.__name__][key] 33 | 34 | return wrapper 35 | 36 | cash = Cash() 37 | 38 | @cash 39 | def get_annual_rate(duration: int): 40 | return mort_np[issue_age + duration - 18, min(duration, 5)] 41 | @cash 42 | def get_monthly_rate(duration: int): 43 | return 1 - (1 - get_annual_rate(duration)) ** (1/12) 44 | @cash 45 | def duration(t: int): 46 | return t // 12 47 | @cash 48 | def pols_death(t: int): 49 | return pols_if(t) * get_monthly_rate(duration(t)) 50 | @cash 51 | def pols_if(t: int): 52 | if t == 0: 53 | return 1 54 | return pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1) - pols_maturity(t) 55 | 56 | @cash 57 | def lapse_rate(t: int): 58 | return max(0.1 - 0.02 * duration(t), 0.02) 59 | @cash 60 | def pols_lapse(t: int): 61 | return (pols_if(t) - pols_death(t)) * (1 - (1 - lapse_rate(t)) ** (1/12)) 62 | @cash 63 | def pols_maturity(t: int): 64 | if t == 0: 65 | return 0 66 | return (t == 12 * policy_term) * (pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1)) 67 | 68 | @cash 69 | def discount(t: int): 70 | return (1 + disc_rate[duration(t)]) ** (-t/12) 71 | @cash 72 | def claims(t: int): 73 | return pols_death(t) * sum_assured 74 | @cash 75 | def inflation_rate(): 76 | return 0.01 77 | @cash 78 | def inflation_factor(t): 79 | return (1 + inflation_rate()) ** (t/12) 80 | @cash 81 | def expense_acq(): 82 | return 300 83 | @cash 84 | def expense_maint(): 85 | return 60 86 | @cash 87 | def pv_pols_if(): 88 | return sum(pols_if(t) * discount(t) for t in range(max_proj_len)) 89 | @cash 90 | def pv_claims(): 91 | return sum(claims(t) * discount(t) for t in range(max_proj_len)) 92 | @cash 93 | def net_premium_pp(): 94 | return pv_claims() / pv_pols_if() 95 | @cash 96 | def loading_prem(): 97 | return 0.5 98 | @cash 99 | def expenses(t): 100 | return (t == 0) * expense_acq() * pols_if(t) \ 101 | + pols_if(t) * expense_maint()/12 * inflation_factor(t) 102 | @cash 103 | def premium_pp(): 104 | return torch.round((1 + loading_prem()) * net_premium_pp(), decimals=2) 105 | @cash 106 | def premiums(t): 107 | return premium_pp() * pols_if(t) 108 | @cash 109 | def pv_premiums(): 110 | return sum(premiums(t) * discount(t) for t in range(max_proj_len)) 111 | @cash 112 | def pv_expenses(): 113 | return sum(expenses(t) * discount(t) for t in range(max_proj_len)) 114 | @cash 115 | def commissions(t): 116 | return (duration(t) == 0) * premiums(t) 117 | @cash 118 | def pv_commissions(): 119 | return sum(commissions(t) * discount(t) for t in range(max_proj_len)) 120 | @cash 121 | def net_cf(t): 122 | return premiums(t) - claims(t) - expenses(t) - commissions(t) 123 | @cash 124 | def pv_net_cf(): 125 | return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions() 126 | 127 | @cash 128 | def result_cf(): 129 | t_len = range(max_proj_len) 130 | data = { 131 | "Premiums": [torch.sum(premiums(t)).item() for t in t_len], 132 | "Claims": [torch.sum(claims(t)).item() for t in t_len], 133 | "Expenses": [torch.sum(expenses(t)).item() for t in t_len], 134 | "Commissions": [torch.sum(commissions(t)).item() for t in t_len], 135 | "Net Cashflow": [torch.sum(net_cf(t)).item() for t in t_len] 136 | } 137 | return pd.DataFrame(data, index=t_len) 138 | 139 | 140 | def basicterm_recursive_pytorch(): 141 | cash.caches.clear() 142 | return float(torch.sum(pv_net_cf()).item()) 143 | 144 | 145 | 146 | 147 | def run_tests(): 148 | # Note: The test values may need to be adjusted for PyTorch's precision and operation differences 149 | assert abs(pv_net_cf()[0] - 910.9206609336586) < 1e-3 150 | assert abs(pv_premiums()[0] - 8252.085855522233) < 1e-3 151 | assert abs(pv_expenses()[0] - 755.3660261078035) < 1e-3 152 | assert abs(pv_commissions()[0] - 1084.6042701164513) < 1e-3 153 | assert abs(pv_pols_if()[0] - 87.0106058152913) < 1e-3 154 | assert abs(pv_claims()[0] - 5501.19489836432) < 1e-3 155 | assert abs(net_premium_pp()[0] - 63.22441783754982) < 1e-3 156 | # Adjust the following tests for tensors 157 | # assert all(pols_if(200)[:3] == [0, 0.5724017900070532, 0]) 158 | # assert all(claims(130)[:3] == [0, 28.82531005791726, 0]) 159 | # assert premiums(130)[1] == 39.565567796442494 160 | # assert expenses(100)[1] == 3.703818110341339 161 | assert abs(premium_pp()[0] - 94.84) < 1e-2 162 | assert abs(inflation_factor(100) - 1.0864542626396292) < 1e-3 163 | 164 | if __name__ == "__main__": 165 | run_tests() 166 | print("All tests passed") 167 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import timeit 3 | from basicterm_me_lifelib import basicterm_me_lifelib 4 | from basicterm_me_recursive_numpy import basicterm_me_recursive_numpy 5 | from basicterm_me_heavylight_numpy import basicterm_me_heavylight_numpy 6 | from pprint import pprint 7 | 8 | 9 | def run_basic_term_me_benchmarks(): 10 | trials = 7 11 | modelx_time = timeit.repeat(stmt="basicterm_me_lifelib()", setup="from basicterm_me_lifelib import basicterm_me_lifelib", number=1, repeat=trials) 12 | modelx_result = basicterm_me_lifelib() 13 | recursive_numpy_time = timeit.repeat(stmt="basicterm_me_recursive_numpy()", setup="from basicterm_me_recursive_numpy import basicterm_me_recursive_numpy", number=1, repeat=trials) 14 | recursive_numpy_result = basicterm_me_recursive_numpy() 15 | heavylight_time = timeit.repeat(stmt="basicterm_me_heavylight_numpy()", setup="from basicterm_me_heavylight_numpy import basicterm_me_heavylight_numpy", number=1, repeat=trials) 16 | heavylight_result = basicterm_me_heavylight_numpy() 17 | return { 18 | "Python lifelib basic_term_me": { 19 | "minimum time": f"{np.min(modelx_time)*1000} milliseconds", 20 | "result": modelx_result, 21 | }, 22 | "Python recursive numpy basic_term_me": { 23 | "minimum time": f"{np.min(recursive_numpy_time)*1000} milliseconds", 24 | "result": recursive_numpy_result, 25 | }, 26 | "Python heavylight numpy basic_term_me": { 27 | "minimum time": f"{np.min(heavylight_time)*1000} milliseconds", 28 | "result": heavylight_result, 29 | } 30 | } 31 | 32 | if __name__ == "__main__": 33 | results = run_basic_term_me_benchmarks() 34 | pprint(results) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me_heavylight_numpy.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | from heavylight.memory_optimized_model import LightModel 6 | 7 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 8 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 9 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 10 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 11 | 12 | class ModelPoints: 13 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame): 14 | self.table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 15 | self.table.sort_values(by="policy_id", inplace=True) 16 | self.table["premium_pp"] = np.around(self.table["sum_assured"] * self.table["premium_rate"],2) 17 | self.premium_pp = self.table["premium_pp"].to_numpy() 18 | self.duration_mth = self.table["duration_mth"].to_numpy() 19 | self.age_at_entry = self.table["age_at_entry"].to_numpy() 20 | self.sum_assured = self.table["sum_assured"].to_numpy() 21 | self.policy_count = self.table["policy_count"].to_numpy() 22 | self.policy_term = self.table["policy_term"].to_numpy() 23 | self.max_proj_len: int = np.max(12 * self.policy_term - self.duration_mth) + 1 24 | 25 | class Assumptions: 26 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 27 | self.disc_rate_ann = disc_rate_ann["zero_spot"].values 28 | self.mort_table = mort_table.to_numpy() 29 | 30 | def get_mortality(self, age, duration): 31 | return self.mort_table[age-18, np.minimum(duration, 5)] 32 | 33 | class TermME(LightModel): 34 | def __init__(self, mp: ModelPoints, assume: Assumptions): 35 | super().__init__() 36 | self.mp = mp 37 | self.assume = assume 38 | 39 | def age(self, t): 40 | return self.mp.age_at_entry + self.duration(t) 41 | 42 | def claim_pp(self, t): 43 | return self.mp.sum_assured 44 | 45 | def claims(self, t): 46 | return self.claim_pp(t) * self.pols_death(t) 47 | 48 | def commissions(self, t): 49 | return (self.duration(t) == 0) * self.premiums(t) 50 | 51 | def disc_factors(self): 52 | return np.array(list((1 + self.disc_rate_mth()[t])**(-t) for t in range(self.mp.max_proj_len))) 53 | 54 | def discount(self, t: int): 55 | return (1 + self.assume.disc_rate_ann[t//12]) ** (-t/12) 56 | 57 | def disc_rate_mth(self): 58 | return np.array(list((1 + self.assume.disc_rate_ann[t//12])**(1/12) - 1 for t in range(self.mp.max_proj_len))) 59 | 60 | def duration(self, t): 61 | return self.duration_mth(t) //12 62 | 63 | def duration_mth(self, t): 64 | if t == 0: 65 | return self.mp.duration_mth 66 | else: 67 | return self.duration_mth(t-1) + 1 68 | 69 | def expense_acq(self): 70 | return 300 71 | 72 | def expense_maint(self): 73 | return 60 74 | 75 | def expenses(self, t): 76 | return self.expense_acq() * self.pols_new_biz(t) \ 77 | + self.pols_if_at(t, "BEF_DECR") * self.expense_maint()/12 * self.inflation_factor(t) 78 | 79 | def inflation_factor(self, t): 80 | return (1 + self.inflation_rate())**(t/12) 81 | 82 | def inflation_rate(self): 83 | return 0.01 84 | 85 | def lapse_rate(self, t): 86 | return np.maximum(0.1 - 0.02 * self.duration(t), 0.02) 87 | 88 | def loading_prem(self): 89 | return 0.5 90 | 91 | def mort_rate(self, t): 92 | return self.assume.get_mortality(self.age(t), self.duration(t)) 93 | 94 | def mort_rate_mth(self, t): 95 | return 1-(1- self.mort_rate(t))**(1/12) 96 | 97 | def net_cf(self, t): 98 | return self.premiums(t) - self.claims(t) - self.expenses(t) - self.commissions(t) 99 | 100 | def pols_death(self, t): 101 | return self.pols_if_at(t, "BEF_DECR") * self.mort_rate_mth(t) 102 | 103 | def pols_if(self, t): 104 | return self.pols_if_at(t, "BEF_MAT") 105 | 106 | def pols_if_at(self, t, timing): 107 | if timing == "BEF_MAT": 108 | if t == 0: 109 | return self.pols_if_init() 110 | else: 111 | return self.pols_if_at(t-1, "BEF_DECR") - self.pols_lapse(t-1) - self.pols_death(t-1) 112 | elif timing == "BEF_NB": 113 | return self.pols_if_at(t, "BEF_MAT") - self.pols_maturity(t) 114 | elif timing == "BEF_DECR": 115 | return self.pols_if_at(t, "BEF_NB") + self.pols_new_biz(t) 116 | else: 117 | raise ValueError("invalid timing") 118 | 119 | def pols_if_init(self): 120 | return np.where(self.duration_mth(0) > 0, self.mp.policy_count, 0) 121 | 122 | def pols_lapse(self, t): 123 | return (self.pols_if_at(t, "BEF_DECR") - self.pols_death(t)) * (1-(1 - self.lapse_rate(t))**(1/12)) 124 | 125 | def pols_maturity(self, t): 126 | return (self.duration_mth(t) == self.mp.policy_term * 12) * self.pols_if_at(t, "BEF_MAT") 127 | 128 | def pols_new_biz(self, t): 129 | return np.where(self.duration_mth(t) == 0, self.mp.policy_count, 0) 130 | 131 | def premiums(self, t): 132 | return self.mp.premium_pp * self.pols_if_at(t, "BEF_DECR") 133 | 134 | mp = ModelPoints(model_point_table, premium_table) 135 | assume = Assumptions(disc_rate_ann, mort_table) 136 | model = TermME(mp, assume) 137 | 138 | def basicterm_me_heavylight_numpy(): 139 | model.ResetCache() 140 | tot = sum(np.sum(model.premiums(t) - model.claims(t) - model.expenses(t) - model.commissions(t)) \ 141 | * model.discount(t) for t in range(model.mp.max_proj_len)) 142 | return float(tot) 143 | 144 | if __name__ == "__main__": 145 | print(basicterm_me_heavylight_numpy()) 146 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me_lifelib.py: -------------------------------------------------------------------------------- 1 | import modelx as mx 2 | import numpy as np 3 | 4 | m = mx.read_model("BasicTerm_ME") 5 | 6 | def basicterm_me_lifelib(): 7 | m.Projection.clear_cache = 1 8 | return float(np.sum(m.Projection.pv_net_cf())) 9 | 10 | if __name__ == "__main__": 11 | print(basicterm_me_lifelib()) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me_recursive_numpy.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | 6 | class Cash: 7 | def __init__(self): 8 | self.reset() 9 | 10 | def reset(self): 11 | self.caches = defaultdict(dict) 12 | 13 | def __call__(self, func): 14 | @wraps(func) 15 | def wrapper(*args, **kwargs): 16 | key = (args, frozenset(kwargs.items())) 17 | if key not in self.caches[func.__name__]: 18 | self.caches[func.__name__][key] = func(*args, **kwargs) 19 | return self.caches[func.__name__][key] 20 | 21 | return wrapper 22 | 23 | cash = Cash() 24 | 25 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 26 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 27 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 28 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 29 | 30 | class ModelPoints: 31 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame): 32 | self.table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 33 | self.table.sort_values(by="policy_id", inplace=True) 34 | self.table["premium_pp"] = np.around(self.table["sum_assured"] * self.table["premium_rate"],2) 35 | self.premium_pp = self.table["premium_pp"].to_numpy() 36 | self.duration_mth = self.table["duration_mth"].to_numpy() 37 | self.age_at_entry = self.table["age_at_entry"].to_numpy() 38 | self.sum_assured = self.table["sum_assured"].to_numpy() 39 | self.policy_count = self.table["policy_count"].to_numpy() 40 | self.policy_term = self.table["policy_term"].to_numpy() 41 | 42 | class Assumptions: 43 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 44 | self.disc_rate_ann = disc_rate_ann["zero_spot"].values 45 | self.mort_table = mort_table.to_numpy() 46 | 47 | def get_mortality(self, age, duration): 48 | return self.mort_table[age-18, np.minimum(duration, 5)] 49 | 50 | mp = ModelPoints(model_point_table, premium_table) 51 | assume = Assumptions(disc_rate_ann, mort_table) 52 | 53 | @cash 54 | def age(t): 55 | return mp.age_at_entry + duration(t) 56 | 57 | @cash 58 | def claim_pp(t): 59 | return mp.sum_assured 60 | 61 | @cash 62 | def claims(t): 63 | return claim_pp(t) * pols_death(t) 64 | 65 | @cash 66 | def commissions(t): 67 | return (duration(t) == 0) * premiums(t) 68 | 69 | @cash 70 | def disc_factors(): 71 | return np.array(list((1 + disc_rate_mth()[t])**(-t) for t in range(max_proj_len()))) 72 | 73 | @cash 74 | def discount(t: int): 75 | return (1 + assume.disc_rate_ann[t//12]) ** (-t/12) 76 | 77 | @cash 78 | def disc_rate_mth(): 79 | return np.array(list((1 + assume.disc_rate_ann[t//12])**(1/12) - 1 for t in range(max_proj_len()))) 80 | 81 | @cash 82 | def duration(t): 83 | return duration_mth(t) //12 84 | 85 | @cash 86 | def duration_mth(t): 87 | if t == 0: 88 | return mp.duration_mth 89 | else: 90 | return duration_mth(t-1) + 1 91 | 92 | @cash 93 | def expense_acq(): 94 | return 300 95 | 96 | @cash 97 | def expense_maint(): 98 | return 60 99 | 100 | @cash 101 | def expenses(t): 102 | return expense_acq() * pols_new_biz(t) \ 103 | + pols_if_at(t, "BEF_DECR") * expense_maint()/12 * inflation_factor(t) 104 | 105 | @cash 106 | def inflation_factor(t): 107 | return (1 + inflation_rate())**(t/12) 108 | 109 | @cash 110 | def inflation_rate(): 111 | return 0.01 112 | 113 | @cash 114 | def lapse_rate(t): 115 | return np.maximum(0.1 - 0.02 * duration(t), 0.02) 116 | 117 | @cash 118 | def loading_prem(): 119 | return 0.5 120 | 121 | @cash 122 | def max_proj_len(): 123 | return max(proj_len()) 124 | 125 | @cash 126 | def mort_rate(t): 127 | return assume.get_mortality(age(t), duration(t)) 128 | 129 | @cash 130 | def mort_rate_mth(t): 131 | return 1-(1- mort_rate(t))**(1/12) 132 | 133 | @cash 134 | def net_cf(t): 135 | return premiums(t) - claims(t) - expenses(t) - commissions(t) 136 | 137 | @cash 138 | def pols_death(t): 139 | return pols_if_at(t, "BEF_DECR") * mort_rate_mth(t) 140 | 141 | @cash 142 | def pols_if(t): 143 | return pols_if_at(t, "BEF_MAT") 144 | 145 | @cash 146 | def pols_if_at(t, timing): 147 | if timing == "BEF_MAT": 148 | if t == 0: 149 | return pols_if_init() 150 | else: 151 | return pols_if_at(t-1, "BEF_DECR") - pols_lapse(t-1) - pols_death(t-1) 152 | elif timing == "BEF_NB": 153 | return pols_if_at(t, "BEF_MAT") - pols_maturity(t) 154 | elif timing == "BEF_DECR": 155 | return pols_if_at(t, "BEF_NB") + pols_new_biz(t) 156 | else: 157 | raise ValueError("invalid timing") 158 | 159 | @cash 160 | def pols_if_init(): 161 | return np.where(duration_mth(0) > 0, mp.policy_count, 0) 162 | 163 | @cash 164 | def pols_lapse(t): 165 | return (pols_if_at(t, "BEF_DECR") - pols_death(t)) * (1-(1 - lapse_rate(t))**(1/12)) 166 | 167 | @cash 168 | def pols_maturity(t): 169 | return (duration_mth(t) == mp.policy_term * 12) * pols_if_at(t, "BEF_MAT") 170 | 171 | @cash 172 | def pols_new_biz(t): 173 | return np.where(duration_mth(t) == 0, mp.policy_count, 0) 174 | 175 | @cash 176 | def premiums(t): 177 | return mp.premium_pp * pols_if_at(t, "BEF_DECR") 178 | 179 | @cash 180 | def proj_len(): 181 | return np.maximum(12 * mp.policy_term - duration_mth(0) + 1, 0) 182 | 183 | @cash 184 | def pv_claims(): 185 | return sum(claims(t) * discount(t) for t in range(max_proj_len())) 186 | 187 | @cash 188 | def pv_commissions(): 189 | return sum(commissions(t) * discount(t) for t in range(max_proj_len())) 190 | 191 | @cash 192 | def pv_expenses(): 193 | return sum(expenses(t) * discount(t) for t in range(max_proj_len())) 194 | 195 | @cash 196 | def pv_net_cf(): 197 | return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions() 198 | 199 | @cash 200 | def pv_pols_if(): 201 | return sum(pols_if_at(t, "BEF_DECR") * discount(t) for t in range(max_proj_len())) 202 | 203 | @cash 204 | def pv_premiums(): 205 | return sum(premiums(t) * discount(t) for t in range(max_proj_len())) 206 | 207 | @cash 208 | def result_cf(): 209 | t_len = range(max_proj_len()) 210 | 211 | data = { 212 | "Premiums": [sum(premiums(t)) for t in t_len], 213 | "Claims": [sum(claims(t)) for t in t_len], 214 | "Expenses": [sum(expenses(t)) for t in t_len], 215 | "Commissions": [sum(commissions(t)) for t in t_len], 216 | "Net Cashflow": [sum(net_cf(t)) for t in t_len] 217 | } 218 | 219 | return pd.DataFrame(data, index=t_len) 220 | 221 | 222 | def result_pols(): 223 | t_len = range(max_proj_len()) 224 | 225 | data = { 226 | "pols_if": [sum(pols_if(t)) for t in t_len], 227 | "pols_maturity": [sum(pols_maturity(t)) for t in t_len], 228 | "pols_new_biz": [sum(pols_new_biz(t)) for t in t_len], 229 | "pols_death": [sum(pols_death(t)) for t in t_len], 230 | "pols_lapse": [sum(pols_lapse(t)) for t in t_len] 231 | } 232 | 233 | return pd.DataFrame(data, index=t_len) 234 | 235 | 236 | def basicterm_me_recursive_numpy(): 237 | cash.reset() 238 | return float(np.sum(pv_net_cf())) 239 | 240 | if __name__ == "__main__": 241 | cash.reset() 242 | print(basicterm_me_recursive_numpy()) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/benchmark_results.yaml: -------------------------------------------------------------------------------- 1 | basic_term_benchmark: 2 | Python array numpy basic_term_m: 3 | minimum time: 81.15190000000894 milliseconds 4 | result: 14489630.534603368 5 | Python array pytorch basic_term_m: 6 | minimum time: 48.05233400000475 milliseconds 7 | result: 14489630.534603368 8 | Python lifelib basic_term_m: 9 | minimum time: 606.547575999997 milliseconds 10 | result: 14489630.534601536 11 | Python recursive numpy basic_term_m: 12 | minimum time: 47.489563999988604 milliseconds 13 | result: 14489630.534603368 14 | Python recursive pytorch basic_term_m: 15 | minimum time: 73.68574099999137 milliseconds 16 | result: 14489630.53460337 17 | basic_term_me_benchmark: 18 | Python heavylight numpy basic_term_me: 19 | minimum time: 347.3877970000103 milliseconds 20 | result: 215146132.0684811 21 | Python lifelib basic_term_me: 22 | minimum time: 1140.047031999984 milliseconds 23 | result: 215146132.06848112 24 | Python recursive numpy basic_term_me: 25 | minimum time: 325.56454799998846 milliseconds 26 | result: 215146132.0684814 27 | mortality: 28 | Python PyMort: 29 | minimum time: 9.101711999988993 milliseconds 30 | result: 1904.4865526636793 31 | savings_benchmark: 32 | Python lifelib cashvalue_me_ex4: 33 | minimum time: 602.367275000006 milliseconds 34 | result: 3507113709040.141 35 | Python recursive numpy cashvalue_me_ex4: 36 | minimum time: 541.5200040000059 milliseconds 37 | result: 3507113709040.124 38 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/main.py: -------------------------------------------------------------------------------- 1 | from mortality import run_mortality_benchmarks 2 | from basicterm_m import run_basic_term_benchmarks 3 | from savings_me import run_savings_benchmarks 4 | from basicterm_me import run_basic_term_me_benchmarks 5 | import yaml 6 | 7 | 8 | def get_results(): 9 | return { 10 | "mortality": run_mortality_benchmarks(), 11 | "basic_term_benchmark": run_basic_term_benchmarks(), 12 | "basic_term_me_benchmark": run_basic_term_me_benchmarks(), 13 | "savings_benchmark": run_savings_benchmarks(), 14 | } 15 | 16 | 17 | if __name__ == "__main__": 18 | results = get_results() 19 | # write to benchmark_results.yaml 20 | with open("benchmark_results.yaml", "w") as f: 21 | yaml.dump(results, f) 22 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/mortality.py: -------------------------------------------------------------------------------- 1 | from pymort.XML import MortXML 2 | import numpy as np 3 | import timeit 4 | 5 | def get_select(): 6 | return np.array( 7 | [MortXML(id).Tables[0].Values.unstack().values for id in range(3299, 3309)] 8 | ) 9 | 10 | def get_ultimate(): 11 | return np.array( 12 | [MortXML(id).Tables[1].Values.unstack().values for id in range(3299, 3309)] 13 | ) 14 | 15 | def mortality1(select = get_select(), ultimate = get_ultimate()): 16 | mortality_table_index = np.arange(10) 17 | duration = np.arange(25) 18 | issue_age = np.arange(18, 51) 19 | mortality_table_index, duration, issue_age = [ 20 | x.flatten() for x in np.meshgrid(mortality_table_index, duration, issue_age) 21 | ] 22 | time_axis = np.arange(30)[:, None] 23 | duration_projected = time_axis + duration 24 | q = np.where( 25 | duration_projected < select.shape[-1], 26 | select[ 27 | mortality_table_index, 28 | issue_age - 18, 29 | np.minimum(duration_projected, select.shape[-1] - 1), 30 | ], # np.minimum avoids some out of bounds error (JAX clips out of bounds indexes so no problem if using JAX) 31 | ultimate[mortality_table_index, issue_age - 18 + duration_projected], 32 | ) 33 | npx = np.concatenate( 34 | [np.ones((1, q.shape[1])), np.cumprod(1 - q, axis=0)[:-1]], axis=0 35 | ) 36 | v = 1 / 1.02 37 | v_eoy = v ** np.arange(1, 31)[:, None] 38 | unit_claims_discounted = npx * q * v_eoy 39 | return np.sum(unit_claims_discounted) 40 | 41 | def run_mortality_benchmarks(): 42 | select, ultimate = get_select(), get_ultimate() 43 | mort1_result = mortality1(select, ultimate) 44 | trials = 20 45 | b1 = timeit.repeat(stmt="mortality1(select, ultimate)", setup="from mortality import mortality1", globals = {"select": select, "ultimate": ultimate}, number=1, repeat=trials) 46 | return { 47 | "Python PyMort": { 48 | "result": float(mort1_result), 49 | "minimum time": f"{np.min(b1)*1000} milliseconds", 50 | } 51 | } 52 | 53 | if __name__ == "__main__": 54 | results = run_mortality_benchmarks() 55 | print(results) 56 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 25, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "UserWarning: Existing model 'CashValue_ME_EX4' renamed to 'CashValue_ME_EX4_BAK1'\n" 13 | ] 14 | }, 15 | { 16 | "data": { 17 | "text/plain": [ 18 | "0.6307517449999978" 19 | ] 20 | }, 21 | "execution_count": 25, 22 | "metadata": {}, 23 | "output_type": "execute_result" 24 | } 25 | ], 26 | "source": [ 27 | "import lifelib\n", 28 | "import timeit\n", 29 | "import pandas as pd\n", 30 | "import numpy as np\n", 31 | "import modelx as mx\n", 32 | "import openpyxl\n", 33 | "\n", 34 | "ex4 = mx.read_model('CashValue_ME_EX4')\n", 35 | "Projection = ex4.Projection\n", 36 | "\n", 37 | "timeit.timeit('ex4.Projection.result_pv()', globals=globals(), number=5)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "model_point(): spec_id age_at_entry sex policy_term policy_count \\\n", 50 | "point_id scen_id \n", 51 | "1 1 A 20 M 10 100 \n", 52 | " 2 A 20 M 10 100 \n", 53 | " 3 A 20 M 10 100 \n", 54 | " 4 A 20 M 10 100 \n", 55 | " 5 A 20 M 10 100 \n", 56 | "... ... ... .. ... ... \n", 57 | "9 996 A 20 M 10 100 \n", 58 | " 997 A 20 M 10 100 \n", 59 | " 998 A 20 M 10 100 \n", 60 | " 999 A 20 M 10 100 \n", 61 | " 1000 A 20 M 10 100 \n", 62 | "\n", 63 | " sum_assured duration_mth premium_pp av_pp_init \\\n", 64 | "point_id scen_id \n", 65 | "1 1 500000 0 500000 0 \n", 66 | " 2 500000 0 500000 0 \n", 67 | " 3 500000 0 500000 0 \n", 68 | " 4 500000 0 500000 0 \n", 69 | " 5 500000 0 500000 0 \n", 70 | "... ... ... ... ... \n", 71 | "9 996 500000 0 300000 0 \n", 72 | " 997 500000 0 300000 0 \n", 73 | " 998 500000 0 300000 0 \n", 74 | " 999 500000 0 300000 0 \n", 75 | " 1000 500000 0 300000 0 \n", 76 | "\n", 77 | " accum_prem_init_pp premium_type has_surr_charge \\\n", 78 | "point_id scen_id \n", 79 | "1 1 0 SINGLE False \n", 80 | " 2 0 SINGLE False \n", 81 | " 3 0 SINGLE False \n", 82 | " 4 0 SINGLE False \n", 83 | " 5 0 SINGLE False \n", 84 | "... ... ... ... \n", 85 | "9 996 0 SINGLE False \n", 86 | " 997 0 SINGLE False \n", 87 | " 998 0 SINGLE False \n", 88 | " 999 0 SINGLE False \n", 89 | " 1000 0 SINGLE False \n", 90 | "\n", 91 | " surr_charge_id load_prem_rate is_wl \n", 92 | "point_id scen_id \n", 93 | "1 1 NaN 0.0 False \n", 94 | " 2 NaN 0.0 False \n", 95 | " 3 NaN 0.0 False \n", 96 | " 4 NaN 0.0 False \n", 97 | " 5 NaN 0.0 False \n", 98 | "... ... ... ... \n", 99 | "9 996 NaN 0.0 False \n", 100 | " 997 NaN 0.0 False \n", 101 | " 998 NaN 0.0 False \n", 102 | " 999 NaN 0.0 False \n", 103 | " 1000 NaN 0.0 False \n", 104 | "\n", 105 | "[9000 rows x 15 columns]\n", 106 | "with indices: MultiIndex([(1, 1),\n", 107 | " (1, 2),\n", 108 | " (1, 3),\n", 109 | " (1, 4),\n", 110 | " (1, 5),\n", 111 | " (1, 6),\n", 112 | " (1, 7),\n", 113 | " (1, 8),\n", 114 | " (1, 9),\n", 115 | " (1, 10),\n", 116 | " ...\n", 117 | " (9, 991),\n", 118 | " (9, 992),\n", 119 | " (9, 993),\n", 120 | " (9, 994),\n", 121 | " (9, 995),\n", 122 | " (9, 996),\n", 123 | " (9, 997),\n", 124 | " (9, 998),\n", 125 | " (9, 999),\n", 126 | " (9, 1000)],\n", 127 | " names=['point_id', 'scen_id'], length=9000)\n" 128 | ] 129 | } 130 | ], 131 | "source": [ 132 | "# Projection.model_point_table = Projection.model_point_1\n", 133 | "table = Projection.model_point_table\n", 134 | "# print(\"Number of model points: \", len(table))\n", 135 | "# print(\"Model points: \", table)\n", 136 | "# points = Projection.model_point_table_ext()\n", 137 | "# points = Projection.model_point()[\"scen_id\"].values[990:1010]\n", 138 | "points = Projection.model_point()\n", 139 | "print(\"model_point(): \", points)\n", 140 | "print(\"with indices: \", points.index)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 3, 146 | "metadata": {}, 147 | "outputs": [ 148 | { 149 | "name": "stdout", 150 | "output_type": "stream", 151 | "text": [ 152 | "(9000,)\n", 153 | "900000.0\n", 154 | "[100. 100. 100. ... 100. 100. 100.]\n" 155 | ] 156 | } 157 | ], 158 | "source": [ 159 | "pols = ex4.Projection.pols_if_at(12, \"BEF_DECR\")\n", 160 | "print(np.shape(pols))\n", 161 | "print(sum(pols))\n", 162 | "print(pols)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 4, 168 | "metadata": {}, 169 | "outputs": [ 170 | { 171 | "data": { 172 | "text/plain": [ 173 | "array([100., 100., 100., ..., 100., 100., 100.])" 174 | ] 175 | }, 176 | "execution_count": 4, 177 | "metadata": {}, 178 | "output_type": "execute_result" 179 | } 180 | ], 181 | "source": [ 182 | "Projection.pols_if(1)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 24, 188 | "metadata": {}, 189 | "outputs": [ 190 | { 191 | "data": { 192 | "text/plain": [ 193 | "399477611.70743275" 194 | ] 195 | }, 196 | "execution_count": 24, 197 | "metadata": {}, 198 | "output_type": "execute_result" 199 | } 200 | ], 201 | "source": [ 202 | "Projection.result_pv()[\"Net Cashflow\"].groupby(\"point_id\").mean().sum()" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": null, 215 | "metadata": {}, 216 | "outputs": [ 217 | { 218 | "name": "stdout", 219 | "output_type": "stream", 220 | "text": [ 221 | "121\n" 222 | ] 223 | }, 224 | { 225 | "data": { 226 | "text/plain": [ 227 | "array([50000000., 50000000., 50000000., ..., 30000000., 30000000.,\n", 228 | " 30000000.])" 229 | ] 230 | }, 231 | "execution_count": 17, 232 | "metadata": {}, 233 | "output_type": "execute_result" 234 | } 235 | ], 236 | "source": [ 237 | "print(ex4.Projection.max_proj_len())\n", 238 | "ex4.Projection.pv_premiums()" 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": null, 244 | "metadata": {}, 245 | "outputs": [ 246 | { 247 | "data": { 248 | "text/plain": [ 249 | "array([100, 100, 100, ..., 100, 100, 100])" 250 | ] 251 | }, 252 | "execution_count": 18, 253 | "metadata": {}, 254 | "output_type": "execute_result" 255 | } 256 | ], 257 | "source": [ 258 | "ex4.Projection.pols_new_biz(0)" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": null, 264 | "metadata": {}, 265 | "outputs": [ 266 | { 267 | "name": "stdout", 268 | "output_type": "stream", 269 | "text": [ 270 | "Montlhy investment returns: [ 0.00807793 -0.00048898 -0.00302246 ... -0.00917993 -0.00629737\n", 271 | " -0.00596671]\n", 272 | "with shape: (9000,)\n" 273 | ] 274 | } 275 | ], 276 | "source": [ 277 | "inv = Projection.inv_return_mth(2)\n", 278 | "print(\"Montlhy investment returns: \", inv)\n", 279 | "print(\"with shape: \", np.shape(inv))" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": null, 285 | "metadata": {}, 286 | "outputs": [ 287 | { 288 | "data": { 289 | "text/html": [ 290 | "
\n", 291 | "\n", 304 | "\n", 305 | " \n", 306 | " \n", 307 | " \n", 308 | " \n", 309 | " \n", 310 | " \n", 311 | " \n", 312 | " \n", 313 | " \n", 314 | " \n", 315 | " \n", 316 | " \n", 317 | " \n", 318 | " \n", 319 | " \n", 320 | " \n", 321 | " \n", 322 | " \n", 323 | " \n", 324 | " \n", 325 | " \n", 326 | " \n", 327 | " \n", 328 | " \n", 329 | " \n", 330 | " \n", 331 | " \n", 332 | " \n", 333 | " \n", 334 | " \n", 335 | " \n", 336 | " \n", 337 | " \n", 338 | " \n", 339 | " \n", 340 | " \n", 341 | " \n", 342 | " \n", 343 | " \n", 344 | " \n", 345 | " \n", 346 | " \n", 347 | " \n", 348 | " \n", 349 | " \n", 350 | " \n", 351 | " \n", 352 | " \n", 353 | " \n", 354 | " \n", 355 | " \n", 356 | " \n", 357 | " \n", 358 | " \n", 359 | " \n", 360 | " \n", 361 | " \n", 362 | " \n", 363 | " \n", 364 | " \n", 365 | " \n", 366 | " \n", 367 | " \n", 368 | " \n", 369 | " \n", 370 | " \n", 371 | " \n", 372 | " \n", 373 | " \n", 374 | " \n", 375 | " \n", 376 | " \n", 377 | " \n", 378 | " \n", 379 | " \n", 380 | " \n", 381 | " \n", 382 | " \n", 383 | " \n", 384 | " \n", 385 | " \n", 386 | " \n", 387 | " \n", 388 | " \n", 389 | " \n", 390 | " \n", 391 | " \n", 392 | " \n", 393 | " \n", 394 | " \n", 395 | " \n", 396 | " \n", 397 | " \n", 398 | " \n", 399 | " \n", 400 | " \n", 401 | " \n", 402 | " \n", 403 | " \n", 404 | " \n", 405 | " \n", 406 | " \n", 407 | " \n", 408 | " \n", 409 | " \n", 410 | " \n", 411 | " \n", 412 | " \n", 413 | " \n", 414 | " \n", 415 | " \n", 416 | " \n", 417 | " \n", 418 | " \n", 419 | " \n", 420 | " \n", 421 | " \n", 422 | " \n", 423 | " \n", 424 | " \n", 425 | " \n", 426 | " \n", 427 | " \n", 428 | " \n", 429 | " \n", 430 | " \n", 431 | " \n", 432 | " \n", 433 | " \n", 434 | " \n", 435 | " \n", 436 | " \n", 437 | " \n", 438 | " \n", 439 | " \n", 440 | " \n", 441 | " \n", 442 | " \n", 443 | " \n", 444 | " \n", 445 | " \n", 446 | " \n", 447 | " \n", 448 | " \n", 449 | " \n", 450 | " \n", 451 | " \n", 452 | " \n", 453 | " \n", 454 | " \n", 455 | " \n", 456 | " \n", 457 | " \n", 458 | " \n", 459 | " \n", 460 | " \n", 461 | " \n", 462 | " \n", 463 | " \n", 464 | " \n", 465 | " \n", 466 | " \n", 467 | " \n", 468 | " \n", 469 | " \n", 470 | "
PremiumsDeathSurrenderMaturityExpensesCommissionsInvestment IncomeChange in AVNet Cashflow
point_idscen_id
1150000000.00.00.05.765190e+07975895.9511472500000.01.793864e+071.028674e+07-3.475896e+06
250000000.00.00.04.781116e+07975895.9511472500000.07.638184e+069.827021e+06-3.475896e+06
350000000.00.00.05.184905e+07975895.9511472500000.01.232610e+071.047706e+07-3.475896e+06
450000000.00.00.04.752251e+07975895.9511472500000.07.454824e+069.932312e+06-3.475896e+06
550000000.00.00.05.796074e+07975895.9511472500000.01.852191e+071.056117e+07-3.475896e+06
.................................
999630000000.00.00.04.093654e+07975895.9511471500000.04.256529e+065.753036e+06-1.490894e+07
99730000000.00.00.04.093654e+07975895.9511471500000.07.287750e+066.331561e+06-1.245624e+07
99830000000.00.00.04.093654e+07975895.9511471500000.07.480443e+066.031063e+06-1.196305e+07
99930000000.00.00.04.093654e+07975895.9511471500000.01.098676e+076.345723e+06-8.771397e+06
100030000000.00.00.04.093654e+07975895.9511471500000.08.407759e+066.481302e+06-1.148598e+07
\n", 471 | "

9000 rows × 9 columns

\n", 472 | "
" 473 | ], 474 | "text/plain": [ 475 | " Premiums Death Surrender Maturity Expenses \\\n", 476 | "point_id scen_id \n", 477 | "1 1 50000000.0 0.0 0.0 5.765190e+07 975895.951147 \n", 478 | " 2 50000000.0 0.0 0.0 4.781116e+07 975895.951147 \n", 479 | " 3 50000000.0 0.0 0.0 5.184905e+07 975895.951147 \n", 480 | " 4 50000000.0 0.0 0.0 4.752251e+07 975895.951147 \n", 481 | " 5 50000000.0 0.0 0.0 5.796074e+07 975895.951147 \n", 482 | "... ... ... ... ... ... \n", 483 | "9 996 30000000.0 0.0 0.0 4.093654e+07 975895.951147 \n", 484 | " 997 30000000.0 0.0 0.0 4.093654e+07 975895.951147 \n", 485 | " 998 30000000.0 0.0 0.0 4.093654e+07 975895.951147 \n", 486 | " 999 30000000.0 0.0 0.0 4.093654e+07 975895.951147 \n", 487 | " 1000 30000000.0 0.0 0.0 4.093654e+07 975895.951147 \n", 488 | "\n", 489 | " Commissions Investment Income Change in AV Net Cashflow \n", 490 | "point_id scen_id \n", 491 | "1 1 2500000.0 1.793864e+07 1.028674e+07 -3.475896e+06 \n", 492 | " 2 2500000.0 7.638184e+06 9.827021e+06 -3.475896e+06 \n", 493 | " 3 2500000.0 1.232610e+07 1.047706e+07 -3.475896e+06 \n", 494 | " 4 2500000.0 7.454824e+06 9.932312e+06 -3.475896e+06 \n", 495 | " 5 2500000.0 1.852191e+07 1.056117e+07 -3.475896e+06 \n", 496 | "... ... ... ... ... \n", 497 | "9 996 1500000.0 4.256529e+06 5.753036e+06 -1.490894e+07 \n", 498 | " 997 1500000.0 7.287750e+06 6.331561e+06 -1.245624e+07 \n", 499 | " 998 1500000.0 7.480443e+06 6.031063e+06 -1.196305e+07 \n", 500 | " 999 1500000.0 1.098676e+07 6.345723e+06 -8.771397e+06 \n", 501 | " 1000 1500000.0 8.407759e+06 6.481302e+06 -1.148598e+07 \n", 502 | "\n", 503 | "[9000 rows x 9 columns]" 504 | ] 505 | }, 506 | "execution_count": 57, 507 | "metadata": {}, 508 | "output_type": "execute_result" 509 | } 510 | ], 511 | "source": [ 512 | "Projection.result_pv()" 513 | ] 514 | }, 515 | { 516 | "cell_type": "code", 517 | "execution_count": null, 518 | "metadata": {}, 519 | "outputs": [], 520 | "source": [] 521 | }, 522 | { 523 | "cell_type": "code", 524 | "execution_count": null, 525 | "metadata": {}, 526 | "outputs": [], 527 | "source": [] 528 | } 529 | ], 530 | "metadata": { 531 | "kernelspec": { 532 | "display_name": "Python 3", 533 | "language": "python", 534 | "name": "python3" 535 | }, 536 | "language_info": { 537 | "codemirror_mode": { 538 | "name": "ipython", 539 | "version": 3 540 | }, 541 | "file_extension": ".py", 542 | "mimetype": "text/x-python", 543 | "name": "python", 544 | "nbconvert_exporter": "python", 545 | "pygments_lexer": "ipython3", 546 | "version": "3.10.10" 547 | }, 548 | "orig_nbformat": 4 549 | }, 550 | "nbformat": 4, 551 | "nbformat_minor": 2 552 | } 553 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/requirements.txt: -------------------------------------------------------------------------------- 1 | pymort==1.0.0 2 | PyYaml==6.0 3 | modelx==0.21.0 4 | openpyxl==3.1.2 5 | pandas==1.5.3 6 | numpy==1.24.2 7 | torch==2.2.0 8 | scipy==1.12.0 9 | heavylight==1.0.5 -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/savings_me.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import timeit 3 | from savings_me_lifelib import savings_me_lifelib 4 | from savings_me_recursive_numpy import savings_me_recursive_numpy 5 | from pprint import pprint 6 | 7 | def run_savings_benchmarks(): 8 | trials = 5 9 | modelx_time = timeit.repeat(stmt="savings_me_lifelib()", setup="from savings_me_lifelib import savings_me_lifelib", number=1, repeat=trials) 10 | modelx_result = savings_me_lifelib() 11 | recursive_numpy_time = timeit.repeat(stmt="savings_me_recursive_numpy()", setup="from savings_me_recursive_numpy import savings_me_recursive_numpy", number=1, repeat=trials) 12 | recursive_numpy_result = savings_me_recursive_numpy() 13 | return { 14 | "Python lifelib cashvalue_me_ex4": { 15 | "minimum time": f"{np.min(modelx_time)*1000} milliseconds", 16 | "result": modelx_result, 17 | }, 18 | "Python recursive numpy cashvalue_me_ex4": { 19 | "minimum time": f"{np.min(recursive_numpy_time)*1000} milliseconds", 20 | "result": recursive_numpy_result, 21 | } 22 | } 23 | 24 | if __name__ == "__main__": 25 | print(run_savings_benchmarks()) 26 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/savings_me_lifelib.py: -------------------------------------------------------------------------------- 1 | import modelx as mx 2 | import numpy as np 3 | import timeit 4 | import pandas as pd 5 | from os.path import dirname, join 6 | 7 | m = mx.read_model("CashValue_ME_EX4") 8 | model_file = join(dirname(__file__), "CashValue_ME_EX4", "model_point_table_10K.csv") 9 | m.Projection.model_point_table = pd.read_csv(model_file) 10 | m.Projection.scen_size = 1 11 | print(m.Projection.max_proj_len()) 12 | 13 | def savings_me_lifelib(): 14 | m.Projection.clear_cache = 1 15 | return float(np.sum(m.Projection.pv_net_cf())) 16 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/savings_me_recursive_numpy.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | from functools import wraps 3 | import pandas as pd 4 | import numpy as np 5 | 6 | class Cash: 7 | def __init__(self): 8 | self.reset() 9 | 10 | def reset(self): 11 | self.caches = defaultdict(dict) 12 | 13 | def __call__(self, func): 14 | @wraps(func) 15 | def wrapper(*args, **kwargs): 16 | key = (args, frozenset(kwargs.items())) 17 | if key not in self.caches[func.__name__]: 18 | self.caches[func.__name__][key] = func(*args, **kwargs) 19 | return self.caches[func.__name__][key] 20 | 21 | return wrapper 22 | 23 | cash = Cash() 24 | 25 | disc_rate_ann = np.array(pd.read_excel("./CashValue_ME_EX4/disc_rate_ann.xlsx")["zero_spot"]) 26 | disc_rate_arr = np.concatenate([[1], np.cumprod((1+np.repeat(disc_rate_ann, 12)) ** (-1/12))]) 27 | mort_table = pd.read_excel("./CashValue_ME_EX4/mort_table.xlsx") 28 | surr_charge_table = pd.read_excel("./CashValue_ME_EX4/surr_charge_table.xlsx") 29 | product_spec_table = pd.read_excel("./CashValue_ME_EX4/product_spec_table.xlsx") 30 | model_point_table = pd.read_csv("./CashValue_ME_EX4/model_point_table_10K.csv") 31 | model_point_table_ext = model_point_table.merge(product_spec_table, on='spec_id') 32 | model_point_moneyness = pd.read_excel("./CashValue_ME_EX4/model_point_moneyness.xlsx") 33 | scen_id = 1 34 | scen_size = 1 35 | 36 | @cash 37 | def age(t): 38 | return age_at_entry() + duration(t) 39 | 40 | @cash 41 | def age_at_entry(): 42 | return model_point()["age_at_entry"].values 43 | 44 | @cash 45 | def av_at(t, timing): 46 | if timing == "BEF_MAT": 47 | return av_pp_at(t, "BEF_PREM") * pols_if_at(t, "BEF_MAT") 48 | elif timing == "BEF_NB": 49 | return av_pp_at(t, "BEF_PREM") * pols_if_at(t, "BEF_NB") 50 | elif timing == "BEF_FEE": 51 | return av_pp_at(t, "BEF_FEE") * pols_if_at(t, "BEF_DECR") 52 | else: 53 | raise ValueError("invalid timing") 54 | 55 | @cash 56 | def av_change(t): 57 | return av_at(t+1, 'BEF_MAT') - av_at(t, 'BEF_MAT') 58 | 59 | @cash 60 | def av_pp_at(t, timing): 61 | if timing == "BEF_PREM": 62 | if t == 0: 63 | return av_pp_init() 64 | else: 65 | return av_pp_at(t-1, "BEF_INV") + inv_income_pp(t-1) 66 | elif timing == "BEF_FEE": 67 | return av_pp_at(t, "BEF_PREM") + prem_to_av_pp(t) 68 | elif timing == "BEF_INV": 69 | return av_pp_at(t, "BEF_FEE") - maint_fee_pp(t) - coi_pp(t) 70 | elif timing == "MID_MTH": 71 | return av_pp_at(t, "BEF_INV") + 0.5 * inv_income_pp(t) 72 | else: 73 | raise ValueError("invalid timing") 74 | 75 | @cash 76 | def av_pp_init(): 77 | return model_point()["av_pp_init"].values 78 | 79 | @cash 80 | def claim_net_pp(t, kind): 81 | if kind == "DEATH": 82 | return claim_pp(t, "DEATH") - av_pp_at(t, "MID_MTH") 83 | elif kind == "LAPSE": 84 | return 0 85 | elif kind == "MATURITY": 86 | return claim_pp(t, "MATURITY") - av_pp_at(t, "BEF_PREM") 87 | else: 88 | raise ValueError("invalid kind") 89 | 90 | @cash 91 | def claim_pp(t, kind): 92 | if kind == "DEATH": 93 | return np.maximum(sum_assured(), av_pp_at(t, "MID_MTH")) 94 | elif kind == "LAPSE": 95 | return av_pp_at(t, "MID_MTH") 96 | elif kind == "MATURITY": 97 | return np.maximum(sum_assured(), av_pp_at(t, "BEF_PREM")) 98 | else: 99 | raise ValueError("invalid kind") 100 | 101 | @cash 102 | def claims(t, kind=None): 103 | if kind == "DEATH": 104 | return claim_pp(t, "DEATH") * pols_death(t) 105 | elif kind == "LAPSE": 106 | return claims_from_av(t, "LAPSE") - surr_charge(t) 107 | elif kind == "MATURITY": 108 | return claim_pp(t, "MATURITY") * pols_maturity(t) 109 | elif kind is None: 110 | return sum(claims(t, k) for k in ["DEATH", "LAPSE", "MATURITY"]) 111 | else: 112 | raise ValueError("invalid kind") 113 | 114 | @cash 115 | def claims_from_av(t, kind): 116 | if kind == "DEATH": 117 | return av_pp_at(t, "MID_MTH") * pols_death(t) 118 | elif kind == "LAPSE": 119 | return av_pp_at(t, "MID_MTH") * pols_lapse(t) 120 | elif kind == "MATURITY": 121 | return av_pp_at(t, "BEF_PREM") * pols_maturity(t) 122 | else: 123 | raise ValueError("invalid kind") 124 | 125 | @cash 126 | def claims_over_av(t, kind): 127 | return claims(t, kind) - claims_from_av(t, kind) 128 | 129 | @cash 130 | def coi(t): 131 | return coi_pp(t) * pols_if_at(t, "BEF_DECR") 132 | 133 | @cash 134 | def coi_pp(t): 135 | return coi_rate(t) * net_amt_at_risk(t) 136 | 137 | @cash 138 | def coi_rate(t): 139 | return 0 #1.1 * mort_rate_mth(t) 140 | 141 | @cash 142 | def commissions(t): 143 | return 0.05 * premiums(t) 144 | 145 | @cash 146 | def disc_factors(): 147 | return disc_rate_arr[:max_proj_len()] 148 | 149 | @cash 150 | def duration(t): 151 | return duration_mth(t) // 12 152 | 153 | @cash 154 | def duration_mth(t): 155 | if t == 0: 156 | return model_point()['duration_mth'].values 157 | else: 158 | return duration_mth(t-1) + 1 159 | 160 | @cash 161 | def expense_acq(): 162 | return 5000 163 | 164 | @cash 165 | def expense_maint(): 166 | return 500 167 | 168 | @cash 169 | def expenses(t): 170 | return expense_acq() * pols_new_biz(t) \ 171 | + pols_if_at(t, "BEF_DECR") * expense_maint()/12 * inflation_factor(t) 172 | 173 | @cash 174 | def has_surr_charge(): 175 | return model_point()['has_surr_charge'].values 176 | 177 | @cash 178 | def inflation_factor(t): 179 | return (1 + inflation_rate())**(t/12) 180 | 181 | @cash 182 | def inflation_rate(): 183 | return 0.01 184 | 185 | @cash 186 | def inv_income(t): 187 | return (inv_income_pp(t) * pols_if_at(t+1, "BEF_MAT") 188 | + 0.5 * inv_income_pp(t) * (pols_death(t) + pols_lapse(t))) 189 | 190 | @cash 191 | def inv_income_pp(t): 192 | return inv_return_mth(t) * av_pp_at(t, "BEF_INV") 193 | 194 | @cash 195 | def inv_return_mth(t): 196 | return inv_return_table()[:, t] 197 | 198 | @cash 199 | def inv_return_table(): 200 | mu = 0.02 201 | sigma = 0.03 202 | dt = 1/12 203 | 204 | return np.tile(np.exp( 205 | (mu - 0.5 * sigma**2) * dt + sigma * dt**0.5 * std_norm_rand()) - 1, 206 | (point_size(), 1)) 207 | 208 | @cash 209 | def is_wl(): 210 | return model_point()['is_wl'].values 211 | 212 | @cash 213 | def lapse_rate(t): 214 | return 0 215 | 216 | @cash 217 | def load_prem_rate(): 218 | return model_point()['load_prem_rate'].values 219 | 220 | @cash 221 | def maint_fee(t): 222 | return maint_fee_pp(t) * pols_if_at(t, "BEF_DECR") 223 | 224 | @cash 225 | def maint_fee_pp(t): 226 | return maint_fee_rate() * av_pp_at(t, "BEF_FEE") 227 | 228 | @cash 229 | def maint_fee_rate(): 230 | return 0 # 0.01 / 12 231 | 232 | @cash 233 | def margin_expense(t): 234 | return (load_prem_rate()* premium_pp(t) * pols_if_at(t, "BEF_DECR") 235 | + surr_charge(t) 236 | + maint_fee(t) 237 | - commissions(t) 238 | - expenses(t)) 239 | 240 | @cash 241 | def margin_mortality(t): 242 | return coi(t) - claims_over_av(t, 'DEATH') 243 | 244 | @cash 245 | def max_proj_len(): 246 | return max(proj_len()) 247 | 248 | @cash 249 | def model_point(): 250 | mps = model_point_table_ext 251 | 252 | idx = pd.MultiIndex.from_product( 253 | [mps.index, scen_index()], 254 | names = mps.index.names + scen_index().names 255 | ) 256 | 257 | res = pd.DataFrame( 258 | np.repeat(mps.values, len(scen_index()), axis=0), 259 | index=idx, 260 | columns=mps.columns 261 | ) 262 | 263 | return res.astype(mps.dtypes) 264 | 265 | @cash 266 | def mort_rate(t): 267 | return np.zeros(len(model_point().index)) 268 | 269 | @cash 270 | def mort_rate_mth(t): 271 | return 1-(1- mort_rate(t))**(1/12) 272 | 273 | @cash 274 | def mort_table_last_age(): 275 | return 102 # original implementation contained a bug, hard coding for now 276 | 277 | @cash 278 | def mort_table_reindexed(): 279 | result = [] 280 | for col in mort_table.columns: 281 | df = mort_table[[col]] 282 | df = df.assign(Duration=int(col)).set_index('Duration', append=True)[col] 283 | result.append(df) 284 | 285 | return pd.concat(result) 286 | 287 | @cash 288 | def net_amt_at_risk(t): 289 | return np.maximum(sum_assured() - av_pp_at(t, 'BEF_FEE'), 0) 290 | 291 | @cash 292 | def net_cf(t): 293 | return (premiums(t) 294 | + inv_income(t) - claims(t) - expenses(t) - commissions(t) - av_change(t)) 295 | 296 | @cash 297 | def policy_term(): 298 | return (is_wl() * (mort_table_last_age() - age_at_entry()) 299 | + (is_wl() == False) * model_point()["policy_term"].values) 300 | 301 | @cash 302 | def pols_death(t): 303 | return pols_if_at(t, "BEF_DECR") * mort_rate_mth(t) 304 | 305 | @cash 306 | def pols_if(t): 307 | return pols_if_at(t, "BEF_MAT") 308 | 309 | @cash 310 | def pols_if_at(t, timing): 311 | if timing == "BEF_MAT": 312 | if t == 0: 313 | return pols_if_init() 314 | else: 315 | return pols_if_at(t-1, "BEF_DECR") - pols_lapse(t-1) - pols_death(t-1) 316 | elif timing == "BEF_NB": 317 | return pols_if_at(t, "BEF_MAT") - pols_maturity(t) 318 | elif timing == "BEF_DECR": 319 | return pols_if_at(t, "BEF_NB") + pols_new_biz(t) 320 | else: 321 | raise ValueError("invalid timing") 322 | 323 | @cash 324 | def pols_if_init(): 325 | return model_point()["policy_count"].where(duration_mth(0) > 0, other=0).values 326 | 327 | @cash 328 | def pols_lapse(t): 329 | return (pols_if_at(t, "BEF_DECR") - pols_death(t)) * (1-(1 - lapse_rate(t))**(1/12)) 330 | 331 | @cash 332 | def pols_maturity(t): 333 | return (duration_mth(t) == policy_term() * 12) * pols_if_at(t, "BEF_MAT") 334 | 335 | @cash 336 | def pols_new_biz(t): 337 | return model_point()['policy_count'].values * (duration_mth(t) == 0) 338 | 339 | @cash 340 | def prem_to_av(t): 341 | return prem_to_av_pp(t) * pols_if_at(t, "BEF_DECR") 342 | 343 | @cash 344 | def prem_to_av_pp(t): 345 | return (1 - load_prem_rate()) * premium_pp(t) 346 | 347 | @cash 348 | def premium_pp(t): 349 | return model_point()['premium_pp'].values * ((premium_type() == 'SINGLE') & (duration_mth(t) == 0) | 350 | (premium_type() == 'LEVEL') & (duration_mth(t) < 12 * policy_term())) 351 | 352 | @cash 353 | def premium_type(): 354 | return model_point()['premium_type'].values 355 | 356 | @cash 357 | def premiums(t): 358 | return premium_pp(t) * pols_if_at(t, "BEF_DECR") 359 | 360 | @cash 361 | def proj_len(): 362 | return np.maximum(12 * policy_term() - duration_mth(0) + 1, 0) 363 | 364 | @cash 365 | def pv_av_change(): 366 | return sum(av_change(t) * disc_rate_arr[t] for t in range(max_proj_len())) 367 | 368 | @cash 369 | def pv_claims(kind=None): 370 | return sum(claims(t, kind) * disc_rate_arr[t] for t in range(max_proj_len())) 371 | 372 | @cash 373 | def pv_commissions(): 374 | return sum(commissions(t) * disc_rate_arr[t] for t in range(max_proj_len())) 375 | 376 | @cash 377 | def pv_expenses(): 378 | return sum(expenses(t) * disc_rate_arr[t] for t in range(max_proj_len())) 379 | 380 | @cash 381 | def pv_inv_income(): 382 | return sum(inv_income(t) * disc_rate_arr[t] for t in range(max_proj_len())) 383 | 384 | @cash 385 | def pv_pols_if(): 386 | return sum(pols_if_at(t, "BEF_DECR") for t in range(max_proj_len())) 387 | 388 | @cash 389 | def pv_premiums(): 390 | return sum(premiums(t) * disc_rate_arr[t] for t in range(max_proj_len())) 391 | 392 | @cash 393 | def pv_net_cf(): 394 | return (pv_premiums() 395 | + pv_inv_income() 396 | - pv_claims() 397 | - pv_expenses() 398 | - pv_commissions() 399 | - pv_av_change()) 400 | 401 | @cash 402 | def result_pv(): 403 | data = { 404 | "Premiums": pv_premiums(), 405 | "Death": pv_claims("DEATH"), 406 | "Surrender": pv_claims("LAPSE"), 407 | "Maturity": pv_claims("MATURITY"), 408 | "Expenses": pv_expenses(), 409 | "Commissions": pv_commissions(), 410 | "Investment Income": pv_inv_income(), 411 | "Change in AV": pv_av_change(), 412 | "Net Cashflow": pv_net_cf() 413 | } 414 | return pd.DataFrame(data, index=model_point().index) 415 | 416 | @cash 417 | def scen_index(): 418 | return pd.Index(range(1, scen_size + 1), name='scen_id') 419 | 420 | @cash 421 | def sex(): 422 | return model_point()["sex"].values 423 | 424 | @cash 425 | def std_norm_rand(): 426 | if hasattr(np.random, 'default_rng'): 427 | gen = np.random.default_rng(1234) 428 | rnd = gen.standard_normal((scen_size, 242)) 429 | else: 430 | np.random.seed(1234) 431 | rnd = np.random.standard_normal(size=(scen_size, 242)) 432 | return rnd 433 | 434 | @cash 435 | def sum_assured(): 436 | return model_point()['sum_assured'].values 437 | 438 | @cash 439 | def surr_charge(t): 440 | return surr_charge_rate(t) * av_pp_at(t, "MID_MTH") * pols_lapse(t) 441 | 442 | @cash 443 | def surr_charge_id(): 444 | return model_point()['surr_charge_id'].values.astype(str) 445 | 446 | @cash 447 | def surr_charge_max_idx(): 448 | return max(surr_charge_table.index) 449 | 450 | @cash 451 | def surr_charge_rate(t): 452 | ind_row = np.minimum(duration(t), surr_charge_max_idx()) 453 | return surr_charge_table.values.flat[surr_charge_table_column() + ind_row * len(surr_charge_table.columns)] 454 | 455 | @cash 456 | def surr_charge_table_column(): 457 | return surr_charge_table.columns.searchsorted(surr_charge_id(), side='right') - 1 458 | 459 | @cash 460 | def surr_charge_table_stacked(): 461 | return surr_charge_table.stack().reorder_levels([1, 0]).sort_index() 462 | 463 | @cash 464 | def point_size(): 465 | return len(model_point_table_ext) 466 | 467 | def savings_me_recursive_numpy(): 468 | cash.reset() # Ensure the cache is clear before running calculations 469 | return float(np.sum(pv_net_cf())) 470 | 471 | if __name__ == "__main__": 472 | print(savings_me_recursive_numpy()) -------------------------------------------------------------------------------- /github-runners-benchmarks/R/.Rprofile: -------------------------------------------------------------------------------- 1 | source("renv/activate.R") 2 | -------------------------------------------------------------------------------- /github-runners-benchmarks/R/R.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | -------------------------------------------------------------------------------- /github-runners-benchmarks/R/benchmark_results.yaml: -------------------------------------------------------------------------------- 1 | exposures: 2 | R actxps: 3 | num_rows: 141281 4 | min: 470.446116 ms 5 | -------------------------------------------------------------------------------- /github-runners-benchmarks/R/exposures.R: -------------------------------------------------------------------------------- 1 | #' Create exposure records from census records 2 | #' 3 | #' @description Convert a data frame of census-level records to exposure-level 4 | #' records. 5 | #' 6 | #' @details Census-level data refers to a data set wherein there is one row 7 | #' per unique policy. Exposure-level data expands census-level data such that 8 | #' there is one record per policy per observation period. Observation periods 9 | #' could be any meaningful period of time such as a policy year, policy month, 10 | #' calendar year, calendar quarter, calendar month, etc. 11 | #' 12 | #' `target_status` is used in the calculation of exposures. The annual 13 | #' exposure method is applied, which allocates a full period of exposure for 14 | #' any statuses in `target_status`. For all other statuses, new entrants 15 | #' and exits are partially exposed based on the time elapsed in the observation 16 | #' period. This method is consistent with the Balducci Hypothesis, which assumes 17 | #' that the probability of termination is proportionate to the time elapsed 18 | #' in the observation period. If the annual exposure method isn't desired, 19 | #' `target_status` can be ignored. In this case, partial exposures are 20 | #' always applied regardless of status. 21 | #' 22 | #' `default_status` is used to indicate the default active status that 23 | #' should be used when exposure records are created. If left blank, then the 24 | #' first status level will be assumed to be the default active status. 25 | #' 26 | #' # Policy period and calendar period variations 27 | #' 28 | #' The functions `expose_py()`, `expose_pq()`, `expose_pm()`, 29 | #' `expose_pw()`, `expose_cy()`, `expose_cq()`, 30 | #' `expose_cm()`, `expose_cw()` are convenience functions for 31 | #' specific implementations of `expose()`. The two characters after the 32 | #' underscore describe the exposure type and exposure period, respectively. 33 | #' 34 | #' For exposures types: 35 | #' 36 | #' - `p` refers to policy years 37 | #' - `c` refers to calendar years. 38 | #' 39 | #' For exposure periods: 40 | #' 41 | #' - `y` = years 42 | #' - `q` = quarters 43 | #' - `m` = months 44 | #' - `w` = weeks. 45 | #' 46 | #' @param .data a data frame with census-level records 47 | #' @param end_date experience study end date 48 | #' @param start_date experience study start date. Default value = 1900-01-01. 49 | #' @param target_status character vector of target status values. Default value = `NULL`. 50 | #' @param cal_expo set to TRUE for calendar year exposures. Otherwise policy year exposures are assumed. 51 | #' @param expo_length exposure period length 52 | #' @param col_pol_num name of the column in `.data` containing the policy number 53 | #' @param col_status name of the column in `.data` containing the policy status 54 | #' @param col_issue_date name of the column in `.data` containing the issue date 55 | #' @param col_term_date name of the column in `.data` containing the termination date 56 | #' @param default_status optional scalar character representing the default active status code 57 | #' @param ... arguments passed to `expose()` 58 | #' 59 | #' @return A tibble with class `exposed_df`, `tbl_df`, `tbl`, 60 | #' and `data.frame`. The results include all existing columns in 61 | #' `.data` plus new columns for exposures and observation periods. Observation 62 | #' periods include counters for policy exposures, start dates, and end dates. 63 | #' Both start dates and end dates are inclusive bounds. 64 | #' 65 | #' For policy year exposures, two observation period columns are returned. 66 | #' Columns beginning with (`pol_`) are integer policy periods. Columns 67 | #' beginning with (`pol_date_`) are calendar dates representing 68 | #' anniversary dates, monthiversary dates, etc. 69 | #' 70 | #' @examples 71 | #' toy_census |> expose("2020-12-31") 72 | #' 73 | #' census_dat |> expose_py("2019-12-31", target_status = "Surrender") 74 | #' 75 | #' @references Atkinson and McGarry (2016). Experience Study Calculations. 76 | #' 77 | #' 78 | #' @importFrom lubridate %m+% 79 | #' 80 | #' @export 81 | expose <- function(.data, 82 | end_date, 83 | start_date = as.Date("1900-01-01"), 84 | target_status = NULL, 85 | cal_expo = FALSE, 86 | expo_length = c("year", "quarter", "month", "week"), 87 | col_pol_num = "pol_num", 88 | col_status = "status", 89 | col_issue_date = "issue_date", 90 | col_term_date = "term_date", 91 | default_status) { 92 | 93 | end_date <- as.Date(end_date) 94 | start_date <- as.Date(start_date) 95 | 96 | # helper functions 97 | rename_col <- function(x, prefix, suffix = "") { 98 | res <- abbr_period(expo_length) 99 | paste0(prefix, "_", res, suffix) 100 | } 101 | 102 | 103 | # set up exposure period lengths 104 | expo_length <- rlang::arg_match(expo_length) 105 | expo_step <- switch(expo_length, 106 | "year" = lubridate::years(1), 107 | "quarter" = months(3), 108 | "month" = months(1), 109 | "week" = lubridate::days(7)) 110 | 111 | cal_frac <- switch(expo_length, 112 | "year" = year_frac, 113 | "quarter" = quarter_frac, 114 | "month" = month_frac, 115 | 'week' = week_frac) 116 | 117 | # column renames and name conflicts 118 | .data <- .data |> 119 | dplyr::rename(pol_num = {{col_pol_num}}, 120 | status = {{col_status}}, 121 | issue_date = {{col_issue_date}}, 122 | term_date = {{col_term_date}}) |> 123 | .expo_name_conflict(cal_expo, expo_length) 124 | 125 | # set up statuses 126 | if(!is.factor(.data$status)) .data$status <- factor(.data$status) 127 | 128 | if (missing(default_status)) { 129 | default_status <- factor(levels(.data$status)[[1]], 130 | levels = levels(.data$status)) 131 | } else { 132 | status_levels <- union(levels(.data$status), default_status) 133 | default_status <- factor(default_status, 134 | levels = status_levels) 135 | levels(.data$status) <- status_levels 136 | } 137 | 138 | # pre-exposure updates 139 | res <- .data |> 140 | dplyr::filter(issue_date < end_date, 141 | is.na(term_date) | term_date > start_date) |> 142 | dplyr::mutate( 143 | term_date = dplyr::if_else(term_date > end_date, 144 | lubridate::NA_Date_, term_date), 145 | status = dplyr::if_else(is.na(term_date), default_status, status), 146 | last_date = pmin(term_date, end_date, na.rm = TRUE)) 147 | 148 | if (cal_expo) { 149 | res <- res |> 150 | dplyr::mutate( 151 | first_date = pmax(issue_date, start_date), 152 | cal_b = lubridate::floor_date(first_date, expo_length), 153 | tot_per = lubridate::interval( 154 | cal_b, 155 | lubridate::floor_date(last_date, expo_length) 156 | ) / expo_step, 157 | rep_n = ceiling(tot_per) + 1) 158 | } else { 159 | res <- res |> 160 | dplyr::mutate( 161 | tot_per = lubridate::interval(issue_date - 1, last_date) / expo_step, 162 | rep_n = ceiling(tot_per)) 163 | } 164 | 165 | # apply exposures 166 | res <- res |> 167 | dplyr::slice(rep(dplyr::row_number(), rep_n)) |> 168 | dplyr::group_by(pol_num) |> 169 | dplyr::mutate(.time = dplyr::row_number()) |> 170 | dplyr::ungroup() |> 171 | dplyr::mutate( 172 | last_per = .time == rep_n, 173 | status = dplyr::if_else(last_per, status, default_status), 174 | term_date = dplyr::if_else(last_per, term_date, lubridate::NA_Date_)) 175 | 176 | if (cal_expo) { 177 | res <- res |> 178 | dplyr::mutate(first_per = .time == 1, 179 | cal_e = cal_b %m+% (expo_step * .time) - 1, 180 | cal_b = cal_b %m+% (expo_step * (.time - 1)), 181 | exposure = dplyr::case_when( 182 | status %in% target_status ~ 1, 183 | first_per & last_per ~ cal_frac(last_date) - cal_frac(first_date, 1), 184 | first_per ~ 1 - cal_frac(first_date, 1), 185 | last_per ~ cal_frac(last_date), 186 | TRUE ~ 1) 187 | ) |> 188 | dplyr::select(-rep_n, -first_date, -last_date, -first_per, -last_per, 189 | -.time, -tot_per) |> 190 | dplyr::relocate(cal_e, .after = cal_b) |> 191 | dplyr::rename_with(.fn = rename_col, .cols = cal_b, prefix = "cal") |> 192 | dplyr::rename_with(.fn = rename_col, .cols = cal_e, prefix = "cal", 193 | suffix = "_end") 194 | } else { 195 | res <- res |> 196 | dplyr::mutate( 197 | cal_b = issue_date %m+% (expo_step * (.time - 1)), 198 | cal_e = issue_date %m+% (expo_step * .time) - 1, 199 | exposure = dplyr::if_else(last_per & !status %in% target_status, 200 | tot_per %% 1, 1), 201 | # exposure = 0 is possible if exactly 1 period has elapsed. replace these with 1's 202 | exposure = dplyr::if_else(exposure == 0, 1, exposure) 203 | ) |> 204 | dplyr::select(-last_per, -last_date, -tot_per, -rep_n) |> 205 | dplyr::filter(dplyr::between(cal_b, start_date, end_date)) |> 206 | dplyr::rename_with(.fn = rename_col, .cols = .time, prefix = "pol") |> 207 | dplyr::rename_with(.fn = rename_col, .cols = cal_b, prefix = "pol_date") |> 208 | dplyr::rename_with(.fn = rename_col, .cols = cal_e, prefix = "pol_date", 209 | suffix = "_end") 210 | 211 | 212 | } 213 | 214 | # set up S3 object 215 | as_exposed_df(res, end_date, start_date, 216 | target_status, cal_expo, expo_length) 217 | 218 | } 219 | 220 | 221 | #' @rdname expose 222 | #' @export 223 | expose_py <- function(...) { 224 | expose(cal_expo = FALSE, expo_length = "year", ...) 225 | } 226 | 227 | #' @rdname expose 228 | #' @export 229 | expose_pq <- function(...) { 230 | expose(cal_expo = FALSE, expo_length = "quarter", ...) 231 | } 232 | 233 | #' @rdname expose 234 | #' @export 235 | expose_pm <- function(...) { 236 | expose(cal_expo = FALSE, expo_length = "month", ...) 237 | } 238 | 239 | #' @rdname expose 240 | #' @export 241 | expose_pw <- function(...) { 242 | expose(cal_expo = FALSE, expo_length = "week", ...) 243 | } 244 | 245 | #' @rdname expose 246 | #' @export 247 | expose_cy <- function(...) { 248 | expose(cal_expo = TRUE, expo_length = "year", ...) 249 | } 250 | 251 | #' @rdname expose 252 | #' @export 253 | expose_cq <- function(...) { 254 | expose(cal_expo = TRUE, expo_length = "quarter", ...) 255 | } 256 | 257 | #' @rdname expose 258 | #' @export 259 | expose_cm <- function(...) { 260 | expose(cal_expo = TRUE, expo_length = "month", ...) 261 | } 262 | 263 | #' @rdname expose 264 | #' @export 265 | expose_cw <- function(...) { 266 | expose(cal_expo = TRUE, expo_length = "week", ...) 267 | } 268 | 269 | year_frac <- function(x, .offset = 0) { 270 | (lubridate::yday(x) - .offset) / (365 + lubridate::leap_year(x)) 271 | } 272 | 273 | quarter_frac <- function(x, .offset = 0) { 274 | (lubridate::qday(x) - .offset) / 275 | lubridate::qday((lubridate::ceiling_date(x, "quarter") - 1)) 276 | } 277 | 278 | month_frac <- function(x, .offset = 0) { 279 | (lubridate::mday(x) - .offset) / 280 | lubridate::mday((lubridate::ceiling_date(x, "month") - 1)) 281 | } 282 | 283 | week_frac <- function(x, .offset = 0) { 284 | (lubridate::wday(x) - .offset) / 7 285 | } 286 | 287 | # helper function to handle name conflicts 288 | .expo_name_conflict <- function(.data, cal_expo, expo_length) { 289 | 290 | abbrev <- abbr_period(expo_length) 291 | 292 | x <- c( 293 | "exposure", 294 | paste0(if (cal_expo) "cal_" else "pol_", abbrev), 295 | if (!cal_expo) paste0("pol_date_", abbrev), 296 | paste0(if (cal_expo) "cal_" else "pol_date_", abbrev, "_end") 297 | ) 298 | 299 | x <- x[x %in% names(.data)] 300 | .data[x] <- NULL 301 | if (length(x > 0)) { 302 | rlang::warn(c(x = glue::glue("`.data` contains the following conflicting columns that will be overridden: {paste(x, collapse = ', ')}. If you don't want this to happen, please rename these columns prior to calling the applicable expose function."))) 303 | } 304 | .data 305 | } 306 | 307 | 308 | #' @export 309 | print.exposed_df <- function(x, ...) { 310 | cat("Exposure data\n\n", 311 | "Exposure type:", attr(x, "exposure_type"), "\n", 312 | "Target status:", paste(attr(x, "target_status"), collapse = ", "), "\n", 313 | "Study range:", as.character(attr(x, "start_date")), "to", 314 | as.character(attr(x, "end_date")), "\n\n") 315 | NextMethod() 316 | } 317 | 318 | # helper function - do not export 319 | abbr_period <- function(x) { 320 | switch(x, 321 | "year" = "yr", 322 | "quarter" = "qtr", 323 | "month" = "mth", 324 | 'week' = "wk") 325 | } 326 | 327 | is_exposed_df <- function(x) { 328 | "exposed_df" %in% class(x) 329 | } 330 | 331 | #' @rdname is_exposed_df 332 | #' @export 333 | as_exposed_df <- function(x, end_date, start_date = as.Date("1900-01-01"), 334 | target_status = NULL, cal_expo = FALSE, 335 | expo_length = "year") { 336 | 337 | if(!is.data.frame(x)) { 338 | rlang::abort("`x` must be a data frame.") 339 | } 340 | 341 | structure(x, class = c("exposed_df", class(x)), 342 | target_status = target_status, 343 | exposure_type = glue::glue("{if(cal_expo) 'calendar' else 'policy'}_{expo_length}"), 344 | start_date = start_date, 345 | end_date = end_date) 346 | 347 | } -------------------------------------------------------------------------------- /github-runners-benchmarks/R/main.R: -------------------------------------------------------------------------------- 1 | library(yaml) 2 | library(readr) 3 | library(microbenchmark) 4 | library(magrittr) 5 | library(dplyr) 6 | library(lubridate) 7 | source("exposures.R") 8 | # use readr to read csv from ../data/census_dat.csv 9 | 10 | census_dat <- read_csv("../data/census_dat.csv") 11 | exposures <- expose_py( 12 | census_dat, 13 | start_date = "2006-6-15", 14 | end_date = "2020-02-29", 15 | target_status = "Surrender" 16 | ) 17 | results <- microbenchmark( 18 | expose_py( 19 | census_dat, 20 | start_date = "2006-6-15", 21 | end_date = "2020-02-29", 22 | target_status = "Surrender" 23 | ) 24 | ) 25 | 26 | create_benchmark_results_yaml <- function(results){ 27 | summarised_results <- results %>% 28 | group_by(expr) %>% 29 | summarise(min_time_ms = min(time)/1000000) 30 | write_yaml( 31 | list( 32 | exposures = list( 33 | "R actxps" = list( 34 | num_rows = nrow(exposures), 35 | min = paste(summarised_results$min_time_ms, "ms") 36 | ) 37 | ) 38 | ), 39 | "benchmark_results.yaml" 40 | ) 41 | } 42 | 43 | create_benchmark_results_yaml(results) 44 | -------------------------------------------------------------------------------- /github-runners-benchmarks/R/renv.lock: -------------------------------------------------------------------------------- 1 | { 2 | "R": { 3 | "Version": "4.2.2", 4 | "Repositories": [ 5 | { 6 | "Name": "CRAN", 7 | "URL": "https://cloud.r-project.org" 8 | } 9 | ] 10 | }, 11 | "Packages": { 12 | "R6": { 13 | "Package": "R6", 14 | "Version": "2.5.1", 15 | "Source": "Repository", 16 | "Repository": "CRAN", 17 | "Hash": "470851b6d5d0ac559e9d01bb352b4021", 18 | "Requirements": [] 19 | }, 20 | "bit": { 21 | "Package": "bit", 22 | "Version": "4.0.5", 23 | "Source": "Repository", 24 | "Repository": "CRAN", 25 | "Hash": "d242abec29412ce988848d0294b208fd", 26 | "Requirements": [] 27 | }, 28 | "bit64": { 29 | "Package": "bit64", 30 | "Version": "4.0.5", 31 | "Source": "Repository", 32 | "Repository": "CRAN", 33 | "Hash": "9fe98599ca456d6552421db0d6772d8f", 34 | "Requirements": [ 35 | "bit" 36 | ] 37 | }, 38 | "cli": { 39 | "Package": "cli", 40 | "Version": "3.6.0", 41 | "Source": "Repository", 42 | "Repository": "CRAN", 43 | "Hash": "3177a5a16c243adc199ba33117bd9657", 44 | "Requirements": [] 45 | }, 46 | "clipr": { 47 | "Package": "clipr", 48 | "Version": "0.8.0", 49 | "Source": "Repository", 50 | "Repository": "CRAN", 51 | "Hash": "3f038e5ac7f41d4ac41ce658c85e3042", 52 | "Requirements": [] 53 | }, 54 | "cpp11": { 55 | "Package": "cpp11", 56 | "Version": "0.4.3", 57 | "Source": "Repository", 58 | "Repository": "CRAN", 59 | "Hash": "ed588261931ee3be2c700d22e94a29ab", 60 | "Requirements": [] 61 | }, 62 | "crayon": { 63 | "Package": "crayon", 64 | "Version": "1.5.2", 65 | "Source": "Repository", 66 | "Repository": "CRAN", 67 | "Hash": "e8a1e41acf02548751f45c718d55aa6a", 68 | "Requirements": [] 69 | }, 70 | "dplyr": { 71 | "Package": "dplyr", 72 | "Version": "1.1.0", 73 | "Source": "Repository", 74 | "Repository": "CRAN", 75 | "Hash": "d3c34618017e7ae252d46d79a1b9ec32", 76 | "Requirements": [ 77 | "R6", 78 | "cli", 79 | "generics", 80 | "glue", 81 | "lifecycle", 82 | "magrittr", 83 | "pillar", 84 | "rlang", 85 | "tibble", 86 | "tidyselect", 87 | "vctrs" 88 | ] 89 | }, 90 | "ellipsis": { 91 | "Package": "ellipsis", 92 | "Version": "0.3.2", 93 | "Source": "Repository", 94 | "Repository": "CRAN", 95 | "Hash": "bb0eec2fe32e88d9e2836c2f73ea2077", 96 | "Requirements": [ 97 | "rlang" 98 | ] 99 | }, 100 | "fansi": { 101 | "Package": "fansi", 102 | "Version": "1.0.4", 103 | "Source": "Repository", 104 | "Repository": "CRAN", 105 | "Hash": "1d9e7ad3c8312a192dea7d3db0274fde", 106 | "Requirements": [] 107 | }, 108 | "generics": { 109 | "Package": "generics", 110 | "Version": "0.1.3", 111 | "Source": "Repository", 112 | "Repository": "CRAN", 113 | "Hash": "15e9634c0fcd294799e9b2e929ed1b86", 114 | "Requirements": [] 115 | }, 116 | "glue": { 117 | "Package": "glue", 118 | "Version": "1.6.2", 119 | "Source": "Repository", 120 | "Repository": "CRAN", 121 | "Hash": "4f2596dfb05dac67b9dc558e5c6fba2e", 122 | "Requirements": [] 123 | }, 124 | "hms": { 125 | "Package": "hms", 126 | "Version": "1.1.2", 127 | "Source": "Repository", 128 | "Repository": "CRAN", 129 | "Hash": "41100392191e1244b887878b533eea91", 130 | "Requirements": [ 131 | "ellipsis", 132 | "lifecycle", 133 | "pkgconfig", 134 | "rlang", 135 | "vctrs" 136 | ] 137 | }, 138 | "lifecycle": { 139 | "Package": "lifecycle", 140 | "Version": "1.0.3", 141 | "Source": "Repository", 142 | "Repository": "CRAN", 143 | "Hash": "001cecbeac1cff9301bdc3775ee46a86", 144 | "Requirements": [ 145 | "cli", 146 | "glue", 147 | "rlang" 148 | ] 149 | }, 150 | "lubridate": { 151 | "Package": "lubridate", 152 | "Version": "1.9.2", 153 | "Source": "Repository", 154 | "Repository": "CRAN", 155 | "Hash": "e25f18436e3efd42c7c590a1c4c15390", 156 | "Requirements": [ 157 | "generics", 158 | "timechange" 159 | ] 160 | }, 161 | "magrittr": { 162 | "Package": "magrittr", 163 | "Version": "2.0.3", 164 | "Source": "Repository", 165 | "Repository": "CRAN", 166 | "Hash": "7ce2733a9826b3aeb1775d56fd305472", 167 | "Requirements": [] 168 | }, 169 | "microbenchmark": { 170 | "Package": "microbenchmark", 171 | "Version": "1.4.9", 172 | "Source": "Repository", 173 | "Repository": "CRAN", 174 | "Hash": "7001412b0204877d34dd8f46b04fde62", 175 | "Requirements": [] 176 | }, 177 | "pillar": { 178 | "Package": "pillar", 179 | "Version": "1.8.1", 180 | "Source": "Repository", 181 | "Repository": "CRAN", 182 | "Hash": "f2316df30902c81729ae9de95ad5a608", 183 | "Requirements": [ 184 | "cli", 185 | "fansi", 186 | "glue", 187 | "lifecycle", 188 | "rlang", 189 | "utf8", 190 | "vctrs" 191 | ] 192 | }, 193 | "pkgconfig": { 194 | "Package": "pkgconfig", 195 | "Version": "2.0.3", 196 | "Source": "Repository", 197 | "Repository": "CRAN", 198 | "Hash": "01f28d4278f15c76cddbea05899c5d6f", 199 | "Requirements": [] 200 | }, 201 | "prettyunits": { 202 | "Package": "prettyunits", 203 | "Version": "1.1.1", 204 | "Source": "Repository", 205 | "Repository": "CRAN", 206 | "Hash": "95ef9167b75dde9d2ccc3c7528393e7e", 207 | "Requirements": [] 208 | }, 209 | "progress": { 210 | "Package": "progress", 211 | "Version": "1.2.2", 212 | "Source": "Repository", 213 | "Repository": "CRAN", 214 | "Hash": "14dc9f7a3c91ebb14ec5bb9208a07061", 215 | "Requirements": [ 216 | "R6", 217 | "crayon", 218 | "hms", 219 | "prettyunits" 220 | ] 221 | }, 222 | "readr": { 223 | "Package": "readr", 224 | "Version": "2.1.4", 225 | "Source": "Repository", 226 | "Repository": "CRAN", 227 | "Hash": "b5047343b3825f37ad9d3b5d89aa1078", 228 | "Requirements": [ 229 | "R6", 230 | "cli", 231 | "clipr", 232 | "cpp11", 233 | "crayon", 234 | "hms", 235 | "lifecycle", 236 | "rlang", 237 | "tibble", 238 | "tzdb", 239 | "vroom" 240 | ] 241 | }, 242 | "renv": { 243 | "Package": "renv", 244 | "Version": "0.16.0", 245 | "Source": "Repository", 246 | "Repository": "CRAN", 247 | "Hash": "c9e8442ab69bc21c9697ecf856c1e6c7", 248 | "Requirements": [] 249 | }, 250 | "rlang": { 251 | "Package": "rlang", 252 | "Version": "1.0.6", 253 | "Source": "Repository", 254 | "Repository": "CRAN", 255 | "Hash": "4ed1f8336c8d52c3e750adcdc57228a7", 256 | "Requirements": [] 257 | }, 258 | "tibble": { 259 | "Package": "tibble", 260 | "Version": "3.1.8", 261 | "Source": "Repository", 262 | "Repository": "CRAN", 263 | "Hash": "56b6934ef0f8c68225949a8672fe1a8f", 264 | "Requirements": [ 265 | "fansi", 266 | "lifecycle", 267 | "magrittr", 268 | "pillar", 269 | "pkgconfig", 270 | "rlang", 271 | "vctrs" 272 | ] 273 | }, 274 | "tidyselect": { 275 | "Package": "tidyselect", 276 | "Version": "1.2.0", 277 | "Source": "Repository", 278 | "Repository": "CRAN", 279 | "Hash": "79540e5fcd9e0435af547d885f184fd5", 280 | "Requirements": [ 281 | "cli", 282 | "glue", 283 | "lifecycle", 284 | "rlang", 285 | "vctrs", 286 | "withr" 287 | ] 288 | }, 289 | "timechange": { 290 | "Package": "timechange", 291 | "Version": "0.2.0", 292 | "Source": "Repository", 293 | "Repository": "CRAN", 294 | "Hash": "8548b44f79a35ba1791308b61e6012d7", 295 | "Requirements": [ 296 | "cpp11" 297 | ] 298 | }, 299 | "tzdb": { 300 | "Package": "tzdb", 301 | "Version": "0.3.0", 302 | "Source": "Repository", 303 | "Repository": "CRAN", 304 | "Hash": "b2e1cbce7c903eaf23ec05c58e59fb5e", 305 | "Requirements": [ 306 | "cpp11" 307 | ] 308 | }, 309 | "utf8": { 310 | "Package": "utf8", 311 | "Version": "1.2.3", 312 | "Source": "Repository", 313 | "Repository": "CRAN", 314 | "Hash": "1fe17157424bb09c48a8b3b550c753bc", 315 | "Requirements": [] 316 | }, 317 | "vctrs": { 318 | "Package": "vctrs", 319 | "Version": "0.5.2", 320 | "Source": "Repository", 321 | "Repository": "CRAN", 322 | "Hash": "e4ffa94ceed5f124d429a5a5f0f5b378", 323 | "Requirements": [ 324 | "cli", 325 | "glue", 326 | "lifecycle", 327 | "rlang" 328 | ] 329 | }, 330 | "vroom": { 331 | "Package": "vroom", 332 | "Version": "1.6.1", 333 | "Source": "Repository", 334 | "Repository": "CRAN", 335 | "Hash": "7015a74373b83ffaef64023f4a0f5033", 336 | "Requirements": [ 337 | "bit64", 338 | "cli", 339 | "cpp11", 340 | "crayon", 341 | "glue", 342 | "hms", 343 | "lifecycle", 344 | "progress", 345 | "rlang", 346 | "tibble", 347 | "tidyselect", 348 | "tzdb", 349 | "vctrs", 350 | "withr" 351 | ] 352 | }, 353 | "withr": { 354 | "Package": "withr", 355 | "Version": "2.5.0", 356 | "Source": "Repository", 357 | "Repository": "CRAN", 358 | "Hash": "c0e49a9760983e81e55cdd9be92e7182", 359 | "Requirements": [] 360 | }, 361 | "yaml": { 362 | "Package": "yaml", 363 | "Version": "2.3.7", 364 | "Source": "Repository", 365 | "Repository": "CRAN", 366 | "Hash": "0d0056cc5383fbc240ccd0cb584bf436", 367 | "Requirements": [] 368 | } 369 | } 370 | } 371 | -------------------------------------------------------------------------------- /github-runners-benchmarks/R/renv/.gitignore: -------------------------------------------------------------------------------- 1 | library/ 2 | local/ 3 | cellar/ 4 | lock/ 5 | python/ 6 | sandbox/ 7 | staging/ 8 | -------------------------------------------------------------------------------- /github-runners-benchmarks/R/renv/settings.dcf: -------------------------------------------------------------------------------- 1 | bioconductor.version: 2 | external.libraries: 3 | ignored.packages: 4 | package.dependency.fields: Imports, Depends, LinkingTo 5 | r.version: 6 | snapshot.type: implicit 7 | use.cache: TRUE 8 | vcs.ignore.cellar: TRUE 9 | vcs.ignore.library: TRUE 10 | vcs.ignore.local: TRUE 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/README.md: -------------------------------------------------------------------------------- 1 | The benchmarking repository for some time has autogenerated the top-level README from within GitHub actions. 2 | 3 | As we expand our benchmarking activities for GPUs and larger scale calculations, the GitHub hosted runners are still used, but we will be focusing more efforts on Docker containers. 4 | 5 | The top-level readme is not autogenerated, this file will be, and probably will have results copied and pasted into the top-level readme manually. 6 | 7 | # Benchmarking 8 | 9 | Benchmarks in this repository: 10 | 11 | * `basic_term_benchmark`: Replicate the cashflows of the [LifeLib BasicTerm model](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 12 | * Python [LifeLib BasicTerm_M](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 13 | * Julia [using LifeSimulator](https://github.com/JuliaActuary/LifeSimulator.jl) 14 | * Python using recursive formulas with [PyTorch](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_recursive_pytorch.py) and [NumPy](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_recursive_numpy.py) 15 | * Python using matrix operations (no recursion) on [PyTorch arrays](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_array_pytorch.py) and [NumPy arrays](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_array_numpy.py) 16 | * `exposures`: Create date partitions for experience studies 17 | * Julia [ExperienceAnalysis](https://github.com/JuliaActuary/ExperienceAnalysis.jl) 18 | * R [actxps](https://github.com/mattheaphy/actxps) 19 | * `mortality`: Read SOA mortality tables and use them in a simple calculation 20 | * Julia [MortalityTables](https://github.com/JuliaActuary/MortalityTables.jl) 21 | * Python [Pymort](https://github.com/actuarialopensource/pymort) 22 | 23 | The below results are generated by the benchmarking scripts in the folders for each language. These scripts are run automatically by GitHub Actions and populate the results below. 24 | 25 | ```yaml 26 | basic_term_benchmark: 27 | - Julia array basic_term: 28 | minimum time: TrialEstimate(29.152 ms) 29 | result: 1.4489630534602132e7 30 | Julia recursive basic_term: 31 | minimum time: TrialEstimate(81.070 ms) 32 | result: 1.4489630534602132e7 33 | - Python array numpy basic_term_m: 34 | minimum time: 81.15190000000894 milliseconds 35 | result: 14489630.534603368 36 | Python array pytorch basic_term_m: 37 | minimum time: 48.05233400000475 milliseconds 38 | result: 14489630.534603368 39 | Python lifelib basic_term_m: 40 | minimum time: 606.547575999997 milliseconds 41 | result: 14489630.534601536 42 | Python recursive numpy basic_term_m: 43 | minimum time: 47.489563999988604 milliseconds 44 | result: 14489630.534603368 45 | Python recursive pytorch basic_term_m: 46 | minimum time: 73.68574099999137 milliseconds 47 | result: 14489630.53460337 48 | basic_term_me_benchmark: 49 | - Python heavylight numpy basic_term_me: 50 | minimum time: 347.3877970000103 milliseconds 51 | result: 215146132.0684811 52 | Python lifelib basic_term_me: 53 | minimum time: 1140.047031999984 milliseconds 54 | result: 215146132.06848112 55 | Python recursive numpy basic_term_me: 56 | minimum time: 325.56454799998846 milliseconds 57 | result: 215146132.0684814 58 | exposures: 59 | - Julia ExperienceAnalysis.jl: 60 | minimum time: TrialEstimate(29.284 ms) 61 | num_rows: 141281 62 | - R actxps: 63 | min: 470.446116 ms 64 | num_rows: 141281 65 | mortality: 66 | - Julia MortalityTables.jl: 67 | minimum time: TrialEstimate(239.946 μs) 68 | result: 1904.4865526636793 69 | - Python PyMort: 70 | minimum time: 9.101711999988993 milliseconds 71 | result: 1904.4865526636793 72 | savings_benchmark: 73 | - Julia Benchmarks savings: 74 | minimum time: TrialEstimate(118.603 ms) 75 | result: 3.507113709040273e12 76 | - Python lifelib cashvalue_me_ex4: 77 | minimum time: 602.367275000006 milliseconds 78 | result: 3507113709040.141 79 | Python recursive numpy cashvalue_me_ex4: 80 | minimum time: 541.5200040000059 milliseconds 81 | result: 3507113709040.124 82 | ``` 83 | -------------------------------------------------------------------------------- /github-runners-benchmarks/devnotes.md: -------------------------------------------------------------------------------- 1 | We use the devcontainer primarily to install `nektos/act` in GitHub codespaces and debug pipelines. Which doesn't always work (once network performance very bad in codespaces) perfectly but sometimes is helpful. 2 | 3 | act -W .github/workflows/github-runners-benchmarks.yml 4 | -------------------------------------------------------------------------------- /github-runners-benchmarks/generate_readme.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | def read_yaml_file(filename): 4 | with open(filename, 'r') as stream: 5 | try: 6 | return yaml.safe_load(stream) 7 | except yaml.YAMLError as exc: 8 | print(exc) 9 | 10 | def generate_readme(): 11 | julia_yaml = read_yaml_file('Julia/benchmark_results.yaml') 12 | python_yaml = read_yaml_file('Python/benchmark_results.yaml') 13 | r_yaml = read_yaml_file('R/benchmark_results.yaml') 14 | final_result = {} 15 | for d in (julia_yaml, python_yaml, r_yaml): 16 | for k, v in d.items(): 17 | if k not in final_result: 18 | final_result[k] = [] 19 | final_result[k].append(v) 20 | 21 | # read the text in readme_template.md and store it as a string "template" 22 | with open('readme_template.md', 'r') as f: 23 | template = f.read() 24 | 25 | with open('README.md', 'w') as readme: 26 | readme.write(template) 27 | readme.write('\n\n```yaml \n') 28 | readme.write(yaml.dump(final_result, allow_unicode=True)) 29 | readme.write('```\n') 30 | 31 | if __name__ == '__main__': 32 | generate_readme() 33 | -------------------------------------------------------------------------------- /github-runners-benchmarks/julia-memory-analysis.md: -------------------------------------------------------------------------------- 1 | ## Analysis 2 | 3 | This section is dedicated to the analysis and comparison of performance characteristics between the iterative and memoization-based implementations. All iterative implementations are new and implemented in [LifeSimulator.jl](https://github.com/JuliaActuary/LifeSimulator.jl). Memoization-based implementations use or were inspired by [lifelib](https://github.com/lifelib-dev/lifelib). For the term life model, a memoization-based algorithm has been reimplemented in Julia; however, for the universal life model, we fall back to lifelib's Python implementation. 4 | 5 | First, computation time is compared between iterative and memoization-based implementations. Second, memory complexity is analyzed for the iterative implementation of the term life and universal life models. 6 | 7 | The analysis was performed using on this machine and Julia version: 8 | 9 | ```julia 10 | julia> versioninfo() 11 | Julia Version 1.11.0-DEV.483 12 | Commit ebe1a37af57 (2023-09-16 12:52 UTC) 13 | Build Info: 14 | Official https://julialang.org/ release 15 | Platform Info: 16 | OS: Linux (x86_64-linux-gnu) 17 | CPU: 20 × 12th Gen Intel(R) Core(TM) i7-12700H 18 | WORD_SIZE: 64 19 | LLVM: libLLVM-15.0.7 (ORCJIT, alderlake) 20 | Threads: 29 on 20 virtual cores 21 | ``` 22 | 23 | ### Time complexity 24 | 25 | ![](Julia/analysis/images/time_complexity_basic_life.png) 26 | ![](Julia/analysis/images/time_complexity_universal_life.png) 27 | 28 | #### Performance 29 | 30 | Using the Julia implementation of the universal life model, we can easily simulate millions of policies: 31 | 32 | ```julia-repl 33 | julia> policies = rand(PolicySet, 10_000_000); 34 | 35 | julia> model = LifelibSavings(); 36 | 37 | julia> @time CashFlow(model, policies, 150); 38 | 103.615767 seconds (84 allocations: 8.473 GiB, 0.01% gc time) 39 | ``` 40 | 41 | ### Memory complexity 42 | 43 | We discuss here the memory complexity associated with the iterative implementation of the term life and universal life models. The theoretical memory complexity is $O(P)$, with $P$ the number of policies. 44 | 45 | The following images illustrate that the memory complexity may be assumed independent of the number of timesteps, and that it may be assumed to scale linearly in the number of policy sets. Note however that measuring the true memory complexity is tricky in a garbage-collected language; results are approximates at best and certinaly not a rigorous proof of what we advance. 46 | 47 | #### Term life model 48 | 49 | ![](Julia/analysis/images/memory_complexity_variable_duration_basic_life.png) 50 | 51 | ![](Julia/analysis/images/memory_complexity_static_duration_basic_life.png) 52 | 53 | #### Universal life model 54 | 55 | ![](Julia/analysis/images/memory_complexity_variable_duration_universal_life.png) 56 | 57 | ![](Julia/analysis/images/memory_complexity_static_duration_universal_life.png) 58 | -------------------------------------------------------------------------------- /github-runners-benchmarks/readme_template.md: -------------------------------------------------------------------------------- 1 | The benchmarking repository for some time has autogenerated the top-level README from within GitHub actions. 2 | 3 | As we expand our benchmarking activities for GPUs and larger scale calculations, the GitHub hosted runners are still used, but we will be focusing more efforts on Docker containers. 4 | 5 | The top-level readme is not autogenerated, this file will be, and probably will have results copied and pasted into the top-level readme manually. 6 | 7 | # Benchmarking 8 | 9 | Benchmarks in this repository: 10 | 11 | * `basic_term_benchmark`: Replicate the cashflows of the [LifeLib BasicTerm model](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 12 | * Python [LifeLib BasicTerm_M](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 13 | * Julia [using LifeSimulator](https://github.com/JuliaActuary/LifeSimulator.jl) 14 | * Python using recursive formulas with [PyTorch](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_recursive_pytorch.py) and [NumPy](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_recursive_numpy.py) 15 | * Python using matrix operations (no recursion) on [PyTorch arrays](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_array_pytorch.py) and [NumPy arrays](https://github.com/actuarialopensource/benchmarks/blob/main/Python/basicterm_array_numpy.py) 16 | * `exposures`: Create date partitions for experience studies 17 | * Julia [ExperienceAnalysis](https://github.com/JuliaActuary/ExperienceAnalysis.jl) 18 | * R [actxps](https://github.com/mattheaphy/actxps) 19 | * `mortality`: Read SOA mortality tables and use them in a simple calculation 20 | * Julia [MortalityTables](https://github.com/JuliaActuary/MortalityTables.jl) 21 | * Python [Pymort](https://github.com/actuarialopensource/pymort) 22 | 23 | The below results are generated by the benchmarking scripts in the folders for each language. These scripts are run automatically by GitHub Actions and populate the results below. -------------------------------------------------------------------------------- /github-runners-benchmarks/requirements.txt: -------------------------------------------------------------------------------- 1 | PyYaml==6.0 -------------------------------------------------------------------------------- /joss/Makefile: -------------------------------------------------------------------------------- 1 | all: pdf 2 | 3 | pdf: 4 | docker run --rm --volume $(PWD):/data --user $(id -u):$(id -g) --env JOURNAL=joss openjournals/inara -------------------------------------------------------------------------------- /joss/paper.bib: -------------------------------------------------------------------------------- 1 | @techreport{Larochelle:2023, 2 | title={Predictive Analytics and Machine Learning – Practical Applications for Actuarial Modeling (Nested Stochastic)}, 3 | author={Larochelle, Jean-Philippe and Carlson, Peter and Carrier Cote, Vincent and Lu, Ying and Shapiro, Noah and Tam, Alex and Thusu, Viresh and Zhang, Ally}, 4 | institution={SOA Research Institute}, 5 | year={2023}, 6 | month={May}, 7 | url={https://www.soa.org/49ae74/globalassets/assets/files/resources/research-report/2023/predictive-analytics-and-machine-learning.pdf}, 8 | type={Technical report} 9 | } 10 | 11 | @article{Strommen:2013, 12 | title={Tables Database Goes XtbML}, 13 | author={Strommen, Stephen J.}, 14 | journal={Society of Actuaries CompAct Newsletter}, 15 | year={2013}, 16 | month={April}, 17 | url={https://www.soa.org/news-and-publications/newsletters/compact/2013/april/com-2013-iss47/tables-database-goes-xtbml/} 18 | } 19 | 20 | @techreport{Milliman:2024, 21 | title={An actuary’s guide to Julia: Use cases and performance benchmarking in insurance}, 22 | author={Lee, Yun-Tien and Morales, Victor and Brackett, Jim and Long, Joe and Peplow, Thomas}, 23 | institution={Milliman}, 24 | type={White paper}, 25 | year={2024}, 26 | month={January}, 27 | day={29}, 28 | url={https://www.milliman.com/en/insight/an-actuary-guide-to-julia-use-cases-performance-benchmarking-insurance} 29 | } 30 | 31 | @techreport{Atkinson:2016, 32 | title={Experience Study Calculations}, 33 | author={Atkinson, David B. and McGarry, John K.}, 34 | institution={Society of Actuaries}, 35 | year={2016}, 36 | month={October}, 37 | type={Technical report}, 38 | url={https://www.soa.org/globalassets/assets/Files/Research/2016-10-experience-study-calculations.pdf} 39 | } 40 | 41 | @misc{Kinrade:2024, 42 | title={The Future of Actuarial Modeling}, 43 | author={Kinrade, Nick and Abdullah, Yusuf}, 44 | year={2024}, 45 | month={January}, 46 | day={5}, 47 | url={https://www.pwc.com/us/en/industries/financial-services/library/future-of-actuarial-modeling.html} 48 | } 49 | 50 | @misc{Kim:2018, 51 | author={Kim, Joseph}, 52 | title={Application of GPU in Actuarial Modeling}, 53 | year={2018}, 54 | month={May}, 55 | day={25}, 56 | howpublished={Presentation at The 8th SOA Asia Pacific Annual Symposium}, 57 | organization={Milliman}, 58 | url={https://www.soa.org/globalassets/assets/files/e-business/pd/events/2018/asia-pacific-symposium/asia-pacific-symposium-session-5b.pdf} 59 | } 60 | 61 | @misc{Hamamura:2018, 62 | author = {Hamamura, Fumito}, 63 | title = {lifelib: life actuarial models}, 64 | year = {2018}, 65 | publisher = {GitHub}, 66 | journal = {GitHub repository}, 67 | url = {https://github.com/lifelib-dev/lifelib} 68 | } 69 | 70 | @misc{Hamamura:2022a, 71 | title={Testing lifelib on GPU}, 72 | author={Hamamura, Fumito}, 73 | year={2022}, 74 | month={January}, 75 | day={15}, 76 | howpublished={\url{https://modelx.io/blog/2022/01/15/testing-lifelib-on-gpu}}, 77 | organization={PwC} 78 | } 79 | 80 | @misc{Hamamura:2022b, 81 | title={Running a heavy model while saving memory}, 82 | author={Hamamura, Fumito}, 83 | year={2022}, 84 | month={March}, 85 | day={26}, 86 | howpublished={\url{https://modelx.io/blog/2022/03/26/running-model-while-saving-memory}}, 87 | organization={PwC} 88 | } 89 | 90 | @misc{Belmant:2022, 91 | author = {Belmant, Cédric}, 92 | title = {LifeSimulator.jl: Simulation of insurance products forward in time}, 93 | year = {2022}, 94 | publisher = {GitHub}, 95 | journal = {GitHub repository}, 96 | url = {https://github.com/JuliaActuary/LifeSimulator.jl} 97 | } 98 | 99 | 100 | @article{Robidoux:2016, 101 | title={Introduction to Using Graphical Processing Units for Variable Annuity Guarantee Modeling}, 102 | author={Robidoux, Bryon}, 103 | journal={Society of Actuaries Predictive Analytics and Futurism Newsletter}, 104 | year={2016}, 105 | month={December}, 106 | url={https://www.soa.org/globalassets/assets/Library/Newsletters/Predictive-Analytics-and-Futurism/2016/december/paf-iss14-robidoux.pdf} 107 | } 108 | 109 | 110 | @misc{Halloran:2021, 111 | title={2021 US Life Insurance Pricing Survey Highlights}, 112 | author={Halloran, Christopher}, 113 | year={2021}, 114 | month={November}, 115 | howpublished={Presentation at the Southeastern Actuaries Conference Annual Meeting}, 116 | institution={Oliver Wyman}, 117 | url={https://www.seactuary.com/files/meetings/2021Fall/2021SEACAnnualHalloran(AM).pdf} 118 | } 119 | 120 | 121 | 122 | @Manual{Heaphy:2024, 123 | title = {actxps: Create Actuarial Experience Studies: Prepare Data, Summarize Results, and Create Reports}, 124 | author = {Matt Heaphy}, 125 | year = {2024}, 126 | note = {R package version 1.4.0.9000}, 127 | url = {https://github.com/mattheaphy/actxps/} 128 | } 129 | 130 | @misc{Loudenback:2018, 131 | author = {Loudenback, Alec}, 132 | title = {MortalityTables.jl: SOA mortality tables in Julia}, 133 | year = {2018}, 134 | publisher = {GitHub}, 135 | journal = {GitHub repository}, 136 | url = {https://github.com/JuliaActuary/MortalityTables.jl} 137 | } 138 | 139 | @misc{Loudenback:2020, 140 | author = {Loudenback, Alec and Caseres, Matthew}, 141 | title = {ExperienceAnalysis.jl: Calculate exposures}, 142 | year = {2020}, 143 | publisher = {GitHub}, 144 | journal = {GitHub repository}, 145 | url = {https://github.com/JuliaActuary/ExperienceAnalysis.jl} 146 | } 147 | 148 | @misc{Caseres:2021, 149 | author = {Caseres, Matthew}, 150 | title = {Pymort: SOA mortality tables in Python}, 151 | year = {2021}, 152 | publisher = {GitHub}, 153 | journal = {GitHub repository}, 154 | url = {https://github.com/actuarialopensource/pymort} 155 | } -------------------------------------------------------------------------------- /joss/paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'Actuarial Open Source Benchmarks: Performance testing actuarial models on CPU and GPU' 3 | tags: 4 | - Python 5 | - Julia 6 | - actuarial science 7 | - GPU 8 | - performance testing 9 | authors: 10 | - name: Matthew Caseres 11 | orcid: 0000-0001-5819-001X 12 | affiliation: 1 13 | affiliations: 14 | - name: Georgia Institute of Technology, USA 15 | index: 1 16 | date: 27 May 2024 17 | bibliography: paper.bib 18 | --- 19 | 20 | # Summary 21 | 22 | Actuaries are employed by insurance companies to manage risk related to uncertain future cashflows and play a large role in regulatory compliance for insurance companies. 23 | Actuarial models are used to forecast future economic scenarios and cash flows related to insurance contracts. The benchmarks provided in the Actuarial Open Source GitHub organization test the consistency and performance of existing open source actuarial models and provides novel approaches to achieve greater performance and scale on certain tasks. 24 | 25 | # Statement of need 26 | 27 | The actuarial modeling software market is dominated by large vendors [@Halloran:2021; @Kinrade:2024]. Vendor software can restrict the sharing of actuarial software [@Larochelle:2023], demonstrating the value of open source solutions in enabling reproducible research. 28 | 29 | A 2024 PwC publication [@Kinrade:2024] speculates that graphics processing units (GPUs) may become the norm for actuarial calculation engines. Despite discussion of actuarial models running on GPUs in several publications [@Kim:2018; @Hamamura:2022a; @Robidoux:2016], there are no reproducible benchmarks for GPU-based actuarial applications. 30 | 31 | On the CPU, the ecosystem is more mature and multiple open source packages might implement a particular calculation. In this case we can compare the execution time of the packages on a specific task and validate that the packages can produce the same results. 32 | 33 | # Benchmarking infrastructure 34 | 35 | Calculations performed on CPU are run with GitHub actions on GitHub-hosted runners. We found that the execution times reported from GitHub hosted runners are generally consistent between runs and this has been independently verified by Milliman [@Milliman:2024]. 36 | 37 | Calculations performed on GPU are published to DockerHub using GitHub actions and tested on GPUs in the cloud. 38 | 39 | # Benchmark categories 40 | 41 | ## LifeLib life insurance cash flow models 42 | 43 | LifeLib [@Hamamura:2018] is an open source library that contains reference implementations for various life insurance product cash flow models. We provide implementations in Python and Julia to assess the performance and coding style of the following strategies: 44 | 45 | * Recursion with memoization 46 | * Recursion with memoization and cache eviction to reduce memory consumption [@Hamamura:2022b] 47 | * Array based models using broadcasting and avoiding iteration 48 | * Array based models using iteration that are optimized to reduce memory consumption [@Belmant:2022] 49 | 50 | ## Experience studies 51 | 52 | Some actuarial techniques for calculating mortality rates involve partitioning date ranges [@Atkinson:2016]. These date partitioning algorithms are implemented by the actxps R package [@Heaphy:2024] and the ExperienceAnalysis.jl [@Loudenback:2020] Julia package. The benchmarking process identified a number of inconsistencies which were raised as issues on GitHub and quickly resolved by the maintainers of the packages. 53 | 54 | ## Mortality tables software 55 | 56 | The Society of Actuaries provides a number of mortality tables in an XML format [@Strommen:2013]. These tables have been wrapped into packages for convenient access with the pymort [@Caseres:2021] Python package and the MortalityTables.jl [@Loudenback:2018] Julia package. A hash was computed using each value from 10 files verifying that the values are consistent between the two packages for these files. The Julia implementation was significantly faster. 57 | 58 | # Conclusion 59 | 60 | The benchmarks provided by the Actuarial Open Source organization on GitHub intend to assist in testing the performance and accuracy of open source actuarial software. This is accomplished by selecting a specific actuarial calculation and comparing the results and execution times of various approaches. 61 | 62 | Our current benchmarks are chosen for simplicity so that the barrier to entry remains low. We have not implemented complex calculations like nested stochastic variable annuity models which could benefit from concrete benchmarks. More complex benchmarks that represent the computational challenges facing the insurance industry will require broader community engagement. 63 | 64 | # Acknowledgements 65 | 66 | I thank Alec Loudenback for contributing Julia benchmarks, providing feedback through GitHub issues, and creating many Julia packages through the JuliaActuary GitHub organization. I thank Fumito Hamamura for creating LifeLib, the Actuarial Open Source LinkedIn community page, modelx, and the insights found on the modelx blog. Many thanks Cédric Belmant for providing a Julia implementation to the universal life benchmark which is incredibly hard to beat and to Matt Heaphy for his implementations of experience studies software in R and Python. 67 | 68 | # References --------------------------------------------------------------------------------