├── .devcontainer └── devcontainer.json ├── .github └── workflows │ ├── BasicTerm_ME_python.yml │ └── github-runners-benchmarks.yml ├── .gitignore ├── LICENSE ├── README.md ├── containers └── BasicTerm_ME_python │ ├── .devcontainer │ └── devcontainer.json │ ├── BasicTerm_ME │ ├── Projection │ │ └── __init__.py │ ├── __init__.py │ ├── _data │ │ └── data.pickle │ ├── _system.json │ ├── disc_rate_ann.xlsx │ ├── model_point_table.xlsx │ ├── mort_table.xlsx │ └── premium_table.xlsx │ ├── Dockerfile │ ├── main.py │ ├── notes.md │ ├── requirements.txt │ ├── term_me_iterative_jax.py │ └── term_me_recursive_pytorch.py ├── github-runners-benchmarks ├── Julia │ ├── CondaPkg.toml │ ├── Project.toml │ ├── README.md │ ├── analysis │ │ ├── CondaPkg.toml │ │ ├── Project.toml │ │ ├── README.md │ │ ├── analysis.jl │ │ ├── images │ │ │ ├── memory_complexity_static_duration_basic_life.png │ │ │ ├── memory_complexity_static_duration_universal_life.png │ │ │ ├── memory_complexity_variable_duration_basic_life.png │ │ │ ├── memory_complexity_variable_duration_universal_life.png │ │ │ ├── time_complexity_basic_life.png │ │ │ └── time_complexity_universal_life.png │ │ ├── large_run.txt │ │ ├── memory_complexity.jl │ │ └── time_complexity.jl │ ├── basic_term.jl │ ├── basic_term_array.jl │ ├── benchmark_results.yaml │ ├── exposures.jl │ ├── main.jl │ ├── mortality.jl │ ├── read_model.jl │ └── savings.jl ├── Python │ ├── BasicTerm_M │ │ ├── Projection │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── _data │ │ │ └── data.pickle │ │ ├── _system.json │ │ ├── disc_rate_ann.csv │ │ ├── disc_rate_ann.xlsx │ │ ├── model_point_table.csv │ │ ├── model_point_table.xlsx │ │ ├── mort_table.csv │ │ └── mort_table.xlsx │ ├── BasicTerm_ME │ │ ├── Projection │ │ │ └── __init__.py │ │ ├── __init__.py │ │ ├── _data │ │ │ └── data.pickle │ │ ├── _system.json │ │ ├── disc_rate_ann.xlsx │ │ ├── model_point_table.xlsx │ │ ├── mort_table.xlsx │ │ └── premium_table.xlsx │ ├── CashValue_ME_EX4 │ │ ├── Projection │ │ 
│ └── __init__.py │ │ ├── __init__.py │ │ ├── _data │ │ │ └── data.pickle │ │ ├── _system.json │ │ ├── disc_rate_ann.xlsx │ │ ├── model_point_1.xlsx │ │ ├── model_point_moneyness.xlsx │ │ ├── model_point_table_10K.csv │ │ ├── mort_table.xlsx │ │ ├── product_spec_table.xlsx │ │ └── surr_charge_table.xlsx │ ├── basicterm_m.py │ ├── basicterm_m_array_numpy.py │ ├── basicterm_m_array_pytorch.py │ ├── basicterm_m_lifelib.py │ ├── basicterm_m_recursive_numpy.py │ ├── basicterm_m_recursive_pytorch.py │ ├── basicterm_me.py │ ├── basicterm_me_heavylight_numpy.py │ ├── basicterm_me_lifelib.py │ ├── basicterm_me_recursive_numpy.py │ ├── benchmark_results.yaml │ ├── main.py │ ├── mortality.py │ ├── notebook.ipynb │ ├── requirements.txt │ ├── savings_me.py │ ├── savings_me_lifelib.py │ └── savings_me_recursive_numpy.py ├── R │ ├── .Rprofile │ ├── R.Rproj │ ├── benchmark_results.yaml │ ├── exposures.R │ ├── main.R │ ├── renv.lock │ └── renv │ │ ├── .gitignore │ │ ├── activate.R │ │ └── settings.dcf ├── README.md ├── data │ └── census_dat.csv ├── devnotes.md ├── generate_readme.py ├── julia-memory-analysis.md ├── readme_template.md └── requirements.txt └── joss ├── Makefile ├── paper.bib └── paper.md /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "image": "mcr.microsoft.com/devcontainers/universal:2", 3 | "features": { 4 | "ghcr.io/rocker-org/devcontainer-features/r-apt:0": {}, 5 | "ghcr.io/julialang/devcontainer-features/julia:1": { 6 | "channel": "1.9.3" 7 | }, 8 | "ghcr.io/dhoeric/features/act:1": {}, 9 | "ghcr.io/rocker-org/devcontainer-features/r-packages:1": {} 10 | } 11 | } -------------------------------------------------------------------------------- /.github/workflows/BasicTerm_ME_python.yml: -------------------------------------------------------------------------------- 1 | name: basicterm_me_python 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths: 7 | - 
'containers/BasicTerm_ME_python/**' 8 | 9 | jobs: 10 | docker: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Delete huge unnecessary tools folder # https://github.com/orgs/community/discussions/25678 14 | run: rm -rf /opt/hostedtoolcache 15 | - 16 | name: Set up QEMU 17 | uses: docker/setup-qemu-action@v3 18 | - 19 | name: Set up Docker Buildx 20 | uses: docker/setup-buildx-action@v3 21 | - 22 | name: Login to Docker Hub 23 | uses: docker/login-action@v3 24 | with: 25 | username: actuarial 26 | password: ${{ secrets.DOCKERHUB_TOKEN }} 27 | - 28 | name: Build and push 29 | uses: docker/build-push-action@v5 30 | with: 31 | context: "{{defaultContext}}:containers/BasicTerm_ME_python" 32 | push: true 33 | tags: actuarial/basicterm_me_python:latest -------------------------------------------------------------------------------- /.github/workflows/github-runners-benchmarks.yml: -------------------------------------------------------------------------------- 1 | name: github-runners-benchmarks 2 | on: 3 | workflow_dispatch: 4 | push: 5 | paths: 6 | - 'github-runners-benchmarks/**' 7 | pull_request: 8 | paths: 9 | - 'github-runners-benchmarks/**' 10 | jobs: 11 | bench-R: 12 | runs-on: ubuntu-latest 13 | env: 14 | RENV_PATHS_ROOT: ~/.local/share/renv 15 | defaults: 16 | run: 17 | working-directory: github-runners-benchmarks/R 18 | steps: 19 | - uses: actions/checkout@v3 #now we need to install R 20 | - uses: r-lib/actions/setup-r@v2 21 | with: 22 | r-version: '4.2.2' 23 | # we need to manually cache the packages 24 | - name: Cache R packages 25 | uses: actions/cache@v2 26 | with: 27 | path: ${{ env.RENV_PATHS_ROOT }} 28 | key: ${{ runner.os }}-renv-${{ hashFiles('**/renv.lock') }} 29 | restore-keys: | 30 | ${{ runner.os }}-renv- 31 | - name: Restore packages 32 | shell: Rscript {0} 33 | run: | 34 | if (!requireNamespace("renv", quietly = TRUE)) install.packages("renv") 35 | renv::restore() 36 | - name: Benchmark 37 | run: Rscript -e 'source("main.R")' 38 | - run: ls 
39 | - name: upload R benchmark 40 | uses: actions/upload-artifact@v3 41 | with: 42 | name: R_benchmark 43 | path: github-runners-benchmarks/R/benchmark_results.yaml 44 | bench-Julia: 45 | runs-on: ubuntu-latest 46 | defaults: 47 | run: 48 | working-directory: github-runners-benchmarks/Julia 49 | steps: 50 | - uses: actions/checkout@v3 51 | - uses: julia-actions/setup-julia@v1 52 | with: 53 | version: '1.9.3' 54 | - name: Benchmark 55 | run: julia --project -e 'using Pkg; Pkg.instantiate(); include("main.jl")' 56 | - run: ls 57 | - name: upload Julia benchmark 58 | uses: actions/upload-artifact@v3 59 | with: 60 | name: Julia_benchmark 61 | path: github-runners-benchmarks/Julia/benchmark_results.yaml 62 | bench-Python: 63 | runs-on: ubuntu-latest 64 | defaults: 65 | run: 66 | working-directory: github-runners-benchmarks/Python 67 | steps: 68 | - uses: actions/checkout@v3 69 | - uses: actions/setup-python@v2 70 | with: 71 | python-version: '3.11' 72 | - run: pip install -r requirements.txt 73 | # cache the python pip installed packages 74 | - name: Cache pip packages 75 | uses: actions/cache@v2 76 | with: 77 | path: ~/.cache/pip 78 | key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} 79 | restore-keys: | 80 | ${{ runner.os }}-pip- 81 | - name: Benchmark 82 | run: python main.py 83 | - run: ls 84 | - name: upload Python benchmark 85 | uses: actions/upload-artifact@v3 86 | with: 87 | name: Python_benchmark 88 | path: github-runners-benchmarks/Python/benchmark_results.yaml 89 | create-README: 90 | defaults: 91 | run: 92 | working-directory: github-runners-benchmarks 93 | runs-on: ubuntu-latest 94 | needs: [bench-R, bench-Julia, bench-Python] 95 | steps: 96 | - uses: actions/checkout@v3 97 | - name: Download R benchmark 98 | uses: actions/download-artifact@v2 99 | with: 100 | name: R_benchmark 101 | path: github-runners-benchmarks/R 102 | - name: Download Julia benchmark 103 | uses: actions/download-artifact@v2 104 | with: 105 | name: Julia_benchmark 106 | 
path: github-runners-benchmarks/Julia 107 | - name: Download Python benchmark 108 | uses: actions/download-artifact@v2 109 | with: 110 | name: Python_benchmark 111 | path: github-runners-benchmarks/Python 112 | # SETUP python and install dependencies 113 | - uses: actions/setup-python@v2 114 | with: 115 | python-version: '3.11' 116 | - run: pip install -r requirements.txt 117 | - run: python generate_readme.py 118 | - run: cat README.md 119 | # commit and push the README.md 120 | - name: Commit and push 121 | run: | 122 | git config --local user.email "github-actions[bot]@users.noreply.github.com" 123 | git config --local user.name "github-actions[bot]" 124 | git add . 125 | git commit -m "Bench and update README.md" 126 | git push 127 | if: github.ref == 'refs/heads/main' && github.event_name != 'pull_request' 128 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/*.DS_STORE 2 | .Rproj.user 3 | **__pycache__ 4 | /.vscode 5 | .CondaPkg 6 | *.code-workspace 7 | Manifest.toml 8 | joss/jats* 9 | joss/paper.pdf -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Actuarial Open Source Community 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the 
Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Benchmarking 2 | 3 | We provide benchmarks to encourage collaboration and competition in making actuarial software run faster than ever before. 4 | 5 | ## Containerized benchmarks 6 | 7 | We currently only have 1 benchmark, we are working to expand the benchmarks. Open a discussion in this repository if you have any thoughts to share. 8 | 9 | Time measurement is currently the best of 3 runs. 10 | 11 | | benchmark | classification | container |A100-SXM4-40GB | H100-SXM5-80GB | 12 | |---------------|-|-|----------------|----------------| 13 | | BasicTerm_ME 100 Million | recursive PyTorch | [link](https://hub.docker.com/repository/docker/actuarial/basicterm_me_python/general) | 15.8284s | 7.205s | 14 | | BasicTerm_ME 100 Million | compiled iterative JAX | [link](https://hub.docker.com/repository/docker/actuarial/basicterm_me_python/general) | 3.448s | 1.551s | 15 | 16 | 17 | ### Notes 18 | 19 | * BasicTerm_ME 100 Million 20 | * You can find lifelib's modelpoint file with 10,000 modelpoints as `model_point_table.xlsx`. We use these modelpoints, but repeat them 10,000 times for 100,000,000 modelpoints. 21 | 22 | 23 | ## GitHub-hosted runners 24 | 25 | These benchmarks run in GitHub-hosted runners in GitHub actions. Used for benchmarks that are not computationally intensive. 
26 | 27 | Benchmarks in this repository: 28 | 29 | * `basic_term_benchmark`: Replicate the cashflows of the [LifeLib BasicTerm model](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 30 | * Python [LifeLib BasicTerm_M](https://github.com/lifelib-dev/lifelib/tree/main/lifelib/libraries/basiclife/BasicTerm_M) 31 | * Julia [using LifeSimulator](https://github.com/JuliaActuary/LifeSimulator.jl) 32 | * Python using recursive formulas with [PyTorch](https://github.com/actuarialopensource/benchmarks/blob/main/github-runners-benchmarks/Python/basicterm_m_recursive_pytorch.py) and [NumPy](https://github.com/actuarialopensource/benchmarks/blob/main/github-runners-benchmarks/Python/basicterm_m_recursive_numpy.py) 33 | * Python using matrix operations (no recursion) on [PyTorch arrays](https://github.com/actuarialopensource/benchmarks/blob/main/github-runners-benchmarks/Python/basicterm_m_array_pytorch.py) and [NumPy arrays](https://github.com/actuarialopensource/benchmarks/blob/main/github-runners-benchmarks/Python/basicterm_m_array_numpy.py) 34 | * `exposures`: Create date partitions for experience studies 35 | * Julia [ExperienceAnalysis](https://github.com/JuliaActuary/ExperienceAnalysis.jl) 36 | * R [actxps](https://github.com/mattheaphy/actxps) 37 | * `mortality`: Read SOA mortality tables and use them in a simple calculation 38 | * Julia [MortalityTables](https://github.com/JuliaActuary/MortalityTables.jl) 39 | * Python [Pymort](https://github.com/actuarialopensource/pymort) 40 | 41 | The below results are generated by the benchmarking scripts in the folders for each language. These scripts are run automatically by GitHub Actions and populate the results below. 
42 | ```yaml 43 | basic_term_benchmark: 44 | - Julia array basic_term: 45 | minimum time: TrialEstimate(30.279 ms) 46 | result: 1.4489630534602132e7 47 | Julia recursive basic_term: 48 | minimum time: TrialEstimate(84.812 ms) 49 | result: 1.4489630534602132e7 50 | - Python array numpy basic_term_m: 51 | minimum time: 83.45314299992879 milliseconds 52 | result: 14489630.534603368 53 | Python array pytorch basic_term_m: 54 | minimum time: 51.54931000004126 milliseconds 55 | result: 14489630.534603368 56 | Python lifelib basic_term_m: 57 | minimum time: 618.0503439999256 milliseconds 58 | result: 14489630.534601536 59 | Python recursive numpy basic_term_m: 60 | minimum time: 63.08064000006652 milliseconds 61 | result: 14489630.534603368 62 | Python recursive pytorch basic_term_m: 63 | minimum time: 75.6699999999455 milliseconds 64 | result: 14489630.53460337 65 | basic_term_me_benchmark: 66 | - Python heavylight numpy basic_term_me: 67 | minimum time: 354.6492100000478 milliseconds 68 | result: 215146132.0684811 69 | Python lifelib basic_term_me: 70 | minimum time: 1191.792300999964 milliseconds 71 | result: 215146132.06848112 72 | Python recursive numpy basic_term_me: 73 | minimum time: 309.30894900006933 milliseconds 74 | result: 215146132.0684814 75 | exposures: 76 | - Julia ExperienceAnalysis.jl: 77 | minimum time: TrialEstimate(29.659 ms) 78 | num_rows: 141281 79 | - R actxps: 80 | min: 486.248656 ms 81 | num_rows: 141281 82 | mortality: 83 | - Julia MortalityTables.jl: 84 | minimum time: TrialEstimate(229.507 μs) 85 | result: 1904.4865526636793 86 | - Python PyMort: 87 | minimum time: 9.425531999909253 milliseconds 88 | result: 1904.4865526636793 89 | savings_benchmark: 90 | - Julia Benchmarks savings: 91 | minimum time: TrialEstimate(119.226 ms) 92 | result: 3.507113709040273e12 93 | - Python lifelib cashvalue_me_ex4: 94 | minimum time: 596.2715760000492 milliseconds 95 | result: 3507113709040.141 96 | Python recursive numpy cashvalue_me_ex4: 97 | minimum time: 
543.3022800000344 milliseconds 98 | result: 3507113709040.124 99 | ``` 100 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the 2 | // README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-dockerfile 3 | { 4 | "name": "Existing Dockerfile", 5 | "build": { 6 | // Sets the run context to one level up instead of the .devcontainer folder. 7 | "context": "..", 8 | // Update the 'dockerFile' property if you aren't using the standard 'Dockerfile' filename. 9 | "dockerfile": "../Dockerfile" 10 | }, 11 | "customizations": { 12 | "vscode": { 13 | "extensions": [ 14 | "ms-toolsai.jupyter", 15 | "ms-python.python" 16 | ] 17 | } 18 | }, 19 | "runArgs": [ 20 | "--gpus", 21 | "all" 22 | ] 23 | 24 | // Features to add to the dev container. More info: https://containers.dev/features. 25 | // "features": {}, 26 | 27 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 28 | // "forwardPorts": [], 29 | 30 | // Uncomment the next line to run commands after the container is created. 31 | // "postCreateCommand": "cat /etc/os-release", 32 | 33 | // Configure tool-specific properties. 34 | // "customizations": {}, 35 | 36 | // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root. 
37 | // "remoteUser": "devcontainer" 38 | } 39 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "BasicTerm_ME" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/_data/data.pickle -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 17, 0], "serializer_version": 4} -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/model_point_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/model_point_table.xlsx -------------------------------------------------------------------------------- 
/containers/BasicTerm_ME_python/BasicTerm_ME/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/mort_table.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/BasicTerm_ME/premium_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/containers/BasicTerm_ME_python/BasicTerm_ME/premium_table.xlsx -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use the PyTorch image with CUDA and cuDNN support 2 | FROM pytorch/pytorch:2.2.2-cuda11.8-cudnn8-runtime 3 | 4 | # Set the working directory in the container 5 | WORKDIR /app 6 | 7 | RUN pip install --upgrade pip 8 | RUN pip install --upgrade "jax[cuda12]" 9 | RUN pip install \ 10 | pandas \ 11 | openpyxl \ 12 | equinox \ 13 | heavylight==1.0.6 14 | 15 | # Copy the rest of the application 16 | COPY . 
/app/ 17 | 18 | # Environment variable (optional but might help with CUDA memory management) 19 | ENV PYTORCH_CUDA_ALLOC_CONF="garbage_collection_threshold:0.8" 20 | 21 | # Set the entrypoint and provide the script name as default command 22 | ENTRYPOINT ["python", "main.py"] 23 | -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def main(): 4 | parser = argparse.ArgumentParser(description="Term ME model runner") 5 | parser.add_argument("--multiplier", type=int, default=100, help="Multiplier for model points") 6 | # add an argument that must be either "torch_recursive" or "jax_iterative" 7 | parser.add_argument("--model", type=str, default="jax_iterative", choices=["torch_recursive", "jax_iterative"], help="Model to run") 8 | args = parser.parse_args() 9 | 10 | multiplier = args.multiplier 11 | 12 | if args.model == "torch_recursive": 13 | from term_me_recursive_pytorch import time_recursive_PyTorch # having both imports at top level gave a jax error? 14 | time_recursive_PyTorch(multiplier) 15 | elif args.model == "jax_iterative": 16 | from term_me_iterative_jax import time_iterative_jax 17 | time_iterative_jax(multiplier) 18 | else: 19 | raise ValueError("Invalid model") 20 | 21 | if __name__ == "__main__": 22 | main() -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/notes.md: -------------------------------------------------------------------------------- 1 | docker build . 
-t lol 2 | docker run lol # no gpu 3 | docker run --gpus all lol 4 | 5 | act -j build -s "CODECOV_TOKEN=your-codecov-token-abc555-5555" -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/requirements.txt: -------------------------------------------------------------------------------- 1 | pandas 2 | openpyxl 3 | heavylight==1.0.6 -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/term_me_iterative_jax.py: -------------------------------------------------------------------------------- 1 | import jax 2 | import pandas as pd 3 | import numpy as np 4 | import timeit 5 | import jax.numpy as jnp 6 | import equinox as eqx 7 | jax.config.update("jax_enable_x64", True) 8 | 9 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 10 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 11 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 12 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 13 | 14 | class ModelPointsEqx(eqx.Module): 15 | premium_pp: jnp.ndarray 16 | duration_mth: jnp.ndarray 17 | age_at_entry: jnp.ndarray 18 | sum_assured: jnp.ndarray 19 | policy_count: jnp.ndarray 20 | policy_term: jnp.ndarray 21 | max_proj_len: jnp.ndarray 22 | 23 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame, size_multiplier: int = 1): 24 | table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 25 | table.sort_values(by="policy_id", inplace=True) 26 | self.premium_pp = jnp.round(jnp.array(np.tile(table["sum_assured"].to_numpy() * table["premium_rate"].to_numpy(), size_multiplier)),decimals=2) 27 | self.duration_mth = jnp.array(jnp.tile(table["duration_mth"].to_numpy(), size_multiplier)) 28 | self.age_at_entry = jnp.array(jnp.tile(table["age_at_entry"].to_numpy(), 
size_multiplier)) 29 | self.sum_assured = jnp.array(jnp.tile(table["sum_assured"].to_numpy(), size_multiplier)) 30 | self.policy_count = jnp.array(jnp.tile(table["policy_count"].to_numpy(), size_multiplier)) 31 | self.policy_term = jnp.array(jnp.tile(table["policy_term"].to_numpy(), size_multiplier)) 32 | self.max_proj_len = jnp.max(12 * self.policy_term - self.duration_mth) + 1 33 | 34 | class AssumptionsEqx(eqx.Module): 35 | disc_rate_ann: jnp.ndarray 36 | mort_table: jnp.ndarray 37 | expense_acq: jnp.ndarray 38 | expense_maint: jnp.ndarray 39 | 40 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 41 | self.disc_rate_ann = jnp.array(disc_rate_ann["zero_spot"].to_numpy()) 42 | self.mort_table = jnp.array(mort_table.to_numpy()) 43 | self.expense_acq = jnp.array(300) 44 | self.expense_maint = jnp.array(60) 45 | 46 | class LoopState(eqx.Module): 47 | t: jnp.ndarray 48 | tot: jnp.ndarray 49 | pols_lapse_prev: jnp.ndarray 50 | pols_death_prev: jnp.ndarray 51 | pols_if_at_BEF_DECR_prev: jnp.ndarray 52 | 53 | class TermME(eqx.Module): 54 | mp: ModelPointsEqx 55 | assume: AssumptionsEqx 56 | init_ls: LoopState 57 | 58 | def __init__(self, mp: ModelPointsEqx, assume: AssumptionsEqx): 59 | self.mp = mp 60 | self.assume = assume 61 | self.init_ls = LoopState( 62 | t=jnp.array(0), 63 | tot = jnp.array(0), 64 | pols_lapse_prev=jnp.zeros_like(self.mp.duration_mth, dtype=jnp.float64), 65 | pols_death_prev=jnp.zeros_like(self.mp.duration_mth, dtype=jnp.float64), 66 | pols_if_at_BEF_DECR_prev=jnp.where(self.mp.duration_mth > 0, self.mp.policy_count, 0.) 
67 | ) 68 | 69 | def __call__(self): 70 | def iterative_core(ls: LoopState, _): 71 | duration_month_t = self.mp.duration_mth + ls.t 72 | duration_t = duration_month_t // 12 73 | age_t = self.mp.age_at_entry + duration_t 74 | pols_if_init = ls.pols_if_at_BEF_DECR_prev - ls.pols_lapse_prev - ls.pols_death_prev 75 | pols_if_at_BEF_MAT = pols_if_init 76 | pols_maturity = (duration_month_t == self.mp.policy_term * 12) * pols_if_at_BEF_MAT 77 | pols_if_at_BEF_NB = pols_if_at_BEF_MAT - pols_maturity 78 | pols_new_biz = jnp.where(duration_month_t == 0, self.mp.policy_count, 0) 79 | pols_if_at_BEF_DECR = pols_if_at_BEF_NB + pols_new_biz 80 | mort_rate = self.assume.mort_table[age_t-18, jnp.clip(duration_t, a_max=5)] 81 | mort_rate_mth = 1 - (1 - mort_rate) ** (1/12) 82 | pols_death = pols_if_at_BEF_DECR * mort_rate_mth 83 | claims = self.mp.sum_assured * pols_death 84 | premiums = self.mp.premium_pp * pols_if_at_BEF_DECR 85 | commissions = (duration_t == 0) * premiums 86 | discount = (1 + self.assume.disc_rate_ann[ls.t//12]) ** (-ls.t/12) 87 | inflation_factor = (1 + 0.01) ** (ls.t/12) 88 | expenses = self.assume.expense_acq * pols_new_biz + pols_if_at_BEF_DECR * self.assume.expense_maint/12 * inflation_factor 89 | lapse_rate = jnp.clip(0.1 - 0.02 * duration_t, a_min=0.02) 90 | net_cf = premiums - claims - expenses - commissions 91 | discounted_net_cf = jnp.sum(net_cf) * discount 92 | nxt_ls = LoopState( 93 | t=ls.t+1, 94 | tot = ls.tot + discounted_net_cf, 95 | pols_lapse_prev=(pols_if_at_BEF_DECR - pols_death) * (1 - (1 - lapse_rate) ** (1/12)), 96 | pols_death_prev=pols_death, 97 | pols_if_at_BEF_DECR_prev=pols_if_at_BEF_DECR 98 | ) 99 | return nxt_ls, None 100 | return jax.lax.scan(iterative_core, self.init_ls, xs=None, length=277)[0].tot 101 | 102 | 103 | def run_jax_term_ME(term_me: TermME): 104 | return term_me() 105 | 106 | run_jax_term_ME_opt = jax.jit(run_jax_term_ME) 107 | 108 | def time_jax_func(mp, assume, func): 109 | term_me = TermME(mp, assume) 110 | result 
= func(term_me).block_until_ready() 111 | start = timeit.default_timer() 112 | result = func(term_me).block_until_ready() 113 | end = timeit.default_timer() 114 | elapsed_time = end - start # Time in seconds 115 | return float(result), elapsed_time 116 | 117 | def time_iterative_jax(multiplier: int): 118 | mp = ModelPointsEqx(model_point_table, premium_table, size_multiplier=multiplier) 119 | assume = AssumptionsEqx(disc_rate_ann, mort_table) 120 | result, time_in_seconds = time_jax_func(mp, assume, run_jax_term_ME_opt) 121 | print("JAX iterative model") 122 | print(f"number modelpoints={len(mp.duration_mth):,}") 123 | print(f"{result=:,}") 124 | print(f"{time_in_seconds=}") 125 | 126 | if __name__ == "__main__": 127 | time_iterative_jax(100) -------------------------------------------------------------------------------- /containers/BasicTerm_ME_python/term_me_recursive_pytorch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | import torch 6 | from heavylight import LightModel, agg 7 | import timeit 8 | 9 | print(f"{torch.cuda.is_available()=}") 10 | # set 64 bit precision 11 | torch.set_default_dtype(torch.float64) 12 | print(f"{torch.get_default_dtype()=}") 13 | 14 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 15 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 16 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 17 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 18 | 19 | class ModelPoints: 20 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame, size_multiplier: int = 1): 21 | self.table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 22 | self.table.sort_values(by="policy_id", inplace=True) 23 | 
self.premium_pp = torch.round(torch.tensor(np.tile(self.table["sum_assured"].to_numpy() * self.table["premium_rate"].to_numpy(), size_multiplier)),decimals=2) 24 | self.duration_mth = torch.tensor(np.tile(self.table["duration_mth"].to_numpy(), size_multiplier)) 25 | self.age_at_entry = torch.tensor(np.tile(self.table["age_at_entry"].to_numpy(), size_multiplier)) 26 | self.sum_assured = torch.tensor(np.tile(self.table["sum_assured"].to_numpy(), size_multiplier)) 27 | self.policy_count = torch.tensor(np.tile(self.table["policy_count"].to_numpy(), size_multiplier)) 28 | self.policy_term = torch.tensor(np.tile(self.table["policy_term"].to_numpy(), size_multiplier)) 29 | self.max_proj_len: int = int(torch.max(12 * self.policy_term - self.duration_mth) + 1) 30 | 31 | class Assumptions: 32 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 33 | self.disc_rate_ann = torch.tensor(disc_rate_ann["zero_spot"].to_numpy()) 34 | self.mort_table = torch.tensor(mort_table.to_numpy()) 35 | 36 | def get_mortality(self, age, duration): 37 | return self.mort_table[age-18, torch.clamp(duration, max=5)] 38 | 39 | agg_func = lambda x: float(torch.sum(x)) 40 | 41 | class TermME(LightModel): 42 | def __init__(self, mp: ModelPoints, assume: Assumptions): 43 | super().__init__(agg_function=None) 44 | self.mp = mp 45 | self.assume = assume 46 | 47 | def age(self, t): 48 | return self.mp.age_at_entry + self.duration(t) 49 | 50 | def claim_pp(self, t): 51 | return self.mp.sum_assured 52 | 53 | def claims(self, t): 54 | return self.claim_pp(t) * self.pols_death(t) 55 | 56 | def commissions(self, t): 57 | return (self.duration(t) == 0) * self.premiums(t) 58 | 59 | def disc_factors(self): 60 | return torch.tensor(list((1 + self.disc_rate_mth()[t])**(-t) for t in range(self.mp.max_proj_len))) 61 | 62 | def discount(self, t: int): 63 | return (1 + self.assume.disc_rate_ann[t//12]) ** (-t/12) 64 | 65 | def disc_rate_mth(self): 66 | return torch.tensor(list((1 + 
self.assume.disc_rate_ann[t//12])**(1/12) - 1 for t in range(self.mp.max_proj_len))) 67 | 68 | def duration(self, t): 69 | return self.duration_mth(t) // 12 70 | 71 | def duration_mth(self, t): 72 | if t == 0: 73 | return self.mp.duration_mth 74 | else: 75 | return self.duration_mth(t-1) + 1 76 | 77 | def expense_acq(self): 78 | return 300 79 | 80 | def expense_maint(self): 81 | return 60 82 | 83 | def expenses(self, t): 84 | return self.expense_acq() * self.pols_new_biz(t) \ 85 | + self.pols_if_at(t, "BEF_DECR") * self.expense_maint()/12 * self.inflation_factor(t) 86 | 87 | def inflation_factor(self, t): 88 | return (1 + self.inflation_rate())**(t/12) 89 | 90 | def inflation_rate(self): 91 | return 0.01 92 | 93 | def lapse_rate(self, t): 94 | return torch.clamp(0.1 - 0.02 * self.duration(t), min=0.02) 95 | 96 | def loading_prem(self): 97 | return 0.5 98 | 99 | def mort_rate(self, t): 100 | return self.assume.get_mortality(self.age(t), self.duration(t)) 101 | 102 | def mort_rate_mth(self, t): 103 | return 1-(1- self.mort_rate(t))**(1/12) 104 | 105 | def net_cf(self, t): 106 | return self.premiums(t) - self.claims(t) - self.expenses(t) - self.commissions(t) 107 | 108 | def pols_death(self, t): 109 | return self.pols_if_at(t, "BEF_DECR") * self.mort_rate_mth(t) 110 | 111 | @agg(agg_func) 112 | def discounted_net_cf(self, t): 113 | return torch.sum(self.net_cf(t)) * self.discount(t) 114 | 115 | def pols_if_at(self, t, timing): 116 | if timing == "BEF_MAT": 117 | if t == 0: 118 | return self.pols_if_init() 119 | else: 120 | return self.pols_if_at(t-1, "BEF_DECR") - self.pols_lapse(t-1) - self.pols_death(t-1) 121 | elif timing == "BEF_NB": 122 | return self.pols_if_at(t, "BEF_MAT") - self.pols_maturity(t) 123 | elif timing == "BEF_DECR": 124 | return self.pols_if_at(t, "BEF_NB") + self.pols_new_biz(t) 125 | else: 126 | raise ValueError("invalid timing") 127 | 128 | def pols_if_init(self): 129 | return torch.where(self.duration_mth(0) > 0, self.mp.policy_count, 0) 130 | 
    def pols_lapse(self, t):
        """Expected lapses during month t: survivors of the death decrement
        times the monthly lapse rate (annual rate converted as
        1 - (1 - q)**(1/12))."""
        return (self.pols_if_at(t, "BEF_DECR") - self.pols_death(t)) * (1-(1 - self.lapse_rate(t))**(1/12))

    def pols_maturity(self, t):
        """Policies maturing at month t: the full "BEF_MAT" in-force count
        where duration in months equals the policy term in months, else 0
        (boolean mask multiplied by the count)."""
        return (self.duration_mth(t) == self.mp.policy_term * 12) * self.pols_if_at(t, "BEF_MAT")

    def pols_new_biz(self, t):
        """New business at month t: model points whose duration is exactly 0
        contribute their policy_count, all others contribute 0."""
        return torch.where(self.duration_mth(t) == 0, self.mp.policy_count, 0)

    def premiums(self, t):
        """Premium income at month t: per-policy premium times policies
        in force before decrements."""
        return self.mp.premium_pp * self.pols_if_at(t, "BEF_DECR")


def run_recursive_model(model: TermME):
    """Clear the model's memoization caches, run a full projection, and
    return the total discounted net cash flow as a float.

    NOTE(review): results are read back through model.cache_agg — presumably
    a view over cache_graph._caches_agg reset above; confirm in the cache
    implementation.
    """
    model.cache_graph._caches = defaultdict(dict)
    model.cache_graph._caches_agg = defaultdict(dict)
    model.RunModel(model.mp.max_proj_len)
    return float(sum(model.cache_agg['discounted_net_cf'].values()))


def time_recursive_GPU(model: TermME):
    """Run the model once on CUDA and return (result, seconds).

    Uses CUDA events for GPU timing: kernels launch asynchronously, so the
    elapsed time is read only after torch.cuda.synchronize().
    Event.elapsed_time() reports milliseconds, hence the / 1000.
    """
    model.OptimizeMemoryAndReset()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    result = run_recursive_model(model)
    end.record()
    torch.cuda.synchronize()
    return result, start.elapsed_time(end) / 1000

def time_recursive_CPU(model: TermME):
    """Run the model once on CPU and return (result, seconds) measured with
    a wall-clock timer."""
    model.OptimizeMemoryAndReset()
    start = timeit.default_timer()
    result = run_recursive_model(model)
    end = timeit.default_timer()
    return result, end - start

def time_recursive_PyTorch(multiplier: int):
    """Benchmark the recursive PyTorch model, on GPU when available.

    Builds the model points twice — at base size for a warm-up run and
    scaled by `multiplier` for the timed run — so graph generation happens
    before timing starts.
    """
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print(f"{device=}")

    # Using torch.device as a context manager makes it the default device
    # for tensors constructed inside the block (PyTorch >= 2.0).
    with device:
        mp = ModelPoints(model_point_table, premium_table)
        mp_multiplied = ModelPoints(model_point_table, premium_table, multiplier)
        assume = Assumptions(disc_rate_ann, mort_table)
        model = TermME(mp, assume)

    if device.type == 'cuda':
        time_recursive = time_recursive_GPU
    else:
        time_recursive = time_recursive_CPU
    run_recursive_model(model) # warm up,
generate dependency graph 186 | model.mp = mp_multiplied 187 | result, time_in_seconds = time_recursive(model) 188 | # report results 189 | print("PyTorch recursive model") 190 | print(f"number modelpoints={len(mp_multiplied.duration_mth):,}") 191 | print(f"{result=:,}") 192 | print(f"{time_in_seconds=}") -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/CondaPkg.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | pandas = "1.5.3" 3 | jax = "0.4.8" 4 | jaxlib = "0.4.7" 5 | numpy = "1.24.2" 6 | jaxtyping = "0.2.15" 7 | pyyaml = "6.0" 8 | openpyxl = "3.1.2" 9 | modelx = "0.21.0" 10 | 11 | [pip.deps] 12 | lifelib = "" 13 | pymort = "" 14 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/Project.toml: -------------------------------------------------------------------------------- 1 | name = "Benchmarks" 2 | 3 | [deps] 4 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 5 | CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b" 6 | CondaPkg = "992eb4ea-22a4-4c89-a5bb-47a3300528ab" 7 | DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" 8 | DayCounts = "44e31299-2c53-5a9b-9141-82aa45d7972f" 9 | ExperienceAnalysis = "51cd30ab-a913-41ff-9b6f-9b78880a2ac2" 10 | LoopVectorization = "bdcacae8-1622-11e9-2a5c-532679323890" 11 | MortalityTables = "4780e19d-04b9-53dc-86c2-9e9aa59b5a12" 12 | PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" 13 | Tables = "bd369af6-aec1-5ad0-b16a-f7cc5008161c" 14 | YAML = "ddb6d928-2868-570f-bddf-ab3f9cf99eb6" 15 | 16 | [compat] 17 | julia = "1.9" 18 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/README.md: -------------------------------------------------------------------------------- 1 | # Julia benchmarks 2 | 3 | These benchmarks produce a `benchmark_results.yaml` file, obtained by running 
`main.jl`.
They are run as part of the `bench-Julia` CI job under `.github/workflows/github-runners-benchmarks.yml`.
--------------------------------------------------------------------------------
/github-runners-benchmarks/Julia/analysis/CondaPkg.toml:
--------------------------------------------------------------------------------
[deps]
pandas = "1.5.3"
jax = "0.4.8"
jaxlib = "0.4.7"
numpy = "1.24.2"
jaxtyping = "0.2.15"
pyyaml = "6.0"
openpyxl = "3.1.2"
modelx = "0.21.0"

[pip.deps]
lifelib = ""
pymort = ""
--------------------------------------------------------------------------------
/github-runners-benchmarks/Julia/analysis/Project.toml:
--------------------------------------------------------------------------------
[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
LifeSimulator = "73783465-395e-4165-b528-1c694332812b"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
--------------------------------------------------------------------------------
/github-runners-benchmarks/Julia/analysis/README.md:
--------------------------------------------------------------------------------
# Analysis

To reproduce the images found under `./images` (currently shown in the README), simply run

```bash
/path/to/benchmarks/Julia/analysis$ julia --color=yes --project analysis.jl
```

Running the various benchmarks and timings will take at least a few minutes.

You will likely need a machine with at least 16 GB of RAM. 32 GB of RAM is recommended for running the model with 10,000,000 points (the last stage of the analysis, which stress-tests and evaluates performance at large scale).
12 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/analysis.jl: -------------------------------------------------------------------------------- 1 | using LifeSimulator, CairoMakie, BenchmarkTools, PythonCall, Accessors, Dates 2 | 3 | images_folder() = joinpath(@__DIR__, "images") 4 | 5 | model_string(::LifelibBasiclife) = "basic_life" 6 | model_string(::LifelibSavings) = "universal_life" 7 | model_title(::LifelibBasiclife) = "Term life" 8 | model_title(::LifelibSavings) = "Universal life" 9 | language_used_memoized(::LifelibBasiclife) = "Julia" 10 | language_used_memoized(::LifelibSavings) = "Python" 11 | 12 | include("../read_model.jl") 13 | !@isdefined(proj) && (proj = read_savings_model()) 14 | 15 | # Store results into a dictionary to avoid having to recompute benchmark data every time. 16 | # Empty these dictionaries if you want to regenerate the results. 17 | const TIME_RESULTS = Dict{Model,NamedTuple}() 18 | const MEMORY_RESULTS = Dict{Model,NamedTuple}() 19 | const term_life_model = Ref(LifelibBasiclife(commission_rate = 1.0)) 20 | const universal_life_model = Ref(LifelibSavings()) 21 | 22 | include("time_complexity.jl") 23 | include("memory_complexity.jl") 24 | 25 | @info "Running simulation with 10,000,000 model points" 26 | policies = rand(PolicySet, 10_000_000) 27 | CashFlow(universal_life_model[], rand(PolicySet, 1_000), 5) # JIT compilation 28 | # @with SHOW_PROGRESS => true @time CashFlow(universal_life_model[], policies, 150) 29 | open(joinpath(@__DIR__, "large_run.txt"), "w+") do io 30 | ex = :(CashFlow(universal_life_model[], policies, 150)) 31 | println(io, "julia> ", ex) 32 | redirect_stdout(io) do 33 | @eval @time $ex 34 | end 35 | end 36 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_basic_life.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_basic_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_universal_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_static_duration_universal_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_basic_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_basic_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_universal_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/memory_complexity_variable_duration_universal_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/time_complexity_basic_life.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/time_complexity_basic_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/images/time_complexity_universal_life.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Julia/analysis/images/time_complexity_universal_life.png -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/large_run.txt: -------------------------------------------------------------------------------- 1 | julia> CashFlow(universal_life_model[], policies, 150) 2 | 92.053248 seconds (84 allocations: 8.473 GiB, 0.05% gc time) 3 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/analysis/memory_complexity.jl: -------------------------------------------------------------------------------- 1 | function generate_memory_complexity_data(model::Model) 2 | @info "Generating memory complexity benchmarks for model $(nameof(typeof(model)))" 3 | sizes = [9, 100, 1_000, 10_000, 100_000] 4 | files = "savings/" .* ["model_point_table_9.csv", "model_point_table_100.csv", "model_point_table_1K.csv", "model_point_table_10K.csv", "model_point_table_100K.csv"] 5 | ts = 50:50:150 6 | allocations = zeros(length(files), length(ts)) 7 | for (i, file) in enumerate(files) 8 | for (j, n) in enumerate(ts) 9 | policies = policies_from_csv(file) 10 | allocations[i, j] = (@benchmark CashFlow($model, isa($model, $LifelibBasiclife) ? 
estimate_premiums($model, $policies, $n) : $policies, $n)).memory / 1e6 11 | end 12 | end 13 | MEMORY_RESULTS[model] = (; ts, files, sizes, allocations) 14 | end 15 | 16 | function plot_memory_complexity_results(model; folder = images_folder()) 17 | (; ts, sizes, files, allocations) = MEMORY_RESULTS[model] 18 | colors = Makie.wong_colors() 19 | 20 | fig = Figure(; resolution = (1000, 300)) 21 | ax = Axis(fig[1, 1]; title = "Memory allocations - $(model_title(model)) model", xlabel = "Number of time steps", ylabel = "Allocations (MB)", yscale = log10, xticks = ts) 22 | ls = [lines!(ax, ts, allocations[i, :]; color = colors[i]) for i in eachindex(sizes)] 23 | ss = [scatter!(ax, ts, allocations[i, :], color = colors[i]; marker = :x) for i in eachindex(sizes)] 24 | Legend(fig[1, 2], reverse(collect(collect.(zip(ls, ss)))), "n = " .* reverse(string.(sizes))) 25 | file = joinpath(folder, "memory_complexity_variable_duration_$(model_string(model)).png") 26 | @info "Saving plot at $file" 27 | save(file, fig) 28 | 29 | fig = Figure(; resolution = (1000, 300)) 30 | ax = Axis(fig[1, 1]; title = "Memory allocations - $(model_title(model)) model ($(maximum(ts)) timesteps)", xlabel = "Model size", ylabel = "Allocations (MB)", xscale = log10, yscale = log10) 31 | lines!(ax, sizes, allocations[:, end]; color = colors[1]) 32 | scatter!(ax, sizes, allocations[:, end]; color = colors[1], marker = :x) 33 | file = joinpath(folder, "memory_complexity_static_duration_$(model_string(model)).png") 34 | @info "Saving plot at $file" 35 | save(file, fig) 36 | end 37 | 38 | function memory_complexity_benchmarks(model::Model; folder = images_folder()) 39 | !haskey(MEMORY_RESULTS, model) && generate_memory_complexity_data(model) 40 | plot_memory_complexity_results(model; folder) 41 | end 42 | 43 | memory_complexity_benchmarks(term_life_model[]) 44 | memory_complexity_benchmarks(universal_life_model[]) 45 | -------------------------------------------------------------------------------- 
/github-runners-benchmarks/Julia/analysis/time_complexity.jl: -------------------------------------------------------------------------------- 1 | function generate_time_complexity_data(model::LifelibBasiclife) 2 | @info "Generating time complexity benchmarks for model $(nameof(typeof(model)))" 3 | sizes = [10, 100, 1_000, 10_000] 4 | files = "basic_term/" .* ["model_point_table_10.csv", "model_point_table_100.csv", "model_point_table_1K.csv", "model_point_table_10K.csv"] 5 | iterative_timings = Float64[] 6 | memoized_timings = Float64[] 7 | for file in files 8 | policies = policies_from_csv(file) 9 | 10 | n = BasicTermMemoized.final_timestep[] 11 | push!(iterative_timings, minimum(@benchmark CashFlow($model, estimate_premiums($model, $policies, $n), $n)).time * 1e-9) 12 | 13 | set_basic_term_policies!(policies) 14 | push!(memoized_timings, minimum(@benchmark begin 15 | empty_memoization_caches!() 16 | sum(LifeSimulator.pv_net_cf()) 17 | end).time * 1e-9) 18 | end 19 | TIME_RESULTS[model] = (; sizes, files, iterative_timings, memoized_timings) 20 | end 21 | 22 | function generate_time_complexity_data(model::LifelibSavings) 23 | @info "Generating time complexity benchmarks for model $(nameof(typeof(model)))" 24 | proj.scen_size = 1 25 | sizes = [9, 100, 1_000, 10_000, 100_000] 26 | files = "savings/" .* ["model_point_table_9.csv", "model_point_table_100.csv", "model_point_table_1K.csv", "model_point_table_10K.csv", "model_point_table_100K.csv"] 27 | julia_timings = Float64[] 28 | python_timings = Float64[] 29 | timeit = pyimport("timeit") 30 | for (i, file) in enumerate(files) 31 | trials = Int(min(50, 3e5 ÷ sizes[i])) 32 | policies = policies_from_csv(file) 33 | 34 | # lifelib will always simulate until the largest policy term, so we make sure we have no policies beyond 35 | # a desired simulation end (e.g. 30 years) and at least one policy reaching such a term. 36 | # In this way, timesteps are consistent across evaluations with different numbers of policies. 
37 | policies .= map(set -> @set(set.policy.issued_at = Month(0)), policies) 38 | policies .= map(set -> @set(set.policy.term = min(set.policy.term, Year(20))), policies) 39 | set = policies[1]; policies[1] = @set set.policy.term = Year(20) 40 | 41 | use_policies!(proj, policies) 42 | @assert ntimesteps(proj) == 241 43 | 44 | push!(julia_timings, minimum(@benchmark CashFlow(sim, n) setup = begin 45 | policies = policies_from_csv(proj) 46 | n = ntimesteps(proj) 47 | model = LifelibSavings(investment_rates = investment_rate(proj)) 48 | sim = Simulation(model, policies) 49 | end).time * 1e-9) 50 | 51 | push!(python_timings, minimum(pyconvert(Array, timeit.repeat("proj.clear_cache = 1; proj.pv_net_cf().sum()"; globals = pydict(; proj), number = 1, repeat = trials)))) 52 | end 53 | TIME_RESULTS[model] = (; sizes, files, iterative_timings = julia_timings, memoized_timings = python_timings) 54 | end 55 | 56 | function plot_time_complexity_results(model; folder = images_folder()) 57 | (; sizes, files, iterative_timings, memoized_timings) = TIME_RESULTS[model] 58 | colors = Makie.wong_colors() 59 | fig = Figure(; resolution = (1000, 300)) 60 | ax = Axis(fig[1, 1]; title = "Time performance - $(model_title(model)) models", xlabel = "Number of policy sets", ylabel = "Time (s)", xscale = log10, yscale = log10) 61 | l1 = lines!(ax, sizes, iterative_timings; color = colors[1]) 62 | l2 = lines!(ax, sizes, memoized_timings; color = colors[2]) 63 | s1 = scatter!(ax, sizes, iterative_timings; color = colors[1], marker = :x) 64 | s2 = scatter!(ax, sizes, memoized_timings; color = colors[2], marker = :x) 65 | Legend(fig[1, 2], [[l1, s1], [l2, s2]], ["Iterative (Julia)", "Memoized ($(language_used_memoized(model)))"]) 66 | file = joinpath(folder, "time_complexity_$(model_string(model)).png") 67 | @info "Saving plot at $file" 68 | save(file, fig) 69 | end 70 | 71 | function time_complexity_benchmarks(model::Model; folder = images_folder()) 72 | !haskey(TIME_RESULTS, model) && 
generate_time_complexity_data(model) 73 | plot_time_complexity_results(model; folder) 74 | end 75 | 76 | time_complexity_benchmarks(term_life_model[]) 77 | time_complexity_benchmarks(universal_life_model[]) 78 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/basic_term.jl: -------------------------------------------------------------------------------- 1 | using LifeSimulator: empty_memoization_caches!, pv_net_cf 2 | 3 | function cf1() 4 | empty_memoization_caches!() 5 | sum(pv_net_cf()) 6 | end 7 | 8 | function run_basic_term_benchmark() 9 | cf1_benchmark = @benchmark cf1() 10 | result = cf1() 11 | return Dict( 12 | "minimum time" => string(minimum(cf1_benchmark)), 13 | "result" => string(result), 14 | ) 15 | end 16 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/basic_term_array.jl: -------------------------------------------------------------------------------- 1 | using CSV 2 | using DataFrames 3 | using Tables 4 | using LoopVectorization 5 | 6 | # Random uniform distribution in PyTorch 7 | 8 | read_csv(file) = CSV.read(data_file(file), DataFrame) 9 | data_file(file) = joinpath(dirname(@__DIR__), "Python", "BasicTerm_M", file) 10 | 11 | 12 | function project(max_proj_len, disc_rate, sum_assured, policy_term, age_at_entry, mort, loading_prem, expense_acq, expense_maint, inflation_rate) 13 | time_axis = 0:(max_proj_len-1) 14 | duration = time_axis .÷ 12 15 | discount_factors = @. (1 + disc_rate[duration+1])^(-time_axis / 12) 16 | inflation_factor = @. (1 + inflation_rate)^(time_axis / 12) 17 | lapse_rate = @. max(0.1 - 0.02 * duration, 0.02) 18 | lapse_rate_monthly = @. 1 - (1 - lapse_rate)^(1 / 12) 19 | 20 | monthly_mortality = [mort[ia+d-17, min(d + 1, 6)] for ia in age_at_entry, d in duration] 21 | monthly_mortality .= @turbo @. 
1 - (1 - monthly_mortality)^(1 / 12) 22 | 23 | pols_if = let 24 | m = similar(monthly_mortality) 25 | for I in CartesianIndices(m) 26 | i, j = Tuple(I) 27 | m[i, j] = if j == 1 28 | 1.0 29 | elseif policy_term[i] * 12 < j 30 | 0.0 31 | else 32 | m[i, j-1] * (1 - lapse_rate_monthly[j-1]) * (1 - monthly_mortality[i, j-1]) 33 | end 34 | end 35 | m 36 | end 37 | 38 | claims = @. monthly_mortality * pols_if * sum_assured 39 | pv_claims = claims * discount_factors 40 | pv_pols_if = pols_if * discount_factors 41 | net_premium = pv_claims ./ pv_pols_if 42 | premium_pp = @. round((1 + loading_prem) * net_premium, digits=2) 43 | premiums = premium_pp .* pols_if 44 | commissions = (duration .== 0)' .* premiums 45 | expenses = @. (expense_maint / 12 * inflation_factor)' * pols_if 46 | expenses[:, 1] .+= expense_acq 47 | pv_premiums = premiums * discount_factors 48 | pv_expenses = expenses * discount_factors 49 | pv_commissions = commissions * discount_factors 50 | pv_net_cf = @. pv_premiums - pv_claims - pv_expenses - pv_commissions 51 | sum(pv_net_cf) 52 | end 53 | 54 | function run_basicterm_array_benchmark() 55 | # parameters 56 | max_proj_len = 12 * 20 + 1 57 | loading_prem = 0.5 58 | expense_acq = 300.0 59 | expense_maint = 60.0 60 | inflation_rate = 0.01 61 | 62 | mp = read_csv("model_point_table.csv") 63 | disc_rate = read_csv("disc_rate_ann.csv").zero_spot 64 | sum_assured = mp.sum_assured 65 | policy_term = mp.policy_term 66 | age_at_entry = mp.age_at_entry 67 | mort = CSV.read(data_file("mort_table.csv"), Tables.matrix; drop=[1]) 68 | 69 | result = project( 70 | max_proj_len, 71 | disc_rate, 72 | sum_assured, 73 | policy_term, 74 | age_at_entry, 75 | mort, 76 | loading_prem, 77 | expense_acq, 78 | expense_maint, 79 | inflation_rate, 80 | ) 81 | 82 | b1 = @benchmark return project( 83 | $max_proj_len, 84 | $disc_rate, 85 | $sum_assured, 86 | $policy_term, 87 | $age_at_entry, 88 | $mort, 89 | $loading_prem, 90 | $expense_acq, 91 | $expense_maint, 92 | $inflation_rate, 
93 | ) 94 | 95 | return Dict( 96 | "result" => result, 97 | "minimum time" => string(minimum(b1)), 98 | ) 99 | end -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/benchmark_results.yaml: -------------------------------------------------------------------------------- 1 | basic_term_benchmark: 2 | Julia array basic_term: 3 | minimum time: "TrialEstimate(29.152 ms)" 4 | result: 1.4489630534602132e7 5 | Julia recursive basic_term: 6 | minimum time: "TrialEstimate(81.070 ms)" 7 | result: "1.4489630534602132e7" 8 | mortality: 9 | Julia MortalityTables.jl: 10 | minimum time: "TrialEstimate(239.946 μs)" 11 | result: 1904.4865526636793 12 | savings_benchmark: 13 | Julia Benchmarks savings: 14 | minimum time: "TrialEstimate(118.603 ms)" 15 | result: 3.507113709040273e12 16 | exposures: 17 | Julia ExperienceAnalysis.jl: 18 | num_rows: 141281 19 | minimum time: "TrialEstimate(29.284 ms)" 20 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/exposures.jl: -------------------------------------------------------------------------------- 1 | using DataFrames 2 | using CSV 3 | using Dates 4 | using BenchmarkTools 5 | using ExperienceAnalysis 6 | using DayCounts 7 | 8 | function expsoures_ExperienceAnalysis( 9 | df_yearly::DataFrame, 10 | study_start::Date, 11 | study_end::Date, 12 | ) 13 | continue_exposure = df_yearly.status .== "Surrender" 14 | df_yearly.exposure = 15 | ExperienceAnalysis.exposure.( 16 | ExperienceAnalysis.Anniversary(Year(1)), # The basis for our exposures 17 | df_yearly.issue_date, # The `from` date 18 | df_yearly.term_date, # the `to` date array we created above 19 | continue_exposure; 20 | study_start=study_start, 21 | study_end = study_end, 22 | left_partials=false 23 | ) 24 | df_yearly = flatten(df_yearly, :exposure) 25 | df_yearly.exposure_fraction = 26 | map(e -> yearfrac(e.from, e.to, DayCounts.Thirty360()), 
df_yearly.exposure) 27 | return df_yearly 28 | end 29 | 30 | 31 | 32 | function run_exposure_benchmarks() 33 | df = CSV.read(joinpath(dirname(@__DIR__), "data", "census_dat.csv"), DataFrame) 34 | df.term_date = [d == "NA" ? nothing : Date(d, "yyyy-mm-dd") for d in df.term_date] 35 | study_end = Date(2020, 2, 29) 36 | study_start = Date(2006, 6, 15) 37 | df_yearly_exp = copy(df) 38 | result_exp = expsoures_ExperienceAnalysis(copy(df), study_start, study_end) 39 | b_exp = @benchmark expsoures_ExperienceAnalysis($df_yearly_exp, $study_start, $study_end) 40 | 41 | return Dict( 42 | "Julia ExperienceAnalysis.jl" => Dict( 43 | "num_rows" => size(result_exp, 1), 44 | "minimum time" => string(minimum(b_exp)), 45 | ) 46 | ) 47 | end 48 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/main.jl: -------------------------------------------------------------------------------- 1 | using Pkg 2 | Pkg.add(url="https://github.com/JuliaActuary/LifeSimulator.jl") 3 | 4 | include("mortality.jl") 5 | include("exposures.jl") 6 | include("basic_term.jl") 7 | include("basic_term_array.jl") 8 | include("savings.jl") 9 | import YAML 10 | 11 | 12 | function run_benchmarks() 13 | return Dict( 14 | "mortality" => run_mortality_benchmarks(), 15 | "exposures" => run_exposure_benchmarks(), 16 | "basic_term_benchmark" => Dict( 17 | "Julia recursive basic_term" => run_basic_term_benchmark(), 18 | "Julia array basic_term" => run_basicterm_array_benchmark(), 19 | ), 20 | "savings_benchmark" => run_savings_benchmark(), 21 | ) 22 | end 23 | 24 | YAML.write_file("benchmark_results.yaml", run_benchmarks()) 25 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/mortality.jl: -------------------------------------------------------------------------------- 1 | using MortalityTables 2 | using BenchmarkTools 3 | 4 | @inline function npv(qs, r, term = length(qs)) 5 | inforce, result 
= 1.0, 0.0
    v = 1 / (1 + r)
    v_t = v
    # Accumulate the present value of a unit benefit paid on death:
    # `inforce` is the survival probability to the start of year t,
    # `q` the mortality rate in year t, and `v_t` the discount factor v^t.
    @inbounds @simd for t = 1:min(term, length(qs))
        q = qs[t]
        result += inforce * q * v_t
        inforce = inforce * (1 - q)
        v_t *= v
    end
    return result
end

# Sum npv() over a grid of issue ages and durations for a set of select
# mortality tables (ids 3299:3308 — presumably SOA table identifiers
# resolved by MortalityTables.table; confirm against mort.soa.org).
function mortality1(tbls = MortalityTables.table.(3299:3308))
    issue_ages = 18:50
    durations = 1:25
    term = 29
    total = 0.0
    @inbounds for i in eachindex(tbls), ia in issue_ages, dur in durations
        start_age = ia + dur - 1
        # Select rates for a policy issued at age `ia`, now at attained age
        # `start_age`, over `term + 1` annual rates, discounted at 2%.
        total += @views npv(tbls[i].select[ia][start_age:start_age+term], 0.02)
    end
    return total
end

# Run mortality1() once for its result, benchmark it, and report both in
# the Dict shape consumed by run_benchmarks() in main.jl.
function run_mortality_benchmarks()
    tbls = MortalityTables.table.(3299:3308)
    mort1_result = mortality1(tbls)
    b1 = @benchmark mortality1($tbls)
    return Dict(
        "Julia MortalityTables.jl" => Dict(
            "result" => mort1_result,
            "minimum time" => string(minimum(b1)),
        ),
    )
end
--------------------------------------------------------------------------------
/github-runners-benchmarks/Julia/read_model.jl:
--------------------------------------------------------------------------------
using PythonCall: pyimport

# Path to the sibling `Python` directory holding the modelx models.
python_directory() = joinpath(dirname(@__DIR__), "Python")

"Read a specific `savings` model, such as `SE_EX4` or `ME_EX4`."
6 | function read_savings_model(model = "ME_EX4"; dir = python_directory()) 7 | mx = pyimport("modelx") 8 | mx.read_model(joinpath(dir, "CashValue_$model")).Projection 9 | end 10 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Julia/savings.jl: -------------------------------------------------------------------------------- 1 | using LifeSimulator 2 | using BenchmarkTools 3 | 4 | include("read_model.jl") 5 | 6 | function run_savings_benchmark() 7 | proj = read_savings_model() 8 | proj.scen_size = 1 9 | policies = policies_from_csv("savings/model_point_table_10K.csv") 10 | use_policies!(proj, policies) 11 | model = LifelibSavings(investment_rates = investment_rate(proj)) 12 | n = ntimesteps(proj) 13 | savings_benchmark = @benchmark CashFlow($model, $policies, $n).discounted 14 | savings = CashFlow(model, policies, n).discounted 15 | Dict( 16 | "Julia Benchmarks savings" => Dict( 17 | "minimum time" => string(minimum(savings_benchmark)), 18 | "result" => savings, 19 | ) 20 | ) 21 | end 22 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "BasicTerm_M" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/_data/data.pickle -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/_system.json: 
-------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 16, 1], "serializer_version": 4} -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/disc_rate_ann.csv: -------------------------------------------------------------------------------- 1 | year,zero_spot 2 | 0,0 3 | 1,0.00555 4 | 2,0.006840000000000001 5 | 3,0.00788 6 | 4,0.00866 7 | 5,0.00937 8 | 6,0.00997 9 | 7,0.0105 10 | 8,0.01098 11 | 9,0.01144 12 | 10,0.01188 13 | 11,0.01226 14 | 12,0.01259 15 | 13,0.01285 16 | 14,0.01308 17 | 15,0.0133 18 | 16,0.01345 19 | 17,0.01358 20 | 18,0.01368 21 | 19,0.01375 22 | 20,0.01378 23 | 21,0.01379 24 | 22,0.01376 25 | 23,0.01373 26 | 24,0.01369 27 | 25,0.01365 28 | 26,0.01361 29 | 27,0.01356 30 | 28,0.01351 31 | 29,0.01346 32 | 30,0.0134 33 | 31,0.01333 34 | 32,0.01325 35 | 33,0.01316 36 | 34,0.01306 37 | 35,0.01295 38 | 36,0.01283 39 | 37,0.01271 40 | 38,0.0126 41 | 39,0.0125 42 | 40,0.01241 43 | 41,0.01235 44 | 42,0.01229 45 | 43,0.01222 46 | 44,0.01214 47 | 45,0.01203 48 | 46,0.0119 49 | 47,0.01178 50 | 48,0.01168 51 | 49,0.01164 52 | 50,0.01166 53 | 51,0.01177 54 | 52,0.01193 55 | 53,0.01215 56 | 54,0.01241 57 | 55,0.0127 58 | 56,0.01301 59 | 57,0.01333 60 | 58,0.01367 61 | 59,0.01402 62 | 60,0.01437 63 | 61,0.01473 64 | 62,0.01508 65 | 63,0.01543 66 | 64,0.01579 67 | 65,0.01613 68 | 66,0.01648 69 | 67,0.01682 70 | 68,0.01715 71 | 69,0.01748 72 | 70,0.0178 73 | 71,0.01812 74 | 72,0.01843 75 | 73,0.01874 76 | 74,0.01903 77 | 75,0.01933 78 | 76,0.01961 79 | 77,0.01989 80 | 78,0.02016 81 | 79,0.02043 82 | 80,0.02069 83 | 81,0.02095 84 | 82,0.0212 85 | 83,0.02144 86 | 84,0.02168 87 | 85,0.02192 88 | 86,0.02215 89 | 87,0.02237 90 | 88,0.02259 91 | 89,0.0228 92 | 90,0.02301 93 | 91,0.02322 94 | 92,0.02342 95 | 93,0.02362 96 | 94,0.02381 97 | 95,0.024 98 | 96,0.02419 99 | 97,0.02437 100 | 98,0.02455 101 | 99,0.02472 102 | 100,0.02489 103 | 
101,0.02506 104 | 102,0.02522 105 | 103,0.02539 106 | 104,0.02554 107 | 105,0.0257 108 | 106,0.02585 109 | 107,0.026 110 | 108,0.02615 111 | 109,0.02629 112 | 110,0.02643 113 | 111,0.02657 114 | 112,0.02671 115 | 113,0.02684 116 | 114,0.02698 117 | 115,0.02711 118 | 116,0.02723 119 | 117,0.02736 120 | 118,0.02748 121 | 119,0.0276 122 | 120,0.02772 123 | 121,0.02784 124 | 122,0.02795 125 | 123,0.02807 126 | 124,0.02818 127 | 125,0.02829 128 | 126,0.0284 129 | 127,0.0285 130 | 128,0.02861000000000001 131 | 129,0.02871000000000001 132 | 130,0.02881 133 | 131,0.02891 134 | 132,0.02901 135 | 133,0.02911000000000001 136 | 134,0.0292 137 | 135,0.0293 138 | 136,0.02939 139 | 137,0.02948000000000001 140 | 138,0.02957 141 | 139,0.02966 142 | 140,0.02975 143 | 141,0.02984 144 | 142,0.02992 145 | 143,0.03001 146 | 144,0.03009 147 | 145,0.03017 148 | 146,0.03025 149 | 147,0.03033000000000001 150 | 148,0.03041000000000001 151 | 149,0.03049 152 | 150,0.03056000000000001 153 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/model_point_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/model_point_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/mort_table.csv: 
-------------------------------------------------------------------------------- 1 | Age,0,1,2,3,4,5 2 | 18,0.0002310671048780701,0.0002541738153658771,0.0002795911969024648,0.0003075503165927113,0.0003383053482519825,0.0003721358830771808 3 | 19,0.0002353200126583699,0.000258852013924207,0.0002847372153166277,0.0003132109368482905,0.0003445320305331195,0.0003789852335864315 4 | 20,0.0002399363756770145,0.0002639300132447159,0.0002903230145691875,0.0003193553160261063,0.000351290847628717,0.0003864199323915887 5 | 21,0.000244937423152777,0.0002694311654680547,0.0002963742820148602,0.0003260117102163462,0.0003586128812379809,0.000394474169361779 6 | 22,0.0002503462740334557,0.0002753809014368013,0.0003029189915804814,0.0003332108907385296,0.0003665319798123826,0.0004031851777936208 7 | 23,0.0002561881183155183,0.0002818069301470701,0.0003099876231617771,0.0003409863854779549,0.0003750850240257504,0.0004125935264283255 8 | 24,0.0002624904157144116,0.0002887394572858528,0.0003176134030144381,0.0003493747433158819,0.0003843122176474701,0.0004227434394122172 9 | 25,0.0002692831139799757,0.0002962114253779732,0.0003258325679157706,0.0003584158247073477,0.0003942574071780824,0.0004336831478958907 10 | 26,0.0002765988893708184,0.0003042587783079002,0.0003346846561386903,0.0003681531217525593,0.0004049684339278153,0.0004454652773205969 11 | 27,0.0002844734120632121,0.0003129207532695333,0.0003442128285964867,0.0003786341114561354,0.000416497522601749,0.0004581472748619239 12 | 28,0.0002929456395766907,0.0003222402035343598,0.0003544642238877958,0.0003899106462765754,0.000428901710904233,0.0004717918819946564 13 | 29,0.0003020581416539583,0.0003322639558193541,0.0003654903514012896,0.0004020393865414186,0.0004422433251955605,0.0004864676577151166 14 | 30,0.0003118574604420908,0.0003430432064863,0.00037734752713493,0.000415082279848423,0.0004565905078332653,0.0005022495586165919 15 | 
31,0.0003223945102916596,0.0003546339613208256,0.0003900973574529082,0.0004291070931981991,0.000472017802518019,0.000519219582769821 16 | 32,0.0003337250220279336,0.000367097524230727,0.0004038072766537997,0.0004441880043191797,0.0004886068047510977,0.0005374674852262075 17 | 33,0.0003459100371627477,0.0003805010408790224,0.0004185511449669247,0.0004604062594636172,0.000506446885409979,0.0005570915739509769 18 | 34,0.0003590164582175236,0.000394918104039276,0.0004344099144432036,0.0004778509058875241,0.0005256359964762765,0.0005781995961239042 19 | 35,0.0003731176621295971,0.0004104294283425568,0.0004514723711768126,0.0004966196082944939,0.0005462815691239433,0.0006009097260363377 20 | 36,0.0003882941846297993,0.0004271236030927793,0.0004698359634020572,0.000516819559742263,0.0005685015157164894,0.0006253516672881384 21 | 37,0.0004046344845257716,0.0004450979329783488,0.0004896077262761837,0.0005385684989038021,0.0005924253487941823,0.0006516678836736005 22 | 38,0.0004222357980220614,0.0004644593778242676,0.0005109053156066944,0.0005619958471673638,0.0006181954318841003,0.0006800149750725104 23 | 39,0.0004412050945770665,0.0004853256040347732,0.0005338581644382506,0.0005872439808820756,0.0006459683789702832,0.0007105652168673116 24 | 40,0.0004616601473642979,0.0005078261621007278,0.0005586087783108006,0.0006144696561418807,0.0006759166217560689,0.0007435082839316758 25 | 41,0.0004837307332014202,0.0005321038065215623,0.0005853141871737187,0.0006438456058910905,0.0007082301664801996,0.0007790531831282197 26 | 42,0.0005075599788701063,0.0005583159767571169,0.0006141475744328287,0.0006755623318761115,0.0007431185650637228,0.0008174304215700951 27 | 43,0.0005333058731135947,0.0005866364604249543,0.0006453001064674498,0.0007098301171141948,0.0007808131288256143,0.0008588944417081758 28 | 44,0.0005611429663144072,0.000617257262945848,0.0006789829892404329,0.0007468812881644762,0.0008215694169809239,0.0009037263586790164 29 | 
45,0.0005912642829769909,0.00065039071127469,0.0007154297824021591,0.0007869727606423751,0.0008656700367066127,0.0009522370403772741 30 | 46,0.0006238834757334444,0.0006862718233067889,0.0007548990056374678,0.0008303889062012146,0.0009134277968213361,0.00100477057650347 31 | 47,0.0006592372537298736,0.000725160979102861,0.0007976770770131472,0.0008774447847144619,0.0009651892631859082,0.001061708189504499 32 | 48,0.0006975881230237241,0.0007673469353260965,0.0008440816288587063,0.000928489791744577,0.001021338770919035,0.001123472648010938 33 | 49,0.0007392274821309431,0.0008131502303440375,0.0008944652533784413,0.0009839117787162855,0.001082302956587914,0.001190533252246706 34 | 50,0.0007844791222255886,0.0008629270344481476,0.0009492197378929624,0.001044141711682259,0.001148555882850485,0.001263411471135533 35 | 51,0.0008337031888533204,0.0009170735077386526,0.001008780858512518,0.00110965894436377,0.001220624838800147,0.001342687322680162 36 | 52,0.0008873006705383532,0.0009760307375921886,0.001073633811351408,0.001180997192486548,0.001299096911735203,0.001429006602908724 37 | 53,0.0009457184895330499,0.001040290338486355,0.001144319372334991,0.00125875130956849,0.001384626440525339,0.001523089084577873 38 | 54,0.001009455281406362,0.001110400809546999,0.001221440890501699,0.001343584979551869,0.001477943477507056,0.001625737825257762 39 | 55,0.00107906796345734,0.001186974759803075,0.001305672235783382,0.00143623945936172,0.001579863405297893,0.001737849745827682 40 | 56,0.001155179207384984,0.001270697128123482,0.00139776684093583,0.001537543525029414,0.001691297877532355,0.001860427665285591 41 | 57,0.00123848594961408,0.001362334544575489,0.001498567999033038,0.001648424798936341,0.001813267278829976,0.001994594006712974 42 | 58,0.001329769093601377,0.001462746002961515,0.001609020603257667,0.001769922663583434,0.001946914929941777,0.002141606422935955 43 | 
59,0.001429904582839669,0.001572895041123636,0.001730184545236,0.0019032029997596,0.00209352329973556,0.002302875629709116 44 | 60,0.001539876051743219,0.001693863656917541,0.001863250022609295,0.002049575024870224,0.002254532527357247,0.002479985780092972 45 | 61,0.00166078929485154,0.001826868224336694,0.002009555046770363,0.0022105105514474,0.00243156160659214,0.002674717767251354 46 | 62,0.001793888833675047,0.001973277717042552,0.002170605488746808,0.002387666037621488,0.002626432641383637,0.002889075905522001 47 | 63,0.001940576906029566,0.002134634596632523,0.002348098056295775,0.002582907861925353,0.002841198648117888,0.003125318512929678 48 | 64,0.002102435256054419,0.002312678781659861,0.002543946659825847,0.002798341325808432,0.003078175458389276,0.003385993004228204 49 | 65,0.002281250165694602,0.002509375182264062,0.002760312700490468,0.003036343970539515,0.003339978367593467,0.003673976204352814 50 | 66,0.002479041241928696,0.002726945366121566,0.002999639902733722,0.003299603893007094,0.003629564282307804,0.003992520710538585 51 | 67,0.002698094560439211,0.002967904016483132,0.003264694418131445,0.00359116385994459,0.003950280245939049,0.004345308270532955 52 | 68,0.002941000868128997,0.003235100954941897,0.003558611050436087,0.003914472155479696,0.004305919371027666,0.004736511308130433 53 | 69,0.003210699666725728,0.003531769633398301,0.003884946596738132,0.004273441256411946,0.004700785382053141,0.005170863920258455 54 | 70,0.003510530141071987,0.003861583155179186,0.004247741470697105,0.004672515617766815,0.005139767179543497,0.005653743897497847 55 | 71,0.003844290062620606,0.004228719068882667,0.004651590975770933,0.005116750073348027,0.00562842508068283,0.006191267588751114 56 | 72,0.004216303995988054,0.004637934395586859,0.005101727835145545,0.0056119006186601,0.00617309068052611,0.006790399748578721 57 | 73,0.004631502369963116,0.005094652606959428,0.005604117867655371,0.006164529654420908,0.006780982619863,0.007459080881849301 58 | 
74,0.005095513251082814,0.005605064576191096,0.006165571033810205,0.006782128137191226,0.00746034095091035,0.008206375046001385 59 | 75,0.00561476898612035,0.006176245884732386,0.006793870473205625,0.007473257520526189,0.008220583272578809,0.00904264159983669 60 | 76,0.006196630269622466,0.006816293296584713,0.007497922626243185,0.008247714888867504,0.009072486377754254,0.00997973501552968 61 | 77,0.006849530656051229,0.007534483721656353,0.008287932093821988,0.009116725303204188,0.01002839783352461,0.01103123761687707 62 | 78,0.0075831450876836,0.008341459596451961,0.009175605556097158,0.01009316611170687,0.01110248272287756,0.01221273099516532 63 | 79,0.008408586666725113,0.009249445333397626,0.01017438986673739,0.01119182885341113,0.01231101173875224,0.01354211291262747 64 | 80,0.009338636684281934,0.01027250035271013,0.01129975038798114,0.01242972542677926,0.01367269796945719,0.01503996776640291 65 | 81,0.01038801385549308,0.01142681524104239,0.01256949676514663,0.01382644644166129,0.01520909108582742,0.01673000019441017 66 | 82,0.01157368983020542,0.01273105881322597,0.01400416469454857,0.01540458116400342,0.01694503928040377,0.01863954320844415 67 | 83,0.01291525938959877,0.01420678532855865,0.01562746386141452,0.01719021024755597,0.01890923127231157,0.02080015439954273 68 | 84,0.01443537534660213,0.01587891288126235,0.01746680416938859,0.01921348458632744,0.02113483304496019,0.02324831634945621 69 | 85,0.01616026009703804,0.01777628610674184,0.01955391471741603,0.02150930618915763,0.0236602368080734,0.02602626048888074 70 | 86,0.01812030808631169,0.01993233889494286,0.02192557278443714,0.02411813006288086,0.02652994306916895,0.02918293737608585 71 | 87,0.02035079624486606,0.02238587586935267,0.02462446345628793,0.02708690980191673,0.0297956007821084,0.03277516086031924 72 | 88,0.02289272280409473,0.0251819950845042,0.02770019459295462,0.03047021405225009,0.0335172354574751,0.03686895900322261 73 | 
89,0.02579379895447039,0.02837317884991743,0.03121049673490918,0.0343315464084001,0.03776470104924012,0.04154117115416413 74 | 90,0.0291096226976437,0.03202058496740807,0.03522264346414888,0.03874490781056377,0.04261939859162015,0.04688133845078217 75 | 91,0.03290507015568574,0.03619557717125432,0.03981513488837975,0.04379664837721773,0.04817631321493951,0.05299394453643346 76 | 92,0.03725594675567852,0.04098154143124638,0.04507969557437102,0.04958766513180812,0.05454643164498894,0.06000107480948783 77 | 93,0.04225094937882418,0.0464760443167066,0.05112364874837726,0.05623601362321499,0.0618596149855365,0.06804557648409015 78 | 94,0.04799400108452031,0.05279340119297234,0.05807274131226958,0.06388001544349654,0.07026801698784621,0.07729481868663084 79 | 95,0.05460703280218726,0.06006773608240599,0.0660745096906466,0.07268196065971126,0.0799501567256824,0.08794517239825064 80 | 96,0.06223330193255692,0.06845663212581261,0.07530229533839389,0.08283252487223328,0.09111577735945661,0.1002273550954023 81 | 97,0.07104135673834143,0.07814549241217558,0.08596004165339315,0.09455604581873246,0.1040116504006057,0.1144128154406663 82 | 98,0.08122977849975568,0.08935275634973126,0.0982880319847044,0.1081168351831748,0.1189285187014923,0.1308213705716416 83 | 99,0.09303286161187924,0.1023361477730672,0.1125697625503739,0.1238267388054113,0.1362094126859524,0.1498303539545477 84 | 100,0.1067274262812674,0.1174001689093942,0.1291401858003336,0.142054204380367,0.1562596248184037,0.1718855873002441 85 | 101,0.1226410006907388,0.1349051007598127,0.1483956108357939,0.1632351719193733,0.1795586891113107,0.1975145580224417 86 | 102,0.1411616612443195,0.1552778273687514,0.1708056101056266,0.1878861711161893,0.2066747882278082,0.227342267050589 87 | 103,0.1627498830132505,0.1790248713145755,0.1969273584460331,0.2166200942906364,0.2382821037197001,0.2621103140916701 88 | 104,0.1879528305562259,0.2067481136118486,0.2274229249730334,0.2501652174703368,0.2751817392173705,0.3026999131391076 
89 | 105,0.2174216153384464,0.2391637768722911,0.2630801545595202,0.2893881700154723,0.3183269870170196,0.3501596857187215 90 | 106,0.2519321643307321,0.2771253807638053,0.3048379188401859,0.3353217107242045,0.3688538817966249,0.4057392699762875 91 | 107,0.2924104904045808,0.3216515394450389,0.3538166933895428,0.3891983627284971,0.4281181990013468,0.4709300189014816 92 | 108,0.3399633355628309,0.373959669119114,0.4113556360310255,0.4524911996341281,0.4977403195975409,0.5475143515572951 93 | 109,0.3959153812539035,0.4355069193792939,0.4790576113172234,0.5269633724489458,0.5796597096938404,0.6376256806632244 94 | 110,0.4618544965251583,0.5080399461776742,0.5588439407954418,0.614728334874986,0.6762011683624847,0.7438212851987333 95 | 111,0.5396868377647932,0.5936555215412725,0.6530210736953999,0.7183231810649399,0.7901554991714339,0.8691710490885773 96 | 112,0.6317040398225142,0.6948744438047657,0.7643618881852423,0.8407980770037666,0.9248778847041433,1 97 | 113,0.7406652682100285,0.8147317950310314,0.8962049745341346,0.9858254719875481,1,1 98 | 114,0.8698975620835765,0.9568873182919342,1,1,1,1 99 | 115,1,1,1,1,1,1 100 | 116,1,1,1,1,1,1 101 | 117,1,1,1,1,1,1 102 | 118,1,1,1,1,1,1 103 | 119,1,1,1,1,1,1 104 | 120,1,1,1,1,1,1 105 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_M/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_M/mort_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "BasicTerm_ME" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | 
"Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/_data/data.pickle -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 17, 0], "serializer_version": 4} -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/model_point_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/model_point_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/BasicTerm_ME/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/mort_table.xlsx -------------------------------------------------------------------------------- 
/github-runners-benchmarks/Python/BasicTerm_ME/premium_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/BasicTerm_ME/premium_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/__init__.py: -------------------------------------------------------------------------------- 1 | from modelx.serialize.jsonvalues import * 2 | 3 | _name = "CashValue_ME_EX4" 4 | 5 | _allow_none = False 6 | 7 | _spaces = [ 8 | "Projection" 9 | ] 10 | 11 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/_data/data.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/_data/data.pickle -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/_system.json: -------------------------------------------------------------------------------- 1 | {"modelx_version": [0, 17, 0], "serializer_version": 4} -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/disc_rate_ann.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/disc_rate_ann.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_1.xlsx: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_1.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_moneyness.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/model_point_moneyness.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/mort_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/mort_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/product_spec_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/product_spec_table.xlsx -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/CashValue_ME_EX4/surr_charge_table.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/actuarialopensource/benchmarks/1a92a9f6f3e1b2abf7e967b725e5c9075372d6b9/github-runners-benchmarks/Python/CashValue_ME_EX4/surr_charge_table.xlsx -------------------------------------------------------------------------------- 
/github-runners-benchmarks/Python/basicterm_m.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import timeit 3 | from basicterm_m_lifelib import basicterm_m_lifelib 4 | from basicterm_m_recursive_pytorch import basicterm_recursive_pytorch 5 | from basicterm_m_recursive_numpy import basicterm_recursive_numpy 6 | from basicterm_m_array_pytorch import basicterm_array_pytorch 7 | from basicterm_m_array_numpy import basicterm_array_numpy 8 | from pprint import pprint 9 | 10 | 11 | def run_basic_term_benchmarks(): 12 | trials = 20 13 | modelx_time = timeit.repeat(stmt="basicterm_m_lifelib()", setup="from basicterm_m_lifelib import basicterm_m_lifelib", number=1, repeat=trials) 14 | modelx_result = basicterm_m_lifelib() 15 | recursive_pytorch_time = timeit.repeat(stmt="basicterm_recursive_pytorch()", setup="from basicterm_m_recursive_pytorch import basicterm_recursive_pytorch", number=1, repeat=trials) 16 | recursive_pytorch_result = basicterm_recursive_pytorch() 17 | recursive_numpy_time = timeit.repeat(stmt="basicterm_recursive_numpy()", setup="from basicterm_m_recursive_numpy import basicterm_recursive_numpy", number=1, repeat=trials) 18 | recursive_numpy_result = basicterm_recursive_numpy() 19 | array_pytorch_time = timeit.repeat(stmt="basicterm_array_pytorch()", setup="from basicterm_m_array_pytorch import basicterm_array_pytorch", number=1, repeat=trials) 20 | array_pytorch_result = basicterm_array_pytorch() 21 | array_numpy_time = timeit.repeat(stmt="basicterm_array_numpy()", setup="from basicterm_m_array_numpy import basicterm_array_numpy", number=1, repeat=trials) 22 | array_numpy_result = basicterm_array_numpy() 23 | return { 24 | "Python lifelib basic_term_m": { 25 | "minimum time": f"{np.min(modelx_time)*1000} milliseconds", 26 | "result": modelx_result, 27 | }, 28 | "Python recursive pytorch basic_term_m": { 29 | "minimum time": f"{np.min(recursive_pytorch_time)*1000} milliseconds", 30 | "result": 
recursive_pytorch_result, 31 | }, 32 | "Python recursive numpy basic_term_m": { 33 | "minimum time": f"{np.min(recursive_numpy_time)*1000} milliseconds", 34 | "result": recursive_numpy_result, 35 | }, 36 | "Python array pytorch basic_term_m": { 37 | "minimum time": f"{np.min(array_pytorch_time)*1000} milliseconds", 38 | "result": array_pytorch_result, 39 | }, 40 | "Python array numpy basic_term_m": { 41 | "minimum time": f"{np.min(array_numpy_time)*1000} milliseconds", 42 | "result": array_numpy_result, 43 | } 44 | } 45 | 46 | if __name__ == "__main__": 47 | results = run_basic_term_benchmarks() 48 | pprint(results) 49 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_array_numpy.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | 4 | # Read data using pandas 5 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 6 | disc_rate = np.array(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")["zero_spot"].values, dtype=np.float64) 7 | sum_assured = np.array(mp["sum_assured"].values, dtype=np.float64) 8 | policy_term = np.array(mp["policy_term"].values, dtype=np.int64) 9 | age_at_entry = np.array(mp["age_at_entry"].values, dtype=np.int64) 10 | mort = np.array(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values, dtype=np.float64) 11 | 12 | def run(max_proj_len, disc_rate, sum_assured, policy_term, age_at_entry, mort, loading_prem, expense_acq, expense_maint, inflation_rate): 13 | time_axis = np.arange(max_proj_len)[:, None] 14 | duration = time_axis // 12 15 | discount_factors = np.power(1 + disc_rate[duration], -time_axis / 12) 16 | inflation_factor = np.power(1 + inflation_rate, time_axis / 12) 17 | lapse_rate = np.maximum(0.1 - 0.02 * duration, 0.02) 18 | lapse_rate_monthly = 1 - np.power(1 - lapse_rate, 1 / 12) 19 | attained_age = age_at_entry + duration 20 | annual_mortality = 
mort[attained_age - 18, np.minimum(duration, 5)] 21 | monthly_mortality = 1 - np.power(1 - annual_mortality, 1 / 12) 22 | pre_pols_if = np.vstack([ 23 | np.ones((1, monthly_mortality.shape[1])), 24 | np.cumprod((1 - lapse_rate_monthly) * (1 - monthly_mortality), axis=0)[:-1], 25 | ]) 26 | pols_if = (time_axis < (policy_term * 12)) * pre_pols_if 27 | pols_death = pols_if * monthly_mortality 28 | claims = sum_assured * pols_death 29 | pv_claims = np.sum(claims * discount_factors, axis=0) 30 | pv_pols_if = np.sum(pols_if * discount_factors, axis=0) 31 | net_premium = pv_claims / pv_pols_if 32 | premium_pp = np.round((1 + loading_prem) * net_premium, decimals=2) 33 | premiums = premium_pp * pols_if 34 | commissions = (duration == 0) * premiums 35 | expenses = (time_axis == 0) * expense_acq * pols_if + pols_if * expense_maint / 12 * inflation_factor 36 | pv_premiums = np.sum(premiums * discount_factors, axis=0) 37 | pv_expenses = np.sum(expenses * discount_factors, axis=0) 38 | pv_commissions = np.sum(commissions * discount_factors, axis=0) 39 | pv_net_cf = pv_premiums - pv_claims - pv_expenses - pv_commissions 40 | return float(pv_net_cf.sum()) 41 | 42 | def basicterm_array_numpy(): 43 | # parameters 44 | max_proj_len = 12 * 20 + 1 45 | loading_prem = 0.5 46 | expense_acq = 300.0 47 | expense_maint = 60.0 48 | inflation_rate = 0.01 49 | 50 | return run( 51 | max_proj_len, 52 | disc_rate, 53 | sum_assured, 54 | policy_term, 55 | age_at_entry, 56 | mort, 57 | loading_prem, 58 | expense_acq, 59 | expense_maint, 60 | inflation_rate, 61 | ) 62 | 63 | if __name__ == "__main__": 64 | print(basicterm_array_numpy()) 65 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_array_pytorch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import pandas as pd 3 | 4 | # Ensure PyTorch uses double precision (64-bit) by default, similar to JAX configuration 
5 | torch.set_default_dtype(torch.float64) 6 | 7 | # Random uniform distribution in PyTorch 8 | x = torch.rand(1000, dtype=torch.float64) 9 | print(f"{x.dtype=}") # --> dtype('torch.float64') 10 | 11 | 12 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 13 | disc_rate = torch.tensor(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")["zero_spot"].values) 14 | sum_assured = torch.tensor(mp["sum_assured"].values) 15 | policy_term = torch.tensor(mp["policy_term"].values) 16 | age_at_entry = torch.tensor(mp["age_at_entry"].values) 17 | mort = torch.tensor(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values) 18 | 19 | def run(max_proj_len, disc_rate, sum_assured, policy_term, age_at_entry, mort, loading_prem, expense_acq, expense_maint, inflation_rate): 20 | time_axis = torch.arange(max_proj_len)[:, None] 21 | duration = time_axis // 12 22 | discount_factors = (1 + disc_rate[duration]) ** (-time_axis / 12) 23 | inflation_factor = (1 + inflation_rate) ** (time_axis / 12) 24 | lapse_rate = torch.maximum(0.1 - 0.02 * duration, torch.tensor(0.02)) 25 | lapse_rate_monthly = 1 - (1 - lapse_rate) ** (1 / 12) 26 | attained_age = age_at_entry + duration 27 | annual_mortality = mort[attained_age - 18, torch.minimum(duration, torch.tensor(5, dtype=torch.int64))] 28 | monthly_mortality = 1 - (1 - annual_mortality) ** (1 / 12) 29 | pre_pols_if = torch.cat([ 30 | torch.ones((1, monthly_mortality.shape[1])), 31 | torch.cumprod((1 - lapse_rate_monthly) * (1 - monthly_mortality), dim=0)[:-1], 32 | ]) 33 | pols_if = (time_axis < (policy_term * 12)) * pre_pols_if 34 | pols_death = pols_if * monthly_mortality 35 | claims = sum_assured * pols_death 36 | pv_claims = torch.sum(claims * discount_factors, dim=0) 37 | pv_pols_if = torch.sum(pols_if * discount_factors, dim=0) 38 | net_premium = pv_claims / pv_pols_if 39 | premium_pp = torch.round((1 + loading_prem) * net_premium, decimals=2) 40 | premiums = premium_pp * pols_if 41 | commissions = (duration == 0) * premiums 42 | 
expenses = (time_axis == 0) * expense_acq * pols_if + pols_if * expense_maint / 12 * inflation_factor 43 | pv_premiums = torch.sum(premiums * discount_factors, dim=0) 44 | pv_expenses = torch.sum(expenses * discount_factors, dim=0) 45 | pv_commissions = torch.sum(commissions * discount_factors, dim=0) 46 | pv_net_cf = pv_premiums - pv_claims - pv_expenses - pv_commissions 47 | return float(pv_net_cf.sum()) 48 | 49 | def basicterm_array_pytorch(): 50 | # parameters 51 | max_proj_len = 12 * 20 + 1 52 | loading_prem = torch.tensor(0.5) 53 | expense_acq = torch.tensor(300.0) 54 | expense_maint = torch.tensor(60.0) 55 | inflation_rate = torch.tensor(0.01) 56 | 57 | return run( 58 | max_proj_len, 59 | disc_rate, 60 | sum_assured, 61 | policy_term, 62 | age_at_entry, 63 | mort, 64 | loading_prem, 65 | expense_acq, 66 | expense_maint, 67 | inflation_rate, 68 | ) 69 | 70 | if __name__ == "__main__": 71 | print(basicterm_array_pytorch()) 72 | 73 | 74 | # e2e test 75 | # assert results["net_cf_agg"][100].item() == 97661.8046875 76 | # # integration tests from development 77 | # assert premium_agg[-2].item() == 174528.421875 78 | # assert expenses_agg[-2].item() == 10686.298828125 79 | # assert commissions_agg[11].item() == 751268.375 80 | # assert commissions_agg[-20].item() == 0 81 | # assert claims_agg[-2].item() == 253439.921875 82 | # # unit tests from development 83 | # assert mort_jnp[attained_age - 18, duration][0][0].item() == 0.0006592372665181756 84 | # assert annual_mortality[-1][0].item() == 0.004345308057963848 85 | # assert pols_death[-2][1].item() == 5.005334969609976e-05 86 | # assert pols_death[-1][1].item() == 0 87 | # assert pols_lapse[0][0].item() == 0.008741136640310287 88 | # assert pols_lapse[-2][1].item() == 0.0008985198801383376 89 | # assert pols_lapse[-1][1].item() == 0 90 | # assert lapse_rate[12].item() == 0.07999999821186066 91 | # assert pre_pols_if[-1][1].item() == 0.5332472920417786 92 | # assert pols_if[-1][1].item() == 0 93 | # assert 
pols_if[-2][1].item() == 0.5341958403587341 94 | # assert pols_maturity[-1][1].item() == 0.5332472920417786 95 | # assert claims[0][0].item() == 34.18231201171875 96 | # assert claims[-2][1].item() == 37.64011764526367 97 | # assert jnp.sum(claims[-1]).item() == 0 98 | # assert discount_factors[11][0].item() == 1 99 | # assert discount_factors[30][0].item() == 0.9831026196479797 100 | # assert pv_claims[0].item() == 5501.505859375 101 | # assert net_premium[0].item() == 63.22805404663086 102 | # assert premiums[-2][1].item() == 32.66129684448242 103 | # assert commissions[1][0].item() == 94.0078353881836 104 | # assert expenses[0][0].item() == 305.0 105 | # assert expenses[-2][1].item() == 3.2564003467559814 106 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_lifelib.py: -------------------------------------------------------------------------------- 1 | import modelx as mx 2 | import numpy as np 3 | 4 | m = mx.read_model("BasicTerm_M") 5 | 6 | def basicterm_m_lifelib(): 7 | m.Projection.clear_cache = 1 8 | return float(np.sum(m.Projection.pv_net_cf())) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_m_recursive_numpy.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | 6 | # constants 7 | max_proj_len = 12 * 20 + 1 8 | 9 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 10 | disc_rate = np.array(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")['zero_spot'].values, dtype=np.float64) 11 | mort_np = np.array(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values, dtype=np.float64) 12 | sum_assured = np.array(mp["sum_assured"].values, dtype=np.float64) 13 | issue_age = np.array(mp["age_at_entry"].values, dtype=np.int32) 14 | policy_term = 
np.array(mp["policy_term"].values, dtype=np.int32) 15 | 16 | # classes 17 | class Cash: 18 | def __init__(self): 19 | self.reset() 20 | 21 | def reset(self): 22 | self.caches = defaultdict(dict) 23 | 24 | def __call__(self, func): 25 | @wraps(func) 26 | def wrapper(*args, **kwargs): 27 | key = (args, frozenset(kwargs.items())) 28 | if key not in self.caches[func.__name__]: 29 | self.caches[func.__name__][key] = func(*args, **kwargs) 30 | return self.caches[func.__name__][key] 31 | 32 | return wrapper 33 | 34 | cash = Cash() 35 | 36 | @cash 37 | def get_annual_rate(duration: int): 38 | return mort_np[issue_age + duration - 18, np.minimum(duration, 5)] 39 | @cash 40 | def get_monthly_rate(duration: int): 41 | return 1 - np.power((1 - get_annual_rate(duration)), 1/12) 42 | @cash 43 | def duration(t: int): 44 | return t // 12 45 | @cash 46 | def pols_death(t: int): 47 | return pols_if(t) * get_monthly_rate(duration(t)) 48 | @cash 49 | def pols_if(t: int): 50 | if t == 0: 51 | return 1 52 | return pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1) - pols_maturity(t) 53 | 54 | @cash 55 | def lapse_rate(t: int): 56 | return np.maximum(0.1 - 0.02 * duration(t), 0.02) 57 | @cash 58 | def pols_lapse(t: int): 59 | return (pols_if(t) - pols_death(t)) * (1 - np.power((1 - lapse_rate(t)), 1/12)) 60 | @cash 61 | def pols_maturity(t: int): 62 | if t == 0: 63 | return 0 64 | return (t == 12 * policy_term) * (pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1)) 65 | 66 | @cash 67 | def discount(t: int): 68 | return np.power((1 + disc_rate[duration(t)]), (-t/12)) 69 | @cash 70 | def claims(t: int): 71 | return pols_death(t) * sum_assured 72 | @cash 73 | def inflation_rate(): 74 | return 0.01 75 | @cash 76 | def inflation_factor(t): 77 | return np.power((1 + inflation_rate()), (t/12)) 78 | @cash 79 | def expense_acq(): 80 | return 300 81 | @cash 82 | def expense_maint(): 83 | return 60 84 | @cash 85 | def pv_pols_if(): 86 | return sum(pols_if(t) * discount(t) for t in 
range(max_proj_len)) 87 | @cash 88 | def pv_claims(): 89 | return sum(claims(t) * discount(t) for t in range(max_proj_len)) 90 | @cash 91 | def net_premium_pp(): 92 | return pv_claims() / pv_pols_if() 93 | @cash 94 | def loading_prem(): 95 | return 0.5 96 | @cash 97 | def expenses(t): 98 | return (t == 0) * expense_acq() * pols_if(t) \ 99 | + pols_if(t) * expense_maint()/12 * inflation_factor(t) 100 | @cash 101 | def premium_pp(): 102 | return np.round((1 + loading_prem()) * net_premium_pp(), decimals=2) 103 | @cash 104 | def premiums(t): 105 | return premium_pp() * pols_if(t) 106 | @cash 107 | def pv_premiums(): 108 | return sum(premiums(t) * discount(t) for t in range(max_proj_len)) 109 | @cash 110 | def pv_expenses(): 111 | return sum(expenses(t) * discount(t) for t in range(max_proj_len)) 112 | 113 | @cash 114 | def commissions(t): 115 | return (duration(t) == 0) * premiums(t) 116 | 117 | @cash 118 | def pv_commissions(): 119 | return sum(commissions(t) * discount(t) for t in range(max_proj_len)) 120 | 121 | @cash 122 | def net_cf(t): 123 | return premiums(t) - claims(t) - expenses(t) - commissions(t) 124 | 125 | @cash 126 | def pv_net_cf(): 127 | return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions() 128 | 129 | @cash 130 | def result_cf(): 131 | t_len = range(max_proj_len) 132 | 133 | data = { 134 | "Premiums": [np.sum(premiums(t)) for t in t_len], 135 | "Claims": [np.sum(claims(t)) for t in t_len], 136 | "Expenses": [np.sum(expenses(t)) for t in t_len], 137 | "Commissions": [np.sum(commissions(t)) for t in t_len], 138 | "Net Cashflow": [np.sum(net_cf(t)) for t in t_len] 139 | } 140 | return pd.DataFrame(data, index=t_len) 141 | 142 | def basicterm_recursive_numpy(): 143 | cash.reset() # Ensure the cache is clear before running calculations 144 | return float(np.sum(pv_net_cf())) 145 | 146 | if __name__ == "__main__": 147 | print(basicterm_recursive_numpy()) 148 | -------------------------------------------------------------------------------- 
/github-runners-benchmarks/Python/basicterm_m_recursive_pytorch.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import torch 5 | 6 | torch.set_default_dtype(torch.float64) 7 | 8 | # constants 9 | max_proj_len = 12 * 20 + 1 10 | 11 | mp = pd.read_csv("BasicTerm_M/model_point_table.csv") 12 | disc_rate = torch.tensor(pd.read_csv("BasicTerm_M/disc_rate_ann.csv")['zero_spot'].values) 13 | mort_np = torch.tensor(pd.read_csv("BasicTerm_M/mort_table.csv").drop(columns=["Age"]).values) 14 | sum_assured = torch.tensor(mp["sum_assured"].values) 15 | issue_age = torch.tensor(mp["age_at_entry"].values) 16 | policy_term = torch.tensor(mp["policy_term"].values) 17 | 18 | # classes 19 | class Cash: 20 | def __init__(self): 21 | self.reset() 22 | 23 | def reset(self): 24 | self.caches = defaultdict(dict) 25 | 26 | def __call__(self, func): 27 | @wraps(func) 28 | def wrapper(*args, **kwargs): 29 | key = (args, frozenset(kwargs.items())) 30 | if key not in self.caches[func.__name__]: 31 | self.caches[func.__name__][key] = func(*args, **kwargs) 32 | return self.caches[func.__name__][key] 33 | 34 | return wrapper 35 | 36 | cash = Cash() 37 | 38 | @cash 39 | def get_annual_rate(duration: int): 40 | return mort_np[issue_age + duration - 18, min(duration, 5)] 41 | @cash 42 | def get_monthly_rate(duration: int): 43 | return 1 - (1 - get_annual_rate(duration)) ** (1/12) 44 | @cash 45 | def duration(t: int): 46 | return t // 12 47 | @cash 48 | def pols_death(t: int): 49 | return pols_if(t) * get_monthly_rate(duration(t)) 50 | @cash 51 | def pols_if(t: int): 52 | if t == 0: 53 | return 1 54 | return pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1) - pols_maturity(t) 55 | 56 | @cash 57 | def lapse_rate(t: int): 58 | return max(0.1 - 0.02 * duration(t), 0.02) 59 | @cash 60 | def pols_lapse(t: int): 61 | return (pols_if(t) - pols_death(t)) * (1 - (1 - 
lapse_rate(t)) ** (1/12)) 62 | @cash 63 | def pols_maturity(t: int): 64 | if t == 0: 65 | return 0 66 | return (t == 12 * policy_term) * (pols_if(t - 1) - pols_lapse(t - 1) - pols_death(t - 1)) 67 | 68 | @cash 69 | def discount(t: int): 70 | return (1 + disc_rate[duration(t)]) ** (-t/12) 71 | @cash 72 | def claims(t: int): 73 | return pols_death(t) * sum_assured 74 | @cash 75 | def inflation_rate(): 76 | return 0.01 77 | @cash 78 | def inflation_factor(t): 79 | return (1 + inflation_rate()) ** (t/12) 80 | @cash 81 | def expense_acq(): 82 | return 300 83 | @cash 84 | def expense_maint(): 85 | return 60 86 | @cash 87 | def pv_pols_if(): 88 | return sum(pols_if(t) * discount(t) for t in range(max_proj_len)) 89 | @cash 90 | def pv_claims(): 91 | return sum(claims(t) * discount(t) for t in range(max_proj_len)) 92 | @cash 93 | def net_premium_pp(): 94 | return pv_claims() / pv_pols_if() 95 | @cash 96 | def loading_prem(): 97 | return 0.5 98 | @cash 99 | def expenses(t): 100 | return (t == 0) * expense_acq() * pols_if(t) \ 101 | + pols_if(t) * expense_maint()/12 * inflation_factor(t) 102 | @cash 103 | def premium_pp(): 104 | return torch.round((1 + loading_prem()) * net_premium_pp(), decimals=2) 105 | @cash 106 | def premiums(t): 107 | return premium_pp() * pols_if(t) 108 | @cash 109 | def pv_premiums(): 110 | return sum(premiums(t) * discount(t) for t in range(max_proj_len)) 111 | @cash 112 | def pv_expenses(): 113 | return sum(expenses(t) * discount(t) for t in range(max_proj_len)) 114 | @cash 115 | def commissions(t): 116 | return (duration(t) == 0) * premiums(t) 117 | @cash 118 | def pv_commissions(): 119 | return sum(commissions(t) * discount(t) for t in range(max_proj_len)) 120 | @cash 121 | def net_cf(t): 122 | return premiums(t) - claims(t) - expenses(t) - commissions(t) 123 | @cash 124 | def pv_net_cf(): 125 | return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions() 126 | 127 | @cash 128 | def result_cf(): 129 | t_len = range(max_proj_len) 130 | data 
= { 131 | "Premiums": [torch.sum(premiums(t)).item() for t in t_len], 132 | "Claims": [torch.sum(claims(t)).item() for t in t_len], 133 | "Expenses": [torch.sum(expenses(t)).item() for t in t_len], 134 | "Commissions": [torch.sum(commissions(t)).item() for t in t_len], 135 | "Net Cashflow": [torch.sum(net_cf(t)).item() for t in t_len] 136 | } 137 | return pd.DataFrame(data, index=t_len) 138 | 139 | 140 | def basicterm_recursive_pytorch(): 141 | cash.caches.clear() 142 | return float(torch.sum(pv_net_cf()).item()) 143 | 144 | 145 | 146 | 147 | def run_tests(): 148 | # Note: The test values may need to be adjusted for PyTorch's precision and operation differences 149 | assert abs(pv_net_cf()[0] - 910.9206609336586) < 1e-3 150 | assert abs(pv_premiums()[0] - 8252.085855522233) < 1e-3 151 | assert abs(pv_expenses()[0] - 755.3660261078035) < 1e-3 152 | assert abs(pv_commissions()[0] - 1084.6042701164513) < 1e-3 153 | assert abs(pv_pols_if()[0] - 87.0106058152913) < 1e-3 154 | assert abs(pv_claims()[0] - 5501.19489836432) < 1e-3 155 | assert abs(net_premium_pp()[0] - 63.22441783754982) < 1e-3 156 | # Adjust the following tests for tensors 157 | # assert all(pols_if(200)[:3] == [0, 0.5724017900070532, 0]) 158 | # assert all(claims(130)[:3] == [0, 28.82531005791726, 0]) 159 | # assert premiums(130)[1] == 39.565567796442494 160 | # assert expenses(100)[1] == 3.703818110341339 161 | assert abs(premium_pp()[0] - 94.84) < 1e-2 162 | assert abs(inflation_factor(100) - 1.0864542626396292) < 1e-3 163 | 164 | if __name__ == "__main__": 165 | run_tests() 166 | print("All tests passed") 167 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import timeit 3 | from basicterm_me_lifelib import basicterm_me_lifelib 4 | from basicterm_me_recursive_numpy import basicterm_me_recursive_numpy 5 | from 
basicterm_me_heavylight_numpy import basicterm_me_heavylight_numpy 6 | from pprint import pprint 7 | 8 | 9 | def run_basic_term_me_benchmarks(): 10 | trials = 7 11 | modelx_time = timeit.repeat(stmt="basicterm_me_lifelib()", setup="from basicterm_me_lifelib import basicterm_me_lifelib", number=1, repeat=trials) 12 | modelx_result = basicterm_me_lifelib() 13 | recursive_numpy_time = timeit.repeat(stmt="basicterm_me_recursive_numpy()", setup="from basicterm_me_recursive_numpy import basicterm_me_recursive_numpy", number=1, repeat=trials) 14 | recursive_numpy_result = basicterm_me_recursive_numpy() 15 | heavylight_time = timeit.repeat(stmt="basicterm_me_heavylight_numpy()", setup="from basicterm_me_heavylight_numpy import basicterm_me_heavylight_numpy", number=1, repeat=trials) 16 | heavylight_result = basicterm_me_heavylight_numpy() 17 | return { 18 | "Python lifelib basic_term_me": { 19 | "minimum time": f"{np.min(modelx_time)*1000} milliseconds", 20 | "result": modelx_result, 21 | }, 22 | "Python recursive numpy basic_term_me": { 23 | "minimum time": f"{np.min(recursive_numpy_time)*1000} milliseconds", 24 | "result": recursive_numpy_result, 25 | }, 26 | "Python heavylight numpy basic_term_me": { 27 | "minimum time": f"{np.min(heavylight_time)*1000} milliseconds", 28 | "result": heavylight_result, 29 | } 30 | } 31 | 32 | if __name__ == "__main__": 33 | results = run_basic_term_me_benchmarks() 34 | pprint(results) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me_heavylight_numpy.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | from heavylight.memory_optimized_model import LightModel 6 | 7 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 8 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", 
index_col=0) 9 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 10 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 11 | 12 | class ModelPoints: 13 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame): 14 | self.table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 15 | self.table.sort_values(by="policy_id", inplace=True) 16 | self.table["premium_pp"] = np.around(self.table["sum_assured"] * self.table["premium_rate"],2) 17 | self.premium_pp = self.table["premium_pp"].to_numpy() 18 | self.duration_mth = self.table["duration_mth"].to_numpy() 19 | self.age_at_entry = self.table["age_at_entry"].to_numpy() 20 | self.sum_assured = self.table["sum_assured"].to_numpy() 21 | self.policy_count = self.table["policy_count"].to_numpy() 22 | self.policy_term = self.table["policy_term"].to_numpy() 23 | self.max_proj_len: int = np.max(12 * self.policy_term - self.duration_mth) + 1 24 | 25 | class Assumptions: 26 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 27 | self.disc_rate_ann = disc_rate_ann["zero_spot"].values 28 | self.mort_table = mort_table.to_numpy() 29 | 30 | def get_mortality(self, age, duration): 31 | return self.mort_table[age-18, np.minimum(duration, 5)] 32 | 33 | class TermME(LightModel): 34 | def __init__(self, mp: ModelPoints, assume: Assumptions): 35 | super().__init__() 36 | self.mp = mp 37 | self.assume = assume 38 | 39 | def age(self, t): 40 | return self.mp.age_at_entry + self.duration(t) 41 | 42 | def claim_pp(self, t): 43 | return self.mp.sum_assured 44 | 45 | def claims(self, t): 46 | return self.claim_pp(t) * self.pols_death(t) 47 | 48 | def commissions(self, t): 49 | return (self.duration(t) == 0) * self.premiums(t) 50 | 51 | def disc_factors(self): 52 | return np.array(list((1 + self.disc_rate_mth()[t])**(-t) for t in range(self.mp.max_proj_len))) 53 | 54 | def 
discount(self, t: int): 55 | return (1 + self.assume.disc_rate_ann[t//12]) ** (-t/12) 56 | 57 | def disc_rate_mth(self): 58 | return np.array(list((1 + self.assume.disc_rate_ann[t//12])**(1/12) - 1 for t in range(self.mp.max_proj_len))) 59 | 60 | def duration(self, t): 61 | return self.duration_mth(t) //12 62 | 63 | def duration_mth(self, t): 64 | if t == 0: 65 | return self.mp.duration_mth 66 | else: 67 | return self.duration_mth(t-1) + 1 68 | 69 | def expense_acq(self): 70 | return 300 71 | 72 | def expense_maint(self): 73 | return 60 74 | 75 | def expenses(self, t): 76 | return self.expense_acq() * self.pols_new_biz(t) \ 77 | + self.pols_if_at(t, "BEF_DECR") * self.expense_maint()/12 * self.inflation_factor(t) 78 | 79 | def inflation_factor(self, t): 80 | return (1 + self.inflation_rate())**(t/12) 81 | 82 | def inflation_rate(self): 83 | return 0.01 84 | 85 | def lapse_rate(self, t): 86 | return np.maximum(0.1 - 0.02 * self.duration(t), 0.02) 87 | 88 | def loading_prem(self): 89 | return 0.5 90 | 91 | def mort_rate(self, t): 92 | return self.assume.get_mortality(self.age(t), self.duration(t)) 93 | 94 | def mort_rate_mth(self, t): 95 | return 1-(1- self.mort_rate(t))**(1/12) 96 | 97 | def net_cf(self, t): 98 | return self.premiums(t) - self.claims(t) - self.expenses(t) - self.commissions(t) 99 | 100 | def pols_death(self, t): 101 | return self.pols_if_at(t, "BEF_DECR") * self.mort_rate_mth(t) 102 | 103 | def pols_if(self, t): 104 | return self.pols_if_at(t, "BEF_MAT") 105 | 106 | def pols_if_at(self, t, timing): 107 | if timing == "BEF_MAT": 108 | if t == 0: 109 | return self.pols_if_init() 110 | else: 111 | return self.pols_if_at(t-1, "BEF_DECR") - self.pols_lapse(t-1) - self.pols_death(t-1) 112 | elif timing == "BEF_NB": 113 | return self.pols_if_at(t, "BEF_MAT") - self.pols_maturity(t) 114 | elif timing == "BEF_DECR": 115 | return self.pols_if_at(t, "BEF_NB") + self.pols_new_biz(t) 116 | else: 117 | raise ValueError("invalid timing") 118 | 119 | def 
pols_if_init(self): 120 | return np.where(self.duration_mth(0) > 0, self.mp.policy_count, 0) 121 | 122 | def pols_lapse(self, t): 123 | return (self.pols_if_at(t, "BEF_DECR") - self.pols_death(t)) * (1-(1 - self.lapse_rate(t))**(1/12)) 124 | 125 | def pols_maturity(self, t): 126 | return (self.duration_mth(t) == self.mp.policy_term * 12) * self.pols_if_at(t, "BEF_MAT") 127 | 128 | def pols_new_biz(self, t): 129 | return np.where(self.duration_mth(t) == 0, self.mp.policy_count, 0) 130 | 131 | def premiums(self, t): 132 | return self.mp.premium_pp * self.pols_if_at(t, "BEF_DECR") 133 | 134 | mp = ModelPoints(model_point_table, premium_table) 135 | assume = Assumptions(disc_rate_ann, mort_table) 136 | model = TermME(mp, assume) 137 | 138 | def basicterm_me_heavylight_numpy(): 139 | model.ResetCache() 140 | tot = sum(np.sum(model.premiums(t) - model.claims(t) - model.expenses(t) - model.commissions(t)) \ 141 | * model.discount(t) for t in range(model.mp.max_proj_len)) 142 | return float(tot) 143 | 144 | if __name__ == "__main__": 145 | print(basicterm_me_heavylight_numpy()) 146 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me_lifelib.py: -------------------------------------------------------------------------------- 1 | import modelx as mx 2 | import numpy as np 3 | 4 | m = mx.read_model("BasicTerm_ME") 5 | 6 | def basicterm_me_lifelib(): 7 | m.Projection.clear_cache = 1 8 | return float(np.sum(m.Projection.pv_net_cf())) 9 | 10 | if __name__ == "__main__": 11 | print(basicterm_me_lifelib()) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/basicterm_me_recursive_numpy.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | from collections import defaultdict 3 | import pandas as pd 4 | import numpy as np 5 | 6 | class Cash: 7 | def __init__(self): 8 | 
self.reset() 9 | 10 | def reset(self): 11 | self.caches = defaultdict(dict) 12 | 13 | def __call__(self, func): 14 | @wraps(func) 15 | def wrapper(*args, **kwargs): 16 | key = (args, frozenset(kwargs.items())) 17 | if key not in self.caches[func.__name__]: 18 | self.caches[func.__name__][key] = func(*args, **kwargs) 19 | return self.caches[func.__name__][key] 20 | 21 | return wrapper 22 | 23 | cash = Cash() 24 | 25 | disc_rate_ann = pd.read_excel("BasicTerm_ME/disc_rate_ann.xlsx", index_col=0) 26 | mort_table = pd.read_excel("BasicTerm_ME/mort_table.xlsx", index_col=0) 27 | model_point_table = pd.read_excel("BasicTerm_ME/model_point_table.xlsx", index_col=0) 28 | premium_table = pd.read_excel("BasicTerm_ME/premium_table.xlsx", index_col=[0,1]) 29 | 30 | class ModelPoints: 31 | def __init__(self, model_point_table: pd.DataFrame, premium_table: pd.DataFrame): 32 | self.table = model_point_table.merge(premium_table, left_on=["age_at_entry", "policy_term"], right_index=True) 33 | self.table.sort_values(by="policy_id", inplace=True) 34 | self.table["premium_pp"] = np.around(self.table["sum_assured"] * self.table["premium_rate"],2) 35 | self.premium_pp = self.table["premium_pp"].to_numpy() 36 | self.duration_mth = self.table["duration_mth"].to_numpy() 37 | self.age_at_entry = self.table["age_at_entry"].to_numpy() 38 | self.sum_assured = self.table["sum_assured"].to_numpy() 39 | self.policy_count = self.table["policy_count"].to_numpy() 40 | self.policy_term = self.table["policy_term"].to_numpy() 41 | 42 | class Assumptions: 43 | def __init__(self, disc_rate_ann: pd.DataFrame, mort_table: pd.DataFrame): 44 | self.disc_rate_ann = disc_rate_ann["zero_spot"].values 45 | self.mort_table = mort_table.to_numpy() 46 | 47 | def get_mortality(self, age, duration): 48 | return self.mort_table[age-18, np.minimum(duration, 5)] 49 | 50 | mp = ModelPoints(model_point_table, premium_table) 51 | assume = Assumptions(disc_rate_ann, mort_table) 52 | 53 | @cash 54 | def age(t): 55 | return 
mp.age_at_entry + duration(t) 56 | 57 | @cash 58 | def claim_pp(t): 59 | return mp.sum_assured 60 | 61 | @cash 62 | def claims(t): 63 | return claim_pp(t) * pols_death(t) 64 | 65 | @cash 66 | def commissions(t): 67 | return (duration(t) == 0) * premiums(t) 68 | 69 | @cash 70 | def disc_factors(): 71 | return np.array(list((1 + disc_rate_mth()[t])**(-t) for t in range(max_proj_len()))) 72 | 73 | @cash 74 | def discount(t: int): 75 | return (1 + assume.disc_rate_ann[t//12]) ** (-t/12) 76 | 77 | @cash 78 | def disc_rate_mth(): 79 | return np.array(list((1 + assume.disc_rate_ann[t//12])**(1/12) - 1 for t in range(max_proj_len()))) 80 | 81 | @cash 82 | def duration(t): 83 | return duration_mth(t) //12 84 | 85 | @cash 86 | def duration_mth(t): 87 | if t == 0: 88 | return mp.duration_mth 89 | else: 90 | return duration_mth(t-1) + 1 91 | 92 | @cash 93 | def expense_acq(): 94 | return 300 95 | 96 | @cash 97 | def expense_maint(): 98 | return 60 99 | 100 | @cash 101 | def expenses(t): 102 | return expense_acq() * pols_new_biz(t) \ 103 | + pols_if_at(t, "BEF_DECR") * expense_maint()/12 * inflation_factor(t) 104 | 105 | @cash 106 | def inflation_factor(t): 107 | return (1 + inflation_rate())**(t/12) 108 | 109 | @cash 110 | def inflation_rate(): 111 | return 0.01 112 | 113 | @cash 114 | def lapse_rate(t): 115 | return np.maximum(0.1 - 0.02 * duration(t), 0.02) 116 | 117 | @cash 118 | def loading_prem(): 119 | return 0.5 120 | 121 | @cash 122 | def max_proj_len(): 123 | return max(proj_len()) 124 | 125 | @cash 126 | def mort_rate(t): 127 | return assume.get_mortality(age(t), duration(t)) 128 | 129 | @cash 130 | def mort_rate_mth(t): 131 | return 1-(1- mort_rate(t))**(1/12) 132 | 133 | @cash 134 | def net_cf(t): 135 | return premiums(t) - claims(t) - expenses(t) - commissions(t) 136 | 137 | @cash 138 | def pols_death(t): 139 | return pols_if_at(t, "BEF_DECR") * mort_rate_mth(t) 140 | 141 | @cash 142 | def pols_if(t): 143 | return pols_if_at(t, "BEF_MAT") 144 | 145 | @cash 146 | 
def pols_if_at(t, timing): 147 | if timing == "BEF_MAT": 148 | if t == 0: 149 | return pols_if_init() 150 | else: 151 | return pols_if_at(t-1, "BEF_DECR") - pols_lapse(t-1) - pols_death(t-1) 152 | elif timing == "BEF_NB": 153 | return pols_if_at(t, "BEF_MAT") - pols_maturity(t) 154 | elif timing == "BEF_DECR": 155 | return pols_if_at(t, "BEF_NB") + pols_new_biz(t) 156 | else: 157 | raise ValueError("invalid timing") 158 | 159 | @cash 160 | def pols_if_init(): 161 | return np.where(duration_mth(0) > 0, mp.policy_count, 0) 162 | 163 | @cash 164 | def pols_lapse(t): 165 | return (pols_if_at(t, "BEF_DECR") - pols_death(t)) * (1-(1 - lapse_rate(t))**(1/12)) 166 | 167 | @cash 168 | def pols_maturity(t): 169 | return (duration_mth(t) == mp.policy_term * 12) * pols_if_at(t, "BEF_MAT") 170 | 171 | @cash 172 | def pols_new_biz(t): 173 | return np.where(duration_mth(t) == 0, mp.policy_count, 0) 174 | 175 | @cash 176 | def premiums(t): 177 | return mp.premium_pp * pols_if_at(t, "BEF_DECR") 178 | 179 | @cash 180 | def proj_len(): 181 | return np.maximum(12 * mp.policy_term - duration_mth(0) + 1, 0) 182 | 183 | @cash 184 | def pv_claims(): 185 | return sum(claims(t) * discount(t) for t in range(max_proj_len())) 186 | 187 | @cash 188 | def pv_commissions(): 189 | return sum(commissions(t) * discount(t) for t in range(max_proj_len())) 190 | 191 | @cash 192 | def pv_expenses(): 193 | return sum(expenses(t) * discount(t) for t in range(max_proj_len())) 194 | 195 | @cash 196 | def pv_net_cf(): 197 | return pv_premiums() - pv_claims() - pv_expenses() - pv_commissions() 198 | 199 | @cash 200 | def pv_pols_if(): 201 | return sum(pols_if_at(t, "BEF_DECR") * discount(t) for t in range(max_proj_len())) 202 | 203 | @cash 204 | def pv_premiums(): 205 | return sum(premiums(t) * discount(t) for t in range(max_proj_len())) 206 | 207 | @cash 208 | def result_cf(): 209 | t_len = range(max_proj_len()) 210 | 211 | data = { 212 | "Premiums": [sum(premiums(t)) for t in t_len], 213 | "Claims": 
[sum(claims(t)) for t in t_len], 214 | "Expenses": [sum(expenses(t)) for t in t_len], 215 | "Commissions": [sum(commissions(t)) for t in t_len], 216 | "Net Cashflow": [sum(net_cf(t)) for t in t_len] 217 | } 218 | 219 | return pd.DataFrame(data, index=t_len) 220 | 221 | 222 | def result_pols(): 223 | t_len = range(max_proj_len()) 224 | 225 | data = { 226 | "pols_if": [sum(pols_if(t)) for t in t_len], 227 | "pols_maturity": [sum(pols_maturity(t)) for t in t_len], 228 | "pols_new_biz": [sum(pols_new_biz(t)) for t in t_len], 229 | "pols_death": [sum(pols_death(t)) for t in t_len], 230 | "pols_lapse": [sum(pols_lapse(t)) for t in t_len] 231 | } 232 | 233 | return pd.DataFrame(data, index=t_len) 234 | 235 | 236 | def basicterm_me_recursive_numpy(): 237 | cash.reset() 238 | return float(np.sum(pv_net_cf())) 239 | 240 | if __name__ == "__main__": 241 | cash.reset() 242 | print(basicterm_me_recursive_numpy()) -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/benchmark_results.yaml: -------------------------------------------------------------------------------- 1 | basic_term_benchmark: 2 | Python array numpy basic_term_m: 3 | minimum time: 81.15190000000894 milliseconds 4 | result: 14489630.534603368 5 | Python array pytorch basic_term_m: 6 | minimum time: 48.05233400000475 milliseconds 7 | result: 14489630.534603368 8 | Python lifelib basic_term_m: 9 | minimum time: 606.547575999997 milliseconds 10 | result: 14489630.534601536 11 | Python recursive numpy basic_term_m: 12 | minimum time: 47.489563999988604 milliseconds 13 | result: 14489630.534603368 14 | Python recursive pytorch basic_term_m: 15 | minimum time: 73.68574099999137 milliseconds 16 | result: 14489630.53460337 17 | basic_term_me_benchmark: 18 | Python heavylight numpy basic_term_me: 19 | minimum time: 347.3877970000103 milliseconds 20 | result: 215146132.0684811 21 | Python lifelib basic_term_me: 22 | minimum time: 1140.047031999984 milliseconds 
23 | result: 215146132.06848112 24 | Python recursive numpy basic_term_me: 25 | minimum time: 325.56454799998846 milliseconds 26 | result: 215146132.0684814 27 | mortality: 28 | Python PyMort: 29 | minimum time: 9.101711999988993 milliseconds 30 | result: 1904.4865526636793 31 | savings_benchmark: 32 | Python lifelib cashvalue_me_ex4: 33 | minimum time: 602.367275000006 milliseconds 34 | result: 3507113709040.141 35 | Python recursive numpy cashvalue_me_ex4: 36 | minimum time: 541.5200040000059 milliseconds 37 | result: 3507113709040.124 38 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/main.py: -------------------------------------------------------------------------------- 1 | from mortality import run_mortality_benchmarks 2 | from basicterm_m import run_basic_term_benchmarks 3 | from savings_me import run_savings_benchmarks 4 | from basicterm_me import run_basic_term_me_benchmarks 5 | import yaml 6 | 7 | 8 | def get_results(): 9 | return { 10 | "mortality": run_mortality_benchmarks(), 11 | "basic_term_benchmark": run_basic_term_benchmarks(), 12 | "basic_term_me_benchmark": run_basic_term_me_benchmarks(), 13 | "savings_benchmark": run_savings_benchmarks(), 14 | } 15 | 16 | 17 | if __name__ == "__main__": 18 | results = get_results() 19 | # write to benchmark_results.yaml 20 | with open("benchmark_results.yaml", "w") as f: 21 | yaml.dump(results, f) 22 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/mortality.py: -------------------------------------------------------------------------------- 1 | from pymort.XML import MortXML 2 | import numpy as np 3 | import timeit 4 | 5 | def get_select(): 6 | return np.array( 7 | [MortXML(id).Tables[0].Values.unstack().values for id in range(3299, 3309)] 8 | ) 9 | 10 | def get_ultimate(): 11 | return np.array( 12 | [MortXML(id).Tables[1].Values.unstack().values for id in range(3299, 3309)] 
13 | ) 14 | 15 | def mortality1(select = get_select(), ultimate = get_ultimate()): 16 | mortality_table_index = np.arange(10) 17 | duration = np.arange(25) 18 | issue_age = np.arange(18, 51) 19 | mortality_table_index, duration, issue_age = [ 20 | x.flatten() for x in np.meshgrid(mortality_table_index, duration, issue_age) 21 | ] 22 | time_axis = np.arange(30)[:, None] 23 | duration_projected = time_axis + duration 24 | q = np.where( 25 | duration_projected < select.shape[-1], 26 | select[ 27 | mortality_table_index, 28 | issue_age - 18, 29 | np.minimum(duration_projected, select.shape[-1] - 1), 30 | ], # np.minimum avoids some out of bounds error (JAX clips out of bounds indexes so no problem if using JAX) 31 | ultimate[mortality_table_index, issue_age - 18 + duration_projected], 32 | ) 33 | npx = np.concatenate( 34 | [np.ones((1, q.shape[1])), np.cumprod(1 - q, axis=0)[:-1]], axis=0 35 | ) 36 | v = 1 / 1.02 37 | v_eoy = v ** np.arange(1, 31)[:, None] 38 | unit_claims_discounted = npx * q * v_eoy 39 | return np.sum(unit_claims_discounted) 40 | 41 | def run_mortality_benchmarks(): 42 | select, ultimate = get_select(), get_ultimate() 43 | mort1_result = mortality1(select, ultimate) 44 | trials = 20 45 | b1 = timeit.repeat(stmt="mortality1(select, ultimate)", setup="from mortality import mortality1", globals = {"select": select, "ultimate": ultimate}, number=1, repeat=trials) 46 | return { 47 | "Python PyMort": { 48 | "result": float(mort1_result), 49 | "minimum time": f"{np.min(b1)*1000} milliseconds", 50 | } 51 | } 52 | 53 | if __name__ == "__main__": 54 | results = run_mortality_benchmarks() 55 | print(results) 56 | -------------------------------------------------------------------------------- /github-runners-benchmarks/Python/notebook.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 25, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": 
"stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "UserWarning: Existing model 'CashValue_ME_EX4' renamed to 'CashValue_ME_EX4_BAK1'\n" 13 | ] 14 | }, 15 | { 16 | "data": { 17 | "text/plain": [ 18 | "0.6307517449999978" 19 | ] 20 | }, 21 | "execution_count": 25, 22 | "metadata": {}, 23 | "output_type": "execute_result" 24 | } 25 | ], 26 | "source": [ 27 | "import lifelib\n", 28 | "import timeit\n", 29 | "import pandas as pd\n", 30 | "import numpy as np\n", 31 | "import modelx as mx\n", 32 | "import openpyxl\n", 33 | "\n", 34 | "ex4 = mx.read_model('CashValue_ME_EX4')\n", 35 | "Projection = ex4.Projection\n", 36 | "\n", 37 | "timeit.timeit('ex4.Projection.result_pv()', globals=globals(), number=5)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "model_point(): spec_id age_at_entry sex policy_term policy_count \\\n", 50 | "point_id scen_id \n", 51 | "1 1 A 20 M 10 100 \n", 52 | " 2 A 20 M 10 100 \n", 53 | " 3 A 20 M 10 100 \n", 54 | " 4 A 20 M 10 100 \n", 55 | " 5 A 20 M 10 100 \n", 56 | "... ... ... .. ... ... \n", 57 | "9 996 A 20 M 10 100 \n", 58 | " 997 A 20 M 10 100 \n", 59 | " 998 A 20 M 10 100 \n", 60 | " 999 A 20 M 10 100 \n", 61 | " 1000 A 20 M 10 100 \n", 62 | "\n", 63 | " sum_assured duration_mth premium_pp av_pp_init \\\n", 64 | "point_id scen_id \n", 65 | "1 1 500000 0 500000 0 \n", 66 | " 2 500000 0 500000 0 \n", 67 | " 3 500000 0 500000 0 \n", 68 | " 4 500000 0 500000 0 \n", 69 | " 5 500000 0 500000 0 \n", 70 | "... ... ... ... ... 
\n", 71 | "9 996 500000 0 300000 0 \n", 72 | " 997 500000 0 300000 0 \n", 73 | " 998 500000 0 300000 0 \n", 74 | " 999 500000 0 300000 0 \n", 75 | " 1000 500000 0 300000 0 \n", 76 | "\n", 77 | " accum_prem_init_pp premium_type has_surr_charge \\\n", 78 | "point_id scen_id \n", 79 | "1 1 0 SINGLE False \n", 80 | " 2 0 SINGLE False \n", 81 | " 3 0 SINGLE False \n", 82 | " 4 0 SINGLE False \n", 83 | " 5 0 SINGLE False \n", 84 | "... ... ... ... \n", 85 | "9 996 0 SINGLE False \n", 86 | " 997 0 SINGLE False \n", 87 | " 998 0 SINGLE False \n", 88 | " 999 0 SINGLE False \n", 89 | " 1000 0 SINGLE False \n", 90 | "\n", 91 | " surr_charge_id load_prem_rate is_wl \n", 92 | "point_id scen_id \n", 93 | "1 1 NaN 0.0 False \n", 94 | " 2 NaN 0.0 False \n", 95 | " 3 NaN 0.0 False \n", 96 | " 4 NaN 0.0 False \n", 97 | " 5 NaN 0.0 False \n", 98 | "... ... ... ... \n", 99 | "9 996 NaN 0.0 False \n", 100 | " 997 NaN 0.0 False \n", 101 | " 998 NaN 0.0 False \n", 102 | " 999 NaN 0.0 False \n", 103 | " 1000 NaN 0.0 False \n", 104 | "\n", 105 | "[9000 rows x 15 columns]\n", 106 | "with indices: MultiIndex([(1, 1),\n", 107 | " (1, 2),\n", 108 | " (1, 3),\n", 109 | " (1, 4),\n", 110 | " (1, 5),\n", 111 | " (1, 6),\n", 112 | " (1, 7),\n", 113 | " (1, 8),\n", 114 | " (1, 9),\n", 115 | " (1, 10),\n", 116 | " ...\n", 117 | " (9, 991),\n", 118 | " (9, 992),\n", 119 | " (9, 993),\n", 120 | " (9, 994),\n", 121 | " (9, 995),\n", 122 | " (9, 996),\n", 123 | " (9, 997),\n", 124 | " (9, 998),\n", 125 | " (9, 999),\n", 126 | " (9, 1000)],\n", 127 | " names=['point_id', 'scen_id'], length=9000)\n" 128 | ] 129 | } 130 | ], 131 | "source": [ 132 | "# Projection.model_point_table = Projection.model_point_1\n", 133 | "table = Projection.model_point_table\n", 134 | "# print(\"Number of model points: \", len(table))\n", 135 | "# print(\"Model points: \", table)\n", 136 | "# points = Projection.model_point_table_ext()\n", 137 | "# points = Projection.model_point()[\"scen_id\"].values[990:1010]\n", 138 | 
"points = Projection.model_point()\n", 139 | "print(\"model_point(): \", points)\n", 140 | "print(\"with indices: \", points.index)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 3, 146 | "metadata": {}, 147 | "outputs": [ 148 | { 149 | "name": "stdout", 150 | "output_type": "stream", 151 | "text": [ 152 | "(9000,)\n", 153 | "900000.0\n", 154 | "[100. 100. 100. ... 100. 100. 100.]\n" 155 | ] 156 | } 157 | ], 158 | "source": [ 159 | "pols = ex4.Projection.pols_if_at(12, \"BEF_DECR\")\n", 160 | "print(np.shape(pols))\n", 161 | "print(sum(pols))\n", 162 | "print(pols)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 4, 168 | "metadata": {}, 169 | "outputs": [ 170 | { 171 | "data": { 172 | "text/plain": [ 173 | "array([100., 100., 100., ..., 100., 100., 100.])" 174 | ] 175 | }, 176 | "execution_count": 4, 177 | "metadata": {}, 178 | "output_type": "execute_result" 179 | } 180 | ], 181 | "source": [ 182 | "Projection.pols_if(1)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 24, 188 | "metadata": {}, 189 | "outputs": [ 190 | { 191 | "data": { 192 | "text/plain": [ 193 | "399477611.70743275" 194 | ] 195 | }, 196 | "execution_count": 24, 197 | "metadata": {}, 198 | "output_type": "execute_result" 199 | } 200 | ], 201 | "source": [ 202 | "Projection.result_pv()[\"Net Cashflow\"].groupby(\"point_id\").mean().sum()" 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [] 211 | }, 212 | { 213 | "cell_type": "code", 214 | "execution_count": null, 215 | "metadata": {}, 216 | "outputs": [ 217 | { 218 | "name": "stdout", 219 | "output_type": "stream", 220 | "text": [ 221 | "121\n" 222 | ] 223 | }, 224 | { 225 | "data": { 226 | "text/plain": [ 227 | "array([50000000., 50000000., 50000000., ..., 30000000., 30000000.,\n", 228 | " 30000000.])" 229 | ] 230 | }, 231 | "execution_count": 17, 232 | "metadata": 
{}, 233 | "output_type": "execute_result" 234 | } 235 | ], 236 | "source": [ 237 | "print(ex4.Projection.max_proj_len())\n", 238 | "ex4.Projection.pv_premiums()" 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": null, 244 | "metadata": {}, 245 | "outputs": [ 246 | { 247 | "data": { 248 | "text/plain": [ 249 | "array([100, 100, 100, ..., 100, 100, 100])" 250 | ] 251 | }, 252 | "execution_count": 18, 253 | "metadata": {}, 254 | "output_type": "execute_result" 255 | } 256 | ], 257 | "source": [ 258 | "ex4.Projection.pols_new_biz(0)" 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "execution_count": null, 264 | "metadata": {}, 265 | "outputs": [ 266 | { 267 | "name": "stdout", 268 | "output_type": "stream", 269 | "text": [ 270 | "Montlhy investment returns: [ 0.00807793 -0.00048898 -0.00302246 ... -0.00917993 -0.00629737\n", 271 | " -0.00596671]\n", 272 | "with shape: (9000,)\n" 273 | ] 274 | } 275 | ], 276 | "source": [ 277 | "inv = Projection.inv_return_mth(2)\n", 278 | "print(\"Montlhy investment returns: \", inv)\n", 279 | "print(\"with shape: \", np.shape(inv))" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": null, 285 | "metadata": {}, 286 | "outputs": [ 287 | { 288 | "data": { 289 | "text/html": [ 290 | "
\n", 308 | " | \n", 309 | " | Premiums | \n", 310 | "Death | \n", 311 | "Surrender | \n", 312 | "Maturity | \n", 313 | "Expenses | \n", 314 | "Commissions | \n", 315 | "Investment Income | \n", 316 | "Change in AV | \n", 317 | "Net Cashflow | \n", 318 | "
---|---|---|---|---|---|---|---|---|---|---|
point_id | \n", 321 | "scen_id | \n", 322 | "\n", 323 | " | \n", 324 | " | \n", 325 | " | \n", 326 | " | \n", 327 | " | \n", 328 | " | \n", 329 | " | \n", 330 | " | \n", 331 | " |
1 | \n", 336 | "1 | \n", 337 | "50000000.0 | \n", 338 | "0.0 | \n", 339 | "0.0 | \n", 340 | "5.765190e+07 | \n", 341 | "975895.951147 | \n", 342 | "2500000.0 | \n", 343 | "1.793864e+07 | \n", 344 | "1.028674e+07 | \n", 345 | "-3.475896e+06 | \n", 346 | "
2 | \n", 349 | "50000000.0 | \n", 350 | "0.0 | \n", 351 | "0.0 | \n", 352 | "4.781116e+07 | \n", 353 | "975895.951147 | \n", 354 | "2500000.0 | \n", 355 | "7.638184e+06 | \n", 356 | "9.827021e+06 | \n", 357 | "-3.475896e+06 | \n", 358 | "|
3 | \n", 361 | "50000000.0 | \n", 362 | "0.0 | \n", 363 | "0.0 | \n", 364 | "5.184905e+07 | \n", 365 | "975895.951147 | \n", 366 | "2500000.0 | \n", 367 | "1.232610e+07 | \n", 368 | "1.047706e+07 | \n", 369 | "-3.475896e+06 | \n", 370 | "|
4 | \n", 373 | "50000000.0 | \n", 374 | "0.0 | \n", 375 | "0.0 | \n", 376 | "4.752251e+07 | \n", 377 | "975895.951147 | \n", 378 | "2500000.0 | \n", 379 | "7.454824e+06 | \n", 380 | "9.932312e+06 | \n", 381 | "-3.475896e+06 | \n", 382 | "|
5 | \n", 385 | "50000000.0 | \n", 386 | "0.0 | \n", 387 | "0.0 | \n", 388 | "5.796074e+07 | \n", 389 | "975895.951147 | \n", 390 | "2500000.0 | \n", 391 | "1.852191e+07 | \n", 392 | "1.056117e+07 | \n", 393 | "-3.475896e+06 | \n", 394 | "|
... | \n", 397 | "... | \n", 398 | "... | \n", 399 | "... | \n", 400 | "... | \n", 401 | "... | \n", 402 | "... | \n", 403 | "... | \n", 404 | "... | \n", 405 | "... | \n", 406 | "... | \n", 407 | "
9 | \n", 410 | "996 | \n", 411 | "30000000.0 | \n", 412 | "0.0 | \n", 413 | "0.0 | \n", 414 | "4.093654e+07 | \n", 415 | "975895.951147 | \n", 416 | "1500000.0 | \n", 417 | "4.256529e+06 | \n", 418 | "5.753036e+06 | \n", 419 | "-1.490894e+07 | \n", 420 | "
997 | \n", 423 | "30000000.0 | \n", 424 | "0.0 | \n", 425 | "0.0 | \n", 426 | "4.093654e+07 | \n", 427 | "975895.951147 | \n", 428 | "1500000.0 | \n", 429 | "7.287750e+06 | \n", 430 | "6.331561e+06 | \n", 431 | "-1.245624e+07 | \n", 432 | "|
998 | \n", 435 | "30000000.0 | \n", 436 | "0.0 | \n", 437 | "0.0 | \n", 438 | "4.093654e+07 | \n", 439 | "975895.951147 | \n", 440 | "1500000.0 | \n", 441 | "7.480443e+06 | \n", 442 | "6.031063e+06 | \n", 443 | "-1.196305e+07 | \n", 444 | "|
999 | \n", 447 | "30000000.0 | \n", 448 | "0.0 | \n", 449 | "0.0 | \n", 450 | "4.093654e+07 | \n", 451 | "975895.951147 | \n", 452 | "1500000.0 | \n", 453 | "1.098676e+07 | \n", 454 | "6.345723e+06 | \n", 455 | "-8.771397e+06 | \n", 456 | "|
1000 | \n", 459 | "30000000.0 | \n", 460 | "0.0 | \n", 461 | "0.0 | \n", 462 | "4.093654e+07 | \n", 463 | "975895.951147 | \n", 464 | "1500000.0 | \n", 465 | "8.407759e+06 | \n", 466 | "6.481302e+06 | \n", 467 | "-1.148598e+07 | \n", 468 | "
9000 rows × 9 columns
\n", 472 | "