├── .gitignore
├── LICENSE
├── README.md
├── baseline_example.ipynb
├── demo_notebooks
│   ├── generalized_mean_demo.ipynb
│   └── investment_euler_demo.ipynb
├── generalized_mean.py
├── generalized_mean_defaults.yaml
├── generate_figures
│   ├── concentration_euler_residual_linear.py
│   ├── deep_sets_linear_profiling_var_n.py
│   ├── deep_sets_nonlinear_var_nu.py
│   ├── generalized_mean_deep_sets_no_invariance_N_tables.py
│   ├── identity_moments_deep_sets_linear_relative.py
│   ├── linear_baseline_convergence_table.py
│   ├── linear_baseline_theory_vs_predicted.py
│   ├── linear_overfit_table.py
│   ├── linear_performance_table.py
│   ├── moments_deep_sets_nonlinear_residuals.py
│   ├── nonlinear_overfit_table.py
│   ├── testing_utilities.py
│   └── utilities.py
├── hpo_sweeps
│   ├── generalized_mean_deep_sets_sweep.yaml
│   ├── generalized_mean_no_invaritant_sweep.yaml
│   ├── train_time_sweep_deep_moments.yaml
│   ├── train_time_sweep_deep_sets.yaml
│   ├── train_time_sweep_identity.yaml
│   ├── train_time_sweep_nonlinear_deep_moments.yaml
│   ├── train_time_sweep_nonlinear_deep_sets.yaml
│   └── val_loss_over_param_sweep_main.yaml
├── images
│   ├── hpo_output_1.png
│   └── hpo_output_2.png
├── investment_euler.py
├── investment_euler_defaults.yaml
├── investment_euler_simple.yaml
├── pyproject.toml
├── replication_scripts
│   ├── L_16_deep_sets.yaml
│   ├── L_2_deep_moments.yaml
│   ├── L_2_deep_sets .yaml
│   ├── L_8_deep_sets.yaml
│   ├── README.md
│   ├── baseline_deep_moments.yaml
│   ├── baseline_deep_moments_one_run.yaml
│   ├── baseline_deep_sets.yaml
│   ├── baseline_deep_sets_N.yaml
│   ├── baseline_deep_sets_one_run.yaml
│   ├── baseline_identity.yaml
│   ├── baseline_identity_one_run.yaml
│   ├── baseline_nonlinear_deep_moments.yaml
│   ├── baseline_nonlinear_deep_sets.yaml
│   ├── deep_2_4_deep_sets.yaml
│   ├── deep_4_8_deep_sets.yaml
│   ├── deep_sets_nonlinear_nu_130_one_run.yaml
│   ├── deep_sets_nonlinear_nu_150_one_run.yaml
│   ├── deep_sets_nonlinear_overfit.yaml
│   ├── deep_sets_overfit.yaml
│   ├── generalized_mean_deep_sets_L_N.yaml
│   ├── generalized_mean_no_invariance_template.yaml
│   ├── run_all_generalized_mean.sh
│   ├── run_all_sequential.sh
│   ├── shallow_1_2_deep_sets.yaml
│   ├── thin_64_deep_moments.yaml
│   ├── thin_64_deep_sets.yaml
│   ├── thin_64_identity.yaml
│   ├── very_shallow_1_layer_deep_moments.yaml
│   └── wide_256_deep_sets.yaml
└── requirements.txt

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | 
6 | # C extensions
7 | *.so
8 | 
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 | 
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 | 
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 | 
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 | 
54 | # Translations
55 | *.mo
56 | *.pot
57 | 
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | target/
76 | 
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 | 
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 | 
84 | # pyenv
85 | .python-version
86 | 
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 | 
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 | 
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 | 
101 | # SageMath parsed files
102 | *.sage.py
103 | 
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 | 
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 | 
117 | # Rope project settings
118 | .ropeproject
119 | 
120 | # mkdocs documentation
121 | /site
122 | 
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 | 
128 | # Pyre type checker
129 | .pyre/
130 | 
131 | .DS_Store
132 | **/lightning_logs
133 | .vscode/settings.json
134 | config.yaml
135 | checkpoints/*
136 | ./wand
137 | symmetry_examples
138 | metrics.yaml
139 | test_results.csv
140 | wandb/
141 | generalized_mean_examples
142 | 
143 | # files
144 | *.pdf
145 | *.json
146 | *.tex
147 | generalized_mean_no_invariance_N_*.yaml
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2021 HighDimensionalEconLab
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Symmetry and Dynamic Programming
2 | Source for "Exploiting Symmetry in High-Dimensional Dynamic Programming"
3 | 
4 | **Warning**: See the [HyperParameter Tuning section](#hyperparameter-tuning) for more details on robustness checks, tuning, and examples using [Weights and Biases](https://wandb.ai). Hyperparameter optimization is an essential part of the machine learning workflow, and it rarely makes sense to check for robustness without considering how/when a new HPO process is required.
5 | 
6 | Since manual tweaking of hyperparameters is slow and error prone, a variety of ML tools exist to automate the process and its visualization. The primary [investment_euler.py](investment_euler.py) and related code are provided as an example of using this tooling.
7 | 
8 | ## Installing
9 | 
10 | ### Quick Installation Instructions
11 | Within a python environment, clone this repository with git and execute `pip install -r requirements.txt`.
12 | 
13 | See more complete instructions below in the [detailed installation](#detailed-installation-instructions) section.
14 | 
15 | ## Jupyter Notebook for Exploration
16 | 
17 | You can load the Jupyter notebook [baseline_example.ipynb](baseline_example.ipynb) directly in VS Code or on the command line with `jupyter lab` run in the local directory. This notebook loads `investment_euler.py` and provides utilities to examine the output without using it on the command line.
18 | 
19 | ## CLI Usage
20 | There is a command-line interface to solve for the equilibrium given various model and neural network parameters. This is especially convenient for deploying on the cloud or when running in parallel.
21 | 
22 | The default values of all parameters are given by [investment_euler_defaults.yaml](investment_euler_defaults.yaml). You can override these by passing in a different YAML file, or by passing in the parameters on the command line.
23 | 
24 | To use this, in a console at the root of this project, you can do things such as the following.
25 | ```bash
26 | python investment_euler.py --trainer.max_epochs=5
27 | ```
28 | Or to change the neural network architecture, you could try things such as increasing the `L` of the model
29 | ```bash
30 | python investment_euler.py --trainer.max_epochs=2 --model.ml_model.L=8
31 | ```
32 | Or changing the number of layers
33 | ```bash
34 | python investment_euler.py --trainer.max_epochs=5 --model.ml_model.phi_layers=1
35 | ```
36 | 
37 | You can swap out the entire neural network by passing in a different `ml_model` class. For example, to use a `DeepSetMoments` model, you could do
38 | ```bash
39 | python investment_euler.py --model.ml_model.class_path=econ_layers.layers.DeepSetMoments --model.ml_model.L=4 --model.ml_model.n_in=1 --model.ml_model.n_out=1 --model.ml_model.rho_layers=3 --model.ml_model.rho_hidden_dim=256 --model.ml_model.rho_hidden_bias=false --model.ml_model.rho_last_bias=true
40 | ```
41 | 
42 | To change the economic variables, such as the nonlinearity in prices, you could try things such as
43 | 
44 | ```bash
45 | python investment_euler.py --trainer.max_epochs=5 --model.nu=1.1
46 | ```
47 | 
48 | Note that for `nu != 1` there is no closed form to check against.
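As noted above, the command-line flags can also live in their own YAML file. A minimal sketch of that workflow (the `--config` flag is standard LightningCLI behavior; `my_overrides.yaml` is a hypothetical file, not one shipped in this repository):
```bash
# Write a small override file; the keys mirror the CLI flags above.
cat > my_overrides.yaml <<'EOF'
trainer:
  max_epochs: 5
model:
  nu: 1.1
EOF
python investment_euler.py --config my_overrides.yaml
# Flags placed after --config should still take precedence over the file.
python investment_euler.py --config my_overrides.yaml --trainer.max_epochs=2
```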
49 | 
50 | If you have a GPU available and you installed the appropriate version of PyTorch, then you can pass in the accelerator option,
51 | ```bash
52 | python investment_euler.py --trainer.accelerator=gpu
53 | ```
54 | Note, however, that the GPU will be slower for fewer than about 1024 agents.
55 | 
56 | # Hyperparameter Tuning
57 | 
58 | Central to deep learning is the need to tune hyperparameters. A variety of tooling for ML and deep learning exists to help, mostly under the category of "ML DevOps". This includes tools for hyperparameter optimization, model versioning, managing results, model deployment, and running on clusters/clouds. Here we will only show one of these tools, which provides simple HPO and outstanding visualization.
59 | ## Weights and Biases
60 | One tool for managing parameters and hyperparameter optimization is [Weights and Biases](https://wandb.ai/). This is a free service for academic use. It provides a dashboard to track experiments, and a way to run hyperparameter optimization sweeps.
61 | 
62 | 
63 | To use it, first create an account with [Weights and Biases](https://wandb.ai/); then, assuming you have installed the packages above, ensure you have logged in,
64 | ```bash
65 | wandb login
66 | ```
67 | 
68 | The [train_time_sweep.yaml](train_time_sweep.yaml) file contains a list of parameters defining the sweep of interest. See the [W&B docs](https://docs.wandb.ai/guides/sweeps/define-sweep-configuration) for more details, including how to handle distributions.
69 | 
70 | For our example sweep, in a terminal run
71 | ```bash
72 | wandb sweep train_time_sweep.yaml
73 | ```
74 | This will create a new sweep on the server. It will give you a URL to the sweep, which you can open in a browser. You can also see the sweep in your [W&B dashboard](https://wandb.ai/home). You will need the returned ID as well.
75 | 
76 | This doesn't create any "agents". To do that, take the `<sweep_id>` that was returned and run
77 | ```bash
78 | wandb agent <sweep_id>
79 | ```
80 | Or to only execute a fixed number of experiments on that agent, give it a count (e.g. `wandb agent --count 10 <sweep_id>`).
81 | 
82 | You can then log in to the server and run that same line, with the provided `sweep_id`, to execute the same experiments on a different machine.
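Putting the pieces together, the whole sweep workflow looks roughly as follows. This is a sketch: it uses one of the sweep files from [hpo_sweeps](hpo_sweeps), and `<entity>/<project>/<sweep_id>` stands for whatever path `wandb sweep` prints back for your account.
```bash
wandb login                                             # once per machine
wandb sweep hpo_sweeps/train_time_sweep_deep_sets.yaml  # prints the <sweep_id>
wandb agent --count 10 <entity>/<project>/<sweep_id>    # run 10 experiments, then exit
```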
83 | ## Example Results
84 | See the [W&B Training Time Sweep Results](https://wandb.ai/highdimensionaleconlab/symmetry_dp_examples/sweeps/ie7xdfv8?workspace=user-) for an example. A few useful features of this tool include,
85 | 
86 | ![Visualization 1](images/hpo_output_1.png)
87 | 
88 | This provides a standard visualization to evaluate many different hyperparameters, listed along the top and each with its own y-axis. The color matches the objective of the HPO sweep, whose value is shown on the rightmost side.
89 | 
90 | 
91 | ![Visualization 2](images/hpo_output_2.png)
92 | 
93 | Another visualization is to look at the correlation between each hyperparameter and the objective, as shown above, which summarizes the relative importance.
94 | 
95 | 
96 | # Detailed Installation Instructions
97 | For users with less experience using python, conda, and VS Code, the following provides more details.
98 | 
99 | 1. Ensure you have installed Python. For example, using [Anaconda](https://www.anaconda.com/products/individual)
100 | 2. Recommended but not required: Install [VS Code](https://code.visualstudio.com/) along with its [Python Extension](https://code.visualstudio.com/docs/languages/python)
101 | 3. Clone this repository
102 |    - Recommended: With VS Code, use `<Ctrl+Shift+P>` to open up the command bar, then choose `Git Clone`, and use the URL `https://github.com/HighDimensionalEconLab/symmetry_dynamic_programming.git`. That will give you a full environment to work with.
103 |    - Alternatively, if you have git installed, you can clone it with `git clone https://github.com/HighDimensionalEconLab/symmetry_dynamic_programming.git`
104 | 4. (Optional) create a conda [virtual environment](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)
105 | ```bash
106 | conda create -n symmetry_dp python=3.9
107 | conda activate symmetry_dp
108 | ```
109 |    - Python 3.10 is also broadly supported, but PyTorch doesn't fully support Python 3.11 yet. See Troubleshooting below if Python 3.10 has issues.
110 | 
111 | 5. (Optional) In VS Code, you can then use `<Ctrl+Shift+P>` to open up the command bar, then choose `> Python: Select Interpreter`, and choose the one in the `symmetry_dp` environment. Future `> Python: Terminal` commands then automatically activate it.
112 |    - If you are in VS Code, opening a python terminal with `<Ctrl+Shift+P>` then `> Python: Terminal` (and other terminals) should automatically activate the environment and start in the correct location.
113 | 
114 | 6. Install dependencies. In a terminal in that cloned folder (after, optionally, activating an environment as discussed above), run
115 | ```bash
116 | pip install -r requirements.txt
117 | ```
118 | 7. (Optional) installation of PyTorch with GPU support.
119 |    - If the above process only installs the CPU version and you have a GPU available, follow https://pytorch.org/get-started/locally/ for more details, with the activated environment.
120 |    - For example `conda install pytorch pytorch-cuda=11.8 -c pytorch -c nvidia`.
121 |    - Then, if you pass `--trainer.accelerator=gpu` to `python investment_euler.py`, etc., it will use the available hardware.
122 |    - Note that GPUs are not required for these experiments, and are often slower.
123 | 
124 | **Troubleshooting:**
125 | 
126 | - If you are having trouble installing packages on Windows with Python 3.10, then either downgrade to 3.9 or see [here](https://stackoverflow.com/questions/64261546/how-to-solve-error-microsoft-visual-c-14-0-or-greater-is-required-when-inst). To summarize those steps:
127 |   - Download https://visualstudio.microsoft.com/visual-cpp-build-tools/
128 |   - Local to that folder in a terminal, run `vs_buildtools.exe --norestart --passive --downloadThenInstall --includeRecommended --add Microsoft.VisualStudio.Workload.NativeDesktop --add Microsoft.VisualStudio.Workload.VCTools --add Microsoft.VisualStudio.Workload.MSBuildTools`
129 | 
130 | - If PyTorch is not working after the initial installation, consider [installing manually](https://pytorch.org/get-started/locally/#start-locally) with `conda install pytorch cpuonly -c pytorch` or something similar, and then retrying the dependencies installation. GPUs are not required for these experiments. If you get compatibility clashes between packages with `pip install -r requirements.txt`, then we recommend using a virtual environment with conda, as described above.
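As a final check of the installation, a single short CPU training run should complete without errors (this only smoke-tests the setup; one epoch will not converge to anything meaningful):
```bash
python investment_euler.py --trainer.max_epochs=1
```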
131 | -------------------------------------------------------------------------------- /baseline_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "attachments": {}, 5 | "cell_type": "markdown", 6 | "metadata": {}, 7 | "source": [ 8 | "## Example using Python Script\n", 9 | "\n", 10 | "This notebook uses the `investment_euler.py` provided in this repository and is not entirely self-contained." 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "outputs": [ 18 | { 19 | "name": "stderr", 20 | "output_type": "stream", 21 | "text": [ 22 | "c:\\Users\\jesse\\anaconda3\\envs\\pytorch-sandbox\\lib\\site-packages\\tqdm\\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 23 | " from .autonotebook import tqdm as notebook_tqdm\n" 24 | ] 25 | } 26 | ], 27 | "source": [ 28 | "import pandas as pd\n", 29 | "import sys\n", 30 | "import torch\n", 31 | "import pytorch_lightning as pl\n", 32 | "import matplotlib.pyplot as plt\n", 33 | "import numpy as np\n", 34 | "from pytorch_lightning.cli import LightningCLI\n", 35 | "from investment_euler import InvestmentEuler" 36 | ] 37 | }, 38 | { 39 | "attachments": {}, 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | "source": [ 43 | " The `args` allows us to override some of the default values. Everything else is in the `investment_euler_simple.yaml` case, which does not have logging enabled. See `investment_euler.py` and related docs for a full version." 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 2, 49 | "metadata": {}, 50 | "outputs": [ 51 | { 52 | "name": "stderr", 53 | "output_type": "stream", 54 | "text": [ 55 | "Global seed set to 123\n", 56 | "GPU available: False, used: False\n", 57 | "TPU available: False, using: 0 TPU cores\n", 58 | "IPU available: False, using: 0 IPUs\n", 59 | "HPU available: False, using: 0 HPUs\n", 60 | "c:\\Users\\jesse\\anaconda3\\envs\\pytorch-sandbox\\lib\\site-packages\\pytorch_lightning\\callbacks\\model_checkpoint.py:604: UserWarning: Checkpoint directory c:\\Users\\jesse\\Documents\\GitHub\\symmetry_dynamic_programming\\checkpoints exists and is not empty.\n", 61 | " rank_zero_warn(f\"Checkpoint directory {dirpath} exists and is not empty.\")\n", 62 | "\n", 63 | " | Name | Type | Params\n", 64 | "-------------------------------------\n", 65 | "0 | ml_model | DeepSet | 199 K \n", 66 | "-------------------------------------\n", 67 | "199 K Trainable params\n", 68 | "0 Non-trainable params\n", 69 | "199 K Total params\n", 70 | "0.798 Total estimated model params size (MB)\n" 71 | ] 72 | }, 73 | { 74 | "name": "stdout", 75 | "output_type": "stream", 76 | "text": [ 77 | "Epoch 2: 100%|██████████| 96/96 [00:04<00:00, 21.98it/s, loss=3.24e-07, val_loss=1.95e-7, val_u_rel_error=0.000907, val_u_abs_error=2.93e-5]\n" 78 | ] 79 | } 80 | ], 81 | "source": [ 82 | "args = {\"trainer.max_epochs\": 10,\n", 83 | " \"model.N\": 128,\n", 84 | " \"model.nu\": 1, # LQ example nested if nu=1\n", 85 | " } # override some defaults\n", 86 | "sys.argv = [\"dummy.py\"] # workaround since LightningCLI doesn't like being called from a notebook\n", 87 | "\n", 88 | "cli = LightningCLI(\n", 89 | " InvestmentEuler,\n", 90 | " run=False,\n", 91 | " seed_everything_default=123,\n", 92 | " save_config_callback=None,\n", 93 | " parser_kwargs={\"default_config_files\": [\"investment_euler_simple.yaml\"]}, 
# note using a \"simple\" version without logging\n", 94 | " args = args\n", 95 | ")\n", 96 | "\n", 97 | "# Fit the model. The construction created internal training and validation points\n", 98 | "cli.trainer.fit(cli.model)" 99 | ] 100 | }, 101 | { 102 | "attachments": {}, 103 | "cell_type": "markdown", 104 | "metadata": {}, 105 | "source": [ 106 | "After the model has been trained, we can test it against simulated values" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 3, 112 | "metadata": {}, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "Testing DataLoader 0: 100%|██████████| 128/128 [00:02<00:00, 51.14it/s]\n" 119 | ] 120 | }, 121 | { 122 | "data": { 123 | "text/html": [ 124 | "
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
125 |        "┃        Test metric               DataLoader 0        ┃\n",
126 |        "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
127 |        "│         test_loss           2.0451582827263337e-07   │\n",
128 |        "│     test_u_abs_error         2.93638640869176e-05    │\n",
129 |        "│     test_u_rel_error         0.0009061677264980972   │\n",
130 |        "└───────────────────────────┴───────────────────────────┘\n",
131 |        "
\n" 132 | ], 133 | "text/plain": [ 134 | "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", 135 | "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", 136 | "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", 137 | "│\u001b[36m \u001b[0m\u001b[36m test_loss \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 2.0451582827263337e-07 \u001b[0m\u001b[35m \u001b[0m│\n", 138 | "│\u001b[36m \u001b[0m\u001b[36m test_u_abs_error \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 2.93638640869176e-05 \u001b[0m\u001b[35m \u001b[0m│\n", 139 | "│\u001b[36m \u001b[0m\u001b[36m test_u_rel_error \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.0009061677264980972 \u001b[0m\u001b[35m \u001b[0m│\n", 140 | "└───────────────────────────┴───────────────────────────┘\n" 141 | ] 142 | }, 143 | "metadata": {}, 144 | "output_type": "display_data" 145 | }, 146 | { 147 | "data": { 148 | "text/plain": [ 149 | "[{'test_loss': 2.0451582827263337e-07,\n", 150 | " 'test_u_rel_error': 0.0009061677264980972,\n", 151 | " 'test_u_abs_error': 2.93638640869176e-05}]" 152 | ] 153 | }, 154 | "execution_count": 3, 155 | "metadata": {}, 156 | "output_type": "execute_result" 157 | } 158 | ], 159 | "source": [ 160 | "cli.trainer.test(cli.model) # test the model on generated values, storing the results in model.test_results" 161 | ] 162 | }, 163 | { 164 | "attachments": {}, 165 | "cell_type": "markdown", 166 | "metadata": {}, 167 | "source": [ 168 | "There are three types of points here, each with a separate `loss`:\n", 169 | "1. `train_loss` is loss applied to the generated points where we are attempting to interpolate. Given the large number of parameters, having a low `train_loss` just means it successfully interpolated, not that it will necessarily generalize.\n", 170 | "2. `val_loss` is that applied to a set of generated points which are not trained on, but which are used within the optimization process (e.g., deciding when to stop training, adjusting learning rates). This gives a better sense of generalization because they are \"out of sample\", but when they are used to guide the optimization process they become somewhat contaminated and a low `val_loss` might hide poor generalization.\n", 171 | "3. `test_loss` is applied to a simulated set of points that aren't used in the training process at all. It is the most representative of generalization, and a low `test_loss` is the ultimate goal of the process.\n", 172 | "\n", 173 | "Here, the default stopping criteria is when the `val_loss` becomes sufficiently low relative to numerical precision." 
174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": 4, 179 | "metadata": {}, 180 | "outputs": [ 181 | { 182 | "data": { 183 | "text/plain": [ 184 | "Text(0.5, 0, 'Time($t$)')" 185 | ] 186 | }, 187 | "execution_count": 4, 188 | "metadata": {}, 189 | "output_type": "execute_result" 190 | }, 191 | { 192 | "data": { 193 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjUAAAHLCAYAAAAwZWlsAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8o6BhiAAAACXBIWXMAAA9hAAAPYQGoP6dpAABw3klEQVR4nO3deVyU5f7/8dfMsCObgCCI+y4IrohlWpJWltnilpWa1elkpfk7HbNM7Zw8tnwtT+rR7Fh2stJji5Wm5lKmuYv7lgtuKCiKgOww9+8Pck4TaKDAsLyfj8c8lGuuueczFwPz5r6v+7pNhmEYiIiIiFRxZkcXICIiIlIWFGpERESkWlCoERERkWpBoUZERESqBYUaERERqRYUakRERKRaUKgRERGRakGhRkRERKoFhRoRERGpFhRqREREpFpQqBEREZFqQaFGKrU333yTli1bYrVay2ybs2fPpn79+uTk5JTZNn9v3rx5mEwmjh8//od9J02ahMlkIjk5ucyev0OHDtx3331Xvb88xrUiVcT30BGKe9/8vq24PmXxHirNe7Y6Ko+fQ6l4CjVSaaWlpfHGG28wduxYzGb7t+qrr76K2WzmwIEDRR732GOPYbFYWLp0abHbHTZsGLm5ubz33nvlUvfVbNiwgUmTJnHp0qVyfR7DMDh48CCtW7cu9v6rjeuVD7UrNycnJ0JDQxk2bBgJCQnXXc+V7W7btu2qff7oAyU8PJwePXrYvi7P7+Hvx+H3t02bNpX5c0rxfv+9cHNzo3nz5jzzzDMkJSWVensV9TMojqNQI5XWBx98QH5+PoMHDy5y35///GdcXV2ZNm2aXfv06dP58MMP+dvf/kafPn2K3a6bmxtDhw7l7bffprwuUv/II4+QlZVFgwYNbG0bNmzg1VdfLfdfqMePHyczM/OqoeZa4wrwt7/9jY8//pjZs2dz5513Mn/+fLp37052dnZ5ll0qFfE9vDIOv781bdq0XJ4Pin/fXE+f8npuR7nyvZgxYwZdu3Zl1qxZxMTEkJmZWartVNTPoDiOk6MLELmaDz/8kL59++Lm5lbkvjp16jBkyBA+/vhjJk+eTEBAAGvXrmXMmDE88MADvPzyy9fc9oABA3jzzTf54YcfuO2228q8dovFgsViKfPtlsT+/fsBrhpqrjWuAHfeeScdO3YE4PHHHycgIIA33niDb775hgEDBpRP0dehvL+Hvx2HilKS9015vLcyMjLw9PR02Hv2j/z+Penv78/bb7/N119/fdVwLjWT9tRIhQsMDOSZZ54p0t6xY0fb3pX4+Hh2795NbGzsVbfz/PPPk5WVxezZszl16hQDBgygZcuWzJs37w9r6NChA7Vr1+brr7++Zr/du3djMpn45ptvbG3bt2/HZDLRvn17u7533nkn0dHRQNH5CZMmTeKFF14AoFGjRrbd6b+dv3Dp0iWGDRuGr68vPj4+DB8+vFR/iX711Vd2c2m6devGkCFDSE1NtfUpybj+Xrdu3QA4evSoXXtCQgKPPfYYQUFBuLq60qZNGz744IMSb/dGlfR7eMXBgwc5efJkmdawfv16OnXqhJubG02aNOG9996zHUr7rWHDhtGwYcMij/9935LMa7lWn+TkZAYMGIC3tzf+/v6MGjWqyB62K8+5f/9+HnroIfz8/Lj55puL3W5J6/5t2y+//MLDDz+Mj48PgYGBvPLKKxiGwalTp7j33nvx9vYmODiYqVOnXvU1/pErITY+Ph6AEydO8PTTT9OiRQvc3d3x9/enf//+ReYd/dHPINz4z6E4lvbUSIU6c+YMycnJREZG2rUXFBSwb98+br/9dqBwNzFQJDj8Vps2bejVqxczZ85k8eLF5OXlsXjxYmrVqlWiWtq3b8/PP/98zT7h4eH4+vry008/0bdvXwDWrVuH2Wxm165dpKWl4e3tjdVqZcOGDTz55JPFbuf+++/nl19+4bPPPuOdd94hICAAKAx4VwwYMIBGjRoxZcoU4uLi+Pe//02dOnV44403/vC1vPXWW/z1r39l8ODB5OXlkZaWxh133MF7772HYRh8+umnQMnG9feu/NL38/OztSUlJdGlSxdMJhPPPPMMgYGBLFu2jBEjRpCWlsbo0aNLvP0bUZLv4RWtWrWie/fu/PjjjyXqn5qaWmSOj8lkwt/fH4A9e/bQq1cvAgMDmTRpEvn5+UycOJGgoKBSvYayNGDAABo2bMiUKVPYtGkT7777LikpKfznP/8p0rd///40a9aMf/zjH2V6CG/gwIG0atWK119/naVLl/Laa69Ru3Zt3nvvPW677TbeeOMNPvnkE/7yl7/QqVMnbrnlllI/x5WAfeV7sXXrVjZs2MCgQYOoV68ex48fZ9asWfTo0YP9+/fj4eFRop9BuLGfQ6kEDJEKtGzZMgMwNm/ebNe+d+9eAzA++eQTwzAMY/z48QZgpKenX3N7y5cvNwDDbDYby5cvL1UtTz75pOHu7v6H/fr06WN07tzZ9vX9999v3H///YbFYjGWLVtmGIZhxMXFGYDx9ddfG4ZhGB9++KEBGPHx8bbHvfXWW0XaDMMwJk6caADGY489Ztd+3333Gf7+/n9Y35YtWwyTyWT85S9/MQzDMJo3b24MHjzYMAzDuP322w0nJycjIyPDMIxrj+uVmletWmWcP3/eOHXqlPH5558bgYGBhqurq3Hq1Clb3xEjRhh169Y1kpOT7bYxaNAgw8fHx8jMzCyy3a1bt171NVwZg/Pnzxd7f5s2bYzu3bsXaS/p99AwDAModhu/d6Xe4m6urq62fv369TPc3NyMEydO2Nr2799vWCwW4/e/WocOHWo0aNCgyHNded2/f+7fvkd+31Zcnyvb6du3r932n376aQMwdu3aVaTvlffItZ67pHX/tu3JJ5+0teXn5xv16tUzTCaT8frrr9vaU1JSDHd3d2Po0KFFtl1cTb99Ty5YsMDw9/c33N3djdOnTxuGYdi9367YuHGjARj/+c9/bG1X+xn8bf3X+3MolYMOP0mF2r17N2azmfDwcLv2Xbt2ARAREQHAhQsXcHJy+sO9LlfOfmrcuDG9e/cuVS1+fn5kZWX94a7lbt26ERcXR0ZGBlB4yOGuu+4iKiqKdevWAYV7b0wmEzfffHOpavitp556qsjzXrhwgbS0tGs+7o033iAwMJC//e1vZGVlceTIEduesJtu
uon8/HzOnTsHlGxcY2NjCQwMJCwsjAcffBBPT0+++eYb6tWrBxSeXfXFF19wzz33YBgGycnJtlvv3r1JTU0lLi7uusehNEr6PYTCuku6lwZg5syZrFy50u62bNkyoHDP4ooVK+jXrx/169e3PaZVq1alfh+WpZEjR9p9/eyzzwLw3XffFen7+/dbWXn88cdt/7dYLHTs2BHDMBgxYoSt3dfXlxYtWnDs2LESbfO378lBgwZRq1YtvvrqK0JDQwFwd3e39c3Ly+PChQs0bdoUX1/fUr8Xr/fnUCoHHX6SCrVr1y6aNm2Kh4eHXfvOnTtxdnamZcuWJd7WqlWr+Mtf/kKzZs04fPgw33//Pb169bLrYxgGXl5eHDt2jDp16hS5DygyN+D3unXrRn5+Phs3biQsLIxz587RrVs39u3bZxdqWrduTe3atUtc/+/99sMR/ne4JyUlBW9v72Ifk5+fz/Lly3nwwQdxd3dn69atWK1W2rZtC2ALYr89dPRHZs6cSfPmzUlNTeWDDz7gp59+wtXV1Xb/+fPnuXTpEnPmzGHOnDnFbuNKiCpLxX2fSvo9vB6dO3e+6kTh8+fPk5WVRbNmzYrc16JFi2JDREX4fT1NmjTBbDYXO/+mUaNG5VLD79/HPj4+uLm52Q73/Lb9woULJdrmlfekk5MTQUFBtGjRwm45gqysLKZMmcKHH35IQkKC3eG0384pu576S/JzKJWHQo1UqD179hSZTwOFx8RbtGiBs7MzUHisPD8/n/T0dLy8vIr0P3bsGAMHDqRdu3asWrWK5s2b88477xQJNfHx8Xh4eBQJNFD4S8rDw8Pur7zidOzYETc3N3766Sfq169PnTp1aN68Od26deNf//oXOTk5rFu37pqL3ZXE1c48Ma4x3+HIkSNkZGTY9nDt3r0bwDbGO3fupEGDBvj4+AB/PK5g/2Her18/br75Zh566CEOHTpErVq1bAv2PfzwwwwdOrTYbVwJVSV15UysrKysYu/PzMws9mytkn4PHe1qoaugoMAhzwuUaMyup+7i3sfX897+rWsFTCjcI/Xhhx8yevRoYmJi8PHxwWQyMWjQoFIvMHmjtYpjKdRIhbFarRw6dKjIh/+5c+dYv3693enCV/bYxMfHF/mAvHz5Mvfeey/Ozs589dVX+Pj48PTTT/Pqq69y4MABWrVqBRQemmrXrh0FBQXUqlWLVq1asXXrVtt24uPjbX2vxcXFhc6dO7Nu3Trq169vOxuoW7du5OTk8Mknn5CUlPSHEx7LY29CSkoKAJ6enkDhnrCAgABCQkJITk5m7dq1drvTrzWuxbFYLEyZMoVbb72VGTNm8OKLLxIYGIiXlxcFBQWlOovqWq6sjXLo0CHCwsLs7svMzOTUqVNFAuuV11GS72FZCwwMxN3dncOHDxe579ChQ0Xa/Pz8il0b5cSJE2Va1+HDh+32wBw5cgSr1VrsGUwlUVF136jPP/+coUOH2p1RlZ2dXaT28vgZlMpFc2qkwhQUFJCXl2c3/yE/P58//elP5Ofn2/Y2AMTExAAUWYXWMAweeeQRDh06xBdffGGb5/H0008XWYyvVatWTJw4kT//+c9cvnzZLtAAxMXF0bVr1xLV3q1bNzZv3swPP/xgCzUBAQG0atXKdlbElfaruRI8ynLhrytzCjZu3AgU7qm5spfm+eefx2w2252JdLVxvZYePXrQuXNnpk2bRnZ2NhaLhQceeIAvvviCvXv3Ful//vz5Ur+Onj174uLiwqxZs4r8ZT1nzhzy8/O58847izyuNN/Dsjyl22Kx0Lt3bxYvXmy3zQMHDrBixYoi/Zs0aUJqaqptTxrA2bNn+eqrr8qknitmzpxp9/X06dMBih27kqioum+UxWIpsidl+vTpRfYolcfPoFQu2lMjFcbZ2Zm2bdsya9Ys3N3dcXd3Z9GiRbbd4L8NNY0bNyY8PJxVq1bx2GOP2donTZrE4sWLee+997jpppts7YGBgTz88MN8/PHH/OMf/7Cd6rl79+5iF2bbvn07Fy9e5N577y1R7d26dWPy5MmcOnXKLrzccsstvPfeezRs2NAWsK6mQ4cOALz88ssMGjQIZ2dn7rnnnhI9/9XUr1+fHj168PHHHxMUFMSuXbsIDw/nnnvuYdmyZXz00Ud2f7lfbVz/yAsvvED//v2ZN28eTz31FK+//jo//PAD0dHRPPHEE7Ru3ZqLFy8SFxfHqlWruHjxYpFtfPDBByxfvrxI+6hRo6hTpw4TJkxg/Pjx3HLLLfTt2xcPDw82bNjAZ599Rq9evYqMVWm/h6U9pXvZsmUcPHiwSHvXrl1p3Lgxr776KsuXL6dbt248/fTT5OfnM336dNq0aWMXAgAGDRrE2LFjue+++3juuefIzMxk1qxZNG/evEwnVcfHx9O3b1/uuOMONm7cyPz583nooYeKPeRbEhVV9426++67+fjjj/Hx8aF169Zs3LiRVatW2X4PXHG1n8ErYUeqAYeccyU1VlxcnNGhQwfDzc3NaNOmjTFnzhxj7ty5BmB3aqxhGMbbb79t1KpVy3a65pdffmmYTCbjqaeeKnbb+/btM0wmk/Haa6/Z2lq3bm1s2rSpSN+xY8ca9evXN6xWa4nqTktLMywWi+Hl5WXk5+fb2ufPn28AxiOPPGLXv7jTYw3DMP7+978boaGhhtlstt1/tdOZr7aN3zt79qxx9913G25ubgZguLi4GDfddJOxevXqYvv/flx//3zFnXpdUFBgNGnSxGjSpInt9SclJRkjR440wsLCDGdnZyM4ONjo2bOnMWfOnGK3e7Xbb08Vnz9/vtGlSxfD09PTcHV1NVq2bGm8+uqrRnZ2dpGaSvs9pAxO6QaMDz/80NZ37dq1RocOHQwXFxejcePGxuzZs4s93dkwDOP77783wsPDDRcXF6NFixbG/Pnzy/yU7v379xsPPvig4eXlZfj5+RnPPPOMkZWVZVdHad9vJan7WtsdOnSo4enpWWQ8unfvbrRp06ZIe3E1XWs5AMMoPEV8+PDhRkBAgFGrVi2jd+/exsGDB40GDRoUOW28uJ/Ba9Vf0p9DqRwUaqTSunTpklG7dm3j3//+93U9Picnx3B2djYuX75s156dnW0EBwcb06ZNK4syK41vv/3WAIwdO3Zcs9+NjmtlUJm/h1cLNSJS/jSnRiotHx8f/vrXv/LWW2+V+gwGgPT0dAByc3Pt2j/88EOcnZ3LbZ0ORzl48CAmk4kWLVpcs9+NjmtlUF2/hyJyY0yGofPUpPoaOnQoX375JW3atGHTpk2OLqdcPf7446xateqa1w2S8jdp0iReffVVnQIs4gDaUyPV2kcffUR6enq1DzRQuKemNIsXiohUN9pTIyIiItWC9tSIiIhItaBQIyIiItVCjVl8z2q1cubMGby8vLRUtoiISBVhGAbp6emEhITYXci0ODUm1Jw5c6bINWVERESkajh16tQfrtxeY0LNlSsSnzp1SpePFxERqSLS0tIICwuzfY5fS40JNVcOOXl7eyvUiIiIVDElmTqiicIiIiJSLSjUiIiISLW
gUCMiIiLVQo2ZU1NSBQUF5OXlOboMqcKcnZ2xWCyOLkNEpMZRqPmVYRgkJiZy6dIlR5ci1YCvry/BwcFaE0lEpAIp1PzqSqCpU6cOHh4e+jCS62IYBpmZmZw7dw6AunXrOrgiEZGaQ6GGwkNOVwKNv7+/o8uRKs7d3R2Ac+fOUadOHR2KEhGpIJooDLY5NB4eHg6uRKqLK+8lzc8SEak4CjW/oUNOUlb0XhIRqXgKNSIiIlItKNSIiIhItaBQIyIiItWCQk0NceHCBerUqcPx48dvaDuDBg1i6tSpZVJTq1at+Pe//12kvaxqLS9lOQYi1y3nMmQkg9Xq6EpEKg2Fmhpi8uTJ3HvvvTRs2NDWZrVaadmyJS+//LJd36VLl+Li4sKXX35ZZDvjx49n8uTJpKam3lA9WVlZHD58mMjIyD+sddiwYZhMJkwmE87OzjRq1Ii//vWvZGdnl+o5hw0bRr9+/a56f48ePRg9enSR9nnz5uHr62v7uqzGQKREslLg5GbY/hEsfwk+vh/j7TYwJRTeakLet887ukKRSkPr1NQAmZmZzJ07lxUrVti1m81mxo0bx6hRo/jrX/+Kj48PcXFxDBw4kDfeeIP777+/yLbCw8Np0qQJ8+fPZ+TIkddd0969ezEMg/Dw8BLVescdd/Dhhx+Sl5fH9u3bGTp0KCaTiTfeeOO6a7heZTUGUkMZBlxOgpTjcDEeLp2AzAuQnQY5aZCd+uv/UzGyUzFlFw3Pvz23bubWNN7fvpxAL1cCvVyp4+VGoJcrDf096NM2hEAv1wp7aSKOplBTDSxfvpwHHniA9PR0zObCnW979+4lIiKC8+fP8+OPP+Lq6kqXLl2KPHbIkCG8+uqrzJgxg0ceeYS7776b4cOH8/zzV//r75577mHBggXX9YG+c+dOxowZw88//4zVaqV+/fq8/PLLtj0k3333XbG1urq6EhwcDEBYWBixsbGsXLnSLtRYrVbeeOMN5syZQ2JiIs2bN+eVV17hwQcfLHWdf+RGxkBqsLO7MD64A1NeZom6XwkvCYY/R6yhHDFCOWyEctgayjFCyXPyID8/j2wKyLiQyfEL9tt9bekBeraqw6BO9bklxMCy73NoOwg8f11ktCC/MFSdPwjnD0HyL5B6GupGQtNYaNAVnBSKpOpQqLkKwzDIyitwyHO7O1tKtc7Jjh07CA8PtwUaKAwPISEhBAQEsG7dOjp06FDsY52cnBg7dizjx4/ns88+o1OnTvzzn/+85vN17tyZyZMnk5OTg6tryX/hHT16lO7du/PCCy/g7++P1WqlU6dOPP/88/To0YOoqKhr1nrF3r172bBhAw0aNLBrnzJlCvPnz2f27Nk0a9aMn376iYcffpjAwEC6d+9e4jpL4nrHQGqeuJMpLNp2iqPnM7icnMB3eZkUGCbOGAGcNOpw0qhDMj6kG+6k4Um64UE67qQZnqTjzgWzP0EBgTStU4umdWpxc1AthtWpRUN/T1ydzGTkFnA+PYfz6TmcS8/+9d8cNh27wI6Tl1ixL4kV+5J4wfM7RhbMx1g5CVPDm+ByEsaFI5gKcosWfXwdbJyB4eyBqdEthQGnaU+o3bjiB1CkFBRqriIrr4DWE1b8ccdysP9vvfFwKfm3ZufOnUXmpuzatcvWduLECUJCQq76+CFDhjB69GiCg4P57LPP7MLRsWPH2Lt3L3379rW1hYSEkJubS2JiYpFgcS1PPfUU999/P+PHjyc6OpqBAwcyevRopkyZwrp164iKirpqrUuWLKFWrVrk5+eTk5OD2WxmxowZtvtzcnL4xz/+wapVq4iJiQGgcePGrF+/nvfee6/MQ831joFUUYYB+TlgMpVoz4VhGKz95TyzfjzK5viLv7nHjR6mqVxyDqauvw8NantQ398Df08XGrk74+XmhJdb4b/ebs54uzlR29MFJ8vVpz/WcnWilqsTjQI8i9x3KDGdhVtP8eWO0+zLqs0ep4ZEcByO/QAU7gnKMlw4aoRwxAjhiDWUc/jS0fQL3S27CMq7BL8sL7wBRu3GmJrfCa3ugbDOYNYlQG5Y5kWIX1u418ziBGZnsDiD2QksLoU3/6b/27vmQJdz8tl+IoUmgZ7U86ucK/Ar1FQDO3bs4LnnnrNr27lzJx07dgQKJ+W6ubld9fHPPPMMAMnJyXaBBmDZsmWkp6fbhZor1zbKzCzZLnQovGDomjVr2LBhAwUFBezZs4cpU6ZgNpuxWCy4uLhcs9Zbb72VWbNmkZGRwTvvvIOTkxMPPPCA7f4jR46QmZnJ7bffbve43Nxc2rVrV+I6S+p6xkAquVNbYdO/4OyuwgBTkIORnw35OXZ7MwyPAEy+YeATBr71f/03DMK6kO/mx3d7E5n141EOnE0DwNliol9UKDc3C6B+bQ/q1/agtqdLhaw63SLYiwn3tOavd7Tg+/3hvL61D5eObqOd+QinjQCOGKHkeobSpI63bU9QGy9Xtp1IYc6hczgn76e7eRc9LLvoYPoF54vHYNNM2DQTq38zzM9sLQx6ldWVoPBHDAMuHIGTm+D0FvDwh/ZDoXajcqorDw6vhF2fYhxajsl67cup5Pi3xuWZDRW/UrlhgMnE5Zx8/rPxOO//dIyUzMJaGwd6ckuzQG5pHkCXRn54LH8eGtwMUYMrtsbfUai5CndnC/v/1tthz11SGRkZHD161G5PjdVqZceOHYwYMQKAgIAAUlJSin38K6+8wtKlS9m0aROxsbHMnTvXNk9k7dq1vPLKK/j7+7Nw4ULWr1+Pp6cnFy8W/uUZGBhY4jo3bdqE1WolKiqKQ4cOkZWVRVRUFMePHyclJYWuXbtes1ZPT0+aNm0KwAcffEBkZCRz5861vcbLly8DhWduhYaG2j22pIeHvL29iz2j6dKlS/j4+Ni1Xc8YSCVkLYCDSzE2zMB0enORu4v7CDFlJkNmMpzZYde+ov1sJh8M5uTFwqDr4WLhoc71GdGtEXV93Muj+hJzc7bQNzKEvpEhnLgQwc5Tl7jPz4OmgbXw8XAu0v/OiLq8cndrTqd05qdfkvnwl3OMOnKKqNyd9LJsI9Ycxw/n67Likzh6twnm1pZ18HEvuh2gcOLz8nFQKwha3AmhHcFcvifepmXncfBsOi22T8Q7fhmmwJYQ2AICWxbeAprBpVNwciOc3IRxahOmzAt22zDWT8PUrBdEPwmNb7vxmg0Dzu6EXQuw7l6EOavw+UzAQWsYyYY3zqYCnCi8OVOAE/m4k8vqpHpMf20VHRr40alhbTo09CM8xAcXp3Iax/xc2PUpBVs/YG6T6fxrYxKXfg0zAbVcScnM5dj5DI6dz2DehuNEWk7ytfN8EuIPkhbUh1Z1vcunrhJQqLkKk8lUqkNAjhIfH287NfuKFStWcOHCBVvQadeuHfPnzy/y2Pfff5+pU6eyZs0aIiMjGT16NG+++SZPPvkkzs7OdO/enbZt2zJv3jy7U8H37t1LvXr1CAgIKHGdubmFf+lmZ2ezY8cOGjRoQO3atXnzzTcJDw8nIiLimrX+ltls5qWXXmLMmDE89NBDuLu707
p1a1xdXTl58uR1H2pq0aIF33//fZH2uLg4mjdvbtd2PWMglUjOZdj5CdaN/8J86TgmINew8HXBTXxlvZl0w4McnMnFiRzDhVycKDC7YDbyCeYCoabkIrfXNmZxysjEz8OZ4Tc14tGYBvh6uDj6lRbRwN+TBv5FD1UVp56fBw9F1+eh6PrkFbRn2/Fb+H5/ItP2nOZy2kVS9iaybG8izhYTMU0CaOTvQZ7VIL/ASl6BQV6BFZfcFN4+/knhBte/jeERiKnFHdDiLmjcA1xu7DCGYRicvJjJ9hMpbDuRQtyJFA4lpWMYsMBlK13M5yHjfOE8oaswATmGMzuNJsRZm9HKdJIell1weAUcXoG1dhPMnZ+AqIfAzeeq2ymiIB8StsORVRgHvsV0/gBQuJbKecOHxQU38aX1FrwaRBFYy5W8Aiv51sJxyy8wyLdaycor4HBSGjkZuXy/P4nv9ycB4OpkJjLMl+hGtenS2J/29f1wdzLBtrlQPwbqtC4axAwD8rML3//ZqYXhPCP5N/9egIxkrCc2YE47jQVIOvUelwr60DjAk2d7NuWetiFk5Baw8Wgya39J5qdfznPukifTTPcTfy6YI4t2sfS5bqX7JpYhk2EYRmkfNHPmTN566y0SExOJjIxk+vTpdO7c+ar9Fy1axCuvvMLx48dp1qwZb7zxBnfddZft/kmTJrFgwQJOnTqFi4sLHTp0YPLkyURHRxfZVk5ODtHR0ezatYsdO3YQFRVVoprT0tLw8fEhNTUVb2/7FJmdnU18fDyNGjW65mGayujs2bOEhoayZMkS7rrrLjZt2sSQIUM4c+YMly9fxmKxsGfPHtq3b8+5c+fw8/MDCs8yuu+++1iwYAH33XcfAKmpqTRo0IC3336bxx57DCicl3Ls2DG75xw2bBgWi4W5c+eWuM5Tp07RuHFjnnvuOS5fvszhw4e5//77GTt2LD/99JNtcnBxtQ4bNoxLly6xePFi2/by8/Np2LAho0eP5i9/+QtQuH7M7NmzmTp1KjfffDOpqan8/PPPeHt7M3ToUIYNG8aJEyd455137Grz9/cnLCyMY8eO0aZNG5544gkef/xxXF1dWbp0KWPHjuXbb7/ljjvuKPEYVOX3VE1wMX4XtT+6BYAUoxbzC2L50nInt0dHcm9UCN5uzrg6mXFxMuPqZMHFyYzFbCK/wMq59BwSLmWRkJJFwqUsTv/6b0ZOPve0rcuATmFV4g+iG2EYBnsT0lixL5Hl+xI5cu7yVfu6kstjluW0Mp+gh3kn3qas/23HyR1Tk1uh8a3g1wC8Q8GnXmFw+O2hlvxcOLUZjv2AcWwtx+/8mHWncvn5SDLbT1wi+XJOkecN8XEjJzOdkPxTNDOdppk5gaamBJqZEqhvOsclPNlmbcE2a3O2WVtwxr0FkY3q0L6+HwfOprF3TxwPmb7nQctaW82Xox6nVr8SLLx5aDns+gzj6A+Ycv639zfHcOZ7awe+KOjG6dox3NehAf3ahRLqe+09eTn5BexNSGXb8RS2Hk9h+4mLtsNAVzhbTPQLTuatC88Wjq2HPya/RpB7GXIuY+SmQ24GJmv+H9cPnDN8mZV/Dxt97+Gp2HDuiQzBYi6679IwDOKTM/jpl/P8dDiZqDBfnuvZrETPUVLX+vz+vVKHmoULF/Loo48ye/ZsoqOjmTZtGosWLeLQoUPUqVOnSP8NGzZwyy23MGXKFO6++24+/fRT3njjDeLi4mxrlHz66afUqVOHxo0bk5WVxTvvvMOiRYs4cuRIkd37o0aN4vDhwyxbtkyh5leTJ09m6tSpeHl5ceuttxIcHMzq1avZunWrrU90dDSPPfYYf/rTn9i+fTvdu3dn8uTJjBo1ym5bEyZMYMGCBRw4cICzZ88yaNAg1q9fb7s/Ozub4OBgli9fbnfa9bx58xg+fDjXejt9/PHHjB07lrNnz+Lk5ESHDh1466236NbNPtX/tlYoPtQAvP7667z99tvEx8fj6emJYRi8++67zJo1i2PHjuHr60v79u156aWXuOWWWxg2bBgfffRRkbpGjBhhW9l469atvPzyy+zcuZPc3FxatmzJiy++aLdo39XG4Leq+nuqurqUmctbKw6xaPtpJvA+B436bKzVi4e6tWJgpzBquVbvMFJejp6/zJoD50jLzsPJbMbZyYSz2YyTxYSzxYyT2cSBs2n8uD+Beuk7iDXHcbtlO/VMycVuz3CphcmnXmHIMZkwTmywOw3+idwxrLR2tH3tbDEREepDhwZ+dGhQm/YNfKnj5YbVapBwKYsj5y5z+Fw6h5Muc/jcZU6ev4RvLQ86NfSnY0M/OjasTUN/D7s5K+fTc1iw5SRfbjpE18zVPGJZyXP5o6jTuC1NAj0Jq+1B2K9zpMJqe1DL1Qmr1WD/2TSyVvyNTicLf6dcMjxZZ41grTWSzS4x3BbVjPvb16NtPZ/rniNjGAZHz2ew7fhFNsdfZOPRCySmZRNpOsL/c1pER/MveJiKBr3fumy4cdHw4iLeXDC8uWh4cYHCf88a/hz2u+WaYaYilWuoiY6OplOnTrYzT6xWK2FhYTz77LO8+OKLRfoPHDiQjIwMlixZYmvr0qULUVFRzJ49+5ovYNWqVfTs2dPWvmzZMsaMGcMXX3xBmzZtFGpKYenSpbzwwgvs3bu3yGTgq/n5559tofWKWbNm8dVXXxU5TDNx4kTWrl3Ljz/++IfbrV27NvPmzbObfHyjtVakq43Bb9WE91RVYhgG3+1JZOI3+2x/1UeF+fJEt8b0bhN0zbOLpOwYhsGBs+msOpDEqv2J5J3ZQ6x5O23N8dQ1XSDElExtU/F7fc4b3qy3RrC+IIL1pvY0btCAm5sFEN2oNuGhPriVYi5iaeQVWFm1P4mPNsSzKb74uYkA/p6FhxovZOTSynSCXuZtrLVGkl47nG4tgunePJCbmgaUyzyYK4fgNh27wMajF9h2NIk6lw/gb0rjMu5kGG5k4MZlw51cizsmF0883V2p7emKv6cLtT1dbP/W9nQhxNedLo39HR5mrihNqCnVnyW5ubls376dcePG2drMZjOxsbFs3Lix2Mds3LiRMWPG2LX17t27yF/dv32OOXPm4OPjYzf5NSkpiSeeeILFixfj4fHHx2BzcnLIyflfUk1LS/vDx1Rnffr04fDhwyQkJBAWFlaix4SHh3Ps2DEiIiJYuHAhrVu3xtnZmenTpxfpu2zZMrtTrK/m9OnTpKSkFFlJ+EZrrUhXGwOpnBJTs3nl672s/HUuQtM6tfj7veHENHH8KbI1jclkonWIN61DvHmuZzMSUzux+mAfVp9O5UxqNmcuZXHx0iV88879GnIu4E4OW60tsQa24ubmQdzdLIC/N6pdYYf4nC1m7oyoy50RdfklKZ3tJ1I4eTGTU7/eTl7MJCUzjwsZhfMGPVwshDbpTEDzu/hn88ASz1+6ESaTyTZXamCn+hiGwYkL3biQkYOnqxOeLoWn/Xu6OpXf5OJKolTviuTkZAoKCggKCrJrDwoK4uDBg8U+JjExsdj+iYmJdm1Llixh0KBBZGZmU
rduXVauXGmbhGkYBsOGDeOpp56iY8eOJbrQ4ZQpU3j11VdL8eqqv+Kua3QtPj4+bN++3a7t8ccfL7bvli1bSrTNPXv24OnpSaNG1z5VsrS1VqSrjYFULlarwWdbT/L6dwdJz8nH2WLi6R5NefrWJrg6aX2VyiDYx40h0Q3gN9MnDcMgLSufM6lZnLmURVZeAU83qE2wj+P3eDYP8qJ5kFeR9rTsPE5dzCQ7z0pEaDmelVRCJpOJhgGeNCxm7aLqrtIcQL711lvZuXMnycnJvP/++wwYMIDNmzdTp04dpk+fTnp6ut0eoj8ybtw4uz1EaWlplfKv/prmzjvvtJ1+LXJDLsbDxaOFy/pfuV06hZGWwOH+axj/7SG2/LrwXVSYL2880JYWwUU/kKRyMZlM+Hg44+Ph7NBTg0vD282ZNiGlOCtKyk2pQk1AQAAWi4WkpCS79qSkJNt1eX4vODi4RP2vrEPStGlTunTpQrNmzZg7dy7jxo1jzZo1bNy4sch6Ix07dmTIkCHFTv50dXXV8vUi1Y1hQPxajJ+mYjr+U7FdTMBj07/htBGIh4uFF3q34NGYhpVmfoCIlJ9ShZorp1uvXr3adjaI1Wpl9erVtlVpfy8mJobVq1fbHU5YuXKlbSn7q7FarbY5Me+++y6vvfaa7b4zZ87Qu3dvFi5cWOxp3yJSzVitcOg7rOvexnxmOyYgz7BwxAghwQjgjBHAGcOfM0YACYY/5/HhtpZ1eLVvG8JqV87l3EWk7JX68NOYMWMYOnQoHTt2pHPnzkybNo2MjAyGDx8OwKOPPkpoaChTpkwBCk/B7t69O1OnTqVPnz4sWLCAbdu2MWfOHKBwRdzJkyfTt29f6tatS3JyMjNnziQhIYH+/fsDUL9+fbsaatWqBUCTJk2oV6/e9b96EancCvJh7xdY103FnHwIM4XXKlpQcCufOt1L3QbNCPV1J9TXjea+7vTwdSfU150gbzeHz2sQkYpX6lAzcOBAzp8/z4QJE0hMTCQqKorly5fbJgOfPHnS7jTcrl278umnnzJ+/HheeuklmjVrxuLFi21nv1gsFg4ePMhHH31EcnIy/v7+dOrUiXXr1tGmTZsyepkiUmXk58KpTXBkNda9X2FOPYEZSDPc+U9BL752vZf7b43iyy718XK7ytL8IlIjXdeKwlVRTV+nRipWjXpPnTsIy8dipCZgCutcuER7g65Qu3HJL3Z44SgcXQNHVmGN/wnzbxZaSza8mZt/F2u87ubh7hH07xhWbmuSiEjlU27r1IiI2FgLYNO/sK76G2ZrbuHFHy8chp2F1/kxPIMwPbG68ArW15CTX4B13gO4p8cD/7suzk/WCNYWRHLMvwcjbmvNmLYhOGuRPBG5BoUaESm9i/EUfPVnLKc2YgZ+LIjk04LbiDIfpZP5IG1Nx8i9nMaj8+Pp2CidxoG1SM3KIyUzl0sZeVzMzOVSZi4pmXmcTslkjNGaCJM7P1nbstZoi1NwBN1a1OHh5nXo2MAPs85cEpESUKgRkZIzDNj+IQXLX8aSn0mG4cpr+Q9j6jCM56IbsOPUJT6Ov8ju+LO4pp/g0Ol0dpxO/8PNvl9rON2aB9K9eSCPNw3Av5aWYxCR0lOoEZGSSTuD9etnMB9djQXYbG3JG67PMWpIL7o3L7zwbHioD490aQC043RKJluPX2RLfApJadn4ujvj5+mCn4czvh4u+Hm44OfpTB0vN5oEel73xf1ERK5QqKkhLly4QKtWrdiyZQsNGza87u0MGjSITp068f/+3/8ru+KkcsvLgs2zKfjpbSy5aeQYzryZP5BLESP4sG8EPh7Fn4FUz8+Den4e3NdOyy6ISMXQrLsaYvLkydx77712gcZqtdKyZUtefvllu75Lly7FxcWFL7/8ssh2xo8fz+TJk0lNTS11DcOGDbMt2licU6dO8dhjjxESEoKLiwsNGjRg1KhRXLhwodTPJWXAWgBxH1Pwz3awahKW3DR2WRvzkOUtOg0ez9RB7a8aaEREHEGhpgbIzMxk7ty5jBgxwq7dbDYzbtw4Zs6caQspcXFxDBw4kDfeeIP777+/yLbCw8Np0qQJ8+fPL9Majx07RseOHTl8+DCfffYZR44cYfbs2axevZqYmBguXrxYps8n12AYcGgZBf+KgW+ewXL5LKeNAJ7P/TNzmr/He2Me4o7wuo6uUkSkCIWaamD58uV4enpitVptbXv37sVkMpGcnMx3332Hq6srXbp0KfLYIUOGULt2bWbMmMHJkye5++67GT58OM8///xVn++ee+5hwYIFZfoaRo4ciYuLC99//z3du3enfv363HnnnaxatYqEhIQie5Ok/BQs/Qt8NghL8iEuGZ68ljeEvwTN5aEn/8rMhzsToEm8IlJJKdRUAzt27CA8PNxuJeedO3cSEhJCQEAA69ato0OHDsU+1snJibFjxzJt2jTuuusuOnXqxD//+c9rPl/nzp3ZsmWL7dpcN+rixYusWLGCp59+Gnd3d7v7goODGTJkCAsXLqSGrBPpMLn5Vr7acZrRu+uTbTgzK/8eHvWaQ6eHJvDZn7vTqWFtR5coInJNmij8R3IzyniDJnAp2wvs7dy5k8jISLu2Xbt22dpOnDhBSEjIVR8/ZMgQRo8eTXBwMJ999pldODp27Bh79+6lb9++traQkBByc3NJTEykQYMGN1z/4cOHMQyDVq1aFXt/q1atSElJ4fz589SpU+eGn0/+xzAMdpy6xJdxp1my+yyXMvOAphyq9R7D+3Tiyw71cNKCdyJSRSjU/JF/XD0MXJfAljByc5lucseOHTz33HN2bTt37qRjx44AZGVlXXOp/itXWE9OTrYLNADLli0jPT3dLtRc2ZuSmZlJWfqjPTEuLi5l+nw1jmHYLltw8kImX+1IYPHOBOKT/xfc63i5MrRrQx67qRHuLroUgYhULQo1VVxGRgZHjx6121NjtVrZsWOHbWJwQEAAKSkpxT7+lVdeYenSpWzatInY2Fjmzp3LyJEjAVi7di2vvPIK/v7+LFy4kPXr1+Pp6WmbtBsYGFgmr6Fp06aYTCYOHDjAfffdV+T+AwcOEBgYiK+vb5k8X42SdgZ+WQ6/rCAr8zKftJjOin2JbD3+v/eDu7OFO8ODua99KF2bBGDR6r0iUkUp1PyRl86U8QbL9gMjPj7edmr2FStWrODChQu2oNOuXbtiz1Z6//33mTp1KmvWrCEyMpLRo0fz5ptv8uSTT+Ls7Ez37t1p27Yt8+bNszsVfO/evdSrV4+AgIAyeQ3+/v7cfvvt/Otf/+L555+3m1eTmJjIJ598YgtaUgKpCRD3H4xDyzAl7rI1uxom/nVkMxfxxmSCm5oEcH/7UHq3CcbTVb8KRKTq02+yP+Li6egKrsnf3x+TycTWrVu566672LRpE8888wxubm40b94cgN69ezNu3DhSUlLw8/MD4LvvvuOZZ55hwYIFtrOinnnmGd566y0+/vhjHnvsMQBOnjxZZLG+devW0atXr+uqNzU1lZ07dxZ5DTNmzKBr16707t2b1157jUaNGrFv3z5e
eOEFmjdvzoQJE67r+WqU3Az4+V2s66dhLsjGBFgNEzuNJqwqaM+PdKBl44b0bB1Mn4i6BPtU86uHi0iNo1BTxdWtW5e///3vPPzww3h5eXHrrbfSv39/Vq9ejcVSOCciIiKC9u3b89///pc//elPbN++nQEDBvDmm2/aHe7x8fHhueee4/XXX2fo0KGcPXu2yATj7OxsFi9ezPLly+3a582bx/Dhw/9wXsyPP/5Iu3bt7NpGjBjBv//9b7Zu3cqkSZMYMGAA586dwzAM7r//fj7++GM8PMp2cnW1YrXCnkUUrJyI5fJZzMBWa3P+W9CDbc6diWzVlJ6tgniqRSDeblosT0SqL5NRQ86TTUtLw8fHh9TUVLy9ve3uy87OJj4+nkaNGl1zQm1VtnTpUl544QX27t1bZDLw1fz8889MmzaNRYsW2dpmzZrFV199xffff2/Xd+LEiaxdu5Yff/yxzGqeOHEib7/9NitXrix2jZ3KrMLeU6e2ULBsLJYzcYVfWgOZnD8Ej8h+DOxUnw4N/HT2kohUadf6/P497ampIfr06cPhw4dJSEggLCysRI8JDw/n2LFjREREsHDhQlq3bo2zszPTp08v0nfZsmXMmDGjTGt+9dVXadiwIZs2baJz584lDmPVXn4uJO7B2PQvTHs/xwJcNtyYkd+PPfUe4sV7ooio5+PoKkVEKpz21FAz9tRIxSqz95RhQMpxSNgOp7dhJGyDs7sxFRQufGg1TCws6MFntR7hqT5duTM8WFe7FpFqRXtqRKqJnKXjcN02y/b1lbiSYtRis7UV75v7Exsby39vaoibs9aVEZGaTaFGpBJKuJTF2M93E3TM4B/OThwwGrDD2pSd1ibsMzXHs24zujT2Z1a3RtTx0t5FERFQqBGpdFbsS+Svn+8mNSsPV7qwy7MnbeoHEhXmy7AwX1qHeOPqpL0yIiK/p1AjUklk5xUw5bsDfLTxBABt6/kwbWAUjQNrObgyEZGqQaFGpBI4ev4yz366g/1n0wB4olsjXujdEhcnnfElIlJSCjW/UUNOBJMKUJr30hfbT/PK13vJzC2gtqcLUwdEcmsLXY1cRKS0FGoAZ+fCVVYzMzPtrjskcr2uXMH8ynuriNxMcvcu5sUjrfkyLgGAmMb+TBsURZC3Jv6KiFwPhRrAYrHg6+vLuXPnAPDw8NBaH3JdDMMgMzOTc+fO4evra7tUhZ20s+R9MgiXpJ245o3AbOrJ87HNefrWprpCtojIDVCo+VVwcDCALdiI3AhfX1/be8rO2V3kzR+Ac0YiFwwvzro0YP7D0XRtUjZXPBcRqckUan5lMpmoW7cuderUIS8vz9HlSBXm7Oxc/B6aA0so+PxxnAuyOGwNZZLXBCYPv4eGAZX7SvAiIlWFQs3vWCyW4j+QRK6XYWCsnwarX8WCwU8FEfwn7FX+9fAt+HjoqtkiImVFoUakPOVmULDkL1h2fwrAvPxeHG3/ErPujcRZV88WESlTCjUi5eXIavK/GYVT2inyDTN/z3+UhneN5m9dG2oiuohIOVCoESlrGRcwlr+Iac9/cQJOGwFM4imGPDpU68+IiJQjhRqRsmIYsPu/FCwbiyU7BathYl5Bb1YFP8HfBkTTtI6XoysUEanWFGpEykLKcazfPo/52BoswAFrGK/yJ+7qcw/zoxtg1vozIiLlTqFG5EYYBsR9RMGyF7HkZ5FjOPPP/Ps42uwx3u4XRYivVqgWEakoCjUi1ys7lYJvRmHZ/xUWYJO1FW86P82IB27nhYhgTQYWEalgCjUi1yPzIvmzu+OUdpI8w8Jb+QNIjfoTH/YJ19ozIiIOolAjch2+PZyNkdqQdkYW48yjeHhwf+4IL+ayCCIiUmEUakRKISu3gL8t2cdnW07hwTA6hHnx+kM3U8/Pw9GliYjUeAo1IiX0S1I6z3waxy9JlzGZYHiPNjwf2xwnrQwsIlIpKNSI/AHDMJi/6QSTvztAdp6VQC9X3hkQxc3NdGVtEZHKRKFG5BrOXMpi7Be7WXc4GYBbmgcytX8kgV6uDq5MRER+T6FG5PfysjEuJ/LFMSde/WYf6Tn5uDqZefHOlgyNaaiF9EREKimFGpHfOrOD/C/+RFJaDi+nTyIHF9rV9+X/+kfSJLCWo6sTEZFrUKgRuWLTbKwrXsbJyMfF8KGpJYk+t8fyZLfGmgwsIlIFKNSIAMaW9zEtH4sZWFIQzfzaz/F/g26hVV1vR5cmIiIlpFAjNZ6x4xNM3/0FgBn5/ci55SX+c1szXJy0d0ZEpCpRqJGabd9ijK+fwQR8mN+boHv/Tv9O9R1dlYiIXAf9KSo11y/fU/D5CMxYWZjfA8tdryvQiIhUYQo1UjPFryN/wcNYjHy+LehCSs+3eLRrY0dXJSIiN+C6Qs3MmTNp2LAhbm5uREdHs2XLlmv2X7RoES1btsTNzY2IiAi+++47u/snTZpEy5Yt8fT0xM/Pj9jYWDZv3my7//jx44wYMYJGjRrh7u5OkyZNmDhxIrm5uddTvtR0p7eRN38ATtYcVha058hNb/PUrc0dXZWIiNygUoeahQsXMmbMGCZOnEhcXByRkZH07t2bc+fOFdt/w4YNDB48mBEjRrBjxw769etHv3792Lt3r61P8+bNmTFjBnv27GH9+vU0bNiQXr16cf78eQAOHjyI1WrlvffeY9++fbzzzjvMnj2bl1566TpfttRYiXvI/eg+nAsyWV/Qhs0dpzK6d2tHVyUiImXAZBiGUZoHREdH06lTJ2bMmAGA1WolLCyMZ599lhdffLFI/4EDB5KRkcGSJUtsbV26dCEqKorZs2cX+xxpaWn4+PiwatUqevbsWWyft956i1mzZnHs2LES1X1lm6mpqXh76zTdGikjmewZMbhlnWObtTnfhE/n1f7RmExaIVhEpLIqzed3qfbU5Obmsn37dmJjY/+3AbOZ2NhYNm7cWOxjNm7caNcfoHfv3lftn5uby5w5c/Dx8SEyMvKqtaSmplK7du2r3p+Tk0NaWprdTWowq5WMBSNwyzrHEWsIi5pPZeKDCjQiItVJqUJNcnIyBQUFBAUF2bUHBQWRmJhY7GMSExNL1H/JkiXUqlULNzc33nnnHVauXElAQPFXQT5y5AjTp0/nT3/601VrnTJlCj4+PrZbWFhYSV6iVFN5697B89SPZBkuzA6awGuDb8aiaziJiFQrlebsp1tvvZWdO3eyYcMG7rjjDgYMGFDsPJ2EhATuuOMO+vfvzxNPPHHV7Y0bN47U1FTb7dSpU+VZvlRmJzZi/mEyAP9nGcFfH7kPZ132QESk2inVb/aAgAAsFgtJSUl27UlJSQQHBxf7mODg4BL19/T0pGnTpnTp0oW5c+fi5OTE3Llz7fqcOXOGW2+9la5duzJnzpxr1urq6oq3t7fdTWqgzItkLRiGhQIWF9zErYP+H3W83RxdlYiIlINShRoXFxc6dOjA6tWrbW1Wq5XVq1cTExNT7GNiYmLs+gOsXLnyqv1/u92cnBzb1wkJCfTo0YMOHTrw4YcfYjbrL235A4ZBxsLHcc9K5Ki1Lqe
6vsbNzQMdXZWIiJSTUl8mYcyYMQwdOpSOHTvSuXNnpk2bRkZGBsOHDwfg0UcfJTQ0lClTpgAwatQounfvztSpU+nTpw8LFixg27Zttj0tGRkZTJ48mb59+1K3bl2Sk5OZOXMmCQkJ9O/fH/hfoGnQoAH/93//ZzvVG7jqHiKR7Lx8FiaFMtBw5b2gV5jSu52jSxIRkXJU6lAzcOBAzp8/z4QJE0hMTCQqKorly5fbJgOfPHnSbi9K165d+fTTTxk/fjwvvfQSzZo1Y/HixYSHhwNgsVg4ePAgH330EcnJyfj7+9OpUyfWrVtHmzZtgMI9O0eOHOHIkSPUq1fPrp5SnpEuNcjflh7k00t38IlHDz59pI8mBouIVHOlXqemqtI6NTXLN7vO8NxnOzCZ4KPhnblFh51ERKqkclunRqQqiE/OYNwXuwEY2aOpAo2ISA2hUCPVSnZeASM/iSMjt4DOjWozOraZo0sSEZEKolAj1crryw6y/2watT1deHdQO5y0Ho2ISI2h3/hSbXy/L5F5G44DMHVAJME+Wo9GRKQmUaiRauHMpSxe+LxwHs0T3Rpxa4s6Dq5IREQqWqlP6RapbAqO/MikFZdJzTLRtp4PL/Ru6eiSRETEARRqpGq7GE/+Zw/xj3wLZ11fZfrgHrg4aQekiEhNpN/+UnUV5JH+yVBcCzKIN4J54t6eNPD3dHRVIiLiIAo1UmVlrXgVrwu7SDU8WNnyNfq2b+DokkRExIEUaqRKMo6swX3LdADedn+O0Q/2dHBFIiLiaAo1UvVkJJP138cB+NQay8BHR+LhoulhIiI1nUKNVDmpi/+CR+4FDlnrQa/JtA7RtbxEREShRqqYgkPf43P4KwoME4vqjWPwTS0cXZKIiFQSCjVSdeRcJvOr5wCYb+rDE4MexGQyObgoERGpLBRqpMpIXzYJr+yznLIG4t7rFYK8dRkEERH5H4UaqRKMU1vx3PlvAP4TMJoHu+iwk4iI2FOokcovP5e0/z6NGYPF1m489NBwzGYddhIREXsKNVLpZfz4Nj7pv3DB8OLizRNpFKBVg0VEpCiFGqnczv+Cy8//B8DcWn/ikZ4dHFyQiIhUVgo1UnlZraT89884G3n8WBDJnYOexdmit6yIiBRPnxBSaWVvnovf+W1kGK7saz+JiDBfR5ckIiKVmEKNVE5pZ2HlRADmuj7CY326O7ggERGp7BRqpFJK/vYV3KwZ7LA2peODf8XdxeLokkREpJLTVQCl0rFaDcYm96FvQQLxzYYzunmQo0sSEZEqQKFGKp3FOxNYfdaFza6j+eG+Ho4uR0REqggdfpJKJTM3nzeWHwTgmduaEujl6uCKRESkqlCokUpl9tpjJKXlUL+2B8NvaujockREpApRqJFKI+FSFu+tPQrAuDtb4uqkycEiIlJyCjVSaby5/CA5+VY6N6rNHeHBji5HRESqGIUaqRS2n0jh651nMJlgwt2tMZl0wUoRESkdhRpxOKvV4O9L9gPQv0M9wkN9HFyRiIhURTqlWxzLauWbnQnsPHUJTxcLf+nVwtEViYhIFaU9NeJQOds/pum39xFlOsLTtzaljrebo0sSEZEqSntqxHFy0sn//lXCjQvEeh5lxM2NHF2RiIhUYdpTI45hGGQsGYdn3gWOW4No0uf/4easU7hFROT6KdSIQxRsfg/PPR9jNUx87P8sd0TVd3RJIiJSxSnUSMU7ugaWvwTAVGMIAwcN0yncIiJywxRqpGIlHyHns0exUMDnBbcQNfAVmgd5OboqERGpBhRqpOJkpZD1nwdxzU9nu7UZ53u8we1ttHKwiIiUDYUaqRgF+WR9NhT3tHgSDH++aPYGT93WytFViYhINaJTuqVC5C17CfeTa8k0XHnDdyJvDOyheTQiIlKmtKdGyp112zyct70HwETLc4x7bADuLjp9W0REypZCjZSv4z9jLB0DwDsFAxg8bCR1fdwdXJSIiFRHCjVSfnIzyPrv41iMAr4piCHs3gm0r+/n6KpERKSaUqiRcpO9cjLumWc4bQRwoONkHuwY5uiSRESkGlOokfJxdjfOW2cD8F6tkYy5u52DCxIRkepOoUbKnrWAy5+PxEIBSwqi6TdgGM4WvdVERKR86ZNGylz+5vepdWE3aYY7u8PH0aFBbUeXJCIiNYBCjZSt3EzyV/8DgJmWhxl5z80OLkhERGoKhRopUyfSDR7IHs+8/F60unsUPh7Oji5JRERqiOsKNTNnzqRhw4a4ubkRHR3Nli1brtl/0aJFtGzZEjc3NyIiIvjuu+/s7p80aRItW7bE09MTPz8/YmNj2bx5s12fixcvMmTIELy9vfH19WXEiBFcvnz5esqXcmIYBuMX72VffiirGv6Fe9vVc3RJIiJSg5Q61CxcuJAxY8YwceJE4uLiiIyMpHfv3pw7d67Y/hs2bGDw4MGMGDGCHTt20K9fP/r168fevXttfZo3b86MGTPYs2cP69evp2HDhvTq1Yvz58/b+gwZMoR9+/axcuVKlixZwk8//cSTTz55HS9Zysu3u8+y7nAyLk5m/t4vXJdBEBGRCmUyDMMozQOio6Pp1KkTM2bMAMBqtRIWFsazzz7Liy++WKT/wIEDycjIYMmSJba2Ll26EBUVxezZs4t9jrS0NHx8fFi1ahU9e/bkwIEDtG7dmq1bt9KxY0cAli9fzl133cXp06cJCQn5w7qvbDM1NRVvb+/SvGQpgdSsPHpOXUvy5Ryej23OqNhmji5JRESqgdJ8fpdqT01ubi7bt28nNjb2fxswm4mNjWXjxo3FPmbjxo12/QF69+591f65ubnMmTMHHx8fIiMjbdvw9fW1BRqA2NhYzGZzkcNUV+Tk5JCWlmZ3k/Lz1oqDJF/OoXGgJ0/1aOzockREpAYqVahJTk6moKCAoKAgu/agoCASExOLfUxiYmKJ+i9ZsoRatWrh5ubGO++8w8qVKwkICLBto06dOnb9nZycqF279lWfd8qUKfj4+NhuYWFazba8xJ1M4ZPNJwF4rV84rk66WKWIiFS8SnP206233srOnTvZsGEDd9xxBwMGDLjqPJ2SGDduHKmpqbbbqVOnyrBaucIwDCZ9sw/DgPvbh9K1SYCjSxIRkRqqVKEmICAAi8VCUlKSXXtSUhLBwcHFPiY4OLhE/T09PWnatCldunRh7ty5ODk5MXfuXNs2fh9w8vPzuXjx4lWf19XVFW9vb7ublL0NRy+w+3Qq7s4WXrqrlaPLERGRGqxUocbFxYUOHTqwevVqW5vVamX16tXExMQU+5iYmBi7/gArV668av/fbjcnJ8e2jUuXLrF9+3bb/WvWrMFqtRIdHV2alyBlqSCP2WuPAjCwUxgBtVwdXJCIiNRkTqV9wJgxYxg6dCgdO3akc+fOTJs2jYyMDIYPHw7Ao48+SmhoKFOmTAFg1KhRdO/enalTp9KnTx8WLFjAtm3bmDNnDgAZGRlMnjyZvn37UrduXZKTk5k5cyYJCQn0798fgFatWnHHHXfwxBNPMHv2bPLy8njmmWcYNGhQic58kvJx6bMneOL4ES5Yhj
Di5lsdXY6IiNRwpQ41AwcO5Pz580yYMIHExESioqJYvny5bTLwyZMnMZv/twOoa9eufPrpp4wfP56XXnqJZs2asXjxYsLDwwGwWCwcPHiQjz76iOTkZPz9/enUqRPr1q2jTZs2tu188sknPPPMM/Ts2ROz2cwDDzzAu+++e6OvX65Xygm8jnzNLRYrmxr7E1bbw9EViYhIDVfqdWqqKq1TU7Yuf/U8tXZ9wE8FEdR+ainhoT6OLklERKqhclunRgSAjGRcd38CwM9BDyvQiIhIpaBQI6WWtX4WzkYOu62NuLnX/Y4uR0REBFCokdLKzYCt7wOwxGsgNzcLdHBBIiIihRRqpFTytn2Ee34qx61BtIkdootWiohIpaFQIyVXkEfuT4VnnC1y6UeftvUcXJCIiMj/KNRIiVn3fIFn9lnOG94E3TIcJ4vePiIiUnnoU0lKxjC4vGYqAAvMfXiwSzMHFyQiImJPoUZKxDj8Pd5pv3DZcMPcaQQeLqVet1FERKRcKdRIiaSvLtxL81+jJwNvaevgakRERIpSqJE/dmYH3kmbyTMsnG8zQheuFBGRSkmhRv5Q2o/TAVhq7cLAnl0cXI2IiEjxFGrk2tIT8Tj8NQB76w+hYYCngwsSEREpnkKNXFP2hjk4GflstTbnttt6O7ocERGRq1KokavLy8bY9gEAK2rdT0xjfwcXJCIicnUKNXJVBbsX4Z6XwmkjgObdB+qSCCIiUqkp1MhVXV4/G4BF5jvo276Bg6sRERG5Nq2gJlc11vlF2uR/jrnLUNycLY4uR0RE5JoUaqRY+86ksvykmZXmgazvFuHockRERP6QDj9Jseb9fByAO8ODqevj7thiRERESkChRoq4cDmHr3edAWD4TY0cXI2IiEjJKNRIEZ9uPkluvpXIej60r+/r6HJERERKRKFG7OTmW/l40wkAht3UUKdxi4hIlaFQI3aW7T3LufQcAr1c6RMR4uhyRERESkyhRux8+OsE4YejG+DipLeHiIhUHfrUEpsdJ1PYeeoSLhYzD0XXd3Q5IiIipaJQI4UuHGXzigWYsXJPZAiBXq6OrkhERKRUtPieAJD503SeSvgQL6eeRN70oaPLERERKTXtqRHIzcCy978AHAnoSXioj4MLEhERKT2FGsG653NcCzI4bg0i/OZ7HF2OiIjIdVGoES7//D4AX5pvp09kqIOrERERuT4KNTXdmZ14X9xDjuFEfsRgXY1bRESqLIWaGi5r478BWGHtxL03RTq4GhERkeunUFOT5aRj2f8FANsC+tEi2MvBBYmIiFw/hZoazLp7ES4FmRy11qXtTX0cXY6IiMgNUaipqQyDjA2/mSDcVtd5EhGRqk2hpqY6E4dXyn5yDGcKIgbh7qIJwiIiUrUp1NRQVyYIf2ftTL+bIhxcjYiIyI1TqKmJslNx2v8lANv876VlsLeDCxIREblxCjU1kHXXQpyt2Ry2hhJ1052OLkdERKRMKNTUNIZhO/T0hfl27tYKwiIiUk0o1NQ0p7fieekQ2YYz1oiBmiAsIiLVhkJNDZO9qXAvzRJrDP1iwh1cjYiISNlRqKlJslJwOrAYgK3+99I6RBOERUSk+lCoqUGse77EyZrDAWsYHbr2cnQ5IiIiZUqhpgZJ3fEVAN+ZunN3lFYQFhGR6sXJ0QVIxfk/v1e4eGIpYZE98HDRt15ERKoXfbLVEDn5BXyz/xLp1mg+6xjp6HJERETKnA4/1RBrD50nPTufIG9XOjeq7ehyREREypxCTQ3x9a4zANzTNgSL2eTgakRERMqeQk0NkJGTz+oDSQD01QRhERGppq4r1MycOZOGDRvi5uZGdHQ0W7ZsuWb/RYsW0bJlS9zc3IiIiOC7776z3ZeXl8fYsWOJiIjA09OTkJAQHn30Uc6cOWO3jV9++YV7772XgIAAvL29ufnmm/nhhx+up/waZ+X+JLLzrDT09yAi1MfR5YiIiJSLUoeahQsXMmbMGCZOnEhcXByRkZH07t2bc+fOFdt/w4YNDB48mBEjRrBjxw769etHv3792Lt3LwCZmZnExcXxyiuvEBcXx5dffsmhQ4fo27ev3Xbuvvtu8vPzWbNmDdu3bycyMpK7776bxMTE63jZNcs3vx566hsZgsmkQ08iIlI9mQzDMErzgOjoaDp16sSMGTMAsFqthIWF8eyzz/Liiy8W6T9w4EAyMjJYsmSJra1Lly5ERUUxe/bsYp9j69atdO7cmRMnTlC/fn2Sk5MJDAzkp59+olu3bgCkp6fj7e3NypUriY2N/cO609LS8PHxITU1FW/vmrOSbkpGLp0mryLfarBqzC00rePl6JJERERKrDSf36XaU5Obm8v27dvtQoTZbCY2NpaNGzcW+5iNGzcWCR29e/e+an+A1NRUTCYTvr6+APj7+9OiRQv+85//kJGRQX5+Pu+99x516tShQ4cOxW4jJyeHtLQ0u1tNtGxvIvlWg1Z1vRVoRESkWitVqElOTqagoICgoCC79qCgoKseBkpMTCxV/+zsbMaOHcvgwYNticxkMrFq1Sp27NiBl5cXbm5uvP322yxfvhw/P79itzNlyhR8fHxst7CwsNK81Orh8nlWxh0C4F5NEBYRkWquUp39lJeXx4ABAzAMg1mzZtnaDcNg5MiR1KlTh3Xr1rFlyxb69evHPffcw9mzZ4vd1rhx40hNTbXdTp06VVEvo9LIWPN/zE4cxOOWpdwTqVAjIiLVW6lWFA4ICMBisZCUlGTXnpSURHBwcLGPCQ4OLlH/K4HmxIkTrFmzxu642Zo1a1iyZAkpKSm29n/961+sXLmSjz76qNi5PK6urri6upbm5VUvVivs+xJXUz7OAU0I9XV3dEUiIiLlqlR7alxcXOjQoQOrV6+2tVmtVlavXk1MTEyxj4mJibHrD7By5Uq7/lcCzeHDh1m1ahX+/v52/TMzMwuLNduXazabsVqtpXkJNcfJjXjmnCPN8CC08z2OrkZERKTclfraT2PGjGHo0KF07NiRzp07M23aNDIyMhg+fDgAjz76KKGhoUyZMgWAUaNG0b17d6ZOnUqfPn1YsGAB27ZtY86cOUBhoHnwwQeJi4tjyZIlFBQU2Obb1K5dGxcXF2JiYvDz82Po0KFMmDABd3d33n//feLj4+nTp09ZjUW1krZtAd7ACmsn7ohs4OhyREREyl2pQ83AgQM5f/48EyZMIDExkaioKJYvX26bDHzy5Em7PSpdu3bl008/Zfz48bz00ks0a9aMxYsXEx4eDkBCQgLffPMNAFFRUXbP9cMPP9CjRw8CAgJYvnw5L7/8Mrfddht5eXm0adOGr7/+mshIXZyxiII8nA8Wjumx4N70r1WDD8OJiEiNUep1aqqqmrROjXF4FaZPHiDZ8Gbt3et4oFNDR5ckIiJyXcptnRqpGi5t/QyAFUYXekWEOrgaERGRiqFQU93kZeN+dBkAZ+vdhZebs4MLEhERqRgKNdWM9fD3uBVkcMaoTesuvRxdjoiISIVRqKlmLm0pPPT0PV25rVXxaweJiIhURwo11UlOOl4nVgFwsVFf3
JwtDi5IRESk4ijUVCP5+5fgbORy1FqXDjG3OrocERGRCqVQU42k/nrW0yrLzdzUNMDB1YiIiFQshZrqIuMCvmfWA5DT8j6cLPrWiohIzaJPvmoid89XWChgr7UhN3Xp6uhyREREKpxCTTWRvq3w0NNal+60r+/r2GJEREQcQKGmOkhNwC95OwCWtg9gMpkcXJCIiEjFU6ipBrJ2LMKMwWZrS26Lbu/ockRERBxCoaYayIpbAMAWz1tpHuTl4GpEREQcQ6GmqivIY3t+Y84ZvtRq96CjqxEREXEYJ0cXIDcm8XIBT6YMAWMw6zq1dnQ5IiIiDqM9NVXckt1nMAzo2NCfen4eji5HRETEYRRqqrivd54BoG9UqIMrERERcSyFmirs6PnL7ElIxclsok9EXUeXIyIi4lAKNVXYN7/upenWLIDani4OrkZERMSxFGqqKMMw+GZXYai5V4eeREREFGqqqj0JqcQnZ+DmbOb21kGOLkdERMThFGqqqCsThG9vHYynq87MFxERUaipggqsBt9eOfQUGeLgakRERCoHhZoq6PQ3f+fOzG9o4JbFLc0DHV2OiIhIpaDjFlVNXhbBu//Fq85ZzGrSBRcn5VIRERHQnpoqJ+/gMlytWZw2AoiK6eXockRERCoNhZoq5uKmzwBYY+lGdGN/B1cjIiJSeSjUVCXZqdQ+8wMAOa3uw2w2ObggERGRykOhpgrJ2vMNzkYeh62hxMR0d3Q5IiIilYpCTRWStuVTANa5dadNqI+DqxEREalcFGqqisvnCTi/CQBzRH9MJh16EhER+S2FmioiPW4RFqzstDame0y0o8sRERGpdBRqqojMuIUAbPe6jUYBng6uRkREpPJRqKkKLp0k6NJOrIYJj/YDHF2NiIhIpaRQUwWkbFkAwGajFT07RTq4GhERkcpJoaYKKNi1CIB9tW+njrebg6sRERGpnBRqKjnj3EECMn4hz7AQ0Km/o8sRERGptBRqKrnkTb+uTWNEcmv7lg6uRkREpPJSqKnMDAPLvi8AOBZ8Bz7uzg4uSEREpPJSqKnErAk7qJ1zmizDhbCYBxxdjoiISKWmUFOJndswH4Af6Uj38EYOrkZERKRyU6iprKwFeBz+GoCz9fvg5mxxcEEiIiKVm0JNJZV/cjPeecmkGh4069rP0eWIiIhUek6OLkCKty6rCf+XM5kI9wu81jzE0eWIiIhUetpTU0l9vesM+4xGuEU9iJNF3yYREZE/ok/LSqjAavDDofMA9Glb18HViIiIVA0KNZXQ3oRUUrPy8HJ1ol2Yr6PLERERqRIUaiqh9UeSAejSxF+HnkREREpIn5iV0PrDhaGmW7MAB1ciIiJSdSjUVDJZuQVsP5ECwM1NFWpERERK6rpCzcyZM2nYsCFubm5ER0ezZcuWa/ZftGgRLVu2xM3NjYiICL777jvbfXl5eYwdO5aIiAg8PT0JCQnh0Ucf5cyZM0W2s3TpUqKjo3F3d8fPz49+/fpdT/mV2pbjF8ktsBLi40ajAE9HlyMiIlJllDrULFy4kDFjxjBx4kTi4uKIjIykd+/enDt3rtj+GzZsYPDgwYwYMYIdO3bQr18/+vXrx969ewHIzMwkLi6OV155hbi4OL788ksOHTpE37597bbzxRdf8MgjjzB8+HB27drFzz//zEMPPXQdL7lyW3+48Kynm5sFYDKZHFyNiIhI1WEyDMMozQOio6Pp1KkTM2bMAMBqtRIWFsazzz7Liy++WKT/wIEDycjIYMmSJba2Ll26EBUVxezZs4t9jq1bt9K5c2dOnDhB/fr1yc/Pp2HDhrz66quMGDGiNOXapKWl4ePjQ2pqKt7e3te1jYpwx7SfOJiYzruD29E3UovuiYhIzVaaz+9S7anJzc1l+/btxMbG/m8DZjOxsbFs3Lix2Mds3LjRrj9A7969r9ofIDU1FZPJhK+vLwBxcXEkJCRgNptp164ddevW5c4777Tt7SlOTk4OaWlpdrfKLnXXEt65OJI/Wb7lpib+ji5HRESkSilVqElOTqagoICgoCC79qCgIBITE4t9TGJiYqn6Z2dnM3bsWAYPHmxLZMeOHQNg0qRJjB8/niVLluDn50ePHj24ePFisduZMmUKPj4+tltYWFhpXqpDJO9aTivzSSI9U/Cv5erockRERKqUSnX2U15eHgMGDMAwDGbNmmVrt1qtALz88ss88MADdOjQgQ8//BCTycSiRYuK3da4ceNITU213U6dOlUhr+FGeCasByC7/i0OrkRERKTqKdUFLQMCArBYLCQlJdm1JyUlERwcXOxjgoODS9T/SqA5ceIEa9assTtuVrdu4aUCWrdubWtzdXWlcePGnDx5stjndXV1xdW16uztMNLOEpwTj9UwUTfqdkeXIyIiUuWUak+Ni4sLHTp0YPXq1bY2q9XK6tWriYmJKfYxMTExdv0BVq5cadf/SqA5fPgwq1atwt/ffj5Jhw4dcHV15dChQ3aPOX78OA0aNCjNS6i0knZ9D8A+GtGuRRMHVyMiIlL1lGpPDcCYMWMYOnQoHTt2pHPnzkybNo2MjAyGDx8OwKOPPkpoaChTpkwBYNSoUXTv3p2pU6fSp08fFixYwLZt25gzZw5QGE4efPBB4uLiWLJkCQUFBbb5NrVr18bFxQVvb2+eeuopJk6cSFhYGA0aNOCtt94CoH///mUyEI52eX9hqIn37kiEs8XB1YiIiFQ9pQ41AwcO5Pz580yYMIHExESioqJYvny5bTLwyZMnMZv/twOoa9eufPrpp4wfP56XXnqJZs2asXjxYsLDwwFISEjgm2++ASAqKsruuX744Qd69OgBwFtvvYWTkxOPPPIIWVlZREdHs2bNGvz8/K7ndVcuhoH/uV/PBmt0q2NrERERqaJKvU5NVVWZ16nJSzyA8+wuZBvOHH1sH20aBP3xg0RERGqAclunRsrH2R3LANhpakmrsDoOrkZERKRqUqipBPIO/wBAYkAMZrMujSAiInI9FGocrSCfuilbAXBvcZuDixEREam6FGocLOP4FjyMLFKMWrRud7OjyxEREamyFGoc7GzccgB2ObUlLMDLwdWIiIhUXQo1DuZ0Yi0AqXW7OrgSERGRqk2hxpFyLhN6ufBK477hdzi4GBERkapNocaBLh74EWfyOWkEEhUZ5ehyREREqjSFGgc6v2sFAAfdO+Dj7uzgakRERKo2hRoH8jqzHoCc+t0cXImIiEjVp1DjIMbl84TkHAOgblQvB1cjIiJS9SnUOMjZ3asAOGTUJ6J5EwdXIyIiUvWV+irdUjZ+zm7MrrzhhNXx509OFkeXIyIiUuVpT42DrDnjxPyC28lvO9jRpYiIiFQLCjUOYLUabDp2AYAujf0dXI2IiEj1oFDjAIeS0knJzMPDxULbej6OLkdERKRaUKhxgI1HC/fSdGxYG2eLvgUiIiJlQZ+oDnDl0FOMDj2JiIiUGYWaCma1GmyOvwhAl8a1HVyNiIhI9aFQU8H2n00jNSuPWq5ORIRqPo2IiEhZUaipYFcOPXVq6IeT5tOIiIiUGX2qVjDbfJom
<remaining base64-encoded PNG data for the notebook's equilibrium-path plot elided>", 194 | 
"text/plain": [ 195 | "
" 196 | ] 197 | }, 198 | "metadata": {}, 199 | "output_type": "display_data" 200 | } 201 | ], 202 | "source": [ 203 | "model = cli.model\n", 204 | "# Plot the results of the previous cell\n", 205 | "df = model.test_results[\n", 206 | " model.test_results[\"ensemble\"] == 0\n", 207 | "] # first ensemble in dataframe from model on test data\n", 208 | "\n", 209 | "fig, ax = plt.subplots()\n", 210 | "ax.plot(df[\"t\"], df[\"u_hat\"], label=r\"$u(X_t)$, $\\phi($ReLU$)$\")\n", 211 | "if model.hparams.nu == 1.0: # only add reference line if linear\n", 212 | " ax.plot(df[\"t\"], df[\"u_reference\"], dashes=[10, 5, 10, 5], label=r\"$u(X_t)$, LQ\")\n", 213 | "ax.legend()\n", 214 | "ax.set_title(r\"$u(X_t)$ with $\\phi($ReLU$)$ : Equilibrium Path\")\n", 215 | "ax.set_xlabel(r\"Time($t$)\")\n" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 5, 221 | "metadata": {}, 222 | "outputs": [ 223 | { 224 | "name": "stdout", 225 | "output_type": "stream", 226 | "text": [ 227 | "tensor([[0.0234],\n", 228 | " [0.0210]])\n" 229 | ] 230 | } 231 | ], 232 | "source": [ 233 | "# Example to evaluate model after fitting it, can do at multiple points at same time:\n", 234 | "model.eval()\n", 235 | "X_points = torch.stack((model.X_0 + 0.001 * torch.randn(model.hparams.N),\n", 236 | " model.X_0 + 0.05 * torch.ones(model.hparams.N)))\n", 237 | "# evaluate the policy. Doing it with a `no_grad` can speed things up \n", 238 | "with torch.no_grad():\n", 239 | " u_X = model(X_points) # evaluating like a function provides the policy because of the .forward() method\n", 240 | " print(u_X)" 241 | ] 242 | } 243 | ], 244 | "metadata": { 245 | "kernelspec": { 246 | "display_name": "pytorch-sandbox", 247 | "language": "python", 248 | "name": "python3" 249 | }, 250 | "language_info": { 251 | "codemirror_mode": { 252 | "name": "ipython", 253 | "version": 3 254 | }, 255 | "file_extension": ".py", 256 | "mimetype": "text/x-python", 257 | "name": "python", 258 | "nbconvert_exporter": "python", 259 | "pygments_lexer": "ipython3", 260 | "version": "3.9.15" 261 | }, 262 | "orig_nbformat": 4, 263 | "vscode": { 264 | "interpreter": { 265 | "hash": "535f64b549f1c1a44024bff541c70f1e607ec1cdf54c7c015722119882ca2e26" 266 | } 267 | } 268 | }, 269 | "nbformat": 4, 270 | "nbformat_minor": 2 271 | } 272 | -------------------------------------------------------------------------------- /generalized_mean.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import torch 3 | import pytorch_lightning as pl 4 | import yaml 5 | import math 6 | import numpy as np 7 | import wandb 8 | import timeit 9 | import econ_layers 10 | import torch.nn.functional as F 11 | from torch.utils.data import DataLoader 12 | from pytorch_lightning.cli import LightningCLI 13 | from pathlib import Path 14 | from pytorch_lightning.loggers import WandbLogger 15 | from torch.utils.data import TensorDataset 16 | 17 | 18 | class GeneralizedMean(pl.LightningModule): 19 | def __init__( 20 | self, 21 | a_min: float, 22 | a_max: float, 23 | X_distribution: str, 24 | std: float, 25 | N: int, 26 | p: float, 27 | # some general configuration 28 | verbose: bool, 29 | hpo_objective_name: str, 30 | always_log_hpo_objective: bool, 31 | print_metrics: bool, 32 | save_metrics: bool, 33 | save_test_results: bool, 34 | test_seed: int, 35 | train_data_seed: int, 36 | test_loss_success_threshold: float, 37 | # parameters for method 38 | num_train_points: int, 39 | num_val_points: int, 40 | num_test_points: int, 41 | 
batch_size: int, 42 | shuffle_training: bool, 43 | # settings for deep learning approximation 44 | ml_model: torch.nn.Module, 45 | ): 46 | super().__init__() 47 | self.save_hyperparameters(ignore=["ml_model"]) # access with self.hparams.alpha, etc. 48 | self.ml_model = ml_model 49 | 50 | # Used for evaluating the model 51 | def forward(self, X): 52 | return self.ml_model(X) # deep sets/etc. 53 | 54 | def training_step(self, batch, batch_idx): 55 | x, y = batch 56 | y = y.unsqueeze(1) # to enable broadcasting of self(x) 57 | loss = F.mse_loss(self(x), y, reduction="mean") 58 | self.log("train_loss", loss, prog_bar=True) 59 | return loss 60 | 61 | def validation_step(self, batch, batch_idx): 62 | x, y = batch 63 | y = y.unsqueeze(1) # to enable broadcasting of self(x) 64 | residuals = y - self(x) 65 | loss = F.mse_loss(self(x), y, reduction="mean") 66 | 67 | rel_error = torch.mean(torch.abs(residuals) / torch.abs(y)) 68 | abs_error = torch.mean(torch.abs(residuals)) 69 | 70 | self.log("val_loss", loss, prog_bar=True) 71 | self.log("val_rel_error", rel_error, prog_bar=True) 72 | self.log("val_abs_error", abs_error, prog_bar=True) 73 | 74 | def test_step(self, batch, batch_idx): 75 | x, y_f = batch 76 | y_f = y_f.unsqueeze(1) # to enable broadcasting of self(x) 77 | y = self(x) 78 | residuals = y_f - y 79 | loss = F.mse_loss(y_f, y, reduction="mean") 80 | rel_error = torch.abs(y_f - y) / torch.abs(y_f) 81 | abs_error = torch.abs(y_f - y) 82 | 83 | self.test_results = pd.concat( 84 | [ 85 | self.test_results, 86 | pd.DataFrame( 87 | { 88 | "x_norm": x.norm(dim=1).cpu().numpy().tolist(), # x is too large to store 89 | "f_x": y_f.squeeze().cpu().numpy().tolist(), 90 | "f_hat_x": y.squeeze().cpu().numpy().tolist(), 91 | "rel_error": rel_error.squeeze().cpu().numpy().tolist(), 92 | "abs_error": abs_error.squeeze().cpu().numpy().tolist(), 93 | } 94 | ), 95 | ] 96 | ) 97 | self.log("test_loss", loss, prog_bar=True) 98 | self.log("test_rel_error", rel_error.mean(), prog_bar=True) 99 | self.log("test_abs_error", abs_error.mean(), prog_bar=True) 100 | 101 | # simulate DGP 102 | def simulate_data(self, num_points, generator=None): 103 | X = torch.empty(num_points, self.hparams.N, device=self.device, dtype=self.dtype) 104 | for i in range(0, num_points): 105 | a_i = torch.empty(1).uniform_( 106 | self.hparams.a_min, self.hparams.a_max, generator=generator 107 | ) 108 | if self.hparams.X_distribution == "normal": 109 | X[i] = torch.normal( 110 | a_i.squeeze(0), 111 | self.hparams.std, 112 | size=(self.hparams.N,), 113 | device=self.device, 114 | dtype=self.dtype, 115 | generator=generator, 116 | ) 117 | X[i] = X[i].abs() # This will almost never happen for a_i in reasonable range. 
Otherwise use truncated normal 118 | elif self.hparams.X_distribution == "uniform": 119 | d = self.hparams.std * math.sqrt(3) # ensures std is correct 120 | X[i] = ( 121 | torch.rand( 122 | self.hparams.N, device=self.device, dtype=self.dtype, generator=generator 123 | ) 124 | * 2 125 | * d 126 | + a_i 127 | - d 128 | ) # uniform in [a_i - d, a_i + d] 129 | else: 130 | raise ValueError("Distribution not supported") 131 | 132 | Y = ( 133 | X.pow(self.hparams.p).mean(dim=1).pow(1 / self.hparams.p) 134 | ) # generalized mean Doing mean over each row 135 | return X, Y 136 | 137 | def setup(self, stage): 138 | if stage == "fit" or stage is None: 139 | if self.hparams.train_data_seed > 0: 140 | generator = torch.Generator(device=self.device) 141 | generator.manual_seed(self.hparams.train_data_seed) 142 | else: 143 | generator = None # otherwise use default RNG 144 | 145 | # self.train_data = self.old_simulate_data(self.hparams.num_train_points) 146 | X, Y = self.simulate_data(self.hparams.num_train_points, generator=generator) 147 | self.train_data = TensorDataset(X, Y) 148 | 149 | if self.hparams.num_val_points > 0: 150 | X, Y = self.simulate_data(self.hparams.num_val_points, generator=generator) 151 | self.val_data = TensorDataset(X, Y) 152 | else: 153 | self.val_data = [] 154 | if stage == "test": 155 | if self.hparams.test_seed > 0: 156 | generator = torch.Generator(device=self.device) 157 | generator.manual_seed(self.hparams.test_seed) 158 | else: 159 | generator = None # otherwise use default RNG 160 | 161 | X, Y = self.simulate_data(self.hparams.num_test_points, generator=generator) 162 | self.test_data = TensorDataset(X, Y) 163 | self.test_results = pd.DataFrame() 164 | 165 | def train_dataloader(self): 166 | return DataLoader( 167 | self.train_data, 168 | batch_size=self.hparams.batch_size 169 | if self.hparams.batch_size > 0 170 | else len(self.train_data), 171 | shuffle=self.hparams.shuffle_training, 172 | ) 173 | 174 | def val_dataloader(self): 175 | return DataLoader( 176 | self.val_data, 177 | batch_size=self.hparams.batch_size 178 | if self.hparams.batch_size > 0 179 | else len(self.val_data), 180 | ) 181 | 182 | def test_dataloader(self): 183 | return DataLoader( 184 | self.test_data, 185 | batch_size=self.hparams.batch_size 186 | if self.hparams.batch_size > 0 187 | else len(self.test_data), 188 | ) 189 | 190 | def log_and_save(trainer, model, train_time, train_callback_metrics): 191 | if type(trainer.logger) is WandbLogger: 192 | # Valid numeric types 193 | def not_number_type(value): 194 | if value is None: 195 | return True 196 | 197 | if not isinstance(value, (int, float)): 198 | return True 199 | 200 | if math.isnan(value) or math.isinf(value): 201 | return True 202 | 203 | return False # otherwise a valid, non-infinite number 204 | 205 | # If early stopping, evaluate success 206 | early_stopping_check_failed = math.nan 207 | early_stopping_monitor = "" 208 | early_stopping_threshold = math.nan 209 | for callback in trainer.callbacks: 210 | if type(callback) == pl.callbacks.early_stopping.EarlyStopping: 211 | early_stopping_monitor = callback.monitor 212 | early_stopping_value = train_callback_metrics[callback.monitor].cpu().numpy().tolist() 213 | early_stopping_threshold = callback.stopping_threshold 214 | early_stopping_check_failed = not_number_type(early_stopping_value 215 | ) or (early_stopping_value > callback.stopping_threshold) # hardcoded to min for now. 
216 | break 217 | 218 | # Check test loss 219 | if model.hparams.test_loss_success_threshold == 0: 220 | test_loss_check_failed = math.nan 221 | elif not_number_type(cli.trainer.logger.experiment.summary["test_loss"]) or ( 222 | cli.trainer.logger.experiment.summary["test_loss"] 223 | > model.hparams.test_loss_success_threshold 224 | ): 225 | test_loss_check_failed = True 226 | else: 227 | test_loss_check_failed = False 228 | 229 | # Determine convergence results 230 | if ( 231 | early_stopping_check_failed in [False, math.nan] 232 | and test_loss_check_failed in [False, math.nan] 233 | ): 234 | retcode = 0 235 | convergence_description = "Success" 236 | elif early_stopping_check_failed == True: 237 | retcode = -1 238 | convergence_description = "Early stopping failure" 239 | elif test_loss_check_failed == True: 240 | retcode = -3 241 | convergence_description = "Test loss failure due to possible overfitting" 242 | else: 243 | retcode = -100 244 | convergence_description = " Unknown failure" 245 | 246 | # Log all calculated results 247 | trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) 248 | trainer.logger.experiment.log({"train_time": train_time}) 249 | trainer.logger.experiment.log({"early_stopping_monitor": early_stopping_monitor}) 250 | trainer.logger.experiment.log({"early_stopping_threshold": early_stopping_threshold}) 251 | trainer.logger.experiment.log({"early_stopping_check_failed": early_stopping_check_failed}) 252 | trainer.logger.experiment.log({"test_loss_check_failed": test_loss_check_failed}) 253 | trainer.logger.experiment.log({"trainable_parameters": trainable_parameters}) 254 | trainer.logger.experiment.log({"retcode": retcode}) 255 | trainer.logger.experiment.log({"convergence_description": convergence_description}) 256 | 257 | # Set objective for hyperparameter optimization 258 | # Objective value given in the settings, or empty 259 | if model.hparams.hpo_objective_name is not None: 260 | hpo_objective_value = dict(cli.trainer.logger.experiment.summary)[ 261 | model.hparams.hpo_objective_name 262 | ] 263 | else: 264 | hpo_objective_value = math.nan 265 | 266 | if model.hparams.always_log_hpo_objective or retcode >= 0: 267 | trainer.logger.experiment.log({"hpo_objective": hpo_objective_value}) 268 | else: 269 | trainer.logger.experiment.log({"hpo_objective": math.nan}) 270 | 271 | # Save test results 272 | trainer.logger.log_text( 273 | key="test_results", dataframe=trainer.model.test_results 274 | ) # Saves on wandb for querying later 275 | 276 | # save the summary statistics in a file 277 | if model.hparams.save_metrics and trainer.log_dir is not None: 278 | metrics_path = Path(trainer.log_dir) / "metrics.yaml" 279 | with open(metrics_path, "w") as fp: 280 | yaml.dump(dict(cli.trainer.logger.experiment.summary), fp) 281 | 282 | if model.hparams.print_metrics: 283 | print(dict(cli.trainer.logger.experiment.summary)) 284 | return 285 | else: # almost no features enabled for other loggers. 
Could refactor later 286 | if model.hparams.save_test_results and trainer.log_dir is not None: 287 | model.test_results.to_csv(Path(trainer.log_dir) / "test_results.csv", index=False) 288 | 289 | 290 | if __name__ == "__main__": 291 | cli = LightningCLI( 292 | GeneralizedMean, 293 | seed_everything_default=123, 294 | run=False, 295 | save_config_callback=None, # turn this on to save the full config file rather than just having it uploaded 296 | parser_kwargs={"default_config_files": ["generalized_mean_defaults.yaml"]}, 297 | save_config_kwargs={"save_config_overwrite": True}, 298 | ) 299 | # Fit the model, timing training separately for plotting, then evaluate generalization 300 | start = timeit.default_timer() 301 | cli.trainer.fit(cli.model) 302 | train_time = timeit.default_timer() - start 303 | train_callback_metrics = cli.trainer.callback_metrics 304 | cli.trainer.test(cli.model) 305 | 306 | # Add additional calculations such as the HPO objective to the logs and saved files 307 | log_and_save(cli.trainer, cli.model, train_time, train_callback_metrics) 308 | -------------------------------------------------------------------------------- /generalized_mean_defaults.yaml: -------------------------------------------------------------------------------- 1 | trainer: 2 | accelerator: cpu 3 | max_epochs: 1000 4 | min_epochs: 0 5 | max_time: 00:00:03:00 6 | precision: 32 7 | num_sanity_val_steps: 0 8 | logger: 9 | class_path: pytorch_lightning.loggers.WandbLogger 10 | init_args: 11 | offline: true # set to true to not upload during testing 12 | log_model: false # set to true to save the model at the end 13 | name: null # can set a name or have it automatically generated 14 | project: symmetry_examples 15 | group: null # can group related runs 16 | tags: 17 | - basic_example 18 | callbacks: 19 | - class_path: pytorch_lightning.callbacks.LearningRateMonitor 20 | init_args: 21 | logging_interval: step 22 | log_momentum: false 23 | # - class_path: pytorch_lightning.callbacks.ModelCheckpoint 24 | # init_args: 25 | # filename: best 26 | # monitor: _loss 27 | # verbose: false 28 | # save_last: true 29 | # save_top_k: 1 30 | # save_weights_only: true 31 | # mode: min 32 | # auto_insert_metric_name: true 33 | - class_path: pytorch_lightning.callbacks.EarlyStopping 34 | init_args: 35 | monitor: train_loss # val_loss or val_rel_error is normally a better choice, but we are examining fitting/overfitting here. 36 | min_delta: 0.0 37 | patience: 1000000 38 | mode: min 39 | check_finite: true 40 | divergence_threshold: 1e5 # stops if the monitored loss grows larger than this 41 | stopping_threshold: 1.0e-6 # if using val_rel_error, then on the order of 0.05 or so 42 | # verbose: true 43 | optimizer: 44 | class_path: torch.optim.Adam 45 | init_args: 46 | lr: 0.005 # a low learning rate seems to converge relatively quickly here 47 | 48 | # Schedulers are either non-binding or not helpful for this problem. 
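# If a scheduler is wanted anyway, one can be swapped in from the CLI in the same way the optimizer is in the LBFGS example further below (the step size and gamma here are illustrative, not tuned):
# python generalized_mean.py --lr_scheduler.class_path=torch.optim.lr_scheduler.StepLR --lr_scheduler.step_size=1000 --lr_scheduler.gamma=0.8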
49 | # lr_scheduler: 50 | # class_path: torch.optim.lr_scheduler.StepLR 51 | # init_args: 52 | # step_size: 1000 # number of epochs 53 | # gamma: 0.8 54 | 55 | # lr_scheduler: 56 | # class_path: pytorch_lightning.cli.ReduceLROnPlateau # torch.optim.lr_scheduler.ReduceLROnPlateau 57 | # init_args: 58 | # monitor: val_rel_error 59 | # factor: 0.99 60 | # patience: 10 61 | 62 | model: 63 | # Model parameters 64 | a_min: 1.0 # if this interval is too tight and std is too large, there may be draws where the generalized mean is undefined due to negative values 65 | a_max: 3.0 66 | std: 0.3 # make sure this isn't too small, or else the problem is too easy 67 | X_distribution: normal # uniform or normal 68 | N: 256 # 32 # dimensionality of the state 69 | p: 1.5 70 | 71 | # Settings for output 72 | verbose: false 73 | hpo_objective_name: test_loss 74 | always_log_hpo_objective: false 75 | print_metrics: false 76 | save_metrics: false 77 | save_test_results: false 78 | test_seed: 0 # set to 0 to use the default RNG seed 79 | train_data_seed: 0 # set to 0 to use the default RNG seed 80 | 81 | # algorithm settings 82 | num_train_points: 10 83 | num_val_points: 50 84 | num_test_points: 200 85 | batch_size: 32 86 | shuffle_training: true 87 | test_loss_success_threshold: 0.0 # 1e-4 88 | 89 | ## Invariance with NN 90 | ml_model: 91 | class_path: econ_layers.layers.DeepSet 92 | init_args: 93 | n_in: 1 94 | n_out: 1 95 | L: 2 96 | 97 | phi_layers: 2 98 | phi_hidden_dim: 128 99 | phi_hidden_bias: true 100 | phi_last_bias: true 101 | phi_activator: 102 | class_path: torch.nn.ReLU 103 | 104 | rho_layers: 2 105 | rho_hidden_dim: 128 106 | rho_hidden_bias: false 107 | rho_last_bias: true 108 | rho_activator: 109 | class_path: torch.nn.ReLU 110 | 111 | ## No invariance 112 | # python generalized_mean.py --model.ml_model.class_path=econ_layers.layers.FlexibleSequential --model.N=32 --model.ml_model.n_in=32 --model.ml_model.layers=3 --model.ml_model.hidden_dim=256 --trainer.max_epochs=1000 113 | # ml_model: 114 | # class_path: econ_layers.layers.FlexibleSequential 115 | # init_args: 116 | # n_in: 32 # must match N 117 | # n_out: 1 118 | # layers: 3 119 | # hidden_dim: 256 120 | # hidden_bias: true 121 | # last_bias: true 122 | 123 | 124 | # CLI overrides for list-valued components (e.g. the callbacks) only modify the last entry: 125 | # python generalized_mean.py --trainer.max_epochs=5 --trainer.callbacks.stopping_threshold=0.01 126 | 127 | 128 | # LBFGS is very fast, but sometimes can't hit the lower stopping_threshold for some N; setting it to 0.0075, for example, usually works. 129 | # set trainer.max_epochs=1 130 | # The maximum number of iterations can also be set higher: 
131 | # python generalized_mean.py --optimizer.class_path=torch.optim.LBFGS --optimizer.tolerance_grad=1.0e-7 --optimizer.lr=1.0 --optimizer.max_iter=5000 132 | 133 | # LBFGS has trouble with bigger variances 134 | # optimizer: 135 | # class_path: torch.optim.LBFGS 136 | # init_args: 137 | # tolerance_grad: 1.0e-7 138 | # max_iter: 5000 139 | # lr: 1.0 140 | # line_search_fn: null # 'strong_wolfe' doesn't seem to help much here 141 | 142 | -------------------------------------------------------------------------------- /generate_figures/concentration_euler_residual_linear.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | from matplotlib import cm 4 | from utilities import plot_params 5 | 6 | params = plot_params((8, 3.5)) 7 | 8 | # Defining the output path 9 | output_dir = "./figures" 10 | plot_name = "concentration_euler_residual_linear" 11 | output_path = output_dir + "/" + plot_name + ".pdf" 12 | 13 | beta = 0.95 14 | sigma = 0.005 15 | a_1 = 1.0 # this alpha_1 16 | gamma = 90 17 | delta = 0.05 18 | h_1 = -0.050461355001343744 19 | 20 | 21 | def std_euler_res(N): 22 | num = beta * sigma * (a_1 - gamma * (1 - delta) * h_1) # equation 43 23 | return np.abs(num) / np.sqrt(N) 24 | 25 | 26 | def std_policy(N): 27 | num = h_1 * sigma # equation 40 28 | return np.abs(num) / np.sqrt(N) 29 | 30 | 31 | N_space = np.linspace(1, 10000, 1000) 32 | std_val_res = std_euler_res(N_space) 33 | std_val_policy = std_policy(N_space) 34 | 35 | 36 | plt.rcParams.update(params) 37 | 38 | ax_res = plt.subplot(121) 39 | plt.plot(N_space, std_val_res) 40 | plt.title(r"Std. Dev. of $\varepsilon(X;u)$") 41 | ax_res.set_yscale("log") 42 | ax_res.set_xscale("log") 43 | ax_res.xaxis.set_ticks([1, 10, 100, 1000, 10000]) 44 | ax_res.yaxis.set_ticks([10e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7]) 45 | plt.xlabel(r"$N$") 46 | 47 | ax_pol = plt.subplot(122, sharey=ax_res, sharex=ax_res) 48 | plt.plot(N_space, std_val_policy) 49 | plt.title(r"Std. Dev. 
of $u(X')$ Errors") 50 | plt.xlabel(r"$N$") 51 | plt.tight_layout() 52 | 53 | plt.savefig(output_path) 54 | -------------------------------------------------------------------------------- /generate_figures/deep_sets_linear_profiling_var_n.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import wandb 5 | from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset 6 | from utilities import get_results_by_tag, plot_params 7 | 8 | params = plot_params((8, 3.5)) 9 | quantiles = [0.1, 0.25, 0.5, 0.75, 0.9] 10 | 11 | output_dir = "./figures" 12 | plot_name = "deep-sets-linear-profiling-var-n" 13 | output_path = output_dir + "/" + plot_name + ".pdf" 14 | 15 | api = wandb.Api() 16 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 17 | 18 | 19 | df_deep = get_results_by_tag(api, project, "baseline_deep_sets_N", get_config=True) 20 | assert df_deep.id.nunique() == 1000 21 | 22 | df_deep = df_deep[df_deep["retcode"] == 0] 23 | plot_dict = { 24 | "train_time": { 25 | "ticks_x": [50, 100, 1000, 10000, 100000], 26 | "ticks_y": [ 20,40,60,80,100,120], 27 | "title": r"Computation time (seconds)", 28 | "loc": "upper left", 29 | "subplot": 121, 30 | }, 31 | "test_u_rel_error": { 32 | "ticks_x": [50, 100, 1000, 10000, 100000], 33 | "ticks_y": [0.0001, 0.001], 34 | "title": r"Policy errors ($\epsilon_{\mathrm{rel}}$)", 35 | "loc": "lower left", 36 | "subplot": 122, 37 | }, 38 | } 39 | 40 | 41 | for n, vals in plot_dict.items(): 42 | plt.rcParams.update(params) 43 | df = df_deep.groupby("N")[n].quantile(quantiles).unstack(level=-1) 44 | df.reset_index(inplace=True) 45 | df.columns = ["N"] + [f"quantile_{q}" for q in quantiles] 46 | ax = plt.subplot(vals["subplot"]) 47 | plt.plot(df["N"], df["quantile_0.5"], label=r"Median") 48 | plt.fill_between( 49 | df["N"], 50 | df["quantile_0.1"], 51 | df["quantile_0.9"], 52 | color="cornflowerblue", 53 | alpha=0.2, 54 | label=r"$10$th and $90$th percentiles", 55 | ) 56 | plt.fill_between( 57 | df["N"], 58 | df["quantile_0.25"], 59 | df["quantile_0.75"], 60 | color="cornflowerblue", 61 | alpha=0.6, 62 | label=r"$25$th and $75$th percentiles", 63 | ) 64 | ax.set_xscale("log") 65 | if n == "test_u_rel_error": 66 | ax.set_yscale("log") 67 | 68 | ax.xaxis.set_ticks(vals["ticks_x"]) 69 | ax.yaxis.set_ticks(vals["ticks_y"]) 70 | plt.title(vals["title"]) 71 | plt.xlabel(r"N") 72 | plt.legend(prop={"size": params["font.size"]}, loc=vals["loc"]) 73 | plt.tight_layout() 74 | 75 | plt.savefig(output_path) 76 | -------------------------------------------------------------------------------- /generate_figures/deep_sets_nonlinear_var_nu.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import wandb 5 | from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset 6 | from utilities import get_results_by_tag, plot_params 7 | 8 | params = plot_params((6, 3.5)) 9 | plt.rcParams.update(params) 10 | 11 | 12 | output_dir = "./figures" 13 | plot_name = "deep-sets-nonlinear-var-nu" 14 | output_path = output_dir + "/" + plot_name + ".pdf" 15 | 16 | api = wandb.Api() 17 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 18 | 19 | fig, ax = plt.subplots() 20 | 21 | # looping through the runs with nu variations 22 | nu = { 23 | "deep_sets_nonlinear_nu_150_one_run": 150, 24 | 
"deep_sets_nonlinear_nu_130_one_run": 130, 25 | "baseline_deep_sets_one_run": 100, 26 | } 27 | 28 | for name, n in nu.items(): 29 | df = get_results_by_tag(api, project, name, get_test_results=True) 30 | assert df.id.nunique() == 1 31 | assert df.retcode[0] == 0 32 | df = df[df["ensemble"] == 0] 33 | label = rf"$\nu = {n/100:.1f}$" 34 | plt.plot(df["t"], df["u_hat"], label=label) 35 | 36 | plt.legend(prop={"size": params["font.size"]}, loc="lower right") 37 | plt.title(r"$u(X_t)$ with $\phi($ReLU$)$: Equilibrium Path") 38 | plt.xlabel(r"Time(t)") 39 | plt.tight_layout() 40 | 41 | plt.savefig(output_path) 42 | 43 | -------------------------------------------------------------------------------- /generate_figures/generalized_mean_deep_sets_no_invariance_N_tables.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import wandb 3 | from utilities import df_to_latex, get_results_by_tag 4 | 5 | output_dir = "./figures" 6 | 7 | api = wandb.Api() 8 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 9 | 10 | N = [2, 4, 8, 32, 64, 512, 1024] 11 | df = pd.DataFrame() 12 | for n in N: 13 | df_n = get_results_by_tag( 14 | api, project, f"generalized_mean_no_invariance_N_{n}", get_config=True 15 | ) 16 | # assert df.id.nunique() == whenever we know how many 17 | df = pd.concat([df_n, df]) 18 | 19 | # success table invariance 20 | df_no_invariance_success = 100 * ( 21 | df[df["retcode"] == 0].groupby("model.N")["retcode"].count() 22 | / df.groupby("model.N")["retcode"].count() 23 | ) 24 | 25 | # rel_error table invariance 26 | df = df[df["retcode"] == 0] 27 | df_no_invariance_test_rel_error = pd.DataFrame(df.groupby("model.N")["test_rel_error"].median()) 28 | 29 | df = get_results_by_tag(api, project, "generalized_mean_deep_sets_L_N", get_config=True, max_runs = 100000) 30 | # assert df_deep.id.nunique() == whenver we know how many 31 | 32 | # success table deepsets 33 | df_deep_sets_success = 100 * ( 34 | df[df["retcode"] == 0].groupby(["model.N", "model.ml_model.L"])["retcode"].count() 35 | / df.groupby(["model.N", "model.ml_model.L"])["retcode"].count() 36 | ) 37 | df_deep_sets_success = df_deep_sets_success.unstack("model.ml_model.L") 38 | # rel_error table deepsets 39 | df = df[df["retcode"] == 0] 40 | df_deep_sets_rel_error = df.pivot_table( 41 | index="model.N", columns="model.ml_model.L", values="test_rel_error", aggfunc="median" 42 | ) 43 | 44 | # success final table 45 | df_success = pd.merge(df_deep_sets_success, df_no_invariance_success, on="model.N", how="outer") 46 | df_success.sort_index(inplace=True) 47 | df_success.rename_axis("N", axis="index", inplace=True) 48 | df_success.rename(columns=lambda x: f'{x}_success', inplace=True) 49 | # so our table is formatted right in latex 50 | 51 | # rel error final table 52 | df_test_rel_error = pd.merge( 53 | df_deep_sets_rel_error, df_no_invariance_test_rel_error, on="model.N", how="outer" 54 | ) 55 | df_test_rel_error.rename_axis("N", axis="index", inplace=True) 56 | df_test_rel_error.sort_index(inplace=True) 57 | df_test_rel_error.rename(columns=lambda x: f'{x}_rel_error', inplace=True) 58 | df_test_rel_error.rename( 59 | columns={"test_rel_error_rel_error": "no_invariance_rel_error"}, inplace=True 60 | ) # just to make utilities mapping clearer 61 | df_test_rel_error = df_test_rel_error*100 62 | with open(output_dir + "/generalized_mean_L_N_rel_error.tex", "w") as file: 63 | file.write(df_to_latex(df_test_rel_error)) 64 | 65 | with open(output_dir + 
"/generalized_mean_L_N_success.tex", "w") as file: 66 | file.write(df_to_latex(df_success)) 67 | -------------------------------------------------------------------------------- /generate_figures/identity_moments_deep_sets_linear_relative.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import wandb 5 | from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset 6 | from utilities import get_results_by_tag, plot_params 7 | 8 | params = plot_params((10, 3.5)) 9 | plt.rcParams.update(params) 10 | 11 | 12 | output_dir = "./figures" 13 | plot_name = "identity_moments_deep_sets_linear_relative" 14 | output_path = output_dir + "/" + plot_name + ".pdf" 15 | 16 | api = wandb.Api() 17 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 18 | 19 | quantiles = [0.1, 0.25, 0.5, 0.75, 0.9] 20 | tags = ["baseline_identity", "baseline_deep_moments", "baseline_deep_sets"] 21 | title_names = ["Identity", "Moments", "ReLU"] 22 | 23 | # grabbing and plotting the tagged sweeps from above 24 | for n, tag in enumerate(tags): 25 | df = get_results_by_tag(api, project, tag, get_test_results=True) 26 | assert df.id.nunique() == 100 27 | df = df[df["retcode"] >= 0] 28 | quant_result = df.groupby("t")["u_rel_error"].quantile(quantiles).unstack(level=-1) 29 | quant_result.reset_index(inplace=True) 30 | quant_result.columns = ["t"] + [f"quantile_{q}" for q in quantiles] 31 | 32 | # need to set first 33 | if n == 0: 34 | ax_identity = plt.subplot(130 + n + 1) 35 | ax_identity.set_yscale("log") 36 | ax = ax_identity 37 | else: 38 | ax = plt.subplot(130 + n + 1, sharey=ax_identity) 39 | 40 | plt.plot(quant_result["t"], quant_result["quantile_0.5"], label=r"Median") 41 | plt.fill_between( 42 | quant_result["t"], 43 | quant_result["quantile_0.1"], 44 | quant_result["quantile_0.9"], 45 | color="cornflowerblue", 46 | alpha=0.2, 47 | label=r"$10$th and $90$th percentiles", 48 | ) 49 | plt.fill_between( 50 | quant_result["t"], 51 | quant_result["quantile_0.25"], 52 | quant_result["quantile_0.75"], 53 | color="cornflowerblue", 54 | alpha=0.6, 55 | label=r"$25$th and $75$th percentiles", 56 | ) 57 | plt.title(rf"""Policy errors ($\epsilon_{{\mathrm{{rel}}}}$) with $\phi(${title_names[n]}$)$""") 58 | plt.xlabel(r"Time($t$)") 59 | plt.legend(prop={"size": params["font.size"]}, loc="lower left") 60 | plt.tight_layout() 61 | 62 | plt.savefig(output_path) 63 | -------------------------------------------------------------------------------- /generate_figures/linear_baseline_convergence_table.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import wandb 3 | from utilities import df_to_latex, get_results_by_tag 4 | 5 | output_dir = "./figures" 6 | 7 | api = wandb.Api() 8 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 9 | 10 | networks = { 11 | "Identity": {"Baseline": "baseline_identity"}, 12 | "Moments": {"Baseline": "baseline_deep_moments"}, 13 | "Deep Sets": {"Baseline": "baseline_deep_sets"}, 14 | } 15 | retcodes = [0, -1, -2, -3] 16 | df_full = pd.DataFrame() 17 | for name, dict in networks.items(): 18 | for key, tag in dict.items(): 19 | df = get_results_by_tag(api, project, tag) 20 | assert df.id.nunique() == 100 21 | df_summary = pd.DataFrame() 22 | df_summary["Description"] = [key] 23 | df_summary["Group"] = [name] 24 | for num in retcodes: 25 | df_summary[f"retcode = {num}"] = [df[df["retcode"] == 
num].count()["retcode"]] 26 | df_full = pd.concat([df_full, df_summary]) 27 | df_full = df_full.set_index(["Group", "Description"]) 28 | 29 | with open(output_dir + "/linear_baseline_convergence_table.tex", "w") as file: 30 | file.write(df_to_latex(df_full)) 31 | -------------------------------------------------------------------------------- /generate_figures/linear_baseline_theory_vs_predicted.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import wandb 5 | from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset 6 | 7 | from utilities import get_results_by_tag, plot_params 8 | 9 | params = plot_params(((6, 3.5))) 10 | 11 | output_dir = "./figures" 12 | plot_name = "linear-baseline-theory-vs-predicted" 13 | 14 | output_path = output_dir + "/" + plot_name + ".pdf" 15 | 16 | api = wandb.Api() 17 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 18 | 19 | plt.rcParams.update(params) 20 | fig, ax = plt.subplots() 21 | 22 | dfs = { 23 | "Identity": "baseline_identity_one_run", 24 | "Moments": "baseline_deep_moments_one_run", 25 | "ReLU": "baseline_deep_sets_one_run", 26 | } 27 | for name, tag in dfs.items(): 28 | df = get_results_by_tag(api, project, tag, get_test_results=True) 29 | assert df.id.nunique() == 1 # check one run 30 | assert df.retcode[0] == 0 # check that its successful 31 | df = df[df["ensemble"] == 0] 32 | dfs[name] = df 33 | if name == "Identity": 34 | plt.plot(df["t"], df["u_reference"], dashes=[10, 5, 10, 5], label=r"$u(X_t)$, LQ") 35 | plt.plot(df["t"], df["u_hat"], label=rf"$u(X_t)$, $\phi(${name}$)$") 36 | 37 | plt.legend(prop={"size": params["font.size"]}) 38 | plt.title( 39 | r"$u(X_t)$ with $\phi($Identity$)$, $\phi($Moments$)$ and $\phi($ReLU$)$ : Equilibrium Path" 40 | ) 41 | plt.tight_layout() 42 | plt.xlabel(r"Time($t$)") 43 | plt.tight_layout() 44 | 45 | axins = zoomed_inset_axes(ax, 12, loc="center") 46 | for name, df in dfs.items(): 47 | if name == "Identity": 48 | plt.plot(df["t"], df["u_reference"], dashes=[10, 5, 10, 5], label=r"$u(X_t)$, LQ") 49 | 50 | plt.plot(df["t"], df["u_hat"], label=rf"$u(X_t)$, $\phi(${name}$)$") 51 | 52 | x1, x2, y1, y2 = 42.5, 44.5, 0.03415, 0.03435 53 | axins.set_xlim(x1, x2) 54 | axins.set_ylim(y1, y2) 55 | axins.xaxis.tick_top() 56 | plt.xticks(fontsize=5) 57 | plt.yticks(fontsize=5) 58 | mark_inset(ax, axins, loc1=2, loc2=4, linewidth="0.7", ls="--", ec="0.5") 59 | 60 | plt.savefig(output_path) 61 | -------------------------------------------------------------------------------- /generate_figures/linear_overfit_table.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import wandb 3 | from utilities import df_to_latex, get_results_by_tag 4 | 5 | output_dir = "./figures" 6 | api = wandb.Api() 7 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 8 | 9 | df_results = pd.DataFrame() 10 | df = get_results_by_tag(api, project, "deep_sets_overfit", get_config=True) 11 | assert df.id.nunique() == 400 12 | df = df.rename(columns={"model.train_subsample_trajectories": "Number of data points"}) 13 | df_results = pd.DataFrame() 14 | df_results["success"] = df[df["retcode"] == 0].groupby("Number of data points").count()["id"] 15 | df_results[["train_loss", "test_loss"]] = ( 16 | df[df["retcode"] == 0].groupby("Number of data points")[["train_loss", "test_loss"]].median() 17 | ) 18 | 
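# Median relative policy error across the successful runs, scaled to percent for the LaTeX table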
df_results["test_u_rel_error"] = ( 19 | df[df["retcode"] == 0].groupby("Number of data points")["test_u_rel_error"].median() * 100 20 | ) 21 | 22 | with open(output_dir + "/linear_overfit_table.tex", "w") as file: 23 | file.write(df_to_latex(df_results)) 24 | -------------------------------------------------------------------------------- /generate_figures/linear_performance_table.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import wandb 3 | from utilities import df_to_latex, get_results_by_tag 4 | 5 | output_dir = "./figures" 6 | 7 | api = wandb.Api() 8 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 9 | 10 | networks = { 11 | "Identity": {"Baseline": "baseline_identity"}, 12 | "Moments": { 13 | "Baseline: Moments (1,2,3,4)": "baseline_deep_moments", 14 | "Moments (1,2)": "L_2_deep_moments", 15 | "Very Shallow (1 layer)": "very_shallow_1_layer_deep_moments", 16 | }, 17 | "Deep Sets": { 18 | "Baseline: L= 4": "baseline_deep_sets", 19 | "L = 2": "L_8_deep_sets", 20 | "L = 16": "L_16_deep_sets", 21 | r"$\textup{Deep}~(\phi:\textup{2 layers}, \rho:\textup{4 layers})$": "deep_2_4_deep_sets", 22 | r"$\textup{Shallow}~(\phi:\textup{1 layer}, \rho:\textup{2 layers})$": "shallow_1_2_deep_sets", 23 | }, 24 | } 25 | 26 | cols = ["train_time", "train_loss", "val_loss", "test_loss"] 27 | df_full = pd.DataFrame() 28 | for name, dict in networks.items(): 29 | for key, tag in dict.items(): 30 | df = get_results_by_tag(api, project, tag) 31 | assert df.id.nunique() == 100 32 | df_success = df[df["retcode"] == 0] 33 | df_summary = pd.DataFrame() 34 | df_summary["Description"] = [key] 35 | df_summary["Group"] = [name] 36 | df_summary["success"] = [df_success.count()["retcode"]] 37 | df_summary["trainable_parameters"] = [df["trainable_parameters"].quantile(0.5) / 1000] 38 | for col in cols: 39 | df_summary[col] = [df_success[col].quantile(0.5)] 40 | df_summary["test_u_rel_error"] = [df_success["test_u_rel_error"].quantile(0.5) * 100] 41 | 42 | df_full = pd.concat([df_full, df_summary]) 43 | df_full = df_full.set_index(["Group", "Description"]) 44 | 45 | with open(output_dir + "/linear_performance_table.tex", "w") as file: 46 | file.write(df_to_latex(df_full)) 47 | -------------------------------------------------------------------------------- /generate_figures/moments_deep_sets_nonlinear_residuals.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import wandb 5 | from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset 6 | from utilities import get_results_by_tag, plot_params 7 | 8 | params = plot_params((8, 3.5)) 9 | plt.rcParams.update(params) 10 | 11 | output_dir = "./figures" 12 | plot_name = "moments-deep-sets-nonlinear-residual" 13 | output_path = output_dir + "/" + plot_name + ".pdf" 14 | 15 | api = wandb.Api() 16 | 17 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 18 | 19 | # Preparing the results 20 | quantiles = [0.1, 0.25, 0.5, 0.75, 0.9] 21 | tags = ["baseline_nonlinear_deep_moments", "baseline_nonlinear_deep_sets"] 22 | title_names = ["Moments", "ReLU"] 23 | for n, tag in enumerate(tags): 24 | df = get_results_by_tag(api, project, tag, get_test_results=True) 25 | assert(df.id.nunique() == 100) 26 | df = df[df["retcode"] >= 0] 27 | df["residual_squared"] = df["residual"] ** 2 28 | quant_result = 
df.groupby("t")["residual_squared"].quantile(quantiles).unstack(level=-1) 29 | quant_result.reset_index(inplace=True) 30 | quant_result.columns = ["t"] + [f"quantile_{q}" for q in quantiles] 31 | 32 | if n == 0: 33 | ax_identity = plt.subplot(120 + n + 1) 34 | ax_identity.set_yscale("log") 35 | ax = ax_identity 36 | else: 37 | ax = plt.subplot(120 + n + 1, sharey=ax_identity) 38 | 39 | 40 | plt.plot(quant_result["t"], quant_result["quantile_0.5"], label=r"Median") 41 | plt.fill_between( 42 | quant_result["t"], 43 | quant_result["quantile_0.1"], 44 | quant_result["quantile_0.9"], 45 | color="cornflowerblue", 46 | alpha=0.2, 47 | label=r"$10$th and $90$th percentiles", 48 | ) 49 | plt.fill_between( 50 | quant_result["t"], 51 | quant_result["quantile_0.25"], 52 | quant_result["quantile_0.75"], 53 | color="cornflowerblue", 54 | alpha=0.6, 55 | label=r"$25$th and $75$th percentiles", 56 | ) 57 | plt.title(rf"""Euler residuals squared ($\varepsilon^2$) with $\phi(${title_names[n]}$)$""") 58 | plt.xlabel(r"Time($t$)") 59 | plt.legend(prop={"size": params["font.size"]}, loc="lower right") 60 | plt.tight_layout() 61 | 62 | plt.savefig(output_path) 63 | -------------------------------------------------------------------------------- /generate_figures/nonlinear_overfit_table.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import wandb 3 | from utilities import df_to_latex, get_results_by_tag 4 | 5 | output_dir = "./figures" 6 | 7 | api = wandb.Api() 8 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 9 | 10 | df_results = pd.DataFrame() 11 | 12 | df = get_results_by_tag(api, project, "deep_sets_nonlinear_overfit", get_config=True) 13 | assert df.id.nunique() == 500 14 | df = df.rename(columns={"model.train_subsample_trajectories": "Number of data points"}) 15 | df_results["success"] = df[df["retcode"] == 0].groupby("Number of data points").count()["id"] 16 | df_results[["train_loss", "test_loss"]] = ( 17 | df[df["retcode"] == 0].groupby("Number of data points")[["train_loss", "test_loss"]].median() 18 | ) 19 | 20 | with open(output_dir + "/nonlinear_overfit_table.tex", "w") as file: 21 | file.write(df_to_latex(df_results)) 22 | -------------------------------------------------------------------------------- /generate_figures/testing_utilities.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import wandb 3 | from utilities import get_results_by_tag 4 | 5 | api = wandb.Api() 6 | project = "highdimensionaleconlab/symmetry_dynamic_programming" 7 | 8 | 9 | df = get_results_by_tag(api, project,"deep_sets_nonlinear_nu_150_one_run") 10 | assert(df.id.nunique() == 1) # i.e, one run 11 | assert(df.retcode[0] == 0) # access the retcode of the first row, which is the only one here 12 | 13 | dict_res = df.to_dict(orient='index')[0] # as a dictionary if only one row 14 | assert(dict_res["retcode"] == 0) # dict don't support dict_res.retcode 15 | 16 | # only get a subset of stuff. 
Summary is by default, config and test_results are optional 17 | df = get_results_by_tag(api, project,"baseline_deep_moments", max_runs = 2, get_summary = False, get_config = False) 18 | assert(df.id.nunique() == 2) 19 | assert(len(df.columns) == 2) # id and name always there 20 | 21 | df = get_results_by_tag(api, project,"baseline_deep_moments", max_runs = 40) 22 | assert(df.id.nunique() == 40) 23 | df = df[df.retcode==0] # only keep successful runs 24 | print(df.val_loss.mean()) # conditional on success 25 | 26 | df = get_results_by_tag(api, project,"deep_sets_nonlinear_nu_150_one_run", get_test_results=True, get_summary = False) 27 | assert(df.id.nunique() == 1) # only one artifact 28 | assert(df.t.nunique() == 64) 29 | df.drop(columns=["id", "name"], inplace=True) # given single run could drop to clean up clutter, but not really 30 | df.set_index(["ensemble", "t"], inplace=True) # can index as we see fit. 31 | 32 | # check the config as well 33 | df = get_results_by_tag(api, project,"deep_sets_nonlinear_nu_150_one_run", get_summary = False, get_config = True, get_test_results=True) 34 | 35 | #This is a big one and includes summary statistics. Some examples below 36 | df = get_results_by_tag(api, project,"baseline_deep_moments", get_test_results=True, max_runs = 10) 37 | assert(df.id.nunique() == 10) 38 | assert(df.t.nunique() == 64) 39 | df.set_index(["id", "ensemble", "t"], inplace=True) # can index as we see fit. 40 | df = df[df.retcode==0] #e.g. filter only successful runs 41 | df.groupby("t").mean(numeric_only=True) # groupings, etc. 42 | df.groupby("t").quantile([0.95, 0.5, 0.05],numeric_only=True).u_rel_error # for example... 43 | df.groupby("ensemble").mean(numeric_only=True).u_rel_error # across all t and all runs... -------------------------------------------------------------------------------- /generate_figures/utilities.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | 4 | def plot_params(figsize, fontsize=10, ticksize=14): 5 | params = { 6 | "text.usetex": True, 7 | "font.family": "serif", 8 | "figure.figsize": figsize, 9 | "figure.dpi": 80, 10 | "figure.edgecolor": "k", 11 | "font.size": fontsize, 12 | "axes.labelsize": fontsize, 13 | "axes.titlesize": fontsize, 14 | "xtick.labelsize": ticksize, 15 | "ytick.labelsize": ticksize, 16 | } 17 | return params 18 | 19 | 20 | def get_results_by_tag( 21 | api, 22 | project, 23 | tag, 24 | get_summary=True, 25 | get_config=False, 26 | get_test_results=False, 27 | max_runs=1000, 28 | drop_summary_cols=[ 29 | "test_results", 30 | "_wandb", 31 | ], # causes trouble when merging the test_results dataframe 32 | drop_config_cols=[ 33 | "trainer.logger.tags", 34 | ], 35 | ): 36 | runs = api.runs(project, filters={"tags": tag}) 37 | df = pd.DataFrame() # will concatenate 38 | 39 | for i in range(min(len(runs), max_runs)): 40 | run = runs[i] 41 | id = run.id 42 | cols = {"id": id, "name": run.name} 43 | if get_summary: 44 | # dropping details which don't fit in dataframes well 45 | cols.update(dict(run.summary)) 46 | for col_name in drop_summary_cols: 47 | if cols.get(col_name) is not None: 48 | del cols[col_name] 49 | if get_config: 50 | cols.update(dict(run.config)) 51 | for col_name in drop_config_cols: 52 | if cols.get(col_name) is not None: 53 | del cols[col_name] 54 | 55 | # Conditionally get the test results or just directly add the new values for the columns 56 | if get_test_results: 57 | reference_path = f"{project}/run-{id}-test_results:v0" 58 | test_results = 
api.artifact(str(reference_path)).get("test_results") 59 | run_data = pd.DataFrame(data=test_results.data, columns=test_results.columns) 60 | 61 | # Add columns across everything for dropped columns. Repetition but allows for indexing later 62 | for k, v in cols.items(): 63 | run_data[k] = v 64 | else: 65 | # Create a data frame with one row from the columns 66 | run_data = pd.DataFrame({k: [v] for k, v in cols.items()}) 67 | 68 | df = pd.concat([df, run_data], ignore_index=True) 69 | 70 | return df 71 | 72 | 73 | def df_to_latex(df): 74 | formatters = [] 75 | 76 | potential_cols = { 77 | "success": {"name": r"\shortstack{Success \\(\%)}", "format": "{:0.0f}\%".format}, 78 | "train_time": {"name": r"\shortstack{Time \\ (s)}", "format": "{:0.0f}".format}, 79 | "trainable_parameters": { 80 | "name": r"\shortstack{Parameters \\ (Thousands, K)}", 81 | "format": "{:.1f}".format, 82 | }, 83 | "train_loss": { 84 | "name": r"\shortstack{Train MSE \\ ($\varepsilon$)}", 85 | "format": "{:.1e}".format, 86 | }, 87 | "val_loss": {"name": r"\shortstack{Val MSE \\ ($\varepsilon$)}", "format": "{:.1e}".format}, 88 | "test_loss": { 89 | "name": r"\shortstack{Test MSE \\ ($\varepsilon$)}", 90 | "format": "{:.1e}".format, 91 | }, 92 | "test_u_rel_error": { 93 | "name": r"\shortstack{Policy Error\\ ($\epsilon_{\mathrm{rel}}$)}", 94 | "format": "{:.2f}\%".format, 95 | }, 96 | "retcode = 0": {"name": r"\shortstack{Success \\(\%)}", "format": "{:0.0f}\%".format}, 97 | "retcode = -2": { 98 | "name": r"\shortstack{Violation of transversality\\ (\%)}", 99 | "format": "{:0.0f}\%".format, 100 | }, 101 | "retcode = -1": { 102 | "name": r"\shortstack{Early stopping failure \\ (\%)}", 103 | "format": "{:0.0f}\%".format, 104 | }, 105 | "retcode = -3": {"name": r"\shortstack{Overfitting \\ (\%)}", "format": "{:0.0f}\%".format}, 106 | "no_invariance_rel_error": { 107 | "name": r"\shortstack{No Invariance}", 108 | "format": "{:.2f}\%".format, 109 | }, 110 | "retcode_success": {"name": r"\shortstack{No Invariance}", "format": "{:0.0f}\%".format}, 111 | } 112 | # adding the columns for success and rel_error table 113 | L = [1, 2, 4, 8, 16] 114 | for num in L: 115 | potential_cols[f"{num}_success"] = { 116 | "name": rf"\shortstack{{L = {num}}}", 117 | "format": "{:0.0f}\%".format, 118 | } 119 | potential_cols[f"{num}_rel_error"] = { 120 | "name": rf"\shortstack{{L = {num} }}", 121 | "format": "{:.2f}\%".format, 122 | } 123 | 124 | if isinstance(df.index, pd.MultiIndex): 125 | column_format = "ll" 126 | else: 127 | column_format = "c" # I like number column centered could also make l 128 | 129 | for col in df.columns: 130 | if col in potential_cols.keys(): 131 | column_format += "c" # center the columns contents depending on number of columns 132 | df = df.rename(columns={col: potential_cols[col]["name"]}) 133 | formatters.append(potential_cols[col]["format"]) 134 | else: 135 | df = df.drop(columns=[col]) 136 | 137 | latex_str = df.to_latex( 138 | multicolumn=True, 139 | multirow=True, 140 | formatters=formatters, 141 | longtable=False, 142 | sparsify=True, 143 | escape=False, 144 | column_format=column_format, 145 | na_rep="-", 146 | ) 147 | # if isinstance(df.index, pd.MultiIndex): 148 | # latex_str = latex_str.replace('\\bottomrule\n', '') #removes double line when multi-index 149 | 150 | return latex_str 151 | -------------------------------------------------------------------------------- /hpo_sweeps/generalized_mean_deep_sets_sweep.yaml: -------------------------------------------------------------------------------- 1 | 
program: generalized_mean.py 2 | project: generalized_mean_examples 3 | description: HPO for train_time to early stopping criteria on val_rel_error 4 | method: bayes 5 | metric: 6 | # See the investment_euler.py log_and_save 7 | name: hpo_objective 8 | goal: minimize 9 | # early_terminate: not necessary here given the trainer.max_time 10 | parameters: 11 | # Fix parameters here: 12 | trainer.max_epochs: 13 | value: 2000 14 | trainer.min_epochs: 15 | value: 0 16 | trainer.logger.offline: 17 | value: false # log online for W&B optimization 18 | trainer.max_time: 19 | value: 00:00:05:00 # these tests shouldn't take more than 2-3 minutes 20 | model.hpo_objective_name: 21 | value: val_rel_error # sets the objective. Only logs if monitor < stopping threshold 22 | model.N: 23 | value: 128 # the dimensionality 24 | model.p: 25 | value: 1.5 # the generalized mean power 26 | model.a_max: 27 | value: 2.0 28 | model.num_train_points: 29 | value: 10 30 | model.num_val_points: 31 | value: 50 32 | model.num_test_points: 33 | value: 200 34 | trainer.callbacks.stopping_threshold: 35 | value: 1e-6 36 | 37 | # Provide distributions and variations for optimizer 38 | optimizer.lr: 39 | #value: 0.005 40 | #values: [0.001, 0.0001, 0.0001] 41 | distribution: uniform 42 | min: 0.0001 43 | max: 0.01 44 | 45 | model.ml_model.L: 46 | values: [1,2,3] 47 | model.ml_model.phi_layers: 48 | values: [1,2] 49 | model.ml_model.phi_hidden_dim: 50 | values: [128, 256] 51 | model.ml_model.rho_layers: 52 | values: [1,2,3] 53 | model.ml_model.rho_hidden_dim: 54 | value: 128 -------------------------------------------------------------------------------- /hpo_sweeps/generalized_mean_no_invaritant_sweep.yaml: -------------------------------------------------------------------------------- 1 | program: generalized_mean.py 2 | project: generalized_mean_examples 3 | description: HPO for train_time to find the best val_rel_error 4 | method: bayes 5 | metric: 6 | # See the investment_euler.py log_and_save 7 | name: hpo_objective 8 | goal: minimize 9 | # early_terminate: not necessary here given the trainer.max_time 10 | parameters: 11 | # Fix parameters here: 12 | trainer.max_epochs: 13 | value: 2000 14 | trainer.min_epochs: 15 | value: 0 16 | trainer.logger.offline: 17 | value: false # log online for W&B optimization 18 | trainer.max_time: 19 | value: 00:00:5:00 20 | model.hpo_objective_name: 21 | value: val_rel_error 22 | model.N: 23 | value: 128 # the dimensionality 24 | model.p: 25 | value: 1.5 # the generalized mean power 26 | model.a_max: 27 | value: 2.0 28 | model.num_train_points: 29 | value: 10 30 | model.num_val_points: 31 | value: 50 32 | model.num_test_points: 33 | value: 200 34 | model.always_log_hpo_objective: 35 | value: true 36 | trainer.callbacks.stopping_threshold: 37 | value: 1e-6 38 | trainer.callbacks.monitor: 39 | value: train_loss 40 | trainer.callbacks.divergence_threshold: 41 | value: 1e6 42 | # Provide distributions and variations for optimizer 43 | optimizer.lr: 44 | # values: [0.005, 0.05] 45 | # values: [0.001, 0.0001, 0.0001] 46 | distribution: uniform 47 | min: 0.0001 48 | max: 0.01 49 | 50 | # Using a basic 51 | model.ml_model.class_path: 52 | value: econ_layers.layers.FlexibleSequential 53 | model.ml_model.n_in: 54 | value: 128 # must match the "N" parameter in this case 55 | model.ml_model.n_out: 56 | value: 1 57 | model.ml_model.layers: 58 | values: [1,2,3,4] 59 | model.ml_model.hidden_dim: 60 | values: [64, 128, 256] 61 | model.ml_model.hidden_bias: 62 | values: [true,false] 
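# To run this sweep with the standard W&B workflow (the first command prints a sweep id):
# wandb sweep hpo_sweeps/generalized_mean_no_invaritant_sweep.yaml
# wandb agent <entity>/<project>/<sweep_id>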
-------------------------------------------------------------------------------- /hpo_sweeps/train_time_sweep_deep_moments.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: train_time_sweep_deep_moments 3 | description: HPO for train_time to early stopping criteria on val_loss of the baseline_deep_moments 4 | method: bayes 5 | metric: 6 | # See the investment_euler.py log_and_save 7 | name: hpo_objective 8 | goal: minimize 9 | # early_terminate: not necessary here given the trainer.max_time 10 | parameters: 11 | # Fix parameters here: 12 | trainer.logger.offline: 13 | value: false # log online for W&B optimization 14 | trainer.max_time: 15 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 16 | model.hpo_objective_name: 17 | value: train_time 18 | model.always_log_hpo_objective: 19 | value: true 20 | # Stopping criteria 21 | trainer.callbacks.monitor: 22 | value: val_loss 23 | trainer.callbacks.stopping_threshold: 24 | value: 1.0e-6 25 | 26 | # Provide distributions and variations for optimizer 27 | model.N: 28 | value: 128 29 | optimizer.lr: 30 | min: 1.0e-4 31 | max: 2.0e-3 32 | model.ml_model.class_path: 33 | value: econ_layers.layers.DeepSetMoments 34 | model.ml_model.init_args.n_in: 35 | value: 1 36 | model.ml_model.init_args.n_out: 37 | value: 1 38 | model.ml_model.init_args.L: 39 | values: [1,2,3,4] 40 | model.ml_model.init_args.rho_layers: 41 | values: [1,2,3,4] 42 | model.ml_model.init_args.rho_hidden_dim: 43 | values: [64, 128, 256] 44 | model.ml_model.init_args.rho_hidden_bias: 45 | values: [true,false] 46 | model.ml_model.init_args.rho_last_bias: 47 | value: true 48 | model.ml_model.init_args.rho_activator.class_path: 49 | value: torch.nn.ReLU -------------------------------------------------------------------------------- /hpo_sweeps/train_time_sweep_deep_sets.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: train_time_sweep_deep_sets 3 | description: HPO for train_time to early stopping criteria on val_loss 4 | method: bayes 5 | metric: 6 | # See the investment_euler.py log_and_save 7 | name: hpo_objective 8 | goal: minimize 9 | # early_terminate: not necessary here given the trainer.max_time 10 | parameters: 11 | # Fix parameters here: 12 | trainer.logger.offline: 13 | value: false # log online for W&B optimization 14 | trainer.max_time: 15 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 16 | model.hpo_objective_name: 17 | value: train_time # sets the objective. 
Only logs if val_loss < stopping threshold
18 | 
19 | # Provide distributions and variations for optimizer
20 | optimizer.lr:
21 | values: [0.001, 0.0001]
22 | model.phi.layers:
23 | values: [1, 2]
24 | model.rho.layers:
25 | values: [3, 4]
26 | model.phi.hidden_dim:
27 | values: [64, 128, 256]
28 | model.rho.hidden_dim:
29 | values: [64, 128, 256]
30 | model.train_trajectories:
31 | values: [2, 4, 8, 16]
32 | model.batch_size:
33 | values: [16, 32]
34 | 
-------------------------------------------------------------------------------- /hpo_sweeps/train_time_sweep_identity.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: train_time_sweep_identity
3 | description: HPO for train_time to early stopping criteria on val_loss of the baseline_identity
4 | method: bayes
5 | metric:
6 | # See the investment_euler.py log_and_save
7 | name: hpo_objective
8 | goal: minimize
9 | # early_terminate: not necessary here given the trainer.max_time
10 | parameters:
11 | # Fix parameters here:
12 | trainer.logger.offline:
13 | value: false # log online for W&B optimization
14 | trainer.max_time:
15 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes
16 | model.hpo_objective_name:
17 | value: train_time
18 | model.always_log_hpo_objective:
19 | value: true
20 | # Stopping criteria
21 | trainer.callbacks.monitor:
22 | value: val_loss
23 | trainer.callbacks.stopping_threshold:
24 | value: 1.0e-6
25 | 
26 | # Provide distributions and variations for optimizer
27 | model.N:
28 | value: 128
29 | optimizer.lr:
30 | min: 1.0e-4
31 | max: 2.0e-3
32 | model.ml_model.class_path:
33 | value: econ_layers.layers.DeepSetMoments
34 | model.ml_model.init_args.n_in:
35 | value: 1
36 | model.ml_model.init_args.n_out:
37 | value: 1
38 | model.ml_model.init_args.L:
39 | values: [1]
40 | model.ml_model.init_args.rho_layers:
41 | values: [1,2,3,4]
42 | model.ml_model.init_args.rho_hidden_dim:
43 | values: [64, 128, 256]
44 | model.ml_model.init_args.rho_hidden_bias:
45 | values: [true,false]
46 | model.ml_model.init_args.rho_last_bias:
47 | value: true
48 | model.ml_model.init_args.rho_activator.class_path:
49 | value: torch.nn.ReLU
-------------------------------------------------------------------------------- /hpo_sweeps/train_time_sweep_nonlinear_deep_moments.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: train_time_sweep_nonlinear_deep_moments
3 | description: HPO for train_time to early stopping criteria on val_loss of the baseline_nonlinear_deep_moments
4 | method: bayes
5 | metric:
6 | # See the investment_euler.py log_and_save
7 | name: hpo_objective
8 | goal: minimize
9 | # early_terminate: not necessary here given the trainer.max_time
10 | parameters:
11 | # Fix parameters here:
12 | trainer.logger.offline:
13 | value: false # log online for W&B optimization
14 | trainer.max_time:
15 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes
16 | model.hpo_objective_name:
17 | value: train_time
18 | # Stopping criteria
19 | trainer.callbacks.monitor:
20 | value: val_loss
21 | trainer.callbacks.stopping_threshold:
22 | value: 1.0e-6
23 | 
24 | # Provide distributions and variations for optimizer
25 | model.N:
26 | value: 128
27 | model.nu:
28 | value: 1.5
29 | optimizer.lr:
30 | min: 5.0e-5
31 | max: 2.0e-3
32 | lr_scheduler.step_size:
33 | values: [10, 20, 50]
34 | lr_scheduler.gamma:
35 | values: [0.9, 0.8, 0.5]
36 | model.reset_trajectories_frequency:
37 | values: [0, 2, 5, 10, 20]
38 | 
39 | # Deep moments variations
40 | model.ml_model.class_path:
41 | value: econ_layers.layers.DeepSetMoments
42 | model.ml_model.init_args.n_in:
43 | value: 1
44 | model.ml_model.init_args.n_out:
45 | value: 1
46 | model.ml_model.init_args.rho_hidden_bias:
47 | value: true
48 | model.ml_model.init_args.rho_last_bias:
49 | value: true
50 | model.ml_model.init_args.rho_activator.class_path:
51 | value: torch.nn.ReLU
52 | model.ml_model.init_args.L:
53 | values: [2,3,4]
54 | model.ml_model.init_args.rho_layers:
55 | values: [3,4]
56 | model.ml_model.init_args.rho_hidden_dim:
57 | values: [64,128]
58 | model.train_trajectories:
59 | values: [16, 32]
60 | model.val_trajectories:
61 | values: [8, 16]
62 | 
63 | 
64 | 
65 | 
-------------------------------------------------------------------------------- /hpo_sweeps/train_time_sweep_nonlinear_deep_sets.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: train_time_sweep_nonlinear_deep_sets
3 | description: HPO for train_time to early stopping criteria on val_loss of the baseline_nonlinear_deep_sets
4 | method: bayes
5 | metric:
6 | # See the investment_euler.py log_and_save
7 | name: hpo_objective
8 | goal: minimize
9 | # early_terminate: not necessary here given the trainer.max_time
10 | parameters:
11 | # Fix parameters here:
12 | trainer.logger.offline:
13 | value: false # log online for W&B optimization
14 | trainer.max_time:
15 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes
16 | model.hpo_objective_name:
17 | value: train_time
18 | # Stopping criteria
19 | trainer.callbacks.monitor:
20 | value: val_loss
21 | trainer.callbacks.stopping_threshold:
22 | value: 1.0e-6
23 | 
24 | # Provide distributions and variations for optimizer
25 | model.N:
26 | value: 128
27 | model.nu:
28 | value: 1.5
29 | optimizer.lr:
30 | min: 5.0e-5
31 | max: 2.0e-3
32 | lr_scheduler.step_size:
33 | values: [10, 20, 50]
34 | lr_scheduler.gamma:
35 | values: [0.9, 0.8, 0.5]
36 | model.reset_trajectories_frequency:
37 | values: [0, 1, 2, 5, 10, 20]
38 | model.ml_model.init_args.L:
39 | values: [1,2,3,4]
40 | model.ml_model.init_args.rho_layers:
41 | values: [3,4]
42 | model.ml_model.init_args.rho_hidden_dim:
43 | values: [128, 256]
44 | model.ml_model.init_args.phi_layers:
45 | values: [1,2,3]
46 | model.ml_model.init_args.phi_hidden_dim:
47 | values: [128, 256]
48 | model.train_trajectories:
49 | values: [16, 32]
50 | model.val_trajectories:
51 | values: [8, 16]
-------------------------------------------------------------------------------- /hpo_sweeps/val_loss_over_param_sweep_main.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: val_loss_over_param_sweep_main
3 | description: HPO for val_loss to early stopping criteria on val_loss of the baseline_linear_deep_sets
4 | method: bayes
5 | metric:
6 | # See the investment_euler.py log_and_save
7 | name: hpo_objective
8 | goal: minimize
9 | # early_terminate: not necessary here given the trainer.max_time
10 | 
11 | parameters:
12 | # Fix parameters here:
13 | trainer.max_epochs:
14 | value: 400
15 | 
16 | trainer.logger.offline:
17 | value: false # log online for W&B optimization
18 | trainer.max_time:
19 | value: 00:00:05:00
20 | model.hpo_objective_name:
21 | value: val_loss
22 | # Stopping criteria
23 | trainer.callbacks.monitor:
24 | value: val_loss
25 | 
trainer.callbacks.stopping_threshold: 26 | value: 1.0e-5 27 | 28 | # Provide distributions and variations for optimizer 29 | model.N: 30 | value: 128 31 | optimizer.lr: 32 | min: 5.0e-5 33 | max: 5.0e-3 34 | lr_scheduler.step_size: 35 | values: [5, 10, 20, 50] 36 | lr_scheduler.gamma: 37 | value: 0.8 38 | model.train_subsample_trajectories: 39 | value: 5 40 | model.train_trajectories: 41 | value: 1 42 | model.batch_size: 43 | value: 2 -------------------------------------------------------------------------------- /images/hpo_output_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HighDimensionalEconLab/symmetry_dynamic_programming/2ba53b40b681a8beab68ad946ddc71b45d9f36af/images/hpo_output_1.png -------------------------------------------------------------------------------- /images/hpo_output_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/HighDimensionalEconLab/symmetry_dynamic_programming/2ba53b40b681a8beab68ad946ddc71b45d9f36af/images/hpo_output_2.png -------------------------------------------------------------------------------- /investment_euler.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import torch 3 | import pytorch_lightning as pl 4 | import yaml 5 | import math 6 | import numpy as np 7 | import scipy 8 | import wandb 9 | import timeit 10 | import quantecon 11 | import econ_layers 12 | import scipy.optimize 13 | from torch.utils.data import DataLoader 14 | from pytorch_lightning.cli import LightningCLI 15 | from pathlib import Path 16 | from pytorch_lightning.loggers import WandbLogger 17 | import sys 18 | 19 | 20 | class InvestmentEuler(pl.LightningModule): 21 | def __init__( 22 | self, 23 | N: int, 24 | alpha_0: float, 25 | alpha_1: float, 26 | beta: float, 27 | gamma: float, 28 | sigma: float, 29 | delta: float, 30 | eta: float, 31 | nu: float, 32 | # some general configuration 33 | verbose: bool, 34 | hpo_objective_name: str, 35 | always_log_hpo_objective: bool, 36 | print_metrics: bool, 37 | save_metrics: bool, 38 | save_test_results: bool, 39 | test_seed: int, 40 | X_0_seed: int, 41 | check_transversality: bool, 42 | test_loss_success_threshold: float, 43 | transversality_X_mean_min: float, 44 | transversality_X_mean_max: float, 45 | transversality_u_rel_error: float, 46 | # parameters for method 47 | omega_quadrature_nodes: int, 48 | normalize_shock_vector: bool, 49 | train_trajectories: int, 50 | val_trajectories: int, 51 | test_trajectories: int, 52 | train_subsample_trajectories: int, 53 | reset_trajectories_frequency: int, 54 | batch_size: int, 55 | shuffle_training: bool, 56 | T: int, 57 | X_0_loc: float, 58 | X_0_scale: float, 59 | # settings for deep learning approximation 60 | ml_model: torch.nn.Module, 61 | ): 62 | super().__init__() 63 | self.save_hyperparameters(ignore=["ml_model"]) # access with self.hparams.alpha, etc. 
64 | self.ml_model = ml_model 65 | 66 | # Calculates the LQ solution imposing symmetry by hand in the optimization process 67 | def investment_equilibrium_LQ(self): 68 | B = np.array([[0.0], [1.0], [0.0]]) 69 | C = np.array( 70 | [ 71 | [0.0, 0.0], 72 | [self.hparams.eta, self.hparams.sigma], 73 | [self.hparams.eta, self.hparams.sigma], 74 | ] 75 | ) 76 | R = np.array( 77 | [ 78 | [0.0, -self.hparams.alpha_0 / 2, 0.0], 79 | [-self.hparams.alpha_0 / 2, 0.0, self.hparams.alpha_1 / 2], 80 | [0.0, self.hparams.alpha_1 / 2, 0.0], 81 | ] 82 | ) # Equation (24) 83 | Q = self.hparams.gamma / 2 84 | 85 | # calculating A_hat 86 | def F_root(H): 87 | A = np.array( 88 | [ 89 | [1.0, 0.0, 0.0], 90 | [0.0, 1.0 - self.hparams.delta, 0.0], 91 | [H[0], 0.0, 1.0 - self.hparams.delta + H[1]], 92 | ] 93 | ) # Equation (21) 94 | lq = quantecon.LQ(Q, R, A, B, C, beta=self.hparams.beta) 95 | P, F, d = lq.stationary_values() 96 | return np.array([F[0][0], F[0][1], F[0][2]]) - np.array([-H[0], 0.0, -H[1]]) 97 | 98 | H_opt = scipy.optimize.root(F_root, [80.0, -0.2], method="lm", options={"xtol": 1.49012e-8}) 99 | if not (H_opt.success): 100 | sys.exit("H optimization failed to converge.") 101 | return H_opt.x[0], H_opt.x[1] 102 | 103 | # Used for evaluating u(X) given the current network 104 | def forward(self, X): 105 | return self.ml_model(X) # deep sets/etc. 106 | 107 | # model residuals given a set of states 108 | def model_residuals(self, X): 109 | u_X = self(X) 110 | 111 | # equation (12) and (13) 112 | X_primes = torch.stack( 113 | [ 114 | u_X 115 | + (1 - self.hparams.delta) * X 116 | + self.hparams.sigma * self.expectation_shock_vector 117 | + self.hparams.eta * node 118 | for node in self.quadrature_nodes 119 | ] 120 | ).type_as(X) 121 | 122 | # p(X') calculation 123 | p_primes = self.hparams.alpha_0 - self.hparams.alpha_1 * X_primes.pow(self.hparams.nu).mean( 124 | 2 125 | ) 126 | 127 | # Expectation using quadrature over aggregate shock 128 | Ep = (p_primes.T @ self.quadrature_weights).type_as(X).reshape(-1, 1) 129 | Eu = ( 130 | ( 131 | torch.stack(tuple(self(X_primes[i]) for i in range(len(self.quadrature_nodes)))) 132 | .squeeze(2) 133 | .T 134 | @ self.quadrature_weights 135 | ) 136 | .type_as(X) 137 | .reshape(-1, 1) 138 | ) 139 | 140 | # Euler equation itself 141 | residuals = self.hparams.gamma * u_X - self.hparams.beta * ( 142 | Ep + self.hparams.gamma * (1 - self.hparams.delta) * Eu 143 | ) # equation (14) 144 | return residuals 145 | 146 | def training_step(self, X, batch_idx): 147 | residuals = self.model_residuals(X) 148 | loss = (residuals**2).sum() / len(residuals) 149 | self.log("train_loss", loss, prog_bar=True) 150 | return loss 151 | 152 | def validation_step(self, X, batch_idx): 153 | residuals = self.model_residuals(X) 154 | loss = (residuals**2).sum() / len(residuals) 155 | self.log("val_loss", loss, prog_bar=True) 156 | 157 | # calculate policy error relative to analytic if linear 158 | if self.hparams.nu == 1: 159 | u_ref = self.H_0 + self.H_1 * X.mean(1, keepdim=True) # closed form if linear 160 | u_rel_error = torch.mean(torch.abs(self(X) - u_ref) / torch.abs(u_ref)) 161 | self.log("val_u_rel_error", u_rel_error, prog_bar=True) 162 | u_abs_error = torch.mean(torch.abs(self(X) - u_ref)) 163 | self.log("val_u_abs_error", u_abs_error, prog_bar=True) 164 | 165 | # Data and simulation calculations 166 | @torch.no_grad() 167 | def simulate(self, X_0, num_trajectories, f=None, w=None, omega=None, generator=None): 168 | N = self.hparams.N 169 | T = self.hparams.T 170 | 171 | # Simulates 
random numbers if not provided.
172 | if f is None:
173 | f = self.forward # use the self.forward(..) by default
174 | if w is None:
175 | w = torch.randn(
176 | num_trajectories,
177 | T,
178 | N,
179 | device=self.device,
180 | dtype=self.dtype,
181 | generator=generator,
182 | )
183 | if omega is None:
184 | omega = torch.randn(
185 | num_trajectories,
186 | T,
187 | 1,
188 | device=self.device,
189 | dtype=self.dtype,
190 | generator=generator,
191 | )
192 | data = torch.zeros(
193 | num_trajectories,
194 | T + 1,
195 | N,
196 | device=self.device,
197 | dtype=self.dtype,
198 | )
199 | 
200 | data[:, 0, :] = X_0
201 | for t in range(T):
202 | data[:, t + 1, :] = (
203 | # Simulate using passed in "f", which could be linear self.forward.
204 | f(data[:, t, :]) # num_ensembles by N
205 | + (1 - self.hparams.delta) * data[:, t, :]
206 | + self.hparams.sigma * w[:, t, :]
207 | + self.hparams.eta * omega[:, t]
208 | )
209 | data_flat = data.flatten(start_dim=0, end_dim=1)
210 | # Associate indices with the data, same order as flatten above
211 | ensemble_indices, t_indices = torch.meshgrid(
212 | torch.arange(num_trajectories), torch.arange(T + 1), indexing="ij"
213 | )
214 | 
215 | return data_flat, ensemble_indices.flatten(), t_indices.flatten()
216 | 
217 | # Setup data/etc. Supposed to be in setup instead of the __init__
218 | def setup(self, stage):
219 | N = self.hparams.N
220 | T = self.hparams.T
221 | 
222 | # Solves the LQ problem to find the comparison for the nu=1 case and for generating simulations
223 | self.H_0, self.H_1 = self.investment_equilibrium_LQ() # 1 firm is enough for the symmetric equilibrium
224 | 
225 | # quadrature for use within the expectation calculations
226 | nodes, weights = quantecon.quad.qnwnorm(self.hparams.omega_quadrature_nodes)
227 | nodes = torch.tensor(nodes, device=self.device, dtype=self.dtype)
228 | weights = torch.tensor(weights, device=self.device, dtype=self.dtype)
229 | 
230 | # If provided, create a new RNG for reproducibility of the X_0 and expectation shocks
231 | if self.hparams.X_0_seed > 0:
232 | generator = torch.Generator(device=self.device)
233 | generator.manual_seed(self.hparams.X_0_seed)
234 | else:
235 | generator = None # otherwise use default RNG
236 | 
237 | # Monte Carlo draw for the expectations, possibly normalizing it
238 | vec = torch.randn(1, N, device=self.device, dtype=self.dtype, generator=generator)
239 | expectation_shock_vector = (
240 | (vec - vec.mean()) / vec.std() if self.hparams.normalize_shock_vector else vec
241 | )
242 | 
243 | # Draw initial condition for the X_0 to simulate
244 | X_0 = (
245 | torch.normal(
246 | self.hparams.X_0_loc,
247 | self.hparams.X_0_scale,
248 | size=(N,),
249 | generator=generator,
250 | )
251 | .abs()
252 | .type_as(expectation_shock_vector)
253 | )
254 | 
255 | # Use a linear policy for initial simulation: h_0 + h_1 mean(X). h_0>0, h_1<0 guarantees stationarity and positivity.
|h_0/h_1|<1 guarantees prices p(X)>0 in the sample
256 | def initial_trajectory_policy(X):
257 | return self.H_0 + self.H_1 * X.mean(1, keepdim=True)
258 | 
259 | train_data, _, _ = self.simulate(
260 | X_0, self.hparams.train_trajectories, initial_trajectory_policy, generator=generator
261 | )
262 | if self.hparams.train_subsample_trajectories > 0:
263 | sample_idx = np.random.randint(
264 | len(train_data), size=self.hparams.train_subsample_trajectories
265 | )
266 | train_data = train_data[sample_idx]
267 | if self.hparams.val_trajectories > 0:
268 | val_data, _, _ = self.simulate(
269 | X_0,
270 | self.hparams.val_trajectories,
271 | initial_trajectory_policy,
272 | generator=generator,
273 | )
274 | self.register_buffer("val_data", val_data)
275 | else:
276 | self.val_data = []
277 | 
278 | # Store buffers for optimization. Replaces assignment to ensure it is transferred to GPU/etc. properly
279 | self.register_buffer(
280 | "quadrature_nodes", nodes
281 | ) # i.e., instead of self.quadrature_nodes = nodes
282 | self.register_buffer("quadrature_weights", weights)
283 | self.register_buffer("expectation_shock_vector", expectation_shock_vector)
284 | self.register_buffer("X_0", X_0)
285 | self.register_buffer("train_data", train_data)
286 | 
287 | def train_dataloader(self):
288 | return DataLoader(
289 | self.train_data,
290 | batch_size=self.hparams.batch_size
291 | if self.hparams.batch_size > 0
292 | else len(self.train_data),
293 | shuffle=self.hparams.shuffle_training,
294 | )
295 | 
296 | def val_dataloader(self):
297 | return DataLoader(
298 | self.val_data,
299 | batch_size=self.hparams.batch_size
300 | if self.hparams.batch_size > 0
301 | else len(self.val_data),
302 | )
303 | 
304 | # Reset simulation of training and validation data
305 | def on_train_epoch_end(self):
306 | # generates trajectories with the current policy, regardless of nu
307 | if (
308 | self.hparams.reset_trajectories_frequency > 0
309 | and (self.current_epoch > 0)
310 | and (self.current_epoch % self.hparams.reset_trajectories_frequency == 0)
311 | ):
312 | self.train_data.copy_(self.simulate(self.X_0, self.hparams.train_trajectories)[0]) # copy in place so the dataloaders, which hold a reference to this buffer, see the refreshed draws
313 | if self.hparams.val_trajectories > 0:
314 | self.val_data.copy_(self.simulate(self.X_0, self.hparams.val_trajectories)[0])
315 | # With larger problems and random test_data use a test_step instead
316 | @torch.no_grad()
317 | def test_model(self):
318 | N = self.hparams.N
319 | T = self.hparams.T
320 | # Initial conditions and vectors for shocks are identical to those in the first stages
321 | 
322 | # If provided, create a new RNG for reproducibility of the test shocks
323 | if self.hparams.test_seed > 0:
324 | generator = torch.Generator(device=self.device)
325 | generator.manual_seed(self.hparams.test_seed)
326 | else:
327 | generator = None # otherwise use default RNG
328 | 
329 | # Note that this simulates with the built-in forward function itself, not the linear policy
330 | X, ensemble, t = self.simulate(
331 | self.X_0, self.hparams.test_trajectories, generator=generator
332 | )
333 | 
334 | # Calculate some reductions over the X dimension
335 | u_hat = self(X).squeeze() # policy
336 | residuals = self.model_residuals(X).squeeze()
337 | loss = residuals.square().mean()
338 | self.logger.experiment.log({"test_loss": loss})
339 | 
340 | X_min = X.min(dim=1)[0]
341 | X_max = X.max(dim=1)[0]
342 | X_mean = X.mean(dim=1)
343 | X_std = X.std(dim=1)
344 | 
345 | self.test_results = pd.DataFrame(
346 | {
347 | "ensemble": ensemble.squeeze().cpu().numpy().tolist(),
348 | "t": t.squeeze().cpu().numpy().tolist(),
349 | "u_hat":
u_hat.squeeze().cpu().numpy().tolist(), 350 | "residual": residuals.squeeze().cpu().numpy().tolist(), 351 | "X_min": X_min.squeeze().cpu().numpy().tolist(), 352 | "X_max": X_max.squeeze().cpu().numpy().tolist(), 353 | "X_mean": X_mean.squeeze().cpu().numpy().tolist(), 354 | "X_std": X_std.squeeze().cpu().numpy().tolist(), 355 | } 356 | ) 357 | 358 | if self.hparams.nu == 1: 359 | # closed form if linear 360 | u_linear = self.H_0 + self.H_1 * X.mean(1, keepdim=True).squeeze() 361 | u_rel_error = torch.abs(u_hat - u_linear) / torch.abs(u_linear) 362 | u_abs_error = torch.abs(u_hat - u_linear) 363 | self.test_results["u_reference"] = u_linear.squeeze().cpu().numpy().tolist() 364 | self.test_results["u_rel_error"] = u_rel_error.squeeze().cpu().numpy().tolist() 365 | self.test_results["u_abs_error"] = u_abs_error.squeeze().cpu().numpy().tolist() 366 | self.logger.experiment.log( 367 | {"test_u_rel_error": u_rel_error, "test_u_abs_error": u_abs_error} 368 | ) 369 | 370 | 371 | def log_and_save(trainer, model, train_time, train_callback_metrics): 372 | if type(trainer.logger) is WandbLogger: 373 | # Valid numeric types 374 | def not_number_type(value): 375 | if value is None: 376 | return True 377 | 378 | if not isinstance(value, (int, float)): 379 | return True 380 | 381 | if math.isnan(value) or math.isinf(value): 382 | return True 383 | 384 | return False # otherwise a valid, non-infinite number 385 | 386 | # If early stopping, evaluate success 387 | early_stopping_check_failed = math.nan 388 | early_stopping_monitor = "" 389 | early_stopping_threshold = math.nan 390 | for callback in trainer.callbacks: 391 | if type(callback) == pl.callbacks.early_stopping.EarlyStopping: 392 | early_stopping_monitor = callback.monitor 393 | early_stopping_value = ( 394 | train_callback_metrics[callback.monitor].cpu().numpy().tolist() 395 | ) 396 | early_stopping_threshold = callback.stopping_threshold 397 | early_stopping_check_failed = not_number_type(early_stopping_value) or ( 398 | early_stopping_value > callback.stopping_threshold 399 | ) # hardcoded to min for now. 
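# (i.e., the success check assumes the early-stopping callback monitors in "min" mode; a "max"-mode monitor would need the comparison reversed)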
400 | break
401 | 
402 | # Check transversality
403 | X_T_mean = trainer.model.test_results.loc[
404 | trainer.model.test_results["t"] == model.hparams.T
405 | ].X_mean.mean()
406 | 
407 | # if nu = 1 it is more robust to check the u_rel_error, otherwise assume T is large enough that divergence would occur for X_T
408 | if not model.hparams.check_transversality:
409 | transversality_check_failed = math.nan
410 | elif (
411 | (model.hparams.nu == 1)
412 | and model.hparams.val_trajectories > 0
413 | and (
414 | not_number_type(trainer.logger.experiment.summary["val_u_rel_error"])
415 | or (
416 | trainer.logger.experiment.summary[
417 | "val_u_rel_error"
418 | ] # known at validation time
419 | > trainer.model.hparams.transversality_u_rel_error
420 | )
421 | )
422 | ):
423 | transversality_check_failed = True
424 | elif (
425 | model.hparams.nu != 1 or (model.hparams.nu == 1 and model.hparams.val_trajectories == 0)
426 | ) and (
427 | not_number_type(X_T_mean)
428 | or (X_T_mean < model.hparams.transversality_X_mean_min)
429 | or (X_T_mean > model.hparams.transversality_X_mean_max)
430 | ):
431 | transversality_check_failed = True
432 | else:
433 | transversality_check_failed = False
434 | 
435 | # Check test loss
436 | if model.hparams.test_loss_success_threshold == 0:
437 | test_loss_check_failed = math.nan
438 | elif not_number_type(trainer.logger.experiment.summary["test_loss"]) or (
439 | trainer.logger.experiment.summary["test_loss"]
440 | > model.hparams.test_loss_success_threshold
441 | ):
442 | test_loss_check_failed = True
443 | else:
444 | test_loss_check_failed = False
445 | 
446 | # Determine convergence results
447 | if (
448 | early_stopping_check_failed in [False, math.nan]
449 | and transversality_check_failed in [False, math.nan]
450 | and test_loss_check_failed in [False, math.nan]
451 | ):
452 | retcode = 0
453 | convergence_description = "Success"
454 | elif early_stopping_check_failed == True:
455 | retcode = -1
456 | convergence_description = "Early stopping failure"
457 | elif transversality_check_failed == True:
458 | retcode = -2
459 | convergence_description = "Transversality check failure" # possibly due to finding the wrong root, but could also be other issues which manifest as a transversality violation
460 | elif test_loss_check_failed == True:
461 | retcode = -3
462 | convergence_description = "Test loss failure due to possible overfitting."
# if nu != 1 but T was set low, this might also be due to transversality failures
463 | else:
464 | retcode = -100
465 | convergence_description = "Unknown failure"
466 | 
467 | # Log all calculated results
468 | trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
469 | trainer.logger.experiment.log({"train_time": train_time})
470 | trainer.logger.experiment.log({"early_stopping_monitor": early_stopping_monitor})
471 | trainer.logger.experiment.log({"early_stopping_threshold": early_stopping_threshold})
472 | trainer.logger.experiment.log({"early_stopping_check_failed": early_stopping_check_failed})
473 | trainer.logger.experiment.log({"transversality_check_failed": transversality_check_failed})
474 | trainer.logger.experiment.log({"test_loss_check_failed": test_loss_check_failed})
475 | trainer.logger.experiment.log({"trainable_parameters": trainable_parameters})
476 | trainer.logger.experiment.log({"retcode": retcode})
477 | trainer.logger.experiment.log({"convergence_description": convergence_description})
478 | trainer.logger.experiment.log({"X_T_mean": X_T_mean})
479 | 
480 | # Set objective for hyperparameter optimization
481 | # Objective value given in the settings, or empty
482 | if model.hparams.hpo_objective_name is not None:
483 | hpo_objective_value = dict(trainer.logger.experiment.summary)[
484 | model.hparams.hpo_objective_name
485 | ]
486 | else:
487 | hpo_objective_value = math.nan
488 | 
489 | if model.hparams.always_log_hpo_objective or retcode >= 0:
490 | trainer.logger.experiment.log({"hpo_objective": hpo_objective_value})
491 | else:
492 | trainer.logger.experiment.log({"hpo_objective": math.nan})
493 | 
494 | # Save test results
495 | trainer.logger.log_text(
496 | key="test_results", dataframe=trainer.model.test_results
497 | ) # Saves on wandb for querying later
498 | 
499 | # save the summary statistics in a file
500 | if model.hparams.save_metrics and trainer.log_dir is not None:
501 | metrics_path = Path(trainer.log_dir) / "metrics.yaml"
502 | with open(metrics_path, "w") as fp:
503 | yaml.dump(dict(trainer.logger.experiment.summary), fp)
504 | 
505 | if model.hparams.print_metrics:
506 | print(dict(trainer.logger.experiment.summary))
507 | return
508 | else: # almost no features enabled for other loggers. Could refactor later
509 | if model.hparams.save_test_results and trainer.log_dir is not None:
510 | model.test_results.to_csv(Path(trainer.log_dir) / "test_results.csv", index=False)
511 | 
512 | 
513 | if __name__ == "__main__":
514 | cli = LightningCLI(
515 | InvestmentEuler,
516 | seed_everything_default=123,
517 | run=False,
518 | save_config_callback=None, # turn this on to save the full config file rather than just having it uploaded
519 | parser_kwargs={"default_config_files": ["investment_euler_defaults.yaml"]},
520 | save_config_kwargs={"save_config_overwrite": True},
521 | )
522 | # Fit the model.
Separating training time for plotting, and evaluate generalization 523 | start = timeit.default_timer() 524 | cli.trainer.fit(cli.model) 525 | train_time = timeit.default_timer() - start 526 | train_callback_metrics = cli.trainer.callback_metrics 527 | cli.model.eval() # Enter evaluation mode, not training 528 | cli.model.test_model() # easier to write a manual test function than to use the trainer.test() here 529 | 530 | # Add additional calculations such as HPO objective to the log and save files 531 | log_and_save(cli.trainer, cli.model, train_time, train_callback_metrics) 532 | -------------------------------------------------------------------------------- /investment_euler_defaults.yaml: -------------------------------------------------------------------------------- 1 | trainer: 2 | accelerator: cpu 3 | max_epochs: 100 4 | min_epochs: 0 5 | max_time: 00:00:15:00 6 | precision: 32 7 | num_sanity_val_steps: 0 8 | logger: 9 | class_path: pytorch_lightning.loggers.WandbLogger 10 | init_args: 11 | offline: true # set to true to not upload during testing 12 | log_model: false # set to true to save the model at the end 13 | name: null # can set name or have it automatically generated 14 | project: symmetry_examples 15 | group: null # can group related runs 16 | tags: 17 | - basic_example 18 | callbacks: 19 | - class_path: pytorch_lightning.callbacks.LearningRateMonitor 20 | init_args: 21 | logging_interval: step 22 | log_momentum: false 23 | - class_path: pytorch_lightning.callbacks.ModelCheckpoint 24 | init_args: 25 | filename: best 26 | monitor: val_loss 27 | verbose: false 28 | save_last: true 29 | save_top_k: 1 30 | save_weights_only: true 31 | mode: min 32 | auto_insert_metric_name: true 33 | - class_path: pytorch_lightning.callbacks.EarlyStopping 34 | init_args: 35 | monitor: val_loss 36 | min_delta: 0.0 37 | patience: 50 38 | mode: min 39 | check_finite: true 40 | divergence_threshold: 100000 # stops if larger 41 | stopping_threshold: 1.0e-6 # typically the binding stopping threshold 42 | optimizer: 43 | class_path: torch.optim.Adam 44 | init_args: 45 | lr: 1.0e-3 46 | 47 | # Scheduler currently not tuned 48 | lr_scheduler: 49 | class_path: torch.optim.lr_scheduler.StepLR 50 | init_args: 51 | step_size: 50 # number of epochs 52 | gamma: 0.8 53 | # lr_scheduler: 54 | # class_path: torch.optim.lr_scheduler.ReduceLROnPlateau 55 | # init_args: 56 | # factor: 0.1 57 | # mode: min 58 | # patience: 5 59 | model: 60 | # Model parameters 61 | N: 128 62 | alpha_0: 1.0 63 | alpha_1: 1.0 64 | beta: 0.95 65 | gamma: 90.0 66 | sigma: 0.005 67 | delta: 0.05 68 | eta: 0.001 69 | nu: 1.0 70 | 71 | # Settings for output 72 | verbose: false 73 | hpo_objective_name: test_loss 74 | always_log_hpo_objective: false 75 | print_metrics: false 76 | save_metrics: false 77 | save_test_results: false 78 | test_seed: 0 # set to 0 to use default RNG seed 79 | X_0_seed: 0 # set to 0 to use default RNG seed 80 | check_transversality: true 81 | # if checking transversality for nu != 1, sees divergence of the test mean(X) from iteration. For small T these can be initial condition sensitive 82 | transversality_X_mean_min: 0.1 83 | transversality_X_mean_max: 10.0 84 | transversality_u_rel_error: 0.2 # if nu = 1, checks relative error of test. 
defaults to 20% deviation 85 | test_loss_success_threshold: 1e-4 # typically failures of overfitting are far above this 86 | 87 | 88 | # Settings for method 89 | omega_quadrature_nodes: 7 90 | normalize_shock_vector: true 91 | reset_trajectories_frequency: 0 # in epochs, 0 to never reset 92 | train_trajectories: 16 93 | train_subsample_trajectories: 0 # if > 0 will randomly sample from the train trajectories datapoints, otherwise uses all 94 | val_trajectories: 8 95 | test_trajectories: 32 96 | batch_size: 16 # set to 0 for full dataset 97 | shuffle_training: true 98 | T: 63 99 | X_0_loc: 0.9 100 | X_0_scale: 0.05 101 | 102 | # # Settings for deep sets neural networks 103 | ml_model: 104 | class_path: econ_layers.layers.DeepSet 105 | init_args: 106 | n_in: 1 107 | n_out: 1 108 | L: 4 109 | 110 | phi_layers: 1 111 | phi_hidden_dim: 128 112 | phi_hidden_bias: false 113 | phi_last_bias: true 114 | phi_activator: 115 | class_path: torch.nn.ReLU 116 | 117 | rho_layers: 4 118 | rho_hidden_dim: 256 119 | rho_hidden_bias: true 120 | rho_last_bias: true 121 | rho_activator: 122 | class_path: torch.nn.ReLU 123 | 124 | # Can also change on CLI such as 125 | #python investment_euler.py --model.ml_model.class_path=econ_layers.layers.DeepSetMoments --model.ml_model.L=4 --model.ml_model.n_in=1 --model.ml_model.n_out=1 --model.ml_model.rho_layers=3 --model.ml_model.rho_hidden_dim=256 --model.ml_model.rho_hidden_bias=false --model.ml_model.rho_last_bias=true 126 | -------------------------------------------------------------------------------- /investment_euler_simple.yaml: -------------------------------------------------------------------------------- 1 | trainer: 2 | max_epochs: 100 3 | min_epochs: 0 4 | max_time: 00:00:15:00 5 | precision: 32 6 | num_sanity_val_steps: 0 7 | logger: null 8 | callbacks: 9 | - class_path: pytorch_lightning.callbacks.EarlyStopping 10 | init_args: 11 | monitor: val_loss 12 | min_delta: 0.0 13 | patience: 50 14 | mode: min 15 | check_finite: true 16 | divergence_threshold: 100000 # stops if larger 17 | stopping_threshold: 1.0e-6 # typically the binding stopping threshold 18 | optimizer: 19 | class_path: torch.optim.Adam 20 | init_args: 21 | lr: 1.0e-3 22 | 23 | # Scheduler currently not tuned 24 | lr_scheduler: 25 | class_path: torch.optim.lr_scheduler.StepLR 26 | init_args: 27 | step_size: 50 # number of epochs 28 | gamma: 0.8 29 | # lr_scheduler: 30 | # class_path: torch.optim.lr_scheduler.ReduceLROnPlateau 31 | # init_args: 32 | # factor: 0.1 33 | # mode: min 34 | # patience: 5 35 | model: 36 | # Model parameters 37 | N: 128 38 | alpha_0: 1.0 39 | alpha_1: 1.0 40 | beta: 0.95 41 | gamma: 90.0 42 | sigma: 0.005 43 | delta: 0.05 44 | eta: 0.001 45 | nu: 1.0 46 | 47 | # Settings for output 48 | verbose: false 49 | hpo_objective_name: test_loss 50 | always_log_hpo_objective: false 51 | print_metrics: false 52 | save_metrics: false 53 | save_test_results: false 54 | 55 | 56 | # Settings for method 57 | omega_quadrature_nodes: 7 58 | normalize_shock_vector: true 59 | reset_trajectories_frequency: 0 # in epochs, 0 to never reset 60 | train_trajectories: 16 61 | val_trajectories: 8 62 | test_trajectories: 32 63 | batch_size: 16 # set to 0 for full dataset 64 | shuffle_training: true 65 | T: 63 66 | X_0_loc: 0.9 67 | X_0_scale: 0.05 68 | 69 | # Settings for deep sets neural networks 70 | ml_model: 71 | class_path: econ_layers.layers.DeepSet 72 | init_args: 73 | n_in: 1 74 | n_out: 1 75 | L: 4 76 | 77 | phi_layers: 1 78 | phi_hidden_dim: 128 79 | phi_hidden_bias: false 80 | 
phi_last_bias: true 81 | phi_activator: 82 | class_path: torch.nn.ReLU 83 | 84 | rho_layers: 4 85 | rho_hidden_dim: 256 86 | rho_hidden_bias: true 87 | rho_last_bias: true 88 | rho_activator: 89 | class_path: torch.nn.ReLU -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 100 -------------------------------------------------------------------------------- /replication_scripts/L_16_deep_sets.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: L_16_deep_sets 3 | description: Multiple seeds deep sets parameterization with L=16 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [L_16_deep_sets] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:10:00 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | #optimizer.lr: 23 | #value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | 29 | # Variation on parameters for experiment 30 | model.ml_model.L: 31 | value: 16 32 | -------------------------------------------------------------------------------- /replication_scripts/L_2_deep_moments.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: L_2_deep_moments 3 | description: Multiple seeds given baseline deep moments parameterization with L=2 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [L_2_deep_moments] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | optimizer.lr: 23 | value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.ml_model.class_path: 29 | value: econ_layers.layers.DeepSetMoments 30 | model.ml_model.init_args.L: 31 | value: 2 32 | model.ml_model.init_args.n_in: 33 | value: 1 34 | model.ml_model.init_args.n_out: 35 | value: 1 36 | model.ml_model.init_args.rho_layers: 37 | value: 4 38 | model.ml_model.init_args.rho_hidden_dim: 39 | value: 128 40 | model.ml_model.init_args.rho_hidden_bias: 41 | value: true 42 | model.ml_model.init_args.rho_last_bias: 43 | value: true 44 | model.ml_model.init_args.rho_activator.class_path: 45 | value: torch.nn.ReLU -------------------------------------------------------------------------------- /replication_scripts/L_2_deep_sets .yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: L_2_deep_sets 3 | description: Multiple seeds deep sets parameterization with L=2 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [L_2_deep_sets] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 
| value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:10:00
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | #optimizer.lr:
23 | #value: 1.0e-4
24 | 
25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
26 | model.N:
27 | value: 128
28 | 
29 | 
30 | # Variation on parameters for experiment
31 | model.ml_model.L:
32 | value: 2
33 | 
-------------------------------------------------------------------------------- /replication_scripts/L_8_deep_sets.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: L_8_deep_sets
3 | description: Multiple seeds deep sets parameterization with L=8
4 | method: grid
5 | parameters:
6 | trainer.logger.tags:
7 | value: [L_8_deep_sets]
8 | seed:
9 | min: 1
10 | max: 100 # number of ensembles to solve
11 | trainer.logger.offline:
12 | value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:10:00
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | #optimizer.lr:
23 | #value: 1.0e-4
24 | 
25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
26 | model.N:
27 | value: 128
28 | 
29 | # Variation on parameters for experiment
30 | model.ml_model.L:
31 | value: 8
32 | 
-------------------------------------------------------------------------------- /replication_scripts/README.md: --------------------------------------------------------------------------------
1 | # Replication Instructions
2 | 
3 | 1. Login with W&B to your entity, and choose a `<project name>`. Here we will use `symmetry_test`.
4 | 2. For every `.yaml` in this folder run `wandb sweep --name <sweep name> --project <project name> <sweep file>`, where the `name` argument is optional but helps organize the sweeps in the W&B UI.
5 | 
6 | For example:
7 | ```bash
8 | wandb sweep --name baseline_deep_sets --project symmetry_test replication_scripts/baseline_deep_sets.yaml
9 | wandb sweep --name baseline_deep_moments --project symmetry_test replication_scripts/baseline_deep_moments.yaml
10 | ```
11 | 
12 | Optionally, you can include your W&B entity with `--entity <entity>` as well.
13 | 
14 | 3. For each of those sweeps, create an agent by copying the command from the output of `wandb sweep`. These have the form `wandb agent <entity>/<project>/<sweep id>`, as shown below.
15 | 
16 | For slower experiments (e.g. the `baseline_deep_sets_N`), you can run multiple agents on as many machines as you wish.
17 | 
18 | # Full Replication Instructions
19 | In bash (or with Git bash for Windows) in the main folder, run `./run_all_sequential.sh`
20 | 
21 | - The script creates all of the sweep files, then automatically runs a sweep agent (in serial)
22 | - That will be very slow (e.g. days) since it is doing all experiments serially, and there are many ensembles and seeds executed
23 | - But you can modify as required to only use a subset of the files.
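For reference, a launched agent command looks like this (the sweep ID `abc123xy` is a placeholder; use the one printed by `wandb sweep`):
```bash
wandb agent my_entity/symmetry_test/abc123xy
```
Multiple agents pointed at the same sweep ID will split the queued runs between them.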
24 | 25 | -------------------------------------------------------------------------------- /replication_scripts/baseline_deep_moments.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_deep_moments 3 | description: Multiple seeds given baseline deep moments parameterization 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_deep_moments] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | optimizer.lr: 23 | value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.ml_model.class_path: 29 | value: econ_layers.layers.DeepSetMoments 30 | model.ml_model.init_args.L: 31 | value: 4 32 | model.ml_model.init_args.n_in: 33 | value: 1 34 | model.ml_model.init_args.n_out: 35 | value: 1 36 | model.ml_model.init_args.rho_layers: 37 | value: 4 38 | model.ml_model.init_args.rho_hidden_dim: 39 | value: 128 40 | model.ml_model.init_args.rho_hidden_bias: 41 | value: true 42 | model.ml_model.init_args.rho_last_bias: 43 | value: true 44 | model.ml_model.init_args.rho_activator.class_path: 45 | value: torch.nn.ReLU -------------------------------------------------------------------------------- /replication_scripts/baseline_deep_moments_one_run.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_deep_moments_one_run 3 | description: Single run with deep moments with comparable aggregate and idiosyncratic shocks 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_deep_moments_one_run] 8 | seed: 9 | value: 123 10 | trainer.logger.offline: 11 | value: false # log online for W&B optimization 12 | trainer.max_time: 13 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 14 | 15 | # Stopping criteria 16 | trainer.callbacks.monitor: 17 | value: val_loss 18 | trainer.callbacks.stopping_threshold: 19 | value: 1.0e-6 20 | #Learning rate 21 | optimizer.lr: 22 | value: 1.0e-4 23 | 24 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 25 | model.N: 26 | value: 128 27 | model.ml_model.class_path: 28 | value: econ_layers.layers.DeepSetMoments 29 | model.ml_model.init_args.L: 30 | value: 4 31 | model.ml_model.init_args.n_in: 32 | value: 1 33 | model.ml_model.init_args.n_out: 34 | value: 1 35 | model.ml_model.init_args.rho_layers: 36 | value: 4 37 | model.ml_model.init_args.rho_hidden_dim: 38 | value: 128 39 | model.ml_model.init_args.rho_hidden_bias: 40 | value: true 41 | model.ml_model.init_args.rho_last_bias: 42 | value: true 43 | model.ml_model.init_args.rho_activator.class_path: 44 | value: torch.nn.ReLU 45 | model.test_seed: 46 | value: 10 47 | model.X_0_seed: 48 | value: 323 -------------------------------------------------------------------------------- /replication_scripts/baseline_deep_sets.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_deep_sets 3 | description: Multiple seeds given baseline deep 
sets parameterization 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_deep_sets] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | #optimizer.lr: 23 | #value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | -------------------------------------------------------------------------------- /replication_scripts/baseline_deep_sets_N.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_deep_sets_N 3 | description: Multiple seeds for different N with the baseline deep sets parameterization 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_deep_sets_N] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:05:00 # Extended the time a little longer because for large enough N it starts scaling linearly due to linear algebra operations 15 | trainer.accelerator: 16 | value: gpu 17 | 18 | # Stopping criteria 19 | trainer.callbacks.monitor: 20 | value: val_loss 21 | trainer.callbacks.stopping_threshold: 22 | value: 1.0e-6 23 | 24 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 25 | model.N: 26 | values: [64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536] # higher levels require the gpu accelerator 27 | -------------------------------------------------------------------------------- /replication_scripts/baseline_deep_sets_one_run.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_deep_sets_one_run 3 | description: Single run with deep sets with comparable aggregate and idiosyncratic shocks 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_deep_sets_one_run] 8 | seed: 9 | value: 123 10 | 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | #optimizer.lr: 23 | #value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.test_seed: 29 | value: 10 30 | model.X_0_seed: 31 | value: 323 -------------------------------------------------------------------------------- /replication_scripts/baseline_identity.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_identity 3 | description: Multiple seeds given baseline single moment parameterization 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_identity] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B 
optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | # Learning rate 22 | optimizer.lr: 23 | value: 0.00075 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.ml_model.class_path: 29 | value: econ_layers.layers.DeepSetMoments 30 | model.ml_model.init_args.L: 31 | value: 1 # i.e., only the first moment 32 | model.ml_model.init_args.n_in: 33 | value: 1 34 | model.ml_model.init_args.n_out: 35 | value: 1 36 | model.ml_model.init_args.rho_layers: 37 | value: 4 38 | model.ml_model.init_args.rho_hidden_dim: 39 | value: 128 40 | model.ml_model.init_args.rho_hidden_bias: 41 | value: false 42 | model.ml_model.init_args.rho_last_bias: 43 | value: true 44 | model.ml_model.init_args.rho_activator.class_path: 45 | value: torch.nn.ReLU -------------------------------------------------------------------------------- /replication_scripts/baseline_identity_one_run.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_identity_one_run 3 | description: Single run with one moment with comparable aggregate and idiosyncratic shocks 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_identity_one_run] 8 | seed: 9 | value: 123 10 | trainer.logger.offline: 11 | value: false # log online for W&B optimization 12 | trainer.max_time: 13 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 14 | 15 | # Stopping criteria 16 | trainer.callbacks.monitor: 17 | value: val_loss 18 | trainer.callbacks.stopping_threshold: 19 | value: 1.0e-6 20 | # Learning rate 21 | optimizer.lr: 22 | value: 0.00075 23 | 24 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 25 | model.N: 26 | value: 128 27 | model.ml_model.class_path: 28 | value: econ_layers.layers.DeepSetMoments 29 | model.ml_model.init_args.L: 30 | value: 1 # i.e., only the first moment 31 | model.ml_model.init_args.n_in: 32 | value: 1 33 | model.ml_model.init_args.n_out: 34 | value: 1 35 | model.ml_model.init_args.rho_layers: 36 | value: 4 37 | model.ml_model.init_args.rho_hidden_dim: 38 | value: 128 39 | model.ml_model.init_args.rho_hidden_bias: 40 | value: false 41 | model.ml_model.init_args.rho_last_bias: 42 | value: true 43 | model.ml_model.init_args.rho_activator.class_path: 44 | value: torch.nn.ReLU 45 | model.test_seed: 46 | value: 10 47 | model.X_0_seed: 48 | value: 323 -------------------------------------------------------------------------------- /replication_scripts/baseline_nonlinear_deep_moments.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: baseline_nonlinear_deep_moments 3 | description: Multiple seeds given baseline deep moments parameterization for the nonlinear case 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [baseline_nonlinear_deep_moments] 8 | seed: 9 | min: 1 10 | max: 100 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 
#Learning rate
21 | model.reset_trajectories_frequency:
22 | value: 5 # otherwise sticks with linear until the test set
23 | optimizer.lr:
24 | value: 1.0e-4
25 | lr_scheduler.step_size:
26 | value: 10
27 | lr_scheduler.gamma:
28 | value: 0.8
29 | 
30 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
31 | model.N:
32 | value: 128
33 | model.nu:
34 | value: 1.5
35 | 
36 | model.ml_model.class_path:
37 | value: econ_layers.layers.DeepSetMoments
38 | model.ml_model.init_args.L:
39 | value: 4
40 | model.ml_model.init_args.n_in:
41 | value: 1
42 | model.ml_model.init_args.n_out:
43 | value: 1
44 | model.ml_model.init_args.rho_layers:
45 | value: 4
46 | model.ml_model.init_args.rho_hidden_dim:
47 | value: 128
48 | model.ml_model.init_args.rho_hidden_bias:
49 | value: true
50 | model.ml_model.init_args.rho_last_bias:
51 | value: true
52 | model.ml_model.init_args.rho_activator.class_path:
53 | value: torch.nn.ReLU
54 | 
-------------------------------------------------------------------------------- /replication_scripts/baseline_nonlinear_deep_sets.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: baseline_nonlinear_deep_sets
3 | description: Multiple seeds given baseline deep sets parameterization for the nonlinear case
4 | method: grid
5 | parameters:
6 | trainer.logger.tags:
7 | value: [baseline_nonlinear_deep_sets]
8 | seed:
9 | min: 1
10 | max: 100
11 | trainer.logger.offline:
12 | value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:08:00
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | 
23 | model.reset_trajectories_frequency:
24 | value: 10 # otherwise sticks with linear until the test set
25 | optimizer.lr:
26 | value: 1.0e-3
27 | lr_scheduler.step_size:
28 | value: 40
29 | lr_scheduler.gamma:
30 | value: 0.8
31 | 
32 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
33 | model.N:
34 | value: 128
35 | model.nu:
36 | value: 1.5
37 | 
38 | model.ml_model.phi_layers:
39 | value: 1
40 | 
41 | 
42 | 
-------------------------------------------------------------------------------- /replication_scripts/deep_2_4_deep_sets.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: deep_2_4_deep_sets
3 | description: Multiple seeds deep sets with 2 layers for phi and 4 for rho
4 | method: grid
5 | parameters:
6 | trainer.logger.tags:
7 | value: [deep_2_4_deep_sets]
8 | seed:
9 | min: 1
10 | max: 100 # number of ensembles to solve
11 | trainer.logger.offline:
12 | value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:10:00
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | #optimizer.lr:
23 | #value: 1.0e-4
24 | 
25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
26 | model.N:
27 | value: 128
28 | 
29 | 
30 | # Variation on parameters for experiment
31 | model.ml_model.phi_layers:
32 | value: 2
33 | model.ml_model.rho_layers:
34 | value: 4
-------------------------------------------------------------------------------- /replication_scripts/deep_4_8_deep_sets.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: deep_4_8_deep_sets
3 | description: Multiple seeds deep sets with 4 layers for phi and 8 for rho
4 | method: grid
5 | parameters:
6 | trainer.logger.tags:
7 | value: [deep_4_8_deep_sets]
8 | seed:
9 | min: 1
10 | max: 100 # number of ensembles to solve
11 | trainer.logger.offline:
12 | value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:10:00
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | #optimizer.lr:
23 | #value: 1.0e-4
24 | 
25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
26 | model.N:
27 | value: 128
28 | 
29 | 
30 | # Variation on parameters for experiment
31 | model.ml_model.phi_layers:
32 | value: 4
33 | model.ml_model.rho_layers:
34 | value: 8
-------------------------------------------------------------------------------- /replication_scripts/deep_sets_nonlinear_nu_130_one_run.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: deep_sets_nonlinear_nu_130_one_run
3 | description: Single run given baseline deep sets parameterization with nu = 1.3
4 | method: grid
5 | parameters:
6 | trainer.logger.tags:
7 | value: [deep_sets_nonlinear_nu_130_one_run]
8 | seed:
9 | value: 123
10 | 
11 | trainer.logger.offline:
12 | value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | 
23 | model.reset_trajectories_frequency:
24 | value: 10 # otherwise sticks with linear until the test set
25 | optimizer.lr:
26 | value: 1.0e-3
27 | lr_scheduler.step_size:
28 | value: 40
29 | lr_scheduler.gamma:
30 | value: 0.8
31 | 
32 | # Core parameters which may be different from baseline investment_euler_defaults.yaml
33 | model.N:
34 | value: 128
35 | model.nu:
36 | value: 1.3
37 | model.ml_model.phi_layers:
38 | value: 1
39 | model.train_trajectories:
40 | value: 32
41 | model.val_trajectories:
42 | value: 16
43 | model.test_seed:
44 | value: 10
45 | model.X_0_seed:
46 | value: 323
-------------------------------------------------------------------------------- /replication_scripts/deep_sets_nonlinear_nu_150_one_run.yaml: --------------------------------------------------------------------------------
1 | program: investment_euler.py
2 | name: deep_sets_nonlinear_nu_150_one_run
3 | description: Single run given baseline deep sets parameterization with nu = 1.5
4 | method: grid
5 | parameters:
6 | trainer.logger.tags:
7 | value: [deep_sets_nonlinear_nu_150_one_run]
8 | seed:
9 | value: 123
10 | 
11 | trainer.logger.offline:
12 | value: false # log online for W&B optimization
13 | trainer.max_time:
14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes
15 | 
16 | # Stopping criteria
17 | trainer.callbacks.monitor:
18 | value: val_loss
19 | trainer.callbacks.stopping_threshold:
20 | value: 1.0e-6
21 | #Learning rate
22 | 
23 | model.reset_trajectories_frequency:
24 | value: 10 # otherwise sticks with linear until the test set
25 | optimizer.lr:
26 | value: 1.0e-3
27 | lr_scheduler.step_size:
28 | value: 40
29 | lr_scheduler.gamma:
30 | value: 0.8
31 | 
32 | # Core parameters which may be different from baseline
investment_euler_defaults.yaml 33 | model.N: 34 | value: 128 35 | model.nu: 36 | value: 1.5 37 | 38 | model.ml_model.phi_layers: 39 | value: 1 40 | model.train_trajectories: 41 | value: 32 42 | model.val_trajectories: 43 | value: 16 44 | model.test_seed: 45 | value: 10 46 | model.X_0_seed: 47 | value: 323 -------------------------------------------------------------------------------- /replication_scripts/deep_sets_nonlinear_overfit.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: deep_sets_nonlinear_overfit 3 | description: deep sets over-parameterization with different numbers of data points 4 | method: grid 5 | 6 | parameters: 7 | trainer.logger.tags: 8 | value: [deep_sets_nonlinear_overfit] 9 | seed: 10 | min: 1 11 | max: 100 # number of ensembles to solve 12 | 13 | trainer.logger.offline: 14 | value: false # log online for W&B optimization 15 | trainer.max_time: 16 | value: 00:00:05:00 17 | trainer.max_epochs: 18 | value: 500 19 | # Stopping criteria 20 | trainer.callbacks: 21 | value: [] # turning off early stopping in particular 22 | # Provide distributions and variations for optimizer 23 | model.test_loss_success_threshold: 24 | value: 1.0e-4 # just for setting the return code 25 | model.N: 26 | value: 128 27 | optimizer.lr: 28 | value: 0.003 29 | lr_scheduler.step_size: 30 | value: 50 31 | lr_scheduler.gamma: 32 | value: 0.8 33 | model.train_subsample_trajectories: 34 | values: [2, 3, 4, 5, 10] 35 | model.train_trajectories: 36 | value: 1 37 | model.val_trajectories: 38 | value: 0 # not using validation for stopping criteria 39 | model.batch_size: 40 | value: 2 41 | model.nu: 42 | value: 1.5 -------------------------------------------------------------------------------- /replication_scripts/deep_sets_overfit.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: deep_sets_overfit 3 | description: deep sets over-parameterization with different numbers of data points 4 | method: grid 5 | 6 | parameters: 7 | trainer.logger.tags: 8 | value: [deep_sets_overfit] 9 | seed: 10 | min: 1 11 | max: 100 # number of ensembles to solve 12 | 13 | trainer.logger.offline: 14 | value: false # log online for W&B optimization 15 | trainer.max_time: 16 | value: 00:00:05:00 17 | trainer.max_epochs: 18 | value: 500 19 | # Stopping criteria 20 | trainer.callbacks: 21 | value: [] # turning off early stopping in particular 22 | # Provide distributions and variations for optimizer 23 | model.test_loss_success_threshold: 24 | value: 1.0e-4 # just for setting the return code 25 | model.N: 26 | value: 128 27 | optimizer.lr: 28 | value: 0.003 29 | lr_scheduler.step_size: 30 | value: 50 31 | lr_scheduler.gamma: 32 | value: 0.8 33 | model.train_subsample_trajectories: 34 | values: [2, 3, 4, 5] 35 | model.train_trajectories: 36 | value: 1 37 | model.val_trajectories: 38 | value: 0 # not using validation for stopping criteria 39 | model.batch_size: 40 | value: 2 -------------------------------------------------------------------------------- /replication_scripts/generalized_mean_deep_sets_L_N.yaml: -------------------------------------------------------------------------------- 1 | program: generalized_mean.py 2 | project: generalized_mean_examples 3 | name: generalized_mean_deep_sets_L_N 4 | description: Generalized mean with deep sets, varying N and L, multiple seeds 5 | method: grid 6 | parameters: 7 | trainer.logger.tags: 8 | value:
[generalized_mean_deep_sets_L_N] 9 | seed: 10 | min: 123 11 | max: 223 # 123 + 100 seeds 12 | trainer.max_epochs: 13 | value: 5000 14 | trainer.min_epochs: 15 | value: 0 16 | trainer.logger.offline: 17 | value: false # log online for W&B optimization 18 | trainer.max_time: 19 | value: 00:00:03:00 20 | model.num_train_points: 21 | value: 10 22 | model.num_val_points: 23 | value: 0 24 | model.num_test_points: 25 | value: 200 26 | trainer.callbacks.stopping_threshold: # stop if train_loss < stopping_threshold 27 | value: 1e-5 # TODO: make smaller for full table 28 | model.test_loss_success_threshold: 29 | value: 0 30 | trainer.callbacks.monitor: 31 | value: train_loss 32 | optimizer.lr: 33 | value: 0.005 34 | model.ml_model.L: 35 | values: [1, 2, 4, 8, 16] 36 | model.N: 37 | values: [2, 4, 8, 32, 64, 512, 1024] -------------------------------------------------------------------------------- /replication_scripts/generalized_mean_no_invariance_template.yaml: -------------------------------------------------------------------------------- 1 | program: generalized_mean.py 2 | project: generalized_mean_examples 3 | name: generalized_mean_no_invariance_N_NNN 4 | description: Generalized mean without using invariances and with multiple seeds 5 | method: grid 6 | parameters: 7 | trainer.logger.tags: 8 | value: [generalized_mean_no_invariance_N_NNN] 9 | seed: 10 | min: 123 11 | max: 223 # 123 + 100 seeds 12 | trainer.max_epochs: 13 | value: 5000 14 | trainer.min_epochs: 15 | value: 0 16 | trainer.logger.offline: 17 | value: false # log online for W&B optimization 18 | trainer.max_time: 19 | value: 00:00:03:00 20 | model.num_train_points: 21 | value: 10 22 | model.num_val_points: 23 | value: 10 24 | model.num_test_points: 25 | value: 200 26 | trainer.callbacks.stopping_threshold: # stop if train_loss < stopping_threshold 27 | value: 1e-5 # TODO: make smaller for full table 28 | model.test_loss_success_threshold: 29 | value: 0 30 | trainer.callbacks.monitor: 31 | value: train_loss 32 | optimizer.lr: 33 | value: 0.005 34 | model.N: 35 | value: NNN 36 | model.ml_model.class_path: 37 | value: econ_layers.layers.FlexibleSequential 38 | model.ml_model.n_in: 39 | value: NNN 40 | model.ml_model.n_out: 41 | value: 1 42 | model.ml_model.layers: 43 | value: 4 44 | model.ml_model.hidden_dim: 45 | value: 128 46 | -------------------------------------------------------------------------------- /replication_scripts/run_all_generalized_mean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Function which takes a sweep name, creates the sweep, then runs a single agent before continuing 4 | # This function could be replaced with something like https://github.com/wandb/wandb/issues/5207 5 | 6 | 7 | # Define the project name 8 | PROJECT_NAME="symmetry_dynamic_programming" # swap out globally 9 | 10 | # Define the run_sweep_and_agent function 11 | run_sweep_and_agent () { 12 | # Set the SWEEP_NAME variable 13 | SWEEP_NAME="$1" 14 | 15 | # Run the wandb sweep command and store the output in a temporary file 16 | wandb sweep --project "$PROJECT_NAME" --name "$SWEEP_NAME" "replication_scripts/$SWEEP_NAME.yaml" >temp_output.txt 2>&1 17 | 18 | # Extract the sweep ID using awk 19 | SWEEP_ID=$(awk '/wandb agent/{ match($0, /wandb agent (.+)/, arr); print arr[1]; }' temp_output.txt) 20 | 21 | # Remove the temporary output file 22 | rm temp_output.txt 23 | 24 | # Run the wandb agent command 25 | wandb agent $SWEEP_ID 26 | } 27 | 28 |
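# Portability note: the three-argument match() used in run_sweep_and_agent above is a
# gawk extension; with a strictly POSIX awk (e.g., stock macOS), an untested
# alternative sketch for extracting the sweep ID from the captured output would be:
#   SWEEP_ID=$(grep -o 'wandb agent .*' temp_output.txt | tail -n 1 | cut -d' ' -f3)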
# List of "N" we will use for the no invariance experiments 29 | list_of_N="2 4 8 32 64 512 1024" 30 | 31 | # Path to the template 32 | template="replication_scripts/generalized_mean_no_invariance_template.yaml" 33 | 34 | # Loop over each N 35 | for N in $list_of_N; do 36 | # Generate the output filename 37 | output_file="replication_scripts/generalized_mean_no_invariance_N_${N}.yaml" 38 | 39 | # Use sed to replace all occurrences of NNN with the current N 40 | sed "s/NNN/${N}/g" $template > $output_file 41 | done 42 | 43 | # Run all sweeps 44 | 45 | run_sweep_and_agent "generalized_mean_deep_sets_L_N" 46 | 47 | # Loop over each N for each invariance sweep 48 | for N in $list_of_N; do 49 | run_sweep_and_agent "generalized_mean_no_invariance_N_${N}" 50 | done -------------------------------------------------------------------------------- /replication_scripts/run_all_sequential.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Function which takes a sweep name, creates the sweep, then creates a single agent before continuing 4 | # This function could be replaced with something like https://github.com/wandb/wandb/issues/5207 5 | #!/bin/bash 6 | 7 | # Define the project name 8 | PROJECT_NAME="symmetry_dynamic_programming" # swap out globally 9 | 10 | # Define the run_sweep_and_agent function 11 | run_sweep_and_agent () { 12 | # Set the SWEEP_NAME variable 13 | SWEEP_NAME="$1" 14 | 15 | # Run the wandb sweep command and store the output in a temporary file 16 | wandb sweep --project "$PROJECT_NAME" --name "$SWEEP_NAME" "replication_scripts/$SWEEP_NAME.yaml" >temp_output.txt 2>&1 17 | 18 | # Extract the sweep ID using awk 19 | SWEEP_ID=$(awk '/wandb agent/{ match($0, /wandb agent (.+)/, arr); print arr[1]; }' temp_output.txt) 20 | 21 | # Remove the temporary output file 22 | rm temp_output.txt 23 | 24 | # Run the wandb agent command 25 | wandb agent $SWEEP_ID 26 | } 27 | 28 | # Call experiments sequentially. VERY SLOW given large number of experiments 29 | 30 | # Primary examples with multiple seeds 31 | run_sweep_and_agent "baseline_deep_sets" 32 | run_sweep_and_agent "baseline_deep_moments" 33 | run_sweep_and_agent "baseline_identity" 34 | 35 | # One run examples 36 | run_sweep_and_agent "baseline_deep_sets_one_run" 37 | run_sweep_and_agent "baseline_deep_moments_one_run" 38 | run_sweep_and_agent "baseline_identity_one_run" 39 | run_sweep_and_agent "deep_sets_nonlinear_nu_130_one_run" 40 | run_sweep_and_agent "deep_sets_nonlinear_nu_150_one_run" 41 | 42 | # Additional robustness tables 43 | run_sweep_and_agent "L_2_deep_sets" 44 | run_sweep_and_agent "L_8_deep_sets" 45 | run_sweep_and_agent "L_16_deep_sets" 46 | run_sweep_and_agent "shallow_1_2_deep_sets" # almost not worth trouble 47 | run_sweep_and_agent "deep_2_4_deep_sets" 48 | run_sweep_and_agent "deep_4_8_deep_sets" 49 | run_sweep_and_agent "wide_256_deep_sets" 50 | #run_sweep_and_agent "thin_64_deep_sets" 51 | #run_sweep_and_agent "thin_64_identity" 52 | run_sweep_and_agent "L_2_deep_moments" 53 | run_sweep_and_agent "very_shallow_1_layer_deep_moments" 54 | #run_sweep_and_agent "thin_64_deep_moments" 55 | 56 | # Overfitting example 57 | run_sweep_and_agent "deep_sets_overfit" 58 | run_sweep_and_agent "deep_sets_nonlinear_overfit" 59 | 60 | # Nonlinear versions 61 | run_sweep_and_agent "baseline_nonlinear_deep_sets" 62 | run_sweep_and_agent "baseline_nonlinear_deep_moments" 63 | 64 | 65 | # This can take a day given the massive number of variations tested. 
66 | run_sweep_and_agent "baseline_deep_sets_N" 67 | -------------------------------------------------------------------------------- /replication_scripts/shallow_1_2_deep_sets.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: shallow_1_2_deep_sets 3 | description: Multiple seeds deep sets with 1 layer for phi and 2 for rho 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [shallow_1_2_deep_sets] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:10:00 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | #optimizer.lr: 23 | #value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | 29 | 30 | # Variation on parameters for experiment 31 | model.ml_model.phi_layers: 32 | value: 1 33 | model.ml_model.rho_layers: 34 | value: 2 -------------------------------------------------------------------------------- /replication_scripts/thin_64_deep_moments.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: thin_64_deep_moments 3 | description: Multiple seeds given baseline deep moments parameterization with 64 nodes 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [thin_64_deep_moments] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | optimizer.lr: 23 | value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.ml_model.class_path: 29 | value: econ_layers.layers.DeepSetMoments 30 | model.ml_model.init_args.L: 31 | value: 4 32 | model.ml_model.init_args.n_in: 33 | value: 1 34 | model.ml_model.init_args.n_out: 35 | value: 1 36 | model.ml_model.init_args.rho_layers: 37 | value: 4 38 | model.ml_model.init_args.rho_hidden_dim: 39 | value: 64 40 | model.ml_model.init_args.rho_hidden_bias: 41 | value: true 42 | model.ml_model.init_args.rho_last_bias: 43 | value: true 44 | model.ml_model.init_args.rho_activator.class_path: 45 | value: torch.nn.ReLU -------------------------------------------------------------------------------- /replication_scripts/thin_64_deep_sets.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: thin_64_deep_sets 3 | description: Multiple seeds deep sets 64 wide for all layers 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [thin_64_deep_sets] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:10:00 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 |
#optimizer.lr: 23 | #value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | 29 | # Variation on parameters for experiment 30 | model.ml_model.phi_hidden_dim: 31 | value: 64 32 | model.ml_model.rho_hidden_dim: 33 | value: 64 -------------------------------------------------------------------------------- /replication_scripts/thin_64_identity.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: thin_64_identity 3 | description: Multiple seeds given baseline single moment parameterization with hidden_dim = 64 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [thin_64_identity] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | # Learning rate 22 | optimizer.lr: 23 | value: 0.00075 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.ml_model.class_path: 29 | value: econ_layers.layers.DeepSetMoments 30 | model.ml_model.init_args.L: 31 | value: 1 # i.e., only the first moment 32 | model.ml_model.init_args.n_in: 33 | value: 1 34 | model.ml_model.init_args.n_out: 35 | value: 1 36 | model.ml_model.init_args.rho_layers: 37 | value: 4 38 | model.ml_model.init_args.rho_hidden_dim: 39 | value: 64 40 | model.ml_model.init_args.rho_hidden_bias: 41 | value: false 42 | model.ml_model.init_args.rho_last_bias: 43 | value: true 44 | model.ml_model.init_args.rho_activator.class_path: 45 | value: torch.nn.ReLU -------------------------------------------------------------------------------- /replication_scripts/very_shallow_1_layer_deep_moments.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: very_shallow_1_layer_deep_moments 3 | description: Multiple seeds given baseline deep moments parameterization with a one-layer rho 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [very_shallow_1_layer_deep_moments] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:03:00 # these tests shouldn't take more than 2-3 minutes 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | optimizer.lr: 23 | value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | model.ml_model.class_path: 29 | value: econ_layers.layers.DeepSetMoments 30 | model.ml_model.init_args.L: 31 | value: 4 32 | model.ml_model.init_args.n_in: 33 | value: 1 34 | model.ml_model.init_args.n_out: 35 | value: 1 36 | model.ml_model.init_args.rho_layers: 37 | value: 1 38 | model.ml_model.init_args.rho_hidden_dim: 39 | value: 128 40 | model.ml_model.init_args.rho_hidden_bias: 41 | value: true 42 | model.ml_model.init_args.rho_last_bias: 43 | value: true 44 | model.ml_model.init_args.rho_activator.class_path: 45 | value: torch.nn.ReLU
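# For reference, jsonargparse resolves the class_path/init_args pairs above into a
# direct constructor call; a hypothetical Python equivalent of this configuration
# would be roughly:
#   econ_layers.layers.DeepSetMoments(L=4, n_in=1, n_out=1, rho_layers=1,
#       rho_hidden_dim=128, rho_hidden_bias=True, rho_last_bias=True,
#       rho_activator=torch.nn.ReLU())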
-------------------------------------------------------------------------------- /replication_scripts/wide_256_deep_sets.yaml: -------------------------------------------------------------------------------- 1 | program: investment_euler.py 2 | name: wide_256_deep_sets 3 | description: Multiple seeds deep sets 256 wide for all layers 4 | method: grid 5 | parameters: 6 | trainer.logger.tags: 7 | value: [wide_256_deep_sets] 8 | seed: 9 | min: 1 10 | max: 100 # number of ensembles to solve 11 | trainer.logger.offline: 12 | value: false # log online for W&B optimization 13 | trainer.max_time: 14 | value: 00:00:10:00 15 | 16 | # Stopping criteria 17 | trainer.callbacks.monitor: 18 | value: val_loss 19 | trainer.callbacks.stopping_threshold: 20 | value: 1.0e-6 21 | #Learning rate 22 | #optimizer.lr: 23 | #value: 1.0e-4 24 | 25 | # Core parameters which may be different from baseline investment_euler_defaults.yaml 26 | model.N: 27 | value: 128 28 | 29 | # Variation on parameters for experiment 30 | model.ml_model.phi_hidden_dim: 31 | value: 256 32 | model.ml_model.rho_hidden_dim: 33 | value: 256 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | lightning>=2 2 | torch>=2 3 | matplotlib>=3.7 4 | black 5 | jsonargparse[all]>=4.20 6 | pandas 7 | econ-layers>=0.0.28 8 | scipy>=1.7.1 9 | quantecon>=0.6 10 | wandb>=0.14 --------------------------------------------------------------------------------
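Usage note: any of the replication sweeps above can also be launched by hand with the same wandb commands that the run_all scripts wrap. A minimal sketch (the choice of sweep YAML here is illustrative, and <sweep_id> stands for whatever ID wandb prints):

    # Install dependencies, then create one sweep and run an agent against it
    pip install -r requirements.txt
    wandb sweep --project symmetry_dynamic_programming --name baseline_deep_sets replication_scripts/baseline_deep_sets.yaml
    # Copy the sweep ID from the "wandb agent ..." line in the output, then:
    wandb agent <sweep_id>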