├── MANIFEST.in ├── docs ├── .DS_Store ├── source │ ├── .DS_Store │ ├── _static │ │ ├── cost1.png │ │ ├── tree.png │ │ └── damage_simulation.png │ ├── code │ │ ├── tree.py │ │ ├── storage_tree.py │ │ └── output_paper.py │ ├── api │ │ ├── bau.rst │ │ ├── cost.rst │ │ ├── tree.rst │ │ ├── damage.rst │ │ ├── forcing.rst │ │ ├── utility.rst │ │ ├── analysis.rst │ │ ├── optimization.rst │ │ ├── damage_simulation.rst │ │ ├── storage_tree.rst │ │ └── index.rst │ ├── examples │ │ ├── index.rst │ │ └── output_paper.rst │ ├── documentation │ │ ├── tree.rst │ │ ├── bau.rst │ │ ├── storage_tree.rst │ │ ├── utility.rst │ │ ├── cost.rst │ │ ├── optimization.rst │ │ ├── damage_simulation.rst │ │ └── damage.rst │ ├── installation.rst │ ├── index.rst │ ├── overview.rst │ └── conf.py └── Makefile ├── ezclimate ├── __init__.py ├── cost.py ├── bau.py ├── tools.py ├── forcing.py ├── tree.py ├── damage_simulation.py ├── storage_tree.py ├── damage.py ├── utility.py ├── analysis.py └── optimization.py ├── setup.py ├── LICENSE ├── .gitignore └── README.md /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include the license file 2 | include LICENSE.txt -------------------------------------------------------------------------------- /docs/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Litterman/EZClimate/HEAD/docs/.DS_Store -------------------------------------------------------------------------------- /docs/source/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Litterman/EZClimate/HEAD/docs/source/.DS_Store -------------------------------------------------------------------------------- /docs/source/_static/cost1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Litterman/EZClimate/HEAD/docs/source/_static/cost1.png 
-------------------------------------------------------------------------------- /docs/source/_static/tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Litterman/EZClimate/HEAD/docs/source/_static/tree.png -------------------------------------------------------------------------------- /docs/source/code/tree.py: -------------------------------------------------------------------------------- 1 | import dlw 2 | 3 | tree = dlw.tree.TreeModel(decision_times=[0, 15, 45, 85, 185, 285, 385]) 4 | -------------------------------------------------------------------------------- /docs/source/_static/damage_simulation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Litterman/EZClimate/HEAD/docs/source/_static/damage_simulation.png -------------------------------------------------------------------------------- /docs/source/api/bau.rst: -------------------------------------------------------------------------------- 1 | bau module 2 | ---------- 3 | 4 | .. automodule:: ezclimate.bau 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/cost.rst: -------------------------------------------------------------------------------- 1 | cost module 2 | ----------- 3 | 4 | .. automodule:: ezclimate.cost 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/tree.rst: -------------------------------------------------------------------------------- 1 | tree module 2 | ----------- 3 | 4 | .. 
automodule:: ezclimate.tree 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/damage.rst: -------------------------------------------------------------------------------- 1 | damage module 2 | ------------- 3 | 4 | .. automodule:: ezclimate.damage 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/forcing.rst: -------------------------------------------------------------------------------- 1 | forcing module 2 | -------------- 3 | 4 | .. automodule:: ezclimate.forcing 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/utility.rst: -------------------------------------------------------------------------------- 1 | utility module 2 | -------------- 3 | 4 | .. automodule:: ezclimate.utility 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/analysis.rst: -------------------------------------------------------------------------------- 1 | analysis module 2 | --------------- 3 | 4 | .. automodule:: ezclimate.analysis 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/examples/index.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | Examples using the ezclimate package. 5 | 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | output_paper 11 | -------------------------------------------------------------------------------- /docs/source/api/optimization.rst: -------------------------------------------------------------------------------- 1 | optimization module 2 | ------------------- 3 | 4 | .. 
automodule:: ezclimate.optimization 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/damage_simulation.rst: -------------------------------------------------------------------------------- 1 | damage_simulation module 2 | ------------------------ 3 | 4 | .. automodule:: ezclimate.damage_simulation 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/storage_tree.rst: -------------------------------------------------------------------------------- 1 | storage_tree module 2 | ------------------- 3 | 4 | .. automodule:: ezclimate.storage_tree 5 | :members: 6 | :undoc-members: 7 | :special-members: __getitem__ 8 | :show-inheritance: -------------------------------------------------------------------------------- /docs/source/api/index.rst: -------------------------------------------------------------------------------- 1 | Library Reference 2 | ================= 3 | 4 | Description of the functions, classes and modules contained within ezclimate. 5 | 6 | 7 | .. 
toctree:: 8 | :maxdepth: 3 9 | 10 | tree 11 | bau 12 | damage_simulation 13 | forcing 14 | damage 15 | cost 16 | storage_tree 17 | utility 18 | optimization 19 | analysis 20 | -------------------------------------------------------------------------------- /docs/source/code/storage_tree.py: -------------------------------------------------------------------------------- 1 | import ezclimate.storage_tree as st 2 | 3 | sst = st.SmallStorageTree(decision_times=[0, 15, 45, 85, 185, 285, 385]) 4 | 5 | sst.tree[385] 6 | sst[385] # BaseStorageClass defines its own __getitem__ 7 | 8 | bst = st.BigStorageTree(subinterval_len=5, decision_times=[0, 15, 45, 85, 185, 285, 385]) 9 | bst[380] # time period that is not a decision time 10 | bst[385] -------------------------------------------------------------------------------- /docs/source/documentation/tree.rst: -------------------------------------------------------------------------------- 1 | ==== 2 | Tree 3 | ==== 4 | 5 | The :class:`ezclimate.tree.TreeModel` provides the structure for a non-recombining tree but does not store the actual values for the nodes in the tree. The tree can therefore be stored in a 1D-array and nodes, periods and states can be reached using the methods in :class:`ezclimate.tree.TreeModel`. The last period will have no brachning, and hence the same number of nodes as the previous period. See :mod:`~ezclimate.tree` module for more details. 6 | 7 | .. image:: ../_static/tree.png 8 | :width: 600 px 9 | :align: center 10 | 11 | 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /docs/source/documentation/bau.rst: -------------------------------------------------------------------------------- 1 | === 2 | BAU 3 | === 4 | 5 | The :class:`ezclimate.bau.DLWBusinessAsUsual` provides an analysis framework of business as usual scenario. We assume constant consumption growth and GHG emissions that grow linearly over time without mitigation. 
For analysis, emission levels are given at certain decision time points. Emissions between those decision time points are calculated using linear interpolation.
_matplotlib: http://www.matplotlib.org/ 14 | 15 | 16 | Install EZ-Climate 17 | ------------------ 18 | 19 | We encourage you to use pip_ to install ezclimate on your system. 20 | :: 21 | 22 | pip install ezclimate 23 | 24 | If you wish to build from sources, download_ or clone_ the repository and use 25 | :: 26 | 27 | python setup.py install 28 | 29 | 30 | .. _download: https://pypi.python.org/pypi/ezclimate/ 31 | .. _clone: https://github.com/Litterman/EZClimate.git 32 | 33 | .. _pip: http://www.pip-installer.org/en/latest/ -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | try: 4 | from pypandoc import convert 5 | except ImportError: 6 | import codecs 7 | read_md = lambda f: codecs.open(f, 'r', 'utf-8').read() 8 | else: 9 | read_md = lambda f: convert(f, 'rst') 10 | 11 | setup(name='ezclimate', 12 | version='2.0.7', 13 | description='EZ-Climate model', 14 | long_description=read_md('README.md'), 15 | classifiers=[ 16 | 'License :: OSI Approved :: MIT License', 17 | 'Programming Language :: Python :: 3.7' 18 | ], 19 | keywords='EZ-climate, optimal carbon price, CO2 tax, social cost of carbon, SCC, social cost of carbon dioxide, SC-CO2', 20 | url='http://github.com/Litterman/EZClimate', 21 | author='Kent Daniel, Robert Litterman, Gernot Wagner', 22 | license='MIT', 23 | packages=find_packages(), 24 | install_requires=['numpy',], 25 | include_package_data=False, 26 | zip_safe=True 27 | ) 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Kent Daniel, Robert Litterman, Gernot Wagner 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the 
"Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | EZ-Climate documentation 2 | ======================== 3 | 4 | This project is based on the working paper_ 'Applying Asset Pricing Theory to Calibrate the Price of Climate Risk' by Kent D. Daniel, Robert B. Litterman, and Gernot Wagner which aims at providing an analyzing framework for pricing greenhouse gas emissions. 5 | 6 | * **First steps:** 7 | 8 | * :doc:`Overview ` 9 | * :doc:`Installation ` 10 | 11 | * **Documentation:** 12 | 13 | * :doc:`Tree ` 14 | * :doc:`BAU ` 15 | * :doc:`Damage Simulation ` 16 | * :doc:`Damage ` 17 | * :doc:`Cost ` 18 | * :doc:`Storage Tree ` 19 | * :doc:`Utility ` 20 | * :doc:`Optimization ` 21 | 22 | * :doc:`examples/index` 23 | * :doc:`api/index` 24 | 25 | 26 | 27 | .. 
toctree:: 28 | :hidden: 29 | 30 | overview 31 | installation 32 | documentation/tree 33 | documentation/bau 34 | documentation/damage_simulation 35 | documentation/damage 36 | documentation/cost 37 | documentation/storage_tree 38 | documentation/utility 39 | documentation/optimization 40 | examples/index 41 | api/index 42 | 43 | .. _paper: http://www.nber.org/papers/w22795 -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # Sphinx documentation 65 | docs/_build/ 66 | 67 | # PyBuilder 68 | target/ 69 | 70 | # Jupyter Notebook 71 | .ipynb_checkpoints 72 | 73 | # pyenv 74 | .python-version 75 | 76 | # celery beat schedule file 77 | celerybeat-schedule 78 | 79 | # SageMath parsed files 80 | *.sage.py 81 | 82 | # dotenv 83 | .env 84 | 85 | # virtualenv 86 | .venv 87 | venv/ 88 | ENV/ 89 | 90 | # Spyder project settings 91 | .spyderproject 92 | .spyproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | 97 | # mkdocs documentation 98 | /site 99 | 100 | # mypy 101 | .mypy_cache/ -------------------------------------------------------------------------------- /docs/source/examples/output_paper.rst: -------------------------------------------------------------------------------- 1 | ================================ 2 | Example of output from DLW-paper 3 | ================================ 4 | 5 | Below is an example from the DLW-paper, referred to as the base case. 6 | 7 | First, we need to create an object of :class:`ezclimate.tree.TreeModel` with desicion times at 0, 15, 45, 85, 185, and 285 years from now. 8 | 9 | .. literalinclude:: ../code/output_paper.py 10 | :lines: 1-3 11 | 12 | 13 | Next we create an :class:`ezclimate.bau.DLWBusinessAsUsual` object and set up the business as usual emission using the tree structure given by :attr:`t`. 14 | 15 | .. 
literalinclude:: ../code/output_paper.py 16 | :lines: 5-8 17 | 18 | 19 | We move on to create an :class:`ezclimate.cost.DLWCost` object using the base case parameters. 20 | 21 | .. literalinclude:: ../code/output_paper.py 22 | :lines: 10-13 23 | 24 | After this we are ready to create an :class:`ezclimate.damage.DLWDamage` object and simulate damages using the :func:`damage_simulation` method, again using the base case parameters. 25 | 26 | .. literalinclude:: ../code/output_paper.py 27 | :lines: 15-19 28 | 29 | 30 | We are now ready to initiate the :class:`ezclimate.utility.EZUtility` object using the above created objects. 31 | 32 | .. literalinclude:: ../code/output_paper.py 33 | :lines: 21-23 34 | 35 | 36 | Next step is to find the optimial mitigation plan using the optimization algorithms found in :mod:`ezclimate.optimization`, and print the Social Cost of Carbon (SCC) given by this mitigation plan. 37 | 38 | 39 | .. literalinclude:: ../code/output_paper.py 40 | :lines: 26-37 41 | 42 | 43 | **Putting it all together** 44 | 45 | 46 | .. literalinclude:: ../code/output_paper.py 47 | :lines: 40-76 48 | -------------------------------------------------------------------------------- /docs/source/documentation/storage_tree.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Storage Tree 3 | ============ 4 | 5 | Values generated during utility calculation, such as damage, cost etc., are stored in the two derived classes of the abstract class :class:`ezclimate.storage_tree.BaseStorageTree` namely, :class:`ezclimate.storage_tree.SmallStorageTree`, and :class:`ezclimate.storage_tree.BigStorageTree`. The 'small' storage tree stores values for every time period where decisions about mitigation are made, and the 'big' storage tree stores values for every subinterval period too. The base class defines the method for initializing the dictionary :attr:`tree` were the values are stored. 
The keys of the dictionary are the time periods in the tree where values are stored. It also defines methods for getting and setting, saving, and information about periods. Moreover, it defines an abstract method :func:`get_next_period_array` that needs to be initialized in derived classes. 6 | 7 | Small storage tree 8 | ------------------ 9 | In the :class:`ezclimate.storage_tree.SmallStorageTree` there's no storage in nodes between periods in :attr:`decision_times` - that needs to be defined when initilizing and object of the class. For example, 10 | 11 | .. literalinclude:: ../code/storage_tree.py 12 | :lines: 1-3 13 | 14 | Hence the :obj:`sst` will have 7 keys in its :attr:`tree` dictionary. To access elements in the :attr:`tree` dictionary, the following is equivalent: 15 | 16 | .. literalinclude:: ../code/storage_tree.py 17 | :lines: 5-6 18 | 19 | 20 | Big storage tree 21 | ---------------- 22 | In the :class:`ezclimate.storage_tree.BigStorageTree` there's storage in nodes between periods in :attr:`decision_times`. Besides defining the :attr:`decision_times` when initilizing an object of the class, the user also needs to define the length of the subinterval. 23 | 24 | .. literalinclude:: ../code/storage_tree.py 25 | :lines: 8-10 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # EZ-Climate 2 | 3 | EZ-Climate is a model for pricing carbon dioxide (CO2) emission. It explores the implications of these richer preference specifications for CO2 price paths. We develop the EZ-Climate model, a simple discrete-time model in which the representative agent has an Epstein-Zin preference specification, and in which uncertainty about the effect of CO2 emissions on global temperature and on eventual damages is gradually resolved over time. 
In the EZ-Climate model the CO2 price is equal to the price of one ton of CO2 emitted at any given point in time that maximizes the utility of the representative agent at that time. We embed a number of features including tail risk, the potential for technological change, and backstop technologies. In contrast to most modeled carbon price paths, the EZ-Climate model suggests a high optimal carbon price today that is expected to decline over time. It also points to the importance of backstop technologies and to potentially very large. 4 | 5 | ## Downloads 6 | 7 | You can find the most recent releases at: https://pypi.python.org/pypi/ezclimate/. 8 | 9 | ## Documentation 10 | 11 | See [Declining CO2 price paths](https://gwagner.com/ezclimate/) (PNAS, 1 October 2019) for our joint PNAS paper employing this code. 12 | 13 | ## Installation 14 | 15 | We encourage you to use pip to install ezclimate on your system. 16 | 17 | ```bash 18 | pip install ezclimate 19 | ``` 20 | 21 | If you wish to build from sources, download or clone the repository. 22 | 23 | ```bash 24 | python setup.py install 25 | ``` 26 | 27 | ## Requirements 28 | 29 | EZ-Climate is compatible with Python 2 and 3. [Numpy](http://www.numpy.org/) is required, and we recommend [matplotlib](http://www.matplotlib.org/) for visualization of results. 30 | 31 | ## Authors 32 | 33 | * [Robert Litterman](https://scholar.google.com/citations?user=6Hpxnm4AAAAJ&hl=en) 34 | * [Kent Daniel](http://www.kentdaniel.net/) 35 | * [Gernot Wagner](https://www.gwagner.com/) 36 | 37 | -------------------------------------------------------------------------------- /docs/source/documentation/utility.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Utility 3 | ======= 4 | 5 | In :class:`ezclimate.utility.EZUtility`, we calculate the utility under the Epstein-Zin framwork. Functions of calculating mariginal utiltiy are also provided for analysis purpose. 
6 | 7 | An agent maximizes lifetime utility at each time and for each state of nature by choosing the optimal path of mitigation dependent on Earth’s fragility. Since all uncertainty has been resolved at the final period, we calculate the utility from back forward. 8 | 9 | ------------------- 10 | Utility Calculation 11 | ------------------- 12 | Firstly, we calculate the the utility in the final period, which, in our base case, is the period starting in 2400, the agent receives the utility from all consumption from time T forward. The resulting final-period utility is: 13 | 14 | .. math:: 15 | 16 | U_T = \left[\frac{1-\beta}{1-\beta(1+r)^{\rho}}\right]^{\frac{1}{\rho}}C_T 17 | 18 | In this specification, :math:`(1-\beta)/ \beta` is the pure rate of time preference. The parameter :math:`\rho` measures the agent’s willingness to substitute consumption across time. :math:`C_T` measures the consumption at time T. 19 | 20 | Then we calculate the utilties of all periods from back forward given mitigation path. 21 | 22 | .. math:: 23 | 24 | U_t = \left[(1-\beta){c_t}^{\rho} + \beta \left[\mu_t(\tilde U_{t+1}) \right]^{\rho} \right]^{\frac{1}{\rho}} 25 | 26 | 27 | where :math:`\mu_t(\tilde U_{t+1})` is the certainty-equivalent of future lifetime utility, based on the agent’s information at time t, and is given by: 28 | 29 | .. math:: 30 | 31 | \mu_t(\tilde U_{t+1}) = \left( E_t\left[{U_{t+1}}^{\alpha} \right] \right)^{\frac{1}{\alpha}} 32 | 33 | :math:`\alpha` captures the agent’s willingness to substitute consumption across (uncertain) future consumption streams. The higher :math:`\alpha` is, the more willing the agent is to substitute consumption across states of nature at a given point in time. 34 | 35 | ------------- 36 | Penalty 37 | ------------- 38 | 39 | When the GHG levels are below 280, penalties cost are imposed. The penalties in previous nodes in the path leading to the current node is summed and added to current period's penalty, given by 40 | 41 | .. 
math:: 42 | 43 | \max \left( 0, \min \left( \frac{280-GHG\ level}{GHG\ level}, max\ penalty \right) \right) 44 | 45 | -------------------------------------------------------------------------------- /docs/source/documentation/cost.rst: -------------------------------------------------------------------------------- 1 | ==== 2 | Cost 3 | ==== 4 | 5 | Calibrating the economic cost side of EZ-Climate requires specifying a relationship between the marginal cost of emissions reductions or per-ton tax rate, :math:`\tau`, the resulting flow of emissions in gigatonnes of :math:`CO_2`-equivalent emissions per year (Gt :math:`CO_2`), :math:`g(\tau)`, and the fraction of emissions reduced, :math:`x(\tau)`. 6 | 7 | We calibrate :math:`\tau`, :math:`g(\tau)`, and :math:`x(\tau)` in EZ-Climate based on McKinsey’s global MACC effort (McKinsey_), with one crucial modification; we assume no mitigation (:math:`x(\tau)=0`) at :math:`\tau \le 0` i.e. no net-negative or zero-cost mitigation. Table 1 below shows the resulting calibration. 8 | 9 | .. image:: ../_static/cost1.png 10 | :width: 600 px 11 | :align: center 12 | 13 | Fitting McKinsey’s modified point estimates (in $US) from Table 1 to a power function for :math:`x(\tau)` yields: 14 | 15 | .. math:: 16 | 17 | x(\tau) = 0.0923 \tau^{0.414}. 18 | 19 | 20 | The corresponding inverse function, solving for the appropriate tax rate to achieve :math:`x` is: 21 | 22 | .. math:: 23 | 24 | \tau(x) = 314.32 x^{2.413}. 25 | 26 | 27 | The above equation shows the marginal cost of abatement. Ultimately, we are interested in the total cost to society, :math:`\kappa(x)`, for each particular fractional-mitigation :math:`x`. We calculate using the envelope theorem and get the following equation: 28 | 29 | .. 
math:: 30 | 31 | \kappa(x) = \left(\frac{92.08 g_0}{c_0}\right) x^{3.413}, 32 | 33 | 34 | where :math:`g_0=52` Gt :math:`CO_2` represents the current level of global annual emissions, and :math:`c_0=$31` trilling/year in current (2015) global consumption. The equation for :math:`\kappa(x)` expresses the societal cost of a given level of mitigation as a percentage of consumption. We assume that, absent technological change, the function is time invariant. 35 | 36 | 37 | We also allow for backstop technology and technological changes. For discussion and derivation of these parameters, see paper_. At the end, we have the following cost function: 38 | 39 | .. math:: 40 | 41 | \kappa_t(x) = \kappa(x) \left( 1-\phi_0 - \phi_1 X_t \right) 42 | 43 | 44 | where :math:`\phi_0` is a constant component, and :math:`\phi_1 X_t` a component linked to mitigation efforts to date (where :math:`X_t` is the average mitigation up to time :math:`t`). See :mod:`ezclimate.cost` for more details. 45 | 46 | .. _McKinsey: http://www.mckinsey.com/business-functions/sustainability-and-resource-productivity/our-insights/pathways-to-a-low-carbon-economy 47 | .. _paper: http://www.nber.org/papers/w22795 -------------------------------------------------------------------------------- /docs/source/documentation/optimization.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Optimization 3 | ============ 4 | 5 | The purpose of the optimization is to find the mitigation value in every node of the decision tree that maximizes the current utility 6 | 7 | .. math:: 8 | 9 | x^*= \operatorname*{arg\,max}_x U(x) 10 | 11 | Our approach to solving this problem is to use the genetic algorithm (GA) combine with a gradient search (GS) method. The GA is used to search the state space globally and to find good initial points for the GS, which applies a gradient descent algorithm to multiple initial points. 
12 | 13 | Genetic Algorithm (GA) 14 | ---------------------- 15 | 16 | The GA is an evolutionary algorithm, inspired by the evolution of species in nature. The evolution process starts from a population of vectors with uniformly distributed [0, :attr:`bound`] random elements. For each generation, the evolution steps are: 17 | 18 | 1. Select the individuals to perform cross-over and mutation. 19 | 2. Cross over among the selected candidate. 20 | 3. Mutate result as offspring. 21 | 4. Combine the result of offspring and parent together. And selected the top 80 percent of original population amount. 22 | 5. Random Generate 20 percent of original population amount new individuals and combine the above new population. 23 | 24 | The mutation and cross-over methods are choosen to fit the optimization problem of the EZ-Climate model. The GA class can be found at :mod:`ezclimate.optimization.GeneticAlgorithm`. 25 | 26 | Gradient Search (GS) 27 | -------------------- 28 | 29 | The GS uses the gradient descent algorithm and the numerical gradient to find the optimal mitigation points. Moveover, it uses the Adaptive Moment Estimation (Adam_) learning rate together with an accelarator scaler to update the points. Adam is a method that computes adaptive learning rates for each parameter. In addition to storing an exponentially decaying average of past squared gradients, Adam also keeps an exponentially decaying average of past gradients. The accelerator is used to amplify low gradient values of mitigation values in nodes in the end of the tree, and thus reduce computation time. The :func:`run` method takes :attr:`initial_point_list` and :attr:`topk` as arguments, runs the gradient descent optimization of the :attr:`topk` first elements of the :attr:`initial_point_list`, and picks the resulting point with the highest utility. The GS class can be found at :mod:`ezclimate.optimization.GradientSearch`. 
30 | 31 | 32 | GA and GS together 33 | ------------------ 34 | 35 | An example of how to use the :class:`GeneticAlgorithm` and :class:`GradientSearch` can be found `here <../examples/output_paper.html>`_. The :func:`GradientSearch.run` takes the last generation population of the :func:`GeneticAlgorithm.run` as the :attr:`initial_point_list` argument and performs the gradient descent optimization with these as intial guess. 36 | 37 | 38 | .. _Adam: http://sebastianruder.com/optimizing-gradient-descent/index.html#fnref:15 -------------------------------------------------------------------------------- /docs/source/overview.rst: -------------------------------------------------------------------------------- 1 | Overview 2 | ======== 3 | Pricing greenhouse gas emissions is a risk management problem. It involves making trade-offs between consumption today and unknown damages in the (distant) future. The optimal carbon dioxide (:math:`CO_2`) price, thus, is based on society’ s willingness to substitute consumption across time and across uncertain states of nature. Standard constant relative risk aversion preference specifications, meanwhile, conflate the two. Moreover, they are inconsistent with observed asset valuations, based on a large body of work in macroeconomics and finance. This literature has developed a richer set of preferences that are more consistent with asset price behavior and separate risk across time and across states of nature. 4 | 5 | In this model, we explore the implications of these richer preference specifications for the optimal :math:`CO_2` price. We develop the EZ-Climate model, a simple discrete-time model in which the representative agent has an Epstein-Zin preference specification, and in which uncertainty about the effect of :math:`CO_2` emissions on global temperature and on eventual damages is gradually resolved over time. 
In the EZ-Climate model the optimal price is equal to the price of one ton of :math:`CO_2` emitted at any given point in time that maximizes the utility of the representative agent at that time. We embed a number of features including tail risk, the potential for technological change, and backstop technologies. In contrast to most modeled carbon price paths, the EZ-Climate model suggests a high optimal carbon price today that is expected to decline over time. It also points to the importance of backstop technologies and to potentially very large deadweight costs of delay. 6 | 7 | Our representative agent solves the optimization problem of trading off the (known) costs of climate mitigation against the uncertain future benefits associated with mitigation. She maximizes lifetime utility at each time and for each state of nature by choosing the optimal path of mitigation, :math:`x_t^*(\theta_t)`, dependent on Earth’s fragility, :math:`\theta_t`. 8 | 9 | Mitigating emissions is costly. Hence, assuming no government action to price carbon, atomistic agents do zero mitigation. However, as GHGs build up in the atmosphere, temperatures rise. As a result, a fraction of the baseline consumption is lost to damages. The damages as a function of mitigation are not known ex-ante. They are, in turn, a function of :math:`\theta_t`. Each period of the model, agents learn more about the level of fragility, but they only know the actual fragility in the final two periods of the model. 10 | 11 | These assumptions simplify reality in two important ways: As :math:`\theta_t` is the only unknown in EZ-Climate model, we do not allow for interactions of shocks to fragility with those to other state variables (e.g., productivity). The second simplification is the assumption of full knowledge of :math:`\theta` in period :math:`T-1`. 12 | 13 | See paper_ for futher detail. 14 | 15 | .. 
_paper: http://www.nber.org/papers/w22795 -------------------------------------------------------------------------------- /docs/source/code/output_paper.py: -------------------------------------------------------------------------------- 1 | from ezclimate.tree import TreeModel 2 | from ezclimate.bau import DLWBusinessAsUsual 3 | from ezclimate.cost import DLWCost 4 | from ezclimate.damage import DLWDamage 5 | from ezclimate.utility import EZUtility 6 | from ezclimate.optimization import GeneticAlgorithm, GradientSearch 7 | import numpy as np 8 | 9 | # set up tree 10 | t = TreeModel(decision_times=[0, 15, 45, 85, 185, 285, 385]) 11 | 12 | # set up business-as-usual (bau) model 13 | bau_default_model = DLWBusinessAsUsual() 14 | bau_default_model.bau_emissions_setup(tree=t) 15 | 16 | # set up cost function 17 | c = DLWCost(tree=t, emit_at_0=bau_default_model.emit_level[0], g=92.08, a=3.413, join_price=2000.0, 18 | max_price=2500.0, tech_const=1.5, tech_scale=0.0, cons_at_0=30460.0) 19 | 20 | # set up damage function 21 | df = DLWDamage(tree=t, bau=bau_default_model, cons_growth=0.015, ghg_levels=[450, 650, 1000], subinterval_len=5) 22 | df.damage_simulation(draws=4000000, peak_temp=6.0, disaster_tail=18.0, tip_on=True, 23 | temp_map=1, temp_dist_params=None, maxh=100.0) 24 | 25 | # set up utility function 26 | u = EZUtility(tree=t, damage=df, cost=c, period_len=5.0, eis=0.9, ra=7.0, time_pref=0.005) 27 | 28 | 29 | ga_model = GeneticAlgorithm(pop_amount=150, num_generations=75, cx_prob=0.8, mut_prob=0.5, 30 | bound=2.0, num_feature=63, utility=u, print_progress=True) 31 | gs_model = GradientSearch(var_nums=63, utility=u, accuracy=1e-8, 32 | iterations=20, print_progress=True) 33 | final_pop, fitness = ga_model.run() 34 | sort_pop = final_pop[np.argsort(fitness)][::-1] 35 | m_opt, u_opt = gs_model.run(initial_point_list=sort_pop, topk=1) 36 | 37 | print("SCC: ", c.price(0, m_opt[0], 0)) 38 | 39 | def base_case(): 40 | t = TreeModel(decision_times=[0, 15, 45, 85, 185, 
285, 385]) 41 | 42 | bau_default_model = DLWBusinessAsUsual() 43 | bau_default_model.bau_emissions_setup(tree=t) 44 | 45 | c = DLWCost(t, bau_default_model.emit_level[0], g=92.08, a=3.413, join_price=2000.0, max_price=2500.0, 46 | tech_const=1.5, tech_scale=0.0, cons_at_0=30460.0) 47 | 48 | df = DLWDamage(tree=t, bau=bau_default_model, cons_growth=0.015, ghg_levels=[450, 650, 1000], subinterval_len=5) 49 | df.damage_simulation(draws=4000000, peak_temp=6.0, disaster_tail=18.0, tip_on=True, 50 | temp_map=1, temp_dist_params=None, maxh=100.0) 51 | 52 | u = EZUtility(tree=t, damage=df, cost=c, period_len=5.0, eis=0.9, ra=7.0, time_pref=0.005) 53 | 54 | ga_model = GeneticAlgorithm(pop_amount=150, num_generations=75, cx_prob=0.8, mut_prob=0.5, 55 | bound=1.5, num_feature=63, utility=u, print_progress=True) 56 | gs_model = GradientSearch(var_nums=63, utility=u, accuracy=1e-8, 57 | iterations=200, print_progress=True) 58 | 59 | final_pop, fitness = ga_model.run() 60 | sort_pop = final_pop[np.argsort(fitness)][::-1] 61 | m_opt, u_opt = gs_model.run(initial_point_list=sort_pop, topk=1) 62 | 63 | print("SCC: ", c.price(0, m_opt[0], 0)) 64 | 65 | if __name__ == "__main__": 66 | base_case() 67 | -------------------------------------------------------------------------------- /docs/source/documentation/damage_simulation.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | Damage Simulation 3 | ================= 4 | 5 | To simulate the distribution of damages, :math:`D_t`, the class :class:`ezclimate.damage_simulation.DamageSimulation` is used. The following picture demonstrate the work flow of the simulation process. 6 | 7 | .. image:: ../_static/damage_simulation.png 8 | :width: 900 px 9 | :align: center 10 | 11 | ------------------ 12 | Temperature Change 13 | ------------------ 14 | 15 | Firstly, temperature change over the next 100 years is simulated for given maximum level of GHG. 
We use assumptions akin to Pindyck_ to fit a displaced gamma distribution around final GHG concentrations, while setting levels of GHG 100 years in the future equal to equilibrium levels. Parameters for these distributions can be found in the paper_. The time path for the temperature change at time :math:`t` (in years) is interpolated using the function: 16 | 17 | .. _Pindyck: http://web.mit.edu/rpindyck/www/Papers/UncertainOutcomesJEEM2012.pdf 18 | .. _paper: http://www.nber.org/papers/w22795 19 | 20 | .. math:: 21 | 22 | \Delta T(t) = 2 \Delta T_{100}(1 - 0.5^{\frac{t}{100}}). 23 | 24 | ------------------ 25 | Map to Damage 26 | ------------------ 27 | 28 | The next step is to translate average global surface warming into global mean economic losses via the damage function :math:`D_t`. There are two components to :math:`D_t`: a non-catastrophic and a catastrophic one. 29 | 30 | The non-catastrophic component comprises a loss function of the form: 31 | 32 | .. math:: 33 | 34 | L(\Delta T(t)) = e^{-13.97 \gamma \Delta T(t)^2} 35 | 36 | where :math:`\gamma` is drawn from a displaced gamma distribution. Based on non-catastrophic damages, consumption at any time :math:`t` is reduced as follows: 37 | 38 | .. math:: 39 | 40 | CD_t = \bar c_t L(\Delta T(t)). 41 | 42 | Then we need to calibrate the consumption reduction by adding a tipping point which is related to catastrophic damage. Based on our assumption, there is a probability that a tipping point will be hit in each period and for each state, given :math:`\Delta T(t)` and :math:`peakT`, above which we can expect to have hit a climatic tipping point. 43 | 44 | .. math:: 45 | 46 | Prob(TP) = {1 - \left(1 - \left(\frac{\Delta T(t)}{max(\Delta T(t), peakT)}\right)^2\right)^{\frac{period}{30}}}. 47 | 48 | Conditional on hitting a tipping point at some time, the level of consumption reduction for each period :math:`t` is then at a level of: 49 | 50 | ..
math:: 51 | 52 | CDTP_t = CD_t e^{-TP\_damage} = \bar c_t L(\Delta T(t)) e^{-TP\_damage}. 53 | 54 | ---------------------- 55 | Calculate Damage Table 56 | ---------------------- 57 | 58 | We order the scenarios based on :math:`D_T`, the damage to consumption in the final period. We then choose states of nature with specified probabilities to represent different percentiles of this distribution. For example, if the first state of nature is the worst 1% of outcomes, then we assume the damage coefficient at time :math:`t` for the given level of mitigation is the average damage at time :math:`t` for the worst 1% of values. 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /docs/source/documentation/damage.rst: -------------------------------------------------------------------------------- 1 | ====== 2 | Damage 3 | ====== 4 | 5 | The mapping from mitigation policy, :math:`X_t`, to damages over time, :math:`D_t`, goes via radiative forcing, which determines the excess energy created by GHGs in the atmosphere. The damage distribution associated with a given level of radiative forcing is interpolated, or extrapolated, relative to the radiative forcing of damage distributions estimated from three scenarios. The first is based on the IEA’s (2013) reference ‘New Policies Scenario’ and leads to eventual atmospheric :math:`CO_2` levels of around 1000 ppm. The second assumes constant mitigation leading to eventual levels of 650 ppm, equivalent to reducing emissions by almost 60% relative to the 1000 ppm scenario. The third scenario assumes a constant mitigation of over 90%, leading to eventual :math:`CO_2` concentrations of 450 ppm. 6 | 7 | For each of the three maximum GHG concentration levels, 450, 650, and 1000 ppm, the distribution of :math:`D_t` is calculated through Monte-Carlo simulations, using :mod:`ezclimate.damage_simulation`.
The simulations are used to calculate damages in each period for any particular state of nature :math:`\theta_t` and any chosen time path for mitigation actions, :math:`X_t`. We do this by first calculating the radiative forcing associated with each simulation at the end of each period, and then interpolating the damage smoothly between the three different simulations with respect to their levels of radiative forcing. Functional forms for both GHG levels and climate forcing as a function of GHG emissions are fitted to the Representative Concentration Pathway (RCP) scenarios adopted by the IPCC for its fifth assessment report (IPCC_ 2013). In the IPCC report emissions, GHG concentrations, and radiative forcing are given for each of four RCP scenarios. The radiative forcing is assumed to be proportional to the integral over time of an excess GHG level raised to a power. The carbon absorption itself is similarly fit to the RCP scenarios, and is assumed to be proportional to the difference between the GHG level in the atmosphere and the cumulative carbon absorption up to that point in time, raised to a power. 8 | 9 | Our task now is to calculate an interpolated damage function using our three simulations where we have damage coefficients (for a given state and period) to find a smooth function that gives damages for any particular level of radiative forcing up to each point in time. To do so, we assume a linear interpolation of damages between the 650 and 1000 ppm scenarios, and a quadratic interpolation between 450 and 650 ppm. In addition, we impose a smooth pasting condition at 650 ppm, having the level and derivative of the interpolation below 650 ppm match the level and slope of the line above. 10 | 11 | Below 450 ppm, we assume damages exponentially decay toward zero. 
Mathematically, we let :math:`S = \frac{d \cdot p \cdot ln(0.5)}{l}`, where :math:`d` is the derivative of the quadratic interpolation function at 450 ppm, :math:`p=0.91667` is the average mitigation in the 450 ppm simulation, and :math:`l` is the level of damages. Radiative forcing at any point below 450 ppm then is :math:`x` percent below that of the 450 ppm simulation, with :math:`x = (R-r) / R`, where :math:`R` is the radiative forcing in the 450 ppm simulation and :math:`r` is the radiative forcing given the mitigation policy. Letting :math:`\sigma = 60`, the extension of the damage function for :math:`x > 0` is defined as :math:`Damage(x) = l \cdot 0.5^{S \cdot x} e^{-(x \cdot p)^2 / \sigma}`, which has the desired properties. 12 | 13 | The climate sensitivity, summarized by state of nature :math:`\theta_t` is not known prior to the final period (:math:`T=t`). Rather, what the representative agent knows is the distribution of possible final states, :math:`\theta_T`. We specify that the damage in period :math:`t`, given average mitigation of :math:`X_t` up to time :math:`t`, is the probability weighted average of the interpolated damage function over all final states of nature reachable from that node. Specifically, the damage function at time :math:`t`, for the node indexed by :math:`\theta_t` is assumed to be: 14 | 15 | .. math:: 16 | 17 | D_t(X_t, \theta_t) = \sum_{\theta_T} Pr(\theta_T \vert \theta_t) \cdot D_t(X_t, \theta_T) 18 | 19 | .. 
_IPCC: http://www.ipcc.ch/report/ar5/wg1/ -------------------------------------------------------------------------------- /ezclimate/cost.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from abc import ABCMeta, abstractmethod 3 | from ezclimate.storage_tree import BigStorageTree 4 | 5 | class Cost(object, metaclass=ABCMeta): 6 | """Abstract Cost class for the EZ-Climate model.""" 7 | 8 | @abstractmethod 9 | def cost(self): 10 | pass 11 | 12 | @abstractmethod 13 | def price(self): 14 | pass 15 | 16 | 17 | class DLWCost(Cost): 18 | """Class to evaluate the cost curve for the EZ-Climate model. 19 | 20 | Parameters 21 | ---------- 22 | tree : `TreeModel` object 23 | tree structure used 24 | emit_at_0 : float 25 | initial GHG emission level 26 | g : float 27 | initial scale of the cost function 28 | a : float 29 | curvature of the cost function 30 | join_price : float 31 | price at which the cost curve is extended 32 | max_price : float 33 | price at which carbon dioxide can be removed from atmosphere in unlimited scale 34 | tech_const : float 35 | determines the degree of exogenous technological improvement over time. A number 36 | of 1.0 implies 1 percent per yer lower cost 37 | tech_scale : float 38 | determines the sensitivity of technological change to previous mitigation 39 | cons_at_0 : float 40 | initial consumption. Default $30460bn based on US 2010 values. 
41 | 42 | Attributes 43 | ---------- 44 | cbs_level : float 45 | constant 46 | cbs_deriv : float 47 | constant 48 | cbs_b : float 49 | constant 50 | cbs_k : float 51 | constant 52 | cons_per_ton : float 53 | constant 54 | 55 | """ 56 | 57 | def __init__(self, tree, emit_at_0, g, a, join_price, max_price, 58 | tech_const, tech_scale, cons_at_0): 59 | self.tree = tree 60 | self.g = g 61 | self.a = a 62 | self.max_price = max_price 63 | self.tech_const = tech_const 64 | self.tech_scale = tech_scale 65 | self.cbs_level = (join_price / (g * a))**(1.0 / (a - 1.0)) 66 | self.cbs_deriv = self.cbs_level / (join_price * (a - 1.0)) 67 | self.cbs_b = self.cbs_deriv * (max_price - join_price) / self.cbs_level 68 | self.cbs_k = self.cbs_level * (max_price - join_price)**self.cbs_b 69 | self.cons_per_ton = cons_at_0 / emit_at_0 70 | 71 | def cost(self, period, mitigation, ave_mitigation): 72 | """Calculates the mitigation cost for the period. For details about the cost function 73 | see DLW-paper. 74 | 75 | Parameters 76 | ---------- 77 | period : int 78 | period in tree for which mitigation cost is calculated 79 | mitigation : ndarray 80 | current mitigation values for period 81 | ave_mitigation : ndarray 82 | average mitigation up to this period for all nodes in the period 83 | 84 | Returns 85 | ------- 86 | ndarray : 87 | cost 88 | 89 | """ 90 | if mitigation.min() < 0.0: 91 | m0 = np.maximum(mitigation,0.) 
92 | else: 93 | m0 = mitigation 94 | 95 | years = self.tree.decision_times[period] 96 | tech_term = (1.0 - ((self.tech_const + self.tech_scale*ave_mitigation) / 100.0))**years 97 | cbs = self.g * (m0**self.a) 98 | bool_arr = (m0 <= self.cbs_level).astype(int) 99 | if np.all(bool_arr): 100 | c = (cbs * tech_term) / self.cons_per_ton 101 | else: 102 | ixhigh = np.where(m0 > self.cbs_level) 103 | mbig=m0[ixhigh] 104 | ctmp=cbs 105 | extension = ((mbig-self.cbs_level) * self.max_price 106 | - (self.cbs_b/(self.cbs_b-1.0)) *mbig*(self.cbs_k/mbig)**(1.0/self.cbs_b) 107 | + self.cbs_b*self.cbs_level * (self.cbs_k/self.cbs_level)**(1.0/self.cbs_b) / (self.cbs_b-1.0)) 108 | ctmp[ixhigh] = (self.g * self.cbs_level**self.a) + extension 109 | c = ctmp * tech_term / self.cons_per_ton 110 | return c 111 | 112 | def price(self, years, mitigation, ave_mitigation): 113 | """Inverse of the cost function. Gives emissions price for any given 114 | degree of mitigation, average_mitigation, and horizon. 115 | 116 | Parameters 117 | ---------- 118 | years : int y 119 | years of technological change so far 120 | mitigation : float 121 | mitigation value in node 122 | ave_mitigation : float 123 | average mitigation up to this period 124 | 125 | Returns 126 | ------- 127 | float 128 | the price. 129 | 130 | """ 131 | tech_term = (1.0 - ((self.tech_const + self.tech_scale*ave_mitigation) / 100))**years 132 | if mitigation < self.cbs_level: 133 | return self.g * self.a * (mitigation**(self.a-1.0)) * tech_term 134 | else: 135 | return (self.max_price - (self.cbs_k/mitigation)**(1.0/self.cbs_b)) * tech_term 136 | -------------------------------------------------------------------------------- /ezclimate/bau.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from abc import ABCMeta, abstractmethod 3 | 4 | class BusinessAsUsual(object, metaclass=ABCMeta): 5 | """Abstract BAU class for the EZ-Climate model. 
6 | 7 | Parameters 8 | ---------- 9 | ghg_start : float 10 | today's GHG-level 11 | ghg_end : float 12 | GHG-level in the last period 13 | 14 | Attributes 15 | ---------- 16 | ghg_start : float 17 | today's GHG-level 18 | ghg_end : float 19 | GHG-level in the last period 20 | emission_by_decisions : ndarray 21 | emissions at decision time periods?? 22 | emission_per_period : ndarray 23 | total emission at decision time period?? 24 | emission_to_ghg : ndarray 25 | GHG levels in decision time period?? 26 | emission_to_bau : float 27 | constant for converting GHG to emission?? 28 | 29 | """ 30 | def __init__(self, ghg_start, ghg_end): 31 | self.ghg_start = ghg_start 32 | self.ghg_end = ghg_end 33 | self.emission_by_decisions = None 34 | self.emission_per_period = None 35 | self.emission_to_ghg = None 36 | self.emission_to_bau = None 37 | self.bau_path = None 38 | 39 | @abstractmethod 40 | def emission_by_time(self): 41 | pass 42 | 43 | 44 | class DLWBusinessAsUsual(BusinessAsUsual): 45 | """Business-as-usual scenario of emissions. Emissions growth is assumed to slow down 46 | exogenously - these assumptions represent an attempt to model emissions growth in a 47 | business-as-usual scenario that is in the absence of incentives. 48 | 49 | Parameters 50 | ---------- 51 | ghg_start : float 52 | today's GHG-level 53 | ghg_end : float 54 | GHG-level in the last period 55 | emit_time : ndarray or list 56 | time, in years, from now when emissions occurs 57 | emit_level : ndarray or list 58 | emission levels in future times `emit_time` 59 | 60 | Attributes 61 | ---------- 62 | ghg_start : float 63 | today's GHG-level 64 | ghg_end : float 65 | GHG-level in the last period 66 | emission_by_decisions : ndarray 67 | emissions at decision time periods?? 68 | emission_per_period : ndarray 69 | total emission at decision time period?? 70 | emission_to_ghg : ndarray 71 | GHG levels in decision time period?? 72 | emission_to_bau : float 73 | constant for converting GHG to emission?? 
74 | emit_time : ndarray or list 75 | time, in years, from now when emissions occurs 76 | emit_level : ndarray or list 77 | emission levels in future times `emit_time` 78 | 79 | """ 80 | def __init__(self, ghg_start=400.0, ghg_end=1000.0, emit_time=[0, 30, 60], emit_level=[52.0, 70.0, 81.4]): 81 | super(DLWBusinessAsUsual, self).__init__(ghg_start, ghg_end) 82 | self.emit_time = emit_time 83 | self.emit_level = emit_level 84 | 85 | def emission_by_time(self, time): 86 | """Returns the BAU emissions at any time 87 | 88 | Parameters 89 | ---------- 90 | time : int 91 | future time period in years 92 | 93 | Returns 94 | ------- 95 | float 96 | emission 97 | 98 | """ 99 | if time < self.emit_time[1]: 100 | emissions = self.emit_level[0] + float(time) / (self.emit_time[1] - self.emit_time[0]) \ 101 | * (self.emit_level[1] - self.emit_level[0]) 102 | elif time < self.emit_time[2]: 103 | emissions = self.emit_level[1] + float(time - self.emit_time[1]) / (self.emit_time[2] 104 | - self.emit_time[1]) * (self.emit_level[2] - self.emit_level[1]) 105 | else: 106 | emissions = self.emit_level[2] 107 | return emissions 108 | 109 | def bau_emissions_setup(self, tree): 110 | """Create default business as usual emissions path. The emission rate in each period is 111 | assumed to be the average of the emissions at the beginning and at the end of the period. 
112 | 113 | Parameters 114 | ---------- 115 | tree : `TreeModel` object 116 | provides the tree structure used 117 | 118 | """ 119 | num_periods = tree.num_periods 120 | self.emission_by_decisions = np.zeros(num_periods) 121 | self.emission_per_period = np.zeros(num_periods) 122 | self.bau_path = np.zeros(num_periods) 123 | self.bau_path[0] = self.ghg_start 124 | self.emission_by_decisions[0] = self.emission_by_time(tree.decision_times[0]) 125 | period_len = tree.decision_times[1:] - tree.decision_times[:-1] 126 | 127 | for n in range(1, num_periods): 128 | self.emission_by_decisions[n] = self.emission_by_time(tree.decision_times[n]) 129 | self.emission_per_period[n] = period_len[n-1] * (self.emission_by_decisions[n-1] + self.emission_by_decisions[n]) / 2 130 | 131 | #the total increase in ghg level of 600 (from 400 to 1000) in the bau path is allocated over time 132 | self.emission_to_ghg = (self.ghg_end - self.ghg_start) * self.emission_per_period / self.emission_per_period.sum() 133 | self.emission_to_bau = self.emission_to_ghg[-1] / self.emission_per_period[-1] 134 | for n in range(1, num_periods): 135 | self.bau_path[n] = self.bau_path[n-1] + self.emission_per_period[n]*self.emission_to_bau 136 | 137 | -------------------------------------------------------------------------------- /ezclimate/tools.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import csv 3 | import io 4 | 5 | ########### 6 | ### I/O ### 7 | ########### 8 | 9 | def find_path(file_name, directory="data", file_type=".csv"): 10 | import os 11 | cwd = os.getcwd() 12 | if not os.path.exists(directory): 13 | os.makedirs(directory) 14 | d = os.path.join(cwd, os.path.join(directory,file_name+file_type)) 15 | return d 16 | 17 | def create_file(file_name): 18 | import os 19 | d = find_path(file_name) 20 | if not os.path.isfile(d): 21 | open(d, 'w').close() 22 | return d 23 | 24 | def file_exists(file_name): 25 | import os 26 | d = 
find_path(file_name) 27 | return os.path.isfile(d) 28 | 29 | def load_csv(file_name, delimiter=';', comment=None): 30 | d = find_path(file_name) 31 | pass 32 | 33 | import io 34 | 35 | def clean_lines(f): 36 | """ 37 | Filter out blank lines to avoid prior cross-platform line termination problems. 38 | """ 39 | lines = f.read().splitlines() 40 | lines = [line for line in lines if line.strip()] 41 | content = '\n'.join(lines) 42 | sio = io.StringIO() 43 | sio.write(content) 44 | sio.seek(0) 45 | return sio 46 | 47 | def write_columns_csv(lst, file_name, header=[], index=None, start_char=None, delimiter=';', open_as='w'): 48 | """ 49 | write_columns_csv outputs tree data to an NEW (not existing) csv file 50 | 51 | lst : a list of a list containing data for a single tree 52 | file_name : 53 | headers : names of the trees; these are put in the first row of the csv file. 54 | index : index data (e.g., Year and Node) 55 | """ 56 | d = find_path(file_name) 57 | if index is not None: 58 | index.extend(lst) 59 | output_lst = list(zip(*index)) 60 | else: 61 | output_lst = list(zip(*lst)) 62 | 63 | with open(d, open_as) as f: 64 | writer = csv.writer(f, delimiter=delimiter) 65 | if start_char is not None: 66 | writer.writerow([start_char]) 67 | if header: 68 | writer.writerow(header) 69 | for row in output_lst: 70 | writer.writerow(row) 71 | 72 | def clean_lines(f): 73 | """ 74 | Filter out blank lines in the given file in order to avoid 75 | cross-platform line termination problems that 76 | previously led to data files with blank lines. 77 | """ 78 | lines = f.read().splitlines() 79 | lines = [line for line in lines if line.strip()] 80 | content = '\n'.join(lines) 81 | sio = io.StringIO() 82 | sio.write(content) 83 | sio.seek(0) 84 | return sio 85 | 86 | 87 | def write_columns_to_existing(lst, file_name, header="", delimiter=';'): 88 | """ 89 | writes the tree elements in lst to and EXISTING file with name file_name. 
90 | """ 91 | is_nested_list = lst and (isinstance(lst[0], list) or 92 | isinstance(lst[0], np.ndarray)) 93 | if is_nested_list: 94 | lst = list(zip(*lst)) # transpose columns -> rows 95 | 96 | file_path = find_path(file_name) 97 | output_rows = [] 98 | 99 | # read and extend input 100 | with open(file_path, 'r') as finput: 101 | reader = csv.reader(clean_lines(finput), delimiter=delimiter) 102 | 103 | # extend header row 104 | row = next(reader) 105 | row.extend(header if is_nested_list else [header]) 106 | output_rows.append(row) 107 | 108 | # extend rest of the rows 109 | for i,row in enumerate(reader): 110 | row.extend(lst[i] if is_nested_list else [lst[i]]) 111 | output_rows.append(row) 112 | 113 | # emit output, overwriting original file 114 | with open(file_path, 'w') as foutput: 115 | writer = csv.writer(foutput, delimiter=delimiter) 116 | writer.writerows(output_rows) 117 | 118 | def append_to_existing(lst, file_name, header="", index=None, delimiter=';', start_char=None): 119 | write_columns_csv(lst, file_name, header, index, start_char=start_char, delimiter=delimiter, open_as='a') 120 | 121 | def import_csv(file_name, delimiter=';', header=True, indices=None, start_at=0, break_at='\n', ignore=""): 122 | d = find_path(file_name) 123 | input_lst = [] 124 | indices_lst = [] 125 | with open(d, 'r') as f: 126 | reader = csv.reader(clean_lines(f), delimiter=delimiter) 127 | for _ in range(0, start_at): 128 | next(reader) 129 | if header: 130 | header_row = next(reader) 131 | for row in reader: 132 | if row[0] == break_at: 133 | break 134 | if row[0] == ignore: 135 | continue 136 | if indices: 137 | input_lst.append(row[indices:]) 138 | indices_lst.append(row[:indices]) 139 | else: 140 | input_lst.append(row) 141 | if header and not indices : 142 | return header_row, np.array(input_lst, dtype="float64") 143 | elif header and indices: 144 | return header_row[indices:], indices_lst, np.array(input_lst, dtype="float64") 145 | return np.array(input_lst, 
dtype="float64") 146 | 147 | 148 | ########## 149 | ### MP ### 150 | ########## 151 | 152 | def _pickle_method(method): 153 | func_name = method.__func__.__name__ 154 | obj = method.__self__ 155 | cls = method.__self__.__class__ 156 | if func_name.startswith('__') and not func_name.endswith('__'): #deal with mangled names 157 | cls_name = cls.__name__.lstrip('_') 158 | func_name = '_' + cls_name + func_name 159 | return _unpickle_method, (func_name, obj, cls) 160 | 161 | def _unpickle_method(func_name, obj, cls): 162 | for cls in cls.__mro__: 163 | try: 164 | func = cls.__dict__[func_name] 165 | except KeyError: 166 | pass 167 | else: 168 | break 169 | return func.__get__(obj, cls) 170 | -------------------------------------------------------------------------------- /ezclimate/forcing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class Forcing(object): 5 | """Radiative forcing for the EZ-Climate model. Determines the excess energy created 6 | by GHGs in the atmosphere. 
7 | 8 | Attributes 9 | ---------- 10 | sink_start : float 11 | sink constant 12 | forcing_start : float 13 | forcing start constant 14 | forcing_p1 : float 15 | forcing constant 16 | forcing_p2 : float 17 | forcing constant 18 | forcing_p3 : float 19 | forcing constant 20 | absorption_p1 : float 21 | absorption constant 22 | absorption_p2 : float 23 | absorption constant 24 | lsc_p1 : float 25 | class constant 26 | lsc_p2 : float 27 | class constant 28 | 29 | """ 30 | sink_start = 35.596 31 | forcing_start = 4.926 32 | forcing_p1 = 0.13173 33 | forcing_p2 = 0.607773 34 | forcing_p3 = 315.3785 35 | forcing_log_p1 = 5.35067129 36 | forcing_log_p2 = np.log(278.06340701) 37 | forcing_flag = 'log' # log or power 38 | #forcing_flag = 'power' # log or power 39 | absorption_p1 = 0.94835 40 | absorption_p2 = 0.741547 41 | lsc_p1 = 285.6268 42 | lsc_p2 = 0.88414 43 | 44 | @classmethod 45 | def forcing_and_ghg_at_node(cls, m, node, tree, bau, subinterval_len, returning="forcing"): 46 | """Calculates the radiative forcing based on GHG evolution leading up to the 47 | damage calculation in `node`. 
48 | 49 | Parameters 50 | ---------- 51 | m : ndarray 52 | array of mitigations 53 | node : int 54 | node for which forcing is to be calculated 55 | tree : `TreeModel` object 56 | tree structure used 57 | bau : `BusinessAsUsual` object 58 | business-as-usual scenario of emissions 59 | subinterval_len : float 60 | subinterval length 61 | returning : string, optional 62 | * "forcing": implies only the forcing is returned 63 | * "ghg": implies only the GHG level is returned 64 | * "both": implies both the forcing and GHG level is returned 65 | 66 | Returns 67 | ------- 68 | tuple or float 69 | if `returning` is 70 | * "forcing": only the forcing is returned 71 | * "ghg": only the GHG level is returned 72 | * "both": both the forcing and GHG level is returned 73 | 74 | """ 75 | if node == 0: 76 | if returning == "forcing": 77 | return 0.0 78 | elif returning== "ghg": 79 | return bau.ghg_start 80 | else: 81 | return 0.0, bau.ghg_start 82 | 83 | period = tree.get_period(node) 84 | path = tree.get_path(node, period) 85 | 86 | period_lengths = tree.decision_times[1:period+1] - tree.decision_times[:period] 87 | increments = period_lengths/subinterval_len 88 | 89 | cum_sink = cls.sink_start 90 | cum_forcing = cls.forcing_start 91 | ghg_level = bau.ghg_start 92 | 93 | for p in range(0, period): 94 | start_emission = (1.0 - m[path[p]]) * bau.emission_by_decisions[p] 95 | if p < tree.num_periods-1: 96 | end_emission = (1.0 - m[path[p]]) * bau.emission_by_decisions[p+1] 97 | else: 98 | end_emission = start_emission 99 | increment = int(increments[p]) 100 | for i in range(0, increment): 101 | p_co2_emission = start_emission + i * (end_emission-start_emission) / increment 102 | p_co2 = 0.71 * p_co2_emission 103 | p_c = p_co2 / 3.67 104 | add_p_ppm = subinterval_len * p_c / 2.13 105 | lsc = cls.lsc_p1 + cls.lsc_p2 * cum_sink 106 | absorption = 0.5 * cls.absorption_p1 * np.sign(ghg_level - lsc) * np.abs(ghg_level - lsc) ** cls.absorption_p2 107 | cum_sink += absorption 108 | if 
cls.forcing_flag == 'log': 109 | if ghg_level > 260.: 110 | log_forcing = cls.forcing_log_p1*(np.log(ghg_level)-cls.forcing_log_p2) 111 | else: 112 | b_log = cls.forcing_log_p1*(np.log(260.)-cls.forcing_log_p2) 113 | m_log = cls.forcing_log_p1/260. 114 | log_forcing = b_log + m_log*(ghg_level-260.) 115 | cum_forcing += log_forcing 116 | elif cls.forcing_flag == 'power': 117 | power_forcing = cls.forcing_p1*np.sign(ghg_level-cls.forcing_p3)*np.abs(ghg_level-cls.forcing_p3)**cls.forcing_p2 118 | cum_forcing += power_forcing 119 | #if ghg_level < 0.0: 120 | # print 'forcing: ',cls.forcing_flag, ghg_level, log_forcing, power_forcing 121 | ghg_level += add_p_ppm - absorption 122 | 123 | if returning == "forcing": 124 | return cum_forcing 125 | elif returning == "ghg": 126 | return ghg_level 127 | else: 128 | return cum_forcing, ghg_level 129 | 130 | @classmethod 131 | def forcing_at_node(cls, m, node, tree, bau, subinterval_len): 132 | """Calculates the forcing based mitigation leading up to the 133 | damage calculation in `node`. 134 | 135 | Parameters 136 | ---------- 137 | m : ndarray 138 | array of mitigations in each node. 139 | node : int 140 | the node for which the forcing is being calculated. 141 | 142 | Returns 143 | ------- 144 | float 145 | forcing 146 | 147 | """ 148 | 149 | return cls.forcing_and_ghg_at_node(m, node, tree, bau, subinterval_len, returning="forcing") 150 | 151 | @classmethod 152 | def ghg_level_at_node(cls, m, node, tree, bau, subinterval_len): 153 | """Calculates the GHG level leading up to the damage calculation in `node`. 154 | 155 | Parameters 156 | ---------- 157 | m : ndarray 158 | array of mitigations in each node. 159 | node : int 160 | the node for which the GHG level is being calculated. 
161 | 162 | Returns 163 | ------- 164 | float 165 | GHG level at node 166 | 167 | """ 168 | return cls.forcing_and_ghg_at_node(m, node, tree, bau, subinterval_len, returning="ghg") 169 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = ../../ez_climate_docs 9 | PDFBUILDDIR = /tmp 10 | PDF = ../manual.pdf 11 | 12 | # Internal variables. 13 | PAPEROPT_a4 = -D latex_paper_size=a4 14 | PAPEROPT_letter = -D latex_paper_size=letter 15 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 16 | # the i18n builder cannot share the environment and doctrees with the others 17 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 18 | 19 | .PHONY: help 20 | help: 21 | @echo "Please use \`make ' where is one of" 22 | @echo " html to make standalone HTML files" 23 | @echo " dirhtml to make HTML files named index.html in directories" 24 | @echo " singlehtml to make a single large HTML file" 25 | @echo " pickle to make pickle files" 26 | @echo " json to make JSON files" 27 | @echo " htmlhelp to make HTML files and a HTML help project" 28 | @echo " qthelp to make HTML files and a qthelp project" 29 | @echo " applehelp to make an Apple Help Book" 30 | @echo " devhelp to make HTML files and a Devhelp project" 31 | @echo " epub to make an epub" 32 | @echo " epub3 to make an epub3" 33 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 34 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 35 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 36 | @echo " text to make text files" 37 | @echo " man to make manual pages" 38 | @echo " texinfo to make Texinfo files" 39 | @echo " 
info to make Texinfo files and run them through makeinfo" 40 | @echo " gettext to make PO message catalogs" 41 | @echo " changes to make an overview of all changed/added/deprecated items" 42 | @echo " xml to make Docutils-native XML files" 43 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 44 | @echo " linkcheck to check all external links for integrity" 45 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 46 | @echo " coverage to run coverage check of the documentation (if enabled)" 47 | @echo " dummy to check syntax errors of document sources" 48 | 49 | .PHONY: clean 50 | clean: 51 | rm -rf $(BUILDDIR)/* 52 | 53 | .PHONY: html 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | .PHONY: dirhtml 60 | dirhtml: 61 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 62 | @echo 63 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 64 | 65 | .PHONY: singlehtml 66 | singlehtml: 67 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 68 | @echo 69 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 70 | 71 | .PHONY: pickle 72 | pickle: 73 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 74 | @echo 75 | @echo "Build finished; now you can process the pickle files." 76 | 77 | .PHONY: json 78 | json: 79 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 80 | @echo 81 | @echo "Build finished; now you can process the JSON files." 82 | 83 | .PHONY: htmlhelp 84 | htmlhelp: 85 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 86 | @echo 87 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 88 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
89 | 90 | .PHONY: qthelp 91 | qthelp: 92 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 93 | @echo 94 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 95 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 96 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/dlw.qhcp" 97 | @echo "To view the help file:" 98 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/dlw.qhc" 99 | 100 | .PHONY: applehelp 101 | applehelp: 102 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 103 | @echo 104 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 105 | @echo "N.B. You won't be able to view it unless you put it in" \ 106 | "~/Library/Documentation/Help or install it in your application" \ 107 | "bundle." 108 | 109 | .PHONY: devhelp 110 | devhelp: 111 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 112 | @echo 113 | @echo "Build finished." 114 | @echo "To view the help file:" 115 | @echo "# mkdir -p $$HOME/.local/share/devhelp/dlw" 116 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/dlw" 117 | @echo "# devhelp" 118 | 119 | .PHONY: epub 120 | epub: 121 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 122 | @echo 123 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 124 | 125 | .PHONY: epub3 126 | epub3: 127 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 128 | @echo 129 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 130 | 131 | .PHONY: latex 132 | latex: 133 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 134 | @echo 135 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 136 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 137 | "(use \`make latexpdf' here to do that automatically)." 138 | 139 | .PHONY: latexpdf 140 | latexpdf: 141 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 142 | @echo "Running LaTeX files through pdflatex..." 
143 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 144 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 145 | 146 | .PHONY: latexpdfja 147 | latexpdfja: 148 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 149 | @echo "Running LaTeX files through platex and dvipdfmx..." 150 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 151 | cp $(PDFBUILDDIR)/latex/*.pdf $(PDF) 152 | @echo "pdflatex finished; see $(PDF)" 153 | 154 | .PHONY: text 155 | text: 156 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 157 | @echo 158 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 159 | 160 | .PHONY: man 161 | man: 162 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 163 | @echo 164 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 165 | 166 | .PHONY: texinfo 167 | texinfo: 168 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 169 | @echo 170 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 171 | @echo "Run \`make' in that directory to run these through makeinfo" \ 172 | "(use \`make info' here to do that automatically)." 173 | 174 | .PHONY: info 175 | info: 176 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 177 | @echo "Running Texinfo files through makeinfo..." 178 | make -C $(BUILDDIR)/texinfo info 179 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 180 | 181 | .PHONY: gettext 182 | gettext: 183 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 184 | @echo 185 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 186 | 187 | .PHONY: changes 188 | changes: 189 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 190 | @echo 191 | @echo "The overview file is in $(BUILDDIR)/changes." 
192 | 193 | .PHONY: linkcheck 194 | linkcheck: 195 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 196 | @echo 197 | @echo "Link check complete; look for any errors in the above output " \ 198 | "or in $(BUILDDIR)/linkcheck/output.txt." 199 | 200 | .PHONY: doctest 201 | doctest: 202 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 203 | @echo "Testing of doctests in the sources finished, look at the " \ 204 | "results in $(BUILDDIR)/doctest/output.txt." 205 | 206 | .PHONY: coverage 207 | coverage: 208 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 209 | @echo "Testing of coverage in the sources finished, look at the " \ 210 | "results in $(BUILDDIR)/coverage/python.txt." 211 | 212 | .PHONY: xml 213 | xml: 214 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 215 | @echo 216 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 217 | 218 | .PHONY: pseudoxml 219 | pseudoxml: 220 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 221 | @echo 222 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 223 | 224 | .PHONY: dummy 225 | dummy: 226 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 227 | @echo 228 | @echo "Build finished. Dummy builder generates no files." 229 | -------------------------------------------------------------------------------- /ezclimate/tree.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class TreeModel(object): 4 | """Tree model for the EZ-Climate model. It provides the structure of a non-recombining tree. 
5 | 6 | Parameters 7 | ---------- 8 | decision_times : ndarray or list 9 | years in the future where decisions will be made 10 | prob_scale : float, optional 11 | scaling constant for probabilities 12 | 13 | Attributes 14 | ---------- 15 | decision_times : ndarray 16 | years in the future where decisions will be made 17 | prob_scale : float 18 | scaling constant for probabilities 19 | node_prob : ndarray 20 | probability of reaching node from period 0 21 | final_states_prob : ndarray 22 | last periods `node_prob` 23 | 24 | """ 25 | 26 | def __init__(self, decision_times, prob_scale=1.0): 27 | self.decision_times = decision_times 28 | if isinstance(self.decision_times, list): 29 | self.decision_times = np.array(self.decision_times) 30 | self.prob_scale = prob_scale 31 | self.node_prob = None 32 | self.final_states_prob = None 33 | self._create_probs() 34 | 35 | @property 36 | def num_periods(self): 37 | """int: the number of periods in the tree""" 38 | return len(self.decision_times)-1 39 | 40 | @property 41 | def num_decision_nodes(self): 42 | """int: the number of nodes in tree""" 43 | return (2**self.num_periods) - 1 44 | 45 | @property 46 | def num_final_states(self): 47 | """int: the number of nodes in the last period""" 48 | return 2**(self.num_periods-1) 49 | 50 | def _create_probs(self): 51 | """Creates the probabilities of every nodes in the tree structure.""" 52 | self.final_states_prob = np.zeros(self.num_final_states) 53 | self.node_prob = np.zeros(self.num_decision_nodes) 54 | self.final_states_prob[0] = 1.0 55 | sum_probs = 1.0 56 | next_prob = 1.0 57 | 58 | for n in range(1, self.num_final_states): 59 | next_prob = next_prob * self.prob_scale**(1.0 / n) 60 | self.final_states_prob[n] = next_prob 61 | self.final_states_prob /= np.sum(self.final_states_prob) 62 | 63 | self.node_prob[self.num_final_states-1:] = self.final_states_prob 64 | for period in range(self.num_periods-2, -1, -1): 65 | for state in range(0, 2**period): 66 | pos = 
self.get_node(period, state) 67 | self.node_prob[pos] = self.node_prob[2*pos + 1] + self.node_prob[2*pos + 2] 68 | 69 | def get_num_nodes_period(self, period): 70 | """Returns the number of nodes in the period. 71 | 72 | Parameters 73 | ---------- 74 | period : int 75 | period 76 | 77 | Returns 78 | ------- 79 | int 80 | number of nodes in period 81 | 82 | Examples 83 | -------- 84 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 85 | >>> t.get_num_nodes_period(2) 86 | 4 87 | >>> t.get_num_nodes_period(5) 88 | 32 89 | 90 | """ 91 | if period >= self.num_periods: 92 | return 2**(self.num_periods-1) 93 | return 2**period 94 | 95 | def get_nodes_in_period(self, period): 96 | """Returns the first and last nodes in the period. 97 | 98 | Parameters 99 | ---------- 100 | period : int 101 | period 102 | 103 | Returns 104 | ------- 105 | int 106 | number of nodes in period 107 | 108 | Examples 109 | -------- 110 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 111 | >>> t.get_nodes_in_period(0) 112 | (0, 0) 113 | >>> t.get_nodes_in_period(1) 114 | (1, 2) 115 | >>> t.get_nodes_in_period(4) 116 | (15, 30) 117 | 118 | """ 119 | if period >= self.num_periods: 120 | period = self.num_periods-1 121 | nodes = self.get_num_nodes_period(period) 122 | first_node = self.get_node(period, 0) 123 | return (first_node, first_node+nodes-1) 124 | 125 | def get_node(self, period, state): 126 | """Returns the node in period and state provided. 127 | 128 | Parameters 129 | ---------- 130 | period : int 131 | period 132 | state : int 133 | state of the node 134 | 135 | Returns 136 | ------- 137 | int 138 | node number 139 | 140 | Examples 141 | -------- 142 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 143 | >>> t.get_node(1, 1) 144 | 2 145 | >>> t.get_node(4, 10) 146 | 25 147 | >>> t.get_node(4, 20) 148 | ValueError: No such state in period 4 149 | 150 | Raises 151 | ------ 152 | ValueError 153 | If period is too large or if the state is too large 154 | for the period. 
155 | 156 | """ 157 | if period > self.num_periods: 158 | raise ValueError("Given period is larger than number of periods") 159 | if state >= 2**period: 160 | raise ValueError("No such state in period {}".format(period)) 161 | return 2**period + state - 1 162 | 163 | def get_state(self, node, period=None): 164 | """Returns the state the node represents. 165 | 166 | Parameters 167 | ---------- 168 | node : int 169 | the node 170 | period : int, optional 171 | the period 172 | 173 | Returns 174 | ------- 175 | int 176 | state 177 | 178 | Examples 179 | -------- 180 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 181 | >>> t.get_state(0) 182 | 0 183 | >>> t.get_state(4, 2) 184 | 1 185 | 186 | """ 187 | if node >= self.num_decision_nodes: 188 | return node - self.num_decision_nodes 189 | if not period: 190 | period = self.get_period(node) 191 | return node - (2**period - 1) 192 | 193 | def get_period(self, node): 194 | """Returns what period the node is in. 195 | 196 | Parameters 197 | ---------- 198 | node : int 199 | the node 200 | 201 | Returns 202 | ------- 203 | int 204 | period 205 | 206 | Examples 207 | -------- 208 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 209 | >>> t.get_period(0) 210 | 0 211 | >>> t.get_period(4) 212 | 2 213 | 214 | """ 215 | if node >= self.num_decision_nodes: 216 | return self.num_periods 217 | 218 | for i in range(0, self.num_periods): 219 | if int((node+1) / 2**i ) == 1: 220 | return i 221 | 222 | def get_parent_node(self, child): 223 | """Returns the previous or parent node of the given child node. 
224 | 225 | Parameters 226 | ---------- 227 | child : int 228 | the child node 229 | 230 | Returns 231 | ------- 232 | int 233 | partent node 234 | 235 | Examples 236 | -------- 237 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 238 | >>> t.get_parent_node(2) 239 | 0 240 | >>> t.get_parent_node(4) 241 | 1 242 | >>> t.get_parent_node(10) 243 | 4 244 | 245 | """ 246 | if child == 0: 247 | return 0 248 | if child > self.num_decision_nodes: 249 | return child - self.num_final_states 250 | if child % 2 == 0: 251 | return int((child - 2) / 2) 252 | else: 253 | return int((child - 1 ) / 2) 254 | 255 | def get_path(self, node, period=None): 256 | """Returns the unique path taken to come to given node. 257 | 258 | Parameters 259 | ---------- 260 | node : int 261 | the node 262 | 263 | Returns 264 | ------- 265 | ndarray 266 | path to get to `node` 267 | 268 | Examples 269 | -------- 270 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 271 | >>> t.get_path(2) 272 | array([0, 2]) 273 | >>> t.get_parent_node(4) 274 | array([0, 1, 4]) 275 | >>> t.get_parent_node(62) 276 | array([ 0, 2, 6, 14, 30, 62]) 277 | 278 | """ 279 | if period is None: 280 | period = self.get_period(node) 281 | path = [node] 282 | for i in range(0, period): 283 | parent = self.get_parent_node(path[i]) 284 | path.append(parent) 285 | path.reverse() 286 | return np.array(path) 287 | 288 | def get_probs_in_period(self, period): 289 | """Returns the probabilities to get from period 0 to nodes in period. 
290 | 291 | Parameters 292 | ---------- 293 | period : int 294 | the period 295 | 296 | Returns 297 | ------- 298 | ndarray 299 | probabilities 300 | 301 | Examples 302 | -------- 303 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 304 | >>> t.get_probs_in_period(2) 305 | array([ 0.25, 0.25, 0.25, 0.25]) 306 | >>> t.get_probs_in_period(4) 307 | array([ 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 308 | 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 0.0625, 309 | 0.0625, 0.0625]) 310 | 311 | """ 312 | first, last = self.get_nodes_in_period(period) 313 | return self.node_prob[list(range(first, last+1))] 314 | 315 | def reachable_end_states(self, node, period=None, state=None): 316 | """Returns what future end states can be reached from given node. 317 | 318 | Parameters 319 | ---------- 320 | node : int 321 | the node 322 | period : int, optional 323 | the period 324 | state : int, optional 325 | the state the node is in 326 | 327 | Returns 328 | ------- 329 | tuple 330 | (worst end state, best end state) 331 | 332 | Examples 333 | -------- 334 | >>> t = TreeModel([0, 15, 45, 85, 185, 285, 385]) 335 | >>> t.reachable_end_states(0) 336 | (0, 31) 337 | >>> t.reachable_end_states(10) 338 | (12, 15) 339 | >>> t.reachable_end_states(32) 340 | (1, 1) 341 | 342 | """ 343 | if period is None: 344 | period = self.get_period(node) 345 | if period >= self.num_periods: 346 | return (node - self.num_decision_nodes, node - self.num_decision_nodes) 347 | if state is None: 348 | state = self.get_state(node, period) 349 | 350 | k = int(self.num_final_states / 2**period) 351 | return (k*state, k*(state+1)-1) 352 | -------------------------------------------------------------------------------- /ezclimate/damage_simulation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import multiprocessing as mp 3 | from ezclimate.tools import _pickle_method, _unpickle_method 4 | from ezclimate.tools import 
write_columns_csv, append_to_existing 5 | try: 6 | import copyreg 7 | except: 8 | import copy_reg as copyreg 9 | import types 10 | 11 | copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method) 12 | 13 | class DamageSimulation(object): 14 | """Simulation of damages for the EZ-Climate model. 15 | 16 | The damage function simulation is a key input into the pricing engine. Damages are 17 | represented in arrays of dimension n x p, where n = num states and p = num periods. 18 | The arrays are created by Monte Carlo simulation. Each array specifies for each state 19 | and time period a damage coefficient. 20 | 21 | Up to a point, the Monte Carlo follows Pindyck (2012) 'Uncertain Outcomes and Climate Change 22 | Policy': 23 | 24 | * There is a gamma distribution for temperature 25 | * There is a gamma distribution for economic impact (conditional on temperature) 26 | 27 | However, in addition, this program adds a probability of a tipping point (conditional on temperature). 28 | This probability is a decreasing function of the parameter `peak_temp`, conditional on a tipping 29 | point. Damage itself is a decreasing function of the parameter `disaster_tail`. 
30 | 31 | Parameters 32 | ---------- 33 | tree : `TreeModel` object 34 | tree structure used 35 | ghg_levels : ndarray or list 36 | end GHG level for each path 37 | peak_temp : float 38 | tipping point parameter 39 | disaster_tail : float 40 | curvature of tipping point 41 | tip_on : bool 42 | flag that turns tipping points on or off 43 | temp_map : int 44 | mapping from GHG to temperature 45 | 46 | * 0: implies Pindyck displace gamma 47 | * 1: implies Wagner-Weitzman normal 48 | * 2: implies Roe-Baker 49 | * 3: implies user-defined normal 50 | * 4: implies user-defined gamma 51 | 52 | temp_dist_params : ndarray or list 53 | if temp_map is either 3 or 4, user needs to define the distribution parameters 54 | maxh : float 55 | time paramter from Pindyck which indicates the time it takes for temp to get half 56 | way to its max value for a given level of ghg 57 | cons_growth : float 58 | yearly growth in consumption 59 | 60 | Attributes 61 | ---------- 62 | tree : `TreeModel` object 63 | tree structure used 64 | ghg_levels : ndarray or list 65 | end GHG level for each path 66 | peak_temp : float 67 | tipping point parameter 68 | disaster_tail : float 69 | curvature of tipping point 70 | tip_on : bool 71 | flag that turns tipping points on or off 72 | temp_map : int 73 | mapping from GHG to temperature 74 | temp_dist_params : ndarray or list 75 | if temp_map is either 3 or 4, user needs to define the distribution parameters 76 | maxh : float 77 | time parameter from Pindyck which indicates the time it takes for temp to get half 78 | way to its max value for a given level of ghg 79 | cons_growth : float 80 | yearly growth in consumption 81 | d : ndarray 82 | simulated damages 83 | 84 | """ 85 | 86 | def __init__(self, tree, ghg_levels, peak_temp, disaster_tail, tip_on, 87 | temp_map, temp_dist_params, maxh, cons_growth): 88 | self.tree = tree 89 | self.peak_temp = peak_temp 90 | self.disaster_tail = disaster_tail 91 | self.tip_on = tip_on 92 | self.temp_map = temp_map 
93 | self.dist_params = temp_dist_params 94 | self.maxh = maxh 95 | self.cons_growth = cons_growth 96 | self.ghg_levels = ghg_levels 97 | self.d = None 98 | 99 | def _write_to_file(self): 100 | filename = "simulated_damages" 101 | write_columns_csv(self.d[0].T, filename) 102 | for arr in self.d[1:]: 103 | append_to_existing(arr.T, filename, start_char='#') 104 | 105 | def _gamma_array(self, shape, rate, dimension): 106 | return np.random.gamma(shape, 1.0/rate, dimension) 107 | 108 | def _normal_array(self, mean, stdev, dimension): 109 | return np.random.normal(mean, stdev, dimension) 110 | 111 | def _uniform_array(self, dimension): 112 | return np.random.random(dimension) 113 | 114 | def _sort_array(self, array): 115 | return array[array[:, self.tree.num_periods-1].argsort()] 116 | 117 | def _normal_simulation(self): 118 | """Draw random samples from normal distribution for mapping GHG to temperature for 119 | user-defined distribution parameters. 120 | """ 121 | assert self.temp_dist_params and len(self.temp_dist_params) == 2, "Normal distribution needs 2 parameters." 122 | 123 | ave, std = temp_dist_params 124 | n = len(ave) 125 | temperature = np.array([self._normal_array(ave[i],std[i], self.draws) for i in range(0, n)]) 126 | return np.exp(temperature) 127 | 128 | def _gamma_simulation(self): 129 | """Draw random samples from gamma distribution for mapping GHG to temperature for 130 | user-defined distribution parameters. 131 | """ 132 | assert self.temp_dist_params and len(self.temp_dist_params) == 3, "Gamma distribution needs 3 parameters." 133 | 134 | k, theta, displace = temp_dist_params 135 | n = len(k) 136 | return np.array([self._gamma_array(k[i], theta[i], self.draws) 137 | + displace[i] for i in range(0, n)]) 138 | 139 | def _pindyck_simulation(self): 140 | """Draw random samples for mapping GHG to temperature based on Pindyck. 
The `pindyck_impact_k` 141 | is the shape parameter from Pyndyck damage function, `pindyck_impact_theta` the scale parameter 142 | from Pyndyck damage function, and `pindyck_impact_displace` the displacement parameter from Pyndyck 143 | damage function. 144 | """ 145 | pindyck_temp_k = [2.81, 4.6134, 6.14] 146 | pindyck_temp_theta = [1.6667, 1.5974, 1.53139] 147 | pindyck_temp_displace = [-0.25, -0.5, -1.0] 148 | return np.array([self._gamma_array(pindyck_temp_k[i], pindyck_temp_theta[i], self.draws) 149 | + pindyck_temp_displace[i] for i in range(0, 3)]) 150 | 151 | def _ww_simulation(self): 152 | """Draw random samples for mapping GHG to temperature based on Wagner-Weitzman.""" 153 | ww_temp_ave = [0.573, 1.148, 1.563] 154 | ww_temp_stddev = [0.462, 0.441, 0.432] 155 | temperature = np.array([self._normal_array(ww_temp_ave[i], ww_temp_stddev[i], self.draws) 156 | for i in range(0, 3)]) 157 | return np.exp(temperature) 158 | 159 | def _rb_simulation(self): 160 | """Draw random samples for mapping GHG to temperature based on Roe-Baker.""" 161 | rb_fbar = [0.75233, 0.844652, 0.858332] 162 | rb_sigf = [0.049921, 0.033055, 0.042408] 163 | rb_theta = [2.304627, 3.333599, 2.356967] 164 | temperature = np.array([self._normal_array(rb_fbar[i], rb_sigf[i], self.draws) 165 | for i in range(0, 3)]) 166 | return np.maximum(0.0, (1.0 / (1.0 - temperature)) - np.array(rb_theta)[:, np.newaxis]) 167 | 168 | def _pindyck_impact_simulation(self): 169 | """Pindyck gamma distribution mapping temperature into damages.""" 170 | pindyck_impact_k=4.5 171 | pindyck_impact_theta=21341.0 172 | pindyck_impact_displace=-0.0000746, 173 | impact = self._gamma_array(pindyck_impact_k, pindyck_impact_theta, self.draws) + \ 174 | pindyck_impact_displace 175 | return impact 176 | 177 | def _disaster_simulation(self): 178 | """Simulating disaster random variable, allowing for a tipping point to occur 179 | with a given probability, leading to a disaster and a `disaster_tail` impact on consumption. 
180 | """ 181 | disaster = self._uniform_array((self.draws, self.tree.num_periods)) 182 | return disaster 183 | 184 | def _disaster_cons_simulation(self): 185 | """Simulates consumption conditional on disaster, based on the parameter disaster_tail.""" 186 | disaster_cons = self._gamma_array(1.0, self.disaster_tail, self.draws) 187 | return disaster_cons 188 | 189 | def _interpolation_of_temp(self, temperature): 190 | return temperature[:, np.newaxis] * 2.0 * (1.0 - 0.5**(self.tree.decision_times[1:] / self.maxh)) 191 | 192 | 193 | def _economic_impact_of_temp(self, temperature): 194 | """Economic impact of temperatures, Pindyck [2009].""" 195 | impact = self._pindyck_impact_simulation() 196 | term1 = -2.0 * impact[:, np.newaxis] * self.maxh * temperature[:,np.newaxis] / np.log(0.5) 197 | term2 = (self.cons_growth - 2.0 * impact[:, np.newaxis] \ 198 | * temperature[:, np.newaxis]) * self.tree.decision_times[1:] 199 | term3 = (2.0 * impact[:, np.newaxis] * self.maxh \ 200 | * temperature[:, np.newaxis] * 0.5**(self.tree.decision_times[1:] / self.maxh)) / np.log(0.5) 201 | return np.exp(term1 + term2 + term3) 202 | 203 | def _tipping_point_update(self, tmp, consump, peak_temp_interval=30.0): 204 | """Determine whether a tipping point has occurred, if so reduce consumption for 205 | all periods after this date. 
206 | """ 207 | draws = tmp.shape[0] 208 | disaster = self._disaster_simulation() 209 | disaster_cons = self._disaster_cons_simulation() 210 | period_lengths = self.tree.decision_times[1:] - self.tree.decision_times[:-1] 211 | 212 | tmp_scale = np.maximum(self.peak_temp, tmp) 213 | ave_prob_of_survival = 1.0 - np.square(tmp / tmp_scale) 214 | prob_of_survival = ave_prob_of_survival**(period_lengths / peak_temp_interval) 215 | # this part may be done better, this takes a long time to loop over 216 | res = prob_of_survival < disaster 217 | rows, cols = np.nonzero(res) 218 | if self.multiple_tipping_points: 219 | positions = list(zip(rows, cols)) 220 | else: 221 | row, count = np.unique(rows, return_counts=True) 222 | positions = list(zip(row, cols[np.insert(count.cumsum()[:-1],0,0)])) 223 | 224 | for pos in positions: 225 | consump[pos[0], pos[1]:] *= np.exp(-disaster_cons[pos[0]]) 226 | return consump 227 | # tipping point after final period goes here? 228 | 229 | 230 | def _run_path(self, temperature): 231 | """Calculate the distribution of damage for specific GHG-path. Implementation of 232 | the temperature and economic impacts from Pindyck [2012] page 6. 
233 | """ 234 | d = np.zeros((self.tree.num_final_states, self.tree.num_periods)) 235 | tmp = self._interpolation_of_temp(temperature) 236 | consump = self._economic_impact_of_temp(temperature) 237 | peak_cons = np.exp(self.cons_growth*self.tree.decision_times[1:]) 238 | 239 | # adding tipping points 240 | if self.tip_on: 241 | consump = self._tipping_point_update(tmp, consump) 242 | 243 | # sort based on outcome of simulation 244 | consump = self._sort_array(consump) 245 | damage = 1.0 - (consump / peak_cons) 246 | weights = self.tree.final_states_prob*(self.draws) 247 | weights = (weights.cumsum()).astype(int) 248 | 249 | d[0,] = damage[:weights[0], :].mean(axis=0) 250 | for n in range(1, self.tree.num_final_states): 251 | d[n,] = np.maximum(0.0, damage[weights[n-1]:weights[n], :].mean(axis=0)) 252 | return d 253 | 254 | def simulate(self, draws, write_to_file=True, multiple_tipping_points=False): 255 | """Create damage function values in 'p-period' version of the Summers - Zeckhauser model. 256 | 257 | Parameters 258 | ---------- 259 | draws : int 260 | number of samples drawn in Monte Carlo simulation. 261 | write_to_file : bool, optional 262 | wheter to save simulated values 263 | multiple_tipping_points : bool, optional 264 | if to allow multiple tipping points 265 | 266 | Returns 267 | ------- 268 | ndarray 269 | 3D-array of simulated damages 270 | 271 | Raises 272 | ------ 273 | ValueError 274 | If temp_map is not in the interval 0-4. 275 | 276 | Note 277 | ---- 278 | Uses the :mod:`~multiprocessing` package. 
279 | 280 | """ 281 | dnum = len(self.ghg_levels) 282 | self.draws = draws 283 | self.multiple_tipping_points = multiple_tipping_points 284 | self.peak_cons = np.exp(self.cons_growth*self.tree.decision_times[1:]) 285 | 286 | if self.temp_map == 0: 287 | temperature = self._pindyck_simulation() 288 | elif self.temp_map == 1: 289 | temperature = self._ww_simulation() 290 | elif self.temp_map == 2: 291 | temperature = self._rb_simulation() 292 | elif self.temp_map == 3: 293 | temperature = self._normal_simulation() 294 | elif self.temp_map == 4: 295 | temperature = self._gamma_simulation() 296 | else: 297 | raise ValueError("temp_map not in interval 0-4") 298 | 299 | pool = mp.Pool(processes=dnum) 300 | self.d = np.array(pool.map(self._run_path, temperature)) 301 | 302 | if write_to_file: 303 | self._write_to_file() 304 | return self.d 305 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # EZ-Climate documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Apr 5 21:37:34 2017. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
18 | # 19 | import os 20 | import sys 21 | sys.path.insert(0, os.path.abspath('../..')) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | # 27 | # needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | extensions = [ 33 | 'sphinx.ext.autodoc', 34 | 'sphinx.ext.todo', 35 | 'sphinx.ext.viewcode', 36 | 'sphinx.ext.napoleon', 37 | 'sphinx.ext.mathjax', 38 | ] 39 | 40 | # Add any paths that contain templates here, relative to this directory. 41 | templates_path = ['_templates'] 42 | napoleon_include_special_with_doc = True 43 | 44 | # The suffix(es) of source filenames. 45 | # You can specify multiple suffix as a list of string: 46 | # 47 | # source_suffix = ['.rst', '.md'] 48 | source_suffix = '.rst' 49 | 50 | # The encoding of source files. 51 | # 52 | # source_encoding = 'utf-8-sig' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # General information about the project. 58 | project = u'EZ-Climate' 59 | copyright = u'2017,' 60 | author = u'' 61 | 62 | # The version info for the project you're documenting, acts as replacement for 63 | # |version| and |release|, also used in various other places throughout the 64 | # built documents. 65 | # 66 | # The short X.Y version. 67 | version = u'1.0' 68 | # The full version, including alpha/beta/rc tags. 69 | release = u'1.0.1' 70 | 71 | # The language for content autogenerated by Sphinx. Refer to documentation 72 | # for a list of supported languages. 73 | # 74 | # This is also used if you do content translation via gettext catalogs. 75 | # Usually you set "language" from the command line for these cases. 
76 | language = None 77 | 78 | # There are two options for replacing |today|: either, you set today to some 79 | # non-false value, then it is used: 80 | # 81 | # today = '' 82 | # 83 | # Else, today_fmt is used as the format for a strftime call. 84 | # 85 | # today_fmt = '%B %d, %Y' 86 | 87 | # List of patterns, relative to source directory, that match files and 88 | # directories to ignore when looking for source files. 89 | # This patterns also effect to html_static_path and html_extra_path 90 | exclude_patterns = [] 91 | 92 | # The reST default role (used for this markup: `text`) to use for all 93 | # documents. 94 | # 95 | # default_role = None 96 | 97 | # If true, '()' will be appended to :func: etc. cross-reference text. 98 | # 99 | # add_function_parentheses = True 100 | 101 | # If true, the current module name will be prepended to all description 102 | # unit titles (such as .. function::). 103 | # 104 | # add_module_names = True 105 | 106 | # If true, sectionauthor and moduleauthor directives will be shown in the 107 | # output. They are ignored by default. 108 | # 109 | # show_authors = False 110 | 111 | # The name of the Pygments (syntax highlighting) style to use. 112 | pygments_style = 'sphinx' 113 | 114 | # A list of ignored prefixes for module index sorting. 115 | # modindex_common_prefix = [] 116 | 117 | # If true, keep warnings as "system message" paragraphs in the built documents. 118 | # keep_warnings = False 119 | 120 | # If true, `todo` and `todoList` produce output, else they produce nothing. 121 | todo_include_todos = True 122 | 123 | 124 | # -- Options for HTML output ---------------------------------------------- 125 | 126 | # The theme to use for HTML and HTML Help pages. See the documentation for 127 | # a list of builtin themes. 128 | # 129 | html_theme = 'classic' 130 | 131 | # Theme options are theme-specific and customize the look and feel of a theme 132 | # further. 
For a list of options available for each theme, see the 133 | # documentation. 134 | # 135 | # html_theme_options = {} 136 | 137 | # Add any paths that contain custom themes here, relative to this directory. 138 | # html_theme_path = [] 139 | 140 | # The name for this set of Sphinx documents. 141 | # " v documentation" by default. 142 | # 143 | # html_title = u'EZ-Climate v1.0.1' 144 | 145 | # A shorter title for the navigation bar. Default is the same as html_title. 146 | # 147 | # html_short_title = None 148 | 149 | # The name of an image file (relative to this directory) to place at the top 150 | # of the sidebar. 151 | # 152 | # html_logo = None 153 | 154 | # The name of an image file (relative to this directory) to use as a favicon of 155 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 156 | # pixels large. 157 | # 158 | # html_favicon = None 159 | 160 | # Add any paths that contain custom static files (such as style sheets) here, 161 | # relative to this directory. They are copied after the builtin static files, 162 | # so a file named "default.css" will overwrite the builtin "default.css". 163 | html_static_path = ['_static'] 164 | 165 | # Add any extra paths that contain custom files (such as robots.txt or 166 | # .htaccess) here, relative to this directory. These files are copied 167 | # directly to the root of the documentation. 168 | # 169 | # html_extra_path = [] 170 | 171 | # If not None, a 'Last updated on:' timestamp is inserted at every page 172 | # bottom, using the given strftime format. 173 | # The empty string is equivalent to '%b %d, %Y'. 174 | # 175 | # html_last_updated_fmt = None 176 | 177 | # If true, SmartyPants will be used to convert quotes and dashes to 178 | # typographically correct entities. 179 | # 180 | # html_use_smartypants = True 181 | 182 | # Custom sidebar templates, maps document names to template names. 
183 | # 184 | # html_sidebars = {} 185 | 186 | # Additional templates that should be rendered to pages, maps page names to 187 | # template names. 188 | # 189 | # html_additional_pages = {} 190 | 191 | # If false, no module index is generated. 192 | # 193 | # html_domain_indices = True 194 | 195 | # If false, no index is generated. 196 | # 197 | # html_use_index = True 198 | 199 | # If true, the index is split into individual pages for each letter. 200 | # 201 | # html_split_index = False 202 | 203 | # If true, links to the reST sources are added to the pages. 204 | # 205 | # html_show_sourcelink = True 206 | 207 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 208 | # 209 | # html_show_sphinx = True 210 | 211 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 212 | # 213 | # html_show_copyright = True 214 | 215 | # If true, an OpenSearch description file will be output, and all pages will 216 | # contain a tag referring to it. The value of this option must be the 217 | # base URL from which the finished HTML is served. 218 | # 219 | # html_use_opensearch = '' 220 | 221 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 222 | # html_file_suffix = None 223 | 224 | # Language to be used for generating the HTML full-text search index. 225 | # Sphinx supports the following languages: 226 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 227 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 228 | # 229 | # html_search_language = 'en' 230 | 231 | # A dictionary with options for the search language support, empty by default. 232 | # 'ja' uses this config value. 233 | # 'zh' user can custom change `jieba` dictionary path. 234 | # 235 | # html_search_options = {'type': 'default'} 236 | 237 | # The name of a javascript file (relative to the configuration directory) that 238 | # implements a search results scorer. If empty, the default will be used. 
239 | # 240 | # html_search_scorer = 'scorer.js' 241 | 242 | # Output file base name for HTML help builder. 243 | htmlhelp_basename = 'dlwdoc' 244 | 245 | # -- Options for LaTeX output --------------------------------------------- 246 | 247 | latex_elements = { 248 | # The paper size ('letterpaper' or 'a4paper'). 249 | # 250 | # 'papersize': 'letterpaper', 251 | 252 | # The font size ('10pt', '11pt' or '12pt'). 253 | # 254 | # 'pointsize': '10pt', 255 | 256 | # Additional stuff for the LaTeX preamble. 257 | # 258 | # 'preamble': '', 259 | 260 | # Latex figure (float) alignment 261 | # 262 | # 'figure_align': 'htbp', 263 | } 264 | 265 | # Grouping the document tree into LaTeX files. List of tuples 266 | # (source start file, target name, title, 267 | # author, documentclass [howto, manual, or own class]). 268 | latex_documents = [ 269 | (master_doc, 'ezclimate.tex', u'EZ-Climate Documentation', 270 | u'Oscar Sjogren', 'manual'), 271 | ] 272 | 273 | # The name of an image file (relative to this directory) to place at the top of 274 | # the title page. 275 | # 276 | # latex_logo = None 277 | 278 | # For "manual" documents, if this is true, then toplevel headings are parts, 279 | # not chapters. 280 | # 281 | # latex_use_parts = False 282 | 283 | # If true, show page references after internal links. 284 | # 285 | # latex_show_pagerefs = False 286 | 287 | # If true, show URL addresses after external links. 288 | # 289 | # latex_show_urls = False 290 | 291 | # Documents to append as an appendix to all manuals. 292 | # 293 | # latex_appendices = [] 294 | 295 | # It false, will not define \strong, \code, itleref, \crossref ... but only 296 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added 297 | # packages. 298 | # 299 | # latex_keep_old_macro_names = True 300 | 301 | # If false, no module index is generated. 
302 | # 303 | # latex_domain_indices = True 304 | 305 | 306 | # -- Options for manual page output --------------------------------------- 307 | 308 | # One entry per manual page. List of tuples 309 | # (source start file, name, description, authors, manual section). 310 | man_pages = [ 311 | (master_doc, 'EZ-Climate', u'EZ-Climate Documentation', 312 | [author], 1) 313 | ] 314 | 315 | # If true, show URL addresses after external links. 316 | # 317 | # man_show_urls = False 318 | 319 | 320 | # -- Options for Texinfo output ------------------------------------------- 321 | 322 | # Grouping the document tree into Texinfo files. List of tuples 323 | # (source start file, target name, title, author, 324 | # dir menu entry, description, category) 325 | texinfo_documents = [ 326 | (master_doc, 'EZ-Climate', u'EZ-Climate Documentation', 327 | author, 'EZ-Climate', 'One line description of project.', 328 | 'Miscellaneous'), 329 | ] 330 | 331 | # Documents to append as an appendix to all manuals. 332 | # 333 | # texinfo_appendices = [] 334 | 335 | # If false, no module index is generated. 336 | # 337 | # texinfo_domain_indices = True 338 | 339 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 340 | # 341 | # texinfo_show_urls = 'footnote' 342 | 343 | # If true, do not generate a @detailmenu in the "Top" node's menu. 344 | # 345 | # texinfo_no_detailmenu = False 346 | 347 | 348 | # -- Options for Epub output ---------------------------------------------- 349 | 350 | # Bibliographic Dublin Core info. 351 | epub_title = project 352 | epub_author = author 353 | epub_publisher = author 354 | epub_copyright = copyright 355 | 356 | # The basename for the epub file. It defaults to the project name. 357 | # epub_basename = project 358 | 359 | # The HTML theme for the epub output. Since the default themes are not 360 | # optimized for small screen space, using the same theme for HTML and epub 361 | # output is usually not wise. 
This defaults to 'epub', a theme designed to save 362 | # visual space. 363 | # 364 | # epub_theme = 'epub' 365 | 366 | # The language of the text. It defaults to the language option 367 | # or 'en' if the language is not set. 368 | # 369 | # epub_language = '' 370 | 371 | # The scheme of the identifier. Typical schemes are ISBN or URL. 372 | # epub_scheme = '' 373 | 374 | # The unique identifier of the text. This can be a ISBN number 375 | # or the project homepage. 376 | # 377 | # epub_identifier = '' 378 | 379 | # A unique identification for the text. 380 | # 381 | # epub_uid = '' 382 | 383 | # A tuple containing the cover image and cover page html template filenames. 384 | # 385 | # epub_cover = () 386 | 387 | # A sequence of (type, uri, title) tuples for the guide element of content.opf. 388 | # 389 | # epub_guide = () 390 | 391 | # HTML files that should be inserted before the pages created by sphinx. 392 | # The format is a list of tuples containing the path and title. 393 | # 394 | # epub_pre_files = [] 395 | 396 | # HTML files that should be inserted after the pages created by sphinx. 397 | # The format is a list of tuples containing the path and title. 398 | # 399 | # epub_post_files = [] 400 | 401 | # A list of files that should not be packed into the epub file. 402 | epub_exclude_files = ['search.html'] 403 | 404 | # The depth of the table of contents in toc.ncx. 405 | # 406 | # epub_tocdepth = 3 407 | 408 | # Allow duplicate toc entries. 409 | # 410 | # epub_tocdup = True 411 | 412 | # Choose between 'default' and 'includehidden'. 413 | # 414 | # epub_tocscope = 'default' 415 | 416 | # Fix unsupported image types using the Pillow. 417 | # 418 | # epub_fix_images = False 419 | 420 | # Scale large images. 421 | # 422 | # epub_max_image_width = 0 423 | 424 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 425 | # 426 | # epub_show_urls = 'inline' 427 | 428 | # If false, no index is generated. 
429 | # 430 | # epub_use_index = True 431 | -------------------------------------------------------------------------------- /ezclimate/storage_tree.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from abc import ABCMeta, abstractmethod 3 | 4 | class BaseStorageTree(object, metaclass=ABCMeta): 5 | """Abstract storage class for the EZ-Climate model. 6 | 7 | Parameters 8 | ---------- 9 | decision_times : ndarray or list 10 | array of years from start where decisions about mitigation levels are done 11 | 12 | Attributes 13 | ---------- 14 | decision_times : ndarray 15 | array of years from start where decisions about mitigation levels are done 16 | information_times : ndarray 17 | array of years where new information is given to the agent in the model 18 | periods : ndarray 19 | periods in the tree 20 | tree : dict 21 | dictionary where keys are `periods` and values are nodes in period 22 | 23 | Methods: 24 | -------- 25 | write_tree(self, file_name, header, delimiter=";") : 26 | *** Never used!! 
27 | 28 | write_columns(self, file_name, header, start_year=2015, delimiter=";"): 29 | called in analysis.py, in store_trees() 30 | 31 | write_columns_existing(self, file_name, header): 32 | 33 | """ 34 | 35 | def __init__(self, decision_times): 36 | self.decision_times = decision_times 37 | if isinstance(decision_times, list): 38 | self.decision_times = np.array(decision_times) 39 | self.information_times = self.decision_times[:-2] 40 | self.periods = None 41 | self.tree = None 42 | 43 | def __len__(self): 44 | return len(self.tree) 45 | 46 | def __getitem__(self, key): 47 | if isinstance(key, int) or isinstance(key, float): 48 | return self.tree.__getitem__(key).copy() 49 | else: 50 | raise TypeError('Index must be int, not {}'.format(type(key).__name__)) 51 | 52 | def _init_tree(self): 53 | self.tree = dict.fromkeys(self.periods) 54 | i = 0 55 | for key in self.periods: 56 | self.tree[key] = np.zeros(2**i) 57 | if key in self.information_times: 58 | i += 1 59 | @property 60 | def last(self): 61 | """ndarray: last period's array.""" 62 | return self.tree[self.decision_times[-1]] 63 | 64 | @property 65 | def last_period(self): 66 | """int: index of last period.""" 67 | return self.decision_times[-1] 68 | 69 | @property 70 | def nodes(self): 71 | """int: number of nodes in the tree.""" 72 | n = 0 73 | for array in list(self.tree.values()): 74 | n += len(array) 75 | return n 76 | 77 | @abstractmethod 78 | def get_next_period_array(self, period): 79 | """Return the array of the next period from `periods`.""" 80 | pass 81 | 82 | def set_value(self, period, values): 83 | """If period is in periods, set the value of element to `values` (ndarray).""" 84 | if period not in self.periods: 85 | raise ValueError("Not a valid period") 86 | if isinstance(values, list): 87 | values = np.array(values) 88 | if self.tree[period].shape != values.shape: 89 | raise ValueError("shapes {} and {} not aligned".format(self.tree[period].shape, values.shape)) 90 | self.tree[period] = values 
91 | 92 | def is_decision_period(self, time_period): 93 | """Checks if time_period is a decision time for mitigation, where 94 | time_period is the number of years since start. 95 | 96 | Parameters 97 | ---------- 98 | time_period : int 99 | time since the start year of the model 100 | 101 | Returns 102 | ------- 103 | bool 104 | True if time_period also is a decision time, else False 105 | 106 | """ 107 | return time_period in self.decision_times 108 | 109 | def is_real_decision_period(self, time_period): 110 | """Checks if time_period is a decision time besides the last period, where 111 | time_period is the number of years since start. 112 | 113 | Parameters 114 | ---------- 115 | time_period : int 116 | time since the start year of the model 117 | 118 | Returns 119 | ------- 120 | bool 121 | True if time_period also is a real decision time, else False 122 | 123 | """ 124 | return time_period in self.decision_times[:-1] 125 | 126 | def is_information_period(self, time_period): 127 | """Checks if time_period is a information time for fragility, where 128 | time_period is the number of years since start. 129 | 130 | Parameters 131 | ---------- 132 | time_period : int 133 | time since the start year of the model 134 | 135 | Returns 136 | ------- 137 | bool 138 | True if time_period also is an information time, else False 139 | 140 | """ 141 | return time_period in self.information_times 142 | 143 | def write_tree(self, file_name, header, delimiter=";"): 144 | """Save values in `tree` as a tree into file `file_name` in the 145 | 'data' directory in the current working directory. If there is no 'data' 146 | directory, one is created. 
147 | 148 | Parameters 149 | ---------- 150 | file_name : str 151 | name of saved file 152 | header : str 153 | first row of file 154 | delimiter : str, optional 155 | delimiter in file 156 | 157 | """ 158 | from ezclimate.tools import find_path 159 | import csv 160 | 161 | real_times = self.decision_times[:-1] 162 | size = len(self.tree[real_times[-1]]) 163 | output_lst = [] 164 | prev_k = size 165 | 166 | for t in real_times: 167 | temp_lst = [""]*(size*2) 168 | k = int(size/len(self.tree[t])) 169 | temp_lst[k::prev_k] = self.tree[t].tolist() 170 | output_lst.append(temp_lst) 171 | prev_k = k 172 | 173 | write_lst = list(zip(*output_lst)) 174 | d = find_path(file_name) 175 | with open(d, 'wb') as f: 176 | writer = csv.writer(f, delimiter=delimiter) 177 | writer.writerow([header]) 178 | for row in write_lst: 179 | writer.writerow(row) 180 | 181 | def write_columns(self, file_name, header, start_year=2015, delimiter=";"): 182 | """Save values in `tree` as columns into file `file_name` in the 183 | 'data' directory in the current working directory. If there is no 'data' 184 | directory, one is created. 185 | 186 | +------------+------------+-----------+ 187 | | Year | Node | header | 188 | +============+============+===========+ 189 | | start_year | 0 | val0 | 190 | +------------+------------+-----------+ 191 | | .. | .. | .. 
| 192 | +------------+------------+-----------+ 193 | 194 | Parameters 195 | ---------- 196 | file_name : str 197 | name of saved file 198 | header : str 199 | description of values in tree 200 | start_year : int, optional 201 | start year of analysis 202 | delimiter : str, optional 203 | delimiter in file 204 | 205 | """ 206 | from ezclimate.tools import write_columns_csv, file_exists 207 | if file_exists(file_name): 208 | self.write_columns_existing(file_name, header) 209 | else: 210 | real_times = self.decision_times[:-1] 211 | years = [] 212 | nodes = [] 213 | output_lst = [] 214 | k = 0 215 | for t in real_times: 216 | for n in range(len(self.tree[t])): 217 | years.append(t+start_year) 218 | nodes.append(k) 219 | output_lst.append(self.tree[t][n]) 220 | k += 1 221 | write_columns_csv(lst=[output_lst], file_name=file_name, header=["Year", "Node", header], 222 | index=[years, nodes], delimiter=delimiter) 223 | 224 | def write_columns_existing(self, file_name, header): 225 | """Save values in `tree` as columns into file `file_name` in the 226 | 'data' directory in the current working directory, when `file_name` already exists. 227 | If there is no 'data' directory, one is created. 228 | 229 | +------------+------------+-----------------+------------------+ 230 | | Year | Node | other_header | header | 231 | +============+============+=================+==================+ 232 | | start_year | 0 | other_val0 | val0 | 233 | +------------+------------+-----------------+------------------+ 234 | | .. | .. | .. | .. 
| 235 | +------------+------------+-----------------+------------------+ 236 | 237 | Parameters 238 | ---------- 239 | file_name : str 240 | name of saved file 241 | header : str 242 | description of values in tree 243 | delimiter : str, optional 244 | delimiter in file 245 | 246 | """ 247 | from ezclimate.tools import write_columns_to_existing 248 | output_lst = [] 249 | for t in self.decision_times[:-1]: 250 | output_lst.extend(self.tree[t]) 251 | write_columns_to_existing(lst=output_lst, file_name=file_name, header=header) 252 | 253 | 254 | class SmallStorageTree(BaseStorageTree): 255 | """Storage tree class for the EZ-Climate model. No storage in nodes between 256 | periods in `decision_times`. 257 | 258 | Parameters 259 | ---------- 260 | decision_times : ndarray or list 261 | array of years from start where decisions about mitigation levels are done 262 | 263 | Attributes 264 | ---------- 265 | decision_times : ndarray 266 | array of years from start where decisions about mitigation levels are done 267 | information_times : ndarray 268 | array of years where new information is given to the agent in the model 269 | periods : ndarray 270 | periods in the tree 271 | tree : dict 272 | dictionary where keys are `periods` and values are nodes in period 273 | 274 | """ 275 | def __init__(self, decision_times): 276 | super(SmallStorageTree, self).__init__(decision_times) 277 | self.periods = self.decision_times 278 | self._init_tree() 279 | 280 | def get_next_period_array(self, period): 281 | """Returns the array of the next decision period. 
282 | 283 | Parameters 284 | ---------- 285 | period : int 286 | period 287 | 288 | Examples 289 | -------- 290 | >>> sst = SmallStorageTree([0, 15, 45, 85, 185, 285, 385]) 291 | >>> sst.get_next_period_array(0) 292 | array([0., 0.]) 293 | >>> sst.get_next_period_array(15) 294 | array([ 0., 0., 0., 0.]) 295 | 296 | Raises 297 | ------ 298 | IndexError 299 | If `period` is not in real decision times 300 | 301 | """ 302 | if self.is_real_decision_period(period): 303 | index = self.decision_times[np.where(self.decision_times==period)[0]+1][0] 304 | return self.tree[index].copy() 305 | raise IndexError("Given period is not in real decision times") 306 | 307 | def index_below(self, period): 308 | """Returns the key of the previous decision period. 309 | 310 | Parameters 311 | ---------- 312 | period : int 313 | period 314 | 315 | Examples 316 | -------- 317 | >>> sst = SmallStorageTree([0, 15, 45, 85, 185, 285, 385]) 318 | >>> sst.index_below(15) 319 | 0 320 | 321 | Raises 322 | ------ 323 | IndexError 324 | If `period` is not in decision times or first element in decision times 325 | 326 | """ 327 | if period in self.decision_times[1:]: 328 | period = self.decision_times[np.where(self.decision_times==period)[0]-1] 329 | return period[0] 330 | raise IndexError("Period not in decision times or first period") 331 | 332 | class BigStorageTree(BaseStorageTree): 333 | """Storage tree class for the EZ-Climate model. Storage in nodes between 334 | periods in `decision_times`. 
335 | 336 | Parameters 337 | ---------- 338 | subinterval_len : float 339 | years between periods in tree 340 | decision_times : ndarray or list 341 | array of years from start where decisions about mitigation levels are done 342 | 343 | Attributes 344 | ---------- 345 | decision_times : ndarray 346 | array of years from start where decisions about mitigation levels are done 347 | information_times : ndarray 348 | array of years where new information is given to the agent in the model 349 | periods : ndarray 350 | periods in the tree 351 | tree : dict 352 | dictionary where keys are `periods` and values are nodes in period 353 | subinterval_len : float 354 | years between periods in tree 355 | 356 | """ 357 | 358 | def __init__(self, subinterval_len, decision_times): 359 | super(BigStorageTree, self).__init__(decision_times) 360 | self.subinterval_len = subinterval_len 361 | self.periods = np.arange(0, self.decision_times[-1]+self.subinterval_len, 362 | self.subinterval_len) 363 | self._init_tree() 364 | 365 | @property 366 | def first_period_intervals(self): 367 | """int: the number of subintervals in the first period.""" 368 | return int((self.decision_times[1] - self.decision_times[0]) / self.subinterval_len) 369 | 370 | def get_next_period_array(self, period): 371 | """Returns the array of the next period.
372 | 373 | Parameters 374 | ---------- 375 | period : int 376 | period 377 | 378 | Examples 379 | -------- 380 | >>> bst = BigStorageTree(5.0, [0, 15, 45, 85, 185, 285, 385]) 381 | >>> bst.get_next_period_array(0) 382 | array([0., 0.]) 383 | >>> bst.get_next_period_array(10) 384 | array([ 0., 0., 0., 0.]) 385 | 386 | Raises 387 | ------ 388 | IndexError 389 | If `period` is not a valid period or too large 390 | 391 | """ 392 | if period + self.subinterval_len <= self.decision_times[-1]: 393 | return self.tree[period+self.subinterval_len].copy() 394 | raise IndexError("Period is not a valid period or too large") 395 | 396 | def between_decision_times(self, period): 397 | """Check which decision time the period is between and returns 398 | the index of the lower decision time. 399 | 400 | Parameters 401 | ---------- 402 | period : int 403 | period 404 | 405 | Returns 406 | ------- 407 | int 408 | index 409 | 410 | Examples 411 | -------- 412 | >>> bst = BigStorageTree(5, [0, 15, 45, 85, 185, 285, 385]) 413 | >>> bst.between_decision_times(5) 414 | 0 415 | >>> bst.between_decision_times(15) 416 | 1 417 | 418 | """ 419 | if period == 0: 420 | return 0 421 | for i in range(len(self.information_times)): 422 | if self.decision_times[i] <= period and period < self.decision_times[i+1]: 423 | return i 424 | return i+1 425 | 426 | def decision_interval(self, period): 427 | """Check which interval the period is between.
428 | 429 | Parameters 430 | ---------- 431 | period : int 432 | period 433 | 434 | Returns 435 | ------- 436 | int 437 | index 438 | 439 | Examples 440 | -------- 441 | >>> bst = BigStorageTree(5, [0, 15, 45, 85, 185, 285, 385]) 442 | >>> bst.decision_interval(5) 443 | 1 444 | >>> bst.decision_interval(15) 445 | 1 446 | >>> bst.decision_interval(20) 447 | 2 448 | 449 | """ 450 | if period == 0: 451 | return 0 452 | for i in range(1, len(self.decision_times)): 453 | if self.decision_times[i-1] < period and period <= self.decision_times[i]: 454 | return i 455 | return i 456 | -------------------------------------------------------------------------------- /ezclimate/damage.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from abc import ABCMeta, abstractmethod 3 | from ezclimate.damage_simulation import DamageSimulation 4 | from ezclimate.forcing import Forcing 5 | 6 | class Damage(object, metaclass=ABCMeta): 7 | """Abstract damage class for the EZ-Climate model. 8 | 9 | Parameters 10 | ---------- 11 | tree : `TreeModel` object 12 | provides the tree structure used 13 | bau : `BusinessAsUsual` object 14 | business-as-usual scenario of emissions 15 | 16 | Attributes 17 | ---------- 18 | tree : `TreeModel` object 19 | provides the tree structure used 20 | bau : `BusinessAsUsual` object 21 | business-as-usual scenario of emissions 22 | 23 | """ 24 | def __init__(self, tree, bau): 25 | self.tree = tree 26 | self.bau = bau 27 | 28 | @abstractmethod 29 | def average_mitigation(self): 30 | """The average_mitigation function should return a 1D array of the 31 | average mitigation for every node in the period. 32 | """ 33 | pass 34 | 35 | @abstractmethod 36 | def damage_function(self): 37 | """The damage_function should return a 1D array of the damages for 38 | every node in the period. 39 | """ 40 | pass 41 | 42 | class DLWDamage(Damage): 43 | """Damage class for the EZ-Climate model.
Provides the damages from emissions and mitigation outcomes. 44 | 45 | Parameters 46 | ---------- 47 | tree : `TreeModel` object 48 | provides the tree structure used 49 | bau : `BusinessAsUsual` object 50 | business-as-usual scenario of emissions 51 | cons_growth : float 52 | constant consumption growth rate 53 | ghg_levels : ndarray or list 54 | end GHG levels for each end scenario 55 | 56 | Attributes 57 | ---------- 58 | tree : `TreeModel` object 59 | provides the tree structure used 60 | bau : `BusinessAsUsual` object 61 | business-as-usual scenario of emissions 62 | cons_growth : float 63 | constant consumption growth rate 64 | ghg_levels : ndarray or list 65 | end GHG levels for each end scenario 66 | dnum : int 67 | number of simulated damage paths 68 | d : ndarray 69 | simulated damages 70 | d_rcomb : ndarray 71 | adjusted simulated damages for recombining tree 72 | cum_forcings : ndarray 73 | cumulative forcing interpolation coeffiecients, used to calculate forcing based mitigation 74 | forcing : `Forcing` object 75 | class for calculating cumulative forcing and GHG levels 76 | damage_coefs : ndarray 77 | interpolation coefficients used to calculate damages 78 | 79 | """ 80 | 81 | def __init__(self, tree, bau, cons_growth, ghg_levels, subinterval_len): 82 | super(DLWDamage, self).__init__(tree, bau) 83 | self.ghg_levels = ghg_levels 84 | if isinstance(self.ghg_levels, list): 85 | self.ghg_levels = np.array(self.ghg_levels) 86 | self.cons_growth = cons_growth 87 | self.dnum = len(ghg_levels) 88 | self.subinterval_len = subinterval_len 89 | self.cum_forcings = None 90 | self.d = None 91 | self.d_rcomb = None 92 | self.emit_pct = None 93 | self.damage_coefs = None 94 | 95 | def _recombine_nodes(self): 96 | """Creating damage coefficients for recombining tree. The state reached by an up-down move is 97 | separate from a down-up move because in general the two paths will lead to different degrees of 98 | mitigation and therefore of GHG level. 
A 'recombining' tree is one in which the movement from 99 | one state to the next through time is nonetheless such that an up move followed by a down move 100 | leads to the same fragility. 101 | """ 102 | nperiods = self.tree.num_periods 103 | sum_class = np.zeros(nperiods, dtype=int) 104 | new_state = np.zeros([nperiods, self.tree.num_final_states], dtype=int) 105 | temp_prob = self.tree.final_states_prob.copy() 106 | self.d_rcomb = self.d.copy() 107 | 108 | for old_state in range(self.tree.num_final_states): 109 | temp = old_state 110 | n = nperiods-2 111 | d_class = 0 112 | while n >= 0: 113 | if temp >= 2**n: 114 | temp -= 2**n 115 | d_class += 1 116 | n -= 1 117 | sum_class[d_class] += 1 118 | new_state[d_class, sum_class[d_class]-1] = old_state 119 | 120 | sum_nodes = np.append(0, sum_class.cumsum()) 121 | prob_sum = np.array([self.tree.final_states_prob[sum_nodes[i]:sum_nodes[i+1]].sum() for i in range(len(sum_nodes)-1)]) 122 | for period in range(nperiods): 123 | for k in range(self.dnum): 124 | d_sum = np.zeros(nperiods) 125 | old_state = 0 126 | for d_class in range(nperiods): 127 | d_sum[d_class] = (self.tree.final_states_prob[old_state:old_state+sum_class[d_class]] * self.d_rcomb[k, old_state:old_state+sum_class[d_class], period]).sum() 128 | old_state += sum_class[d_class] 129 | self.tree.final_states_prob[new_state[d_class, 0:sum_class[d_class]]] = temp_prob[0] 130 | for d_class in range(nperiods): 131 | self.d_rcomb[k, new_state[d_class, 0:sum_class[d_class]], period] = d_sum[d_class] / prob_sum[d_class] 132 | 133 | self.tree.node_prob[-len(self.tree.final_states_prob):] = self.tree.final_states_prob 134 | for p in range(1,nperiods-1): 135 | nodes = self.tree.get_nodes_in_period(p) 136 | for node in range(nodes[0], nodes[1]+1): 137 | worst_end_state, best_end_state = self.tree.reachable_end_states(node, period=p) 138 | self.tree.node_prob[node] = self.tree.final_states_prob[worst_end_state:best_end_state+1].sum() 139 | 140 | def 
def _damage_interpolation(self):
    """Create the interpolation coefficients used in `damage_function`.

    For every final state and period, fits a quadratic through the simulated
    damages of the `dnum` mitigation scenarios (plus a linear tail segment
    between the two highest-mitigation scenarios), storing the coefficients
    in `damage_coefs`.
    """
    if self.d is None:
        print("Importing stored damage simulation")
        self.import_damages()

    self._recombine_nodes()
    if self.emit_pct is None:
        # fraction of BAU emissions mitigated in each simulation scenario
        bau_emission = self.bau.ghg_end - self.bau.ghg_start
        self.emit_pct = 1.0 - (self.ghg_levels - self.bau.ghg_start) / bau_emission

    self.damage_coefs = np.zeros((self.tree.num_final_states, self.tree.num_periods, self.dnum-1, self.dnum))
    amat = np.ones((self.tree.num_periods, self.dnum, self.dnum))
    bmat = np.ones((self.tree.num_periods, self.dnum))

    # linear segment between the two highest-mitigation scenarios
    self.damage_coefs[:, :, -1, -1] = self.d_rcomb[-1, :, :]
    self.damage_coefs[:, :, -1, -2] = (self.d_rcomb[-2, :, :] - self.d_rcomb[-1, :, :]) / self.emit_pct[-2]

    # first row enforces slope continuity with the linear segment at emit_pct[-2];
    # remaining rows enforce that the quadratic passes through the simulated points
    amat[:, 0, 0] = 2.0 * self.emit_pct[-2]
    amat[:, 1:, 0] = self.emit_pct[:-1]**2
    amat[:, 1:, 1] = self.emit_pct[:-1]
    amat[:, 0, -1] = 0.0

    for state in range(self.tree.num_final_states):
        bmat[:, 0] = self.damage_coefs[state, :, -1, -2] * self.emit_pct[-2]
        bmat[:, 1:] = self.d_rcomb[:-1, state, :].T
        self.damage_coefs[state, :, 0] = np.linalg.solve(amat, bmat)

def import_damages(self, file_name="simulated_damages"):
    """Import saved simulated damages. File must be saved in 'data' directory
    inside current working directory. Save imported values in `d`.

    Parameters
    ----------
    file_name : str, optional
        name of file of saved simulated damages

    Note
    ----
    On failure to read the file, an error is printed and the process exits
    with a nonzero status.

    """
    from ezclimate.tools import import_csv
    try:
        d = import_csv(file_name, ignore="#", header=False)
    except IOError as e:
        import sys
        print("Could not import simulated damages:\n\t{}".format(e))
        # exit status 1: the previous exit code 0 wrongly signalled success
        sys.exit(1)

    n = self.tree.num_final_states
    # reshape the flat CSV rows into (dnum, num_final_states, num_periods)
    self.d = np.array([d[n*i:n*(i+1)] for i in range(self.dnum)])
    self._damage_interpolation()

def damage_simulation(self, draws, peak_temp=9.0, disaster_tail=12.0, tip_on=True,
                      multi_tips=False, temp_map=1, temp_dist_params=None, maxh=100.0,
                      save_simulation=True):
    """Initialization and simulation of damages, given by :mod:`ez_climate.DamageSimulation`.

    Parameters
    ----------
    draws : int
        number of Monte Carlo draws
    peak_temp : float, optional
        tipping point parameter
    disaster_tail : float, optional
        curvature of tipping point
    tip_on : bool, optional
        flag that turns tipping points on or off
    multi_tips : bool, optional
        if to allow multiple tipping points in simulation
    temp_map : int, optional
        mapping from GHG to temperature
        * 0: implies Pindyck displace gamma
        * 1: implies Wagner-Weitzman normal
        * 2: implies Roe-Baker
        * 3: implies user-defined normal
        * 4: implies user-defined gamma
    temp_dist_params : ndarray or list, optional
        if temp_map is either 3 or 4, user needs to define the distribution parameters
    maxh : float, optional
        time parameter from Pindyck which indicates the time it takes for temp to get half
        way to its max value for a given level of ghg
    save_simulation : bool, optional
        True if simulated values should be save, False otherwise

    Returns
    -------
    ndarray
        simulated damages

    Note
    ----
    Consumption growth is taken from ``self.cons_growth``; it is not a
    parameter of this method.

    """
    ds = DamageSimulation(tree=self.tree, ghg_levels=self.ghg_levels, peak_temp=peak_temp,
                          disaster_tail=disaster_tail, tip_on=tip_on, temp_map=temp_map,
                          temp_dist_params=temp_dist_params, maxh=maxh, cons_growth=self.cons_growth)
    self.ds = ds
    print("Starting damage simulation..")
    self.d = ds.simulate(draws, write_to_file=save_simulation, multiple_tipping_points=multi_tips)
    print("Done!")
    self._damage_interpolation()
    return self.d

def _forcing_based_mitigation(self, forcing, period):
    """Calculation of mitigation based on forcing up to period. Interpolating between the forcing
    associated with the constant degree of mitigation consistent with the damage simulation scenarios.
    """
    p = period - 1
    if forcing > self.cum_forcings[p][1]:
        # between the middle and lowest-mitigation scenario forcings
        weight_on_sim2 = (self.cum_forcings[p][2] - forcing) / (self.cum_forcings[p][2] - self.cum_forcings[p][1])
        weight_on_sim3 = 0
    elif forcing > self.cum_forcings[p][0]:
        # between the highest- and middle-mitigation scenario forcings
        weight_on_sim2 = (forcing - self.cum_forcings[p][0]) / (self.cum_forcings[p][1] - self.cum_forcings[p][0])
        weight_on_sim3 = (self.cum_forcings[p][1] - forcing) / (self.cum_forcings[p][1] - self.cum_forcings[p][0])
    else:
        # below the highest-mitigation scenario forcing: extrapolate
        weight_on_sim2 = 0
        weight_on_sim3 = 1.0 + (self.cum_forcings[p][0] - forcing) / self.cum_forcings[p][0]

    return weight_on_sim2 * self.emit_pct[1] + weight_on_sim3 * self.emit_pct[0]

def _forcing_init(self):
    """Initialize `Forcing` object and cum_forcings used in calculating the force mitigation up to a node."""
    if self.emit_pct is None:
        bau_emission = self.bau.ghg_end - self.bau.ghg_start
        self.emit_pct = 1.0 - (self.ghg_levels - self.bau.ghg_start) / bau_emission

    self.cum_forcings = np.zeros((self.tree.num_periods, self.dnum))
    # constant mitigation path for each simulation scenario
    mitigation = np.ones((self.dnum, self.tree.num_decision_nodes)) * self.emit_pct[:, np.newaxis]

    for i in range(self.dnum):
        for n in range(1, self.tree.num_periods+1):
            node = self.tree.get_node(n, 0)
            self.cum_forcings[n-1, i] = Forcing.forcing_at_node(mitigation[i], node, self.tree,
                                                                self.bau, self.subinterval_len)

def average_mitigation_node(self, m, node, period=None):
    """Calculate the average mitigation until node.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    node : int
        node for which average mitigation is to be calculated for
    period : int, optional
        the period the node is in

    Returns
    -------
    float
        average mitigation

    """
    if period == 0:
        return 0
    if period is None:
        period = self.tree.get_period(node)
    # fix: removed unused `state = self.tree.get_state(node, period)` lookup
    path = self.tree.get_path(node, period)
    new_m = m[path[:-1]]

    # emission-weighted average of mitigation along the path to the node
    period_len = self.tree.decision_times[1:period+1] - self.tree.decision_times[:period]
    bau_emissions = self.bau.emission_by_decisions[:period]
    total_emission = np.dot(bau_emissions, period_len)
    ave_mitigation = np.dot(new_m, bau_emissions*period_len)
    return ave_mitigation / total_emission

def average_mitigation(self, m, period):
    """Calculate the average mitigation for all nodes in a period.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    period : int
        period to calculate average mitigation for

    Returns
    -------
    ndarray
        average mitigations

    """
    nodes = self.tree.get_num_nodes_period(period)
    ave_mitigation = np.zeros(nodes)
    for i in range(nodes):
        node = self.tree.get_node(period, i)
        ave_mitigation[i] = self.average_mitigation_node(m, node, period)
    return ave_mitigation

def _ghg_level_node(self, m, node):
    """GHG level at a single node, delegated to `Forcing`."""
    return Forcing.ghg_level_at_node(m, node, self.tree, self.bau, self.subinterval_len)

def ghg_level_period(self, m, period=None, nodes=None):
    """Calculate the GHG levels corresponding to the given mitigation.
    Need to provide either `period` or `nodes`.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    period : int, optional
        what period to calculate GHG levels for
    nodes : ndarray or list, optional
        the nodes to calculate GHG levels for

    Returns
    -------
    ndarray
        GHG levels

    Raises
    ------
    ValueError
        If neither `period` nor `nodes` is given.

    """
    if nodes is None and period is not None:
        start_node, end_node = self.tree.get_nodes_in_period(period)
        if period >= self.tree.num_periods:
            # final-state nodes live past the decision nodes; shift indices
            add = end_node - start_node + 1
            start_node += add
            end_node += add
        nodes = np.arange(start_node, end_node+1)
    if period is None and nodes is None:
        raise ValueError("Need to give function either nodes or the period")

    ghg_level = np.zeros(len(nodes))
    for i in range(len(nodes)):
        ghg_level[i] = self._ghg_level_node(m, nodes[i])
    return ghg_level

def ghg_level(self, m, periods=None):
    """Calculate the GHG levels for more than one period.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    periods : int, optional
        number of periods to calculate GHG levels for

    Returns
    -------
    ndarray
        GHG levels

    """
    if periods is None:
        periods = self.tree.num_periods - 1
    if periods >= self.tree.num_periods:
        ghg_level = np.zeros(self.tree.num_decision_nodes + self.tree.num_final_states)
    else:
        ghg_level = np.zeros(self.tree.num_decision_nodes)
    for period in range(periods+1):
        start_node, end_node = self.tree.get_nodes_in_period(period)
        if period >= self.tree.num_periods:
            add = end_node - start_node + 1
            start_node += add
            end_node += add
        nodes = np.arange(start_node, end_node+1)
        ghg_level[nodes] = self.ghg_level_period(m, nodes=nodes)
    return ghg_level

def _damage_function_node(self, m, node):
    """Calculate the damage at any given node, based on mitigation actions in `m`."""
    if self.damage_coefs is None:
        self._damage_interpolation()
    if self.cum_forcings is None:
        self._forcing_init()
    if node == 0:
        return 0.0

    period = self.tree.get_period(node)
    forcing, ghg_level = Forcing.forcing_and_ghg_at_node(m, node, self.tree, self.bau,
                                                         self.subinterval_len, "both")
    force_mitigation = self._forcing_based_mitigation(forcing, period)
    # logistic correction term; only material at very low GHG levels
    ghg_extension = 1.0 / (1 + np.exp(0.05*(ghg_level-200)))

    worst_end_state, best_end_state = self.tree.reachable_end_states(node, period=period)
    probs = self.tree.final_states_prob[worst_end_state:best_end_state+1]

    if force_mitigation < self.emit_pct[1]:
        # below mid-scenario mitigation: linear segment
        damage = (probs * (self.damage_coefs[worst_end_state:best_end_state+1, period-1, 1, 1] * force_mitigation
                  + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 1, 2])).sum()
    elif force_mitigation < self.emit_pct[0]:
        # between mid and top scenarios: quadratic segment
        damage = (probs * (self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 0] * force_mitigation**2
                  + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 1] * force_mitigation
                  + self.damage_coefs[worst_end_state:best_end_state+1, period-1, 0, 2])).sum()
    else:
        # beyond top-scenario mitigation: exponential decay toward zero damage
        damage = 0.0
        for i, state in enumerate(range(worst_end_state, best_end_state+1)):
            if self.d_rcomb[0, state, period-1] > 1e-5:
                deriv = 2.0 * self.damage_coefs[state, period-1, 0, 0] * self.emit_pct[0] \
                        + self.damage_coefs[state, period-1, 0, 1]
                decay_scale = deriv / (self.d_rcomb[0, state, period-1]*np.log(0.5))
                dist = force_mitigation - self.emit_pct[0] + np.log(self.d_rcomb[0, state, period-1]) \
                       / (np.log(0.5) * decay_scale)
                damage += probs[i] * (0.5**(decay_scale*dist)
                          * np.exp(-np.square(force_mitigation-self.emit_pct[0])/60.0))

    return (damage / probs.sum()) + ghg_extension

def damage_function(self, m, period):
    """Calculate the damage for every node in a period, based on mitigation actions `m`.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    period : int
        period to calculate damages for

    Returns
    -------
    ndarray
        damages

    """
    nodes = self.tree.get_num_nodes_period(period)
    damages = np.zeros(nodes)
    for i in range(nodes):
        node = self.tree.get_node(period, i)
        damages[i] = self._damage_function_node(m, node)
    return damages
from abc import ABCMeta, abstractmethod
import numpy as np
from ezclimate.storage_tree import BigStorageTree, SmallStorageTree

# Utility recursions can transiently over/underflow for extreme parameters;
# errors are silenced here and consumption is floored at 1e-18 downstream.
np.seterr(all='ignore')

class EZUtility(object):
    """Calculation of Epstein-Zin utility for the EZ-Climate model.

    The Epstein-Zin utility allows for different rates of substitution across time and
    states. For specification see DLW-paper.

    Parameters
    ----------
    tree : `TreeModel` object
        tree structure used
    damage : `Damage` object
        class that provides damage methods
    cost : `Cost` object
        class that provides cost methods
    period_len : float
        subinterval length
    eis : float, optional
        elasticity of intertemporal substitution
    ra : float, optional
        risk-aversion
    time_pref : float, optional
        pure rate of time preference

    Attributes
    ----------
    tree : `TreeModel` object
        tree structure used
    damage : `Damage` object
        class that provides damage methods
    cost : `Cost` object
        class that provides cost methods
    period_len : float
        subinterval length
    decision_times : ndarray
        years in the future where decisions will be made
    cons_growth : float
        consumption growth
    growth_term : float
        1 + cons_growth
    r : float
        the parameter rho from the DLW-paper
    a : float
        the parameter alpha from the DLW-paper
    b : float
        the parameter beta from the DLW-paper

    """

    def __init__(self, tree, damage, cost, period_len, eis=0.9, ra=7.0, time_pref=0.005,
                 add_penalty_cost=False, max_penalty=0.0, penalty_scale=1.0):
        self.tree = tree
        self.damage = damage
        self.cost = cost
        self.period_len = period_len
        self.decision_times = tree.decision_times
        self.cons_growth = damage.cons_growth
        self.growth_term = 1.0 + self.cons_growth
        # Epstein-Zin parameters: rho, alpha, and per-period discount factor beta
        self.r = 1.0 - 1.0/eis
        self.a = 1.0 - ra
        self.b = (1.0-time_pref)**period_len
        # potential (undamaged, uncosted) consumption at each decision time
        self.potential_cons = np.ones(self.decision_times.shape) + self.cons_growth
        self.potential_cons = self.potential_cons ** self.decision_times
        # NOTE(review): the three penalty attributes below are stored but never
        # read elsewhere in this class -- confirm intended use.
        self.add_penalty_cost = add_penalty_cost
        self.max_penalty = max_penalty
        self.penalty_scale = penalty_scale

    def _end_period_utility(self, m, utility_tree, cons_tree, cost_tree):
        """Calculate the terminal utility."""
        period_ave_mitigation = self.damage.average_mitigation(m, self.tree.num_periods)
        period_damage = self.damage.damage_function(m, self.tree.num_periods)
        damage_nodes = self.tree.get_nodes_in_period(self.tree.num_periods)

        period_mitigation = m[damage_nodes[0]:damage_nodes[1]+1]
        period_cost = self.cost.cost(self.tree.num_periods, period_mitigation, period_ave_mitigation)

        # closed form of the infinite-horizon continuation value under
        # constant consumption growth (geometric series in b*g^r)
        continuation = (1.0 / (1.0 - self.b*(self.growth_term**self.r)))**(1.0/self.r)

        cost_tree.set_value(cost_tree.last_period, period_cost)
        period_consumption = self.potential_cons[-1] * (1.0 - period_damage)
        # floor consumption to keep fractional powers well-defined
        period_consumption[period_consumption<=0.0] = 1e-18
        cons_tree.set_value(cons_tree.last_period, period_consumption)
        utility_tree.set_value(utility_tree.last_period, (1.0 - self.b)**(1.0/self.r) * cons_tree.last * continuation)

    def _end_period_marginal_utility(self, mu_tree_0, mu_tree_1, ce_tree, utility_tree, cons_tree):
        """Calculate the terminal marginal utility."""
        # certainty-equivalent term at the terminal period
        ce_term = utility_tree.last**self.r - (1.0 - self.b)*cons_tree.last**self.r
        ce_tree.set_value(ce_tree.last_period, ce_term)

        # marginal utility w.r.t. terminal consumption, and at one subinterval earlier
        mu_0_last = (1.0 - self.b)*(utility_tree[utility_tree.last_period-self.period_len] / cons_tree.last)**(1.0-self.r)
        mu_tree_0.set_value(mu_tree_0.last_period, mu_0_last)
        mu_0 = self._mu_0(cons_tree[cons_tree.last_period-self.period_len], ce_tree[ce_tree.last_period-self.period_len])
        mu_tree_0.set_value(mu_tree_0.last_period-self.period_len, mu_0)

        # marginal utility w.r.t. next-period (terminal) consumption
        next_term = self.b * (1.0 - self.b) / (1.0 - self.b * self.growth_term**self.r)
        mu_1 = utility_tree[utility_tree.last_period-self.period_len]**(1-self.r) * next_term * cons_tree.last**(self.r-1.0)
        mu_tree_1.set_value(mu_tree_1.last_period-self.period_len, mu_1)

    def _certain_equivalence(self, period, damage_period, utility_tree):
        """Calculate certainty equivalence utility. If we are between decision nodes, i.e. no branching,
        then certainty equivalent utility at time period depends only on the utility next period
        given information known today. Otherwise the certainty equivalent utility is the probability
        weighted sum of next period utility over the partition reachable from the state.
        """
        if utility_tree.is_information_period(period):
            damage_nodes = self.tree.get_nodes_in_period(damage_period+1)
            probs = self.tree.node_prob[damage_nodes[0]:damage_nodes[1]+1]
            # even/odd entries are the up/down children of each current node
            even_probs = probs[::2]
            odd_probs = probs[1::2]
            even_util = ((utility_tree.get_next_period_array(period)[::2])**self.a) * even_probs
            odd_util = ((utility_tree.get_next_period_array(period)[1::2])**self.a) * odd_probs
            ave_util = (even_util + odd_util) / (even_probs + odd_probs)
            cert_equiv = ave_util**(1.0/self.a)
        else:
            # no branching implies certainty equivalent utility at time period depends only on
            # the utility next period given information known today
            cert_equiv = utility_tree.get_next_period_array(period)

        return cert_equiv

    def _utility_generator(self, m, utility_tree, cons_tree, cost_tree, ce_tree, cons_adj=0.0):
        """Generator for calculating utility for each utility period besides the terminal utility.

        Walks the utility tree backwards in time, yielding (utility, period)
        pairs; `cons_adj` is added to period-0 consumption (used for
        sensitivity analysis).
        """
        periods = utility_tree.periods[::-1]

        for period in periods[1:]:
            damage_period = utility_tree.between_decision_times(period)
            cert_equiv = self._certain_equivalence(period, damage_period, utility_tree)

            # recompute cost/damage only when entering a new decision period;
            # otherwise period_cost/period_damage persist from the previous
            # (later-in-time) iteration of this backward walk
            if utility_tree.is_decision_period(period+self.period_len):
                damage_nodes = self.tree.get_nodes_in_period(damage_period)
                period_mitigation = m[damage_nodes[0]:damage_nodes[1]+1]
                period_ave_mitigation = self.damage.average_mitigation(m, damage_period)
                period_cost = self.cost.cost(damage_period, period_mitigation, period_ave_mitigation)
                period_damage = self.damage.damage_function(m, damage_period)
                cost_tree.set_value(cost_tree.index_below(period+self.period_len), period_cost)

            period_consumption = self.potential_cons[damage_period] * (1.0 - period_damage) * (1.0 - period_cost)
            period_consumption[period_consumption <= 0.0] = 1e-18

            if not utility_tree.is_decision_period(period):
                # between decision times: geometrically interpolate consumption
                # toward next period's value
                next_consumption = cons_tree.get_next_period_array(period)
                segment = period - utility_tree.decision_times[damage_period]
                interval = segment + utility_tree.subinterval_len

                if utility_tree.is_decision_period(period+self.period_len):
                    if period < utility_tree.decision_times[-2]:
                        # undo next period's cost and apply this period's cost
                        next_cost = cost_tree[period+self.period_len]
                        next_consumption *= (1.0 - np.repeat(period_cost,2)) / (1.0 - next_cost)
                        next_consumption[next_consumption<=0.0] = 1e-18

                if period < utility_tree.decision_times[-2]:
                    # branching ahead: each current node has two children (repeat by 2)
                    temp_consumption = next_consumption/np.repeat(period_consumption,2)
                    period_consumption = np.sign(temp_consumption)*(np.abs(temp_consumption)**(segment/float(interval))) \
                                         * np.repeat(period_consumption,2)
                else:
                    temp_consumption = next_consumption/period_consumption
                    period_consumption = np.sign(temp_consumption)*(np.abs(temp_consumption)**(segment/float(interval))) \
                                         * period_consumption

            if period == 0:
                period_consumption += cons_adj

            # Epstein-Zin recursion: u = ((1-b) c^r + b CE^r)^(1/r)
            ce_term = self.b * cert_equiv**self.r
            ce_tree.set_value(period, ce_term)
            cons_tree.set_value(period, period_consumption)
            u = ((1.0-self.b)*period_consumption**self.r + ce_term)**(1.0/self.r)
            yield u, period

    def utility(self, m, return_trees=False):
        """Calculating utility for the specific mitigation decisions `m`.

        Parameters
        ----------
        m : ndarray or list
            array of mitigations
        return_trees : bool
            True if method should return trees calculated in producing the utility

        Returns
        -------
        ndarray or dict
            dict of `BaseStorageTree` objects if return_trees else ndarray with utility at period 0

        Examples
        ---------
        Assuming we have declared a EZUtility object as 'ezu' and have a mitigation array 'm'

        >>> ezu.utility(m)
        array([ 9.83391921])
        >>> tree_dict = ezu.utility(m, return_trees=True)

        """
        utility_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        cons_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        ce_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        cost_tree = SmallStorageTree(decision_times=self.decision_times)

        self._end_period_utility(m, utility_tree, cons_tree, cost_tree)
        it = self._utility_generator(m, utility_tree, cons_tree, cost_tree, ce_tree)
        for u, period in it:
            utility_tree.set_value(period, u)

        if return_trees:
            return {'Utility':utility_tree, 'Consumption':cons_tree, 'Cost':cost_tree, 'CertainEquivalence':ce_tree}
        return utility_tree[0]

    def adjusted_utility(self, m, period_cons_eps=None, node_cons_eps=None, final_cons_eps=0.0,
                         first_period_consadj=0.0, return_trees=False):
        """Calculating adjusted utility for sensitivity analysis. Used e.g. to find zero-coupon bond price.
        Values in parameters are used to adjusted the utility in different ways.

        Parameters
        ----------
        m : ndarray
            array of mitigations
        period_cons_eps : ndarray, optional
            array of increases in consumption per period
        node_cons_eps : `SmallStorageTree`, optional
            increases in consumption per node
        final_cons_eps : float, optional
            value to increase the final utilities by
        first_period_consadj : float, optional
            value to increase consumption at period 0 by
        return_trees : bool, optional
            True if method should return trees calculated in producing the utility

        Returns
        -------
        ndarray or tuple
            tuple of `BaseStorageTree` if return_trees else ndarray with utility at period 0

        Examples
        ---------
        Assuming we have declared a EZUtility object as 'ezu' and have a mitigation array 'm'

        >>> ezu.adjusted_utility(m, final_cons_eps=0.1)
        array([ 9.83424045])
        >>> tree_dict = ezu.adjusted_utility(m, final_cons_eps=0.1, return_trees=True)

        >>> arr = np.zeros(int(ezu.decision_times[-1]/ezu.period_len) + 1)
        >>> arr[-1] = 0.1
        >>> ezu.adjusted_utility(m, period_cons_eps=arr)
        array([ 9.83424045])

        >>> bst = BigStorageTree(5.0, [0, 15, 45, 85, 185, 285, 385])
        >>> bst.set_value(bst.last_period, np.repeat(0.01, len(bst.last)))
        >>> ezu.adjusted_utility(m, node_cons_eps=bst)
        array([ 9.83391921])

        The last example differs from the rest in that the last values of the `node_cons_eps` will never be
        used. Hence if you want to update the last period consumption, use one of these two methods.

        >>> ezu.adjusted_utility(m, first_period_consadj=0.01)
        array([ 9.84518772])

        """
        utility_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        cons_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        ce_tree = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)
        cost_tree = SmallStorageTree(decision_times=self.decision_times)

        periods = utility_tree.periods[::-1]
        if period_cons_eps is None:
            period_cons_eps = np.zeros(len(periods))
        if node_cons_eps is None:
            node_cons_eps = BigStorageTree(subinterval_len=self.period_len, decision_times=self.decision_times)

        self._end_period_utility(m, utility_tree, cons_tree, cost_tree)

        it = self._utility_generator(m, utility_tree, cons_tree, cost_tree, ce_tree, first_period_consadj)
        i = len(utility_tree)-2
        for u, period in it:
            if period == periods[1]:
                # first (latest) non-terminal period: build marginal utilities
                # for first-order adjustment of the terminal perturbations
                mu_0 = (1.0-self.b) * (u/cons_tree[period])**(1.0-self.r)
                next_term = self.b * (1.0-self.b) / (1.0-self.b*self.growth_term**self.r)
                mu_1 = (u**(1.0-self.r)) * next_term * (cons_tree.last**(self.r-1.0))
                u += (final_cons_eps+period_cons_eps[-1]+node_cons_eps.last) * mu_1
                u += (period_cons_eps[i]+node_cons_eps.tree[period]) * mu_0
                utility_tree.set_value(period, u)
            else:
                # NOTE(review): the updated first/second marginal utilities are
                # unpacked into m_1/m_2 and discarded, so mu_1 keeps its
                # first-iteration value -- confirm this is intended.
                mu_0, m_1, m_2 = self._period_marginal_utility(mu_0, mu_1, m, period, utility_tree, cons_tree, ce_tree)
                u += (period_cons_eps[i] + node_cons_eps.tree[period])*mu_0
                utility_tree.set_value(period, u)
            i -= 1

        if return_trees:
            return utility_tree, cons_tree, cost_tree, ce_tree
        return utility_tree.tree[0]

    def _mu_0(self, cons, ce_term):
        """Marginal utility with respect to consumption function."""
        t1 = (1.0 - self.b)*cons**(self.r-1.0)
        t2 = (ce_term - (self.b-1.0)*cons**self.r)**((1.0/self.r)-1.0)
        return t1 * t2

    def _mu_1(self, cons, prob, cons_1, cons_2, ce_1, ce_2, do_print=False):
        """Marginal utility with respect to consumption next period."""
        # NOTE(review): do_print is accepted but unused.
        t1 = (1.0-self.b) * self.b * prob * cons_1**(self.r-1.0)
        t2 = (ce_1 - (self.b-1.0) * cons_1**self.r )**((self.a/self.r)-1)
        t3 = (prob * (ce_1 - (self.b*(cons_1**self.r)) + cons_1**self.r)**(self.a/self.r) \
              + (1.0-prob) * (ce_2 - (self.b-1.0) * cons_2**self.r)**(self.a/self.r))**((self.r/self.a)-1.0)
        t4 = prob * (ce_1-self.b * (cons_1**self.r) + cons_1**self.r)**(self.a/self.r) \
             + (1.0-prob) * (ce_2 - self.b * (cons_2**self.r) + cons_2**self.r)**(self.a/self.r)
        t5 = (self.b * t4**(self.r/self.a) - (self.b-1.0) * cons**self.r )**((1.0/self.r)-1.0)

        return t1 * t2 * t3 * t5

    def _mu_2(self, cons, prev_cons, ce_term):
        """Marginal utility with respect to last period consumption."""
        t1 = (1.0-self.b) * self.b * prev_cons**(self.r-1.0)
        t2 = ((1.0 - self.b) * cons**self.r - (self.b - 1.0) * self.b \
              * prev_cons**self.r + self.b * ce_term)**((1.0/self.r)-1.0)
        return t1 * t2

    def _period_marginal_utility(self, prev_mu_0, prev_mu_1, m, period, utility_tree, cons_tree, ce_tree):
        """Marginal utility for each node in a period.

        Returns (mu_0, mu_1, mu_2); mu_2 is None in non-branching periods.
        NOTE(review): prev_mu_0 and prev_mu_1 are accepted but not referenced.
        """
        damage_period = utility_tree.between_decision_times(period)
        mu_0 = self._mu_0(cons_tree[period], ce_tree[period])

        prev_ce = ce_tree.get_next_period_array(period)
        prev_cons = cons_tree.get_next_period_array(period)
        if utility_tree.is_information_period(period):
            # branching: split next-period arrays into up (even) / down (odd) children
            probs = self.tree.get_probs_in_period(damage_period+1)
            up_prob = np.array([probs[i]/(probs[i]+probs[i+1]) for i in range(0, len(probs), 2)])
            down_prob = 1.0 - up_prob

            up_cons = prev_cons[::2]
            down_cons = prev_cons[1::2]
            up_ce = prev_ce[::2]
            down_ce = prev_ce[1::2]

            mu_1 = self._mu_1(cons_tree[period], up_prob, up_cons, down_cons, up_ce, down_ce)
            mu_2 = self._mu_1(cons_tree[period], down_prob, down_cons, up_cons, down_ce, up_ce)
            return mu_0, mu_1, mu_2
        else:
            mu_1 = self._mu_2(cons_tree[period], prev_cons, prev_ce)
            return mu_0, mu_1, None

    def partial_grad(self, m, i, delta=1e-8):
        """Calculate the ith element of the gradient vector.

        Parameters
        ----------
        m : ndarray
            array of mitigations
        i : int
            node to calculate partial grad for
        delta : float, optional
            finite-difference step size

        Returns
        -------
        float
            gradient element

        """
        # central finite difference of the utility w.r.t. m[i]
        m_copy = m.copy()
        m_copy[i] -= delta
        minus_utility = self.utility(m_copy)
        m_copy[i] += 2*delta
        plus_utility = self.utility(m_copy)
        grad = (plus_utility-minus_utility) / (2*delta)
        return grad
24 | 25 | functions: 26 | ---------- 27 | 28 | additional_ghg_emission(m, utility) : 29 | 30 | store_trees(prefix=None, start_year=2015, tree_dict = {}): store tree in csv files. 31 | 32 | - called in ClimateOutput.save_output() to save Utility, Consumption, Cost, and Certainty Equivalence trees, 33 | and in RiskDecomposition.store_output() to store the SDF and DeltaConsumption trees. 34 | 35 | - store_trees calls tree.write_columns() for each of the args provided. 36 | - tree.write_columns() is defined in storage_tree.py 37 | 38 | delta_consumption(m, utility, cons_tree, cost_tree, delta_m): 39 | 40 | constraint_first_period(utility, first_node, m_size): 41 | 42 | find_ir(m, utility, payment, a=0.0, b=1.0): 43 | 44 | find_term_structure(m, utility, payment, a=0.0, b=1.5): 45 | 46 | find_bec(m, utility, constraint_cost, a=-150, b=150): 47 | 48 | perpetuity_yield(price, start_date, a=0.1, b=100000): 49 | 50 | """ 51 | import numpy as np 52 | from scipy.optimize import brentq 53 | from ezclimate.storage_tree import BigStorageTree, SmallStorageTree 54 | from ezclimate.optimization import GeneticAlgorithm, GradientSearch 55 | from ezclimate.tools import write_columns_csv, append_to_existing, import_csv 56 | 57 | def additional_ghg_emission(m, utility): 58 | """Calculate the emission added by every node. 
def additional_ghg_emission(m, utility):
    """Calculate the additional GHG emission in every node of the tree.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    utility : `Utility` object
        object of utility class

    Returns
    -------
    ndarray
        additional emission in nodes

    """
    additional_emission = np.zeros(len(m))
    cache = set()
    # Every final-state node's path covers all of its ancestors; the cache
    # ensures each node is assigned exactly once.
    for node in range(utility.tree.num_final_states, len(m)):
        path = utility.tree.get_path(node)
        for i in range(len(path)):
            if path[i] not in cache:
                additional_emission[path[i]] = (1.0 - m[path[i]]) * utility.damage.bau.emission_to_ghg[i]
                cache.add(path[i])
    return additional_emission

def store_trees(prefix=None, start_year=2015, tree_dict=None):
    """Saves values of `BaseStorageTree` objects. The file is saved into the 'data' directory
    in the current working directory. If there is no 'data' directory, one is created.

    Parameters
    ----------
    prefix : str, optional
        prefix to be added to file_name
    start_year : int, optional
        start year of analysis
    tree_dict : dict, optional
        mapping of name -> `BaseStorageTree` object to be saved

    """
    # `tree_dict` previously defaulted to a shared mutable `{}`; `None`
    # avoids the mutable-default-argument pitfall.
    if prefix is None:
        prefix = ""
    if tree_dict is None:
        tree_dict = {}
    for name, tree in tree_dict.items():
        tree.write_columns(prefix + "trees", name, start_year)

def delta_consumption(m, utility, cons_tree, cost_tree, delta_m):
    """Calculate the changes in consumption and the mitigation cost component
    of consumption when increasing period 0 mitigation with `delta_m`.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    utility : `Utility` object
        object of utility class
    cons_tree : `BigStorageTree` object
        consumption storage tree of consumption values
        from optimal mitigation values
    cost_tree : `SmallStorageTree` object
        cost storage tree of cost values from optimal mitigation values
    delta_m : float
        value to increase period 0 mitigation by

    Returns
    -------
    tuple
        (storage tree of changes in consumption, ndarray of costs in first
        sub periods, utility at time 0)

    """
    m_copy = m.copy()
    m_copy[0] += delta_m

    tree_dict = utility.utility(m_copy, return_trees=True)
    new_cons_tree = tree_dict['Consumption']
    new_cost_tree = tree_dict['Cost']
    new_utility_tree = tree_dict['Utility']

    # Finite-difference change in consumption per unit of extra mitigation.
    for period in new_cons_tree.periods:
        new_cons_tree.tree[period] = (new_cons_tree.tree[period] - cons_tree.tree[period]) / delta_m

    first_period_intervals = new_cons_tree.first_period_intervals
    cost_array = np.zeros((first_period_intervals, 2))
    for i in range(first_period_intervals):
        potential_consumption = (1.0 + utility.cons_growth)**(new_cons_tree.subinterval_len * i)
        cost_array[i, 0] = potential_consumption * cost_tree[0]
        cost_array[i, 1] = (potential_consumption * new_cost_tree[0] - cost_array[i, 0]) / delta_m

    return new_cons_tree, cost_array, new_utility_tree[0]

def constraint_first_period(utility, first_node, m_size):
    """Calculate the optimal mitigation when constraining the first period
    mitigation to `first_node`.

    Parameters
    ----------
    utility : `Utility` object
        object of utility class
    first_node : float
        value to constrain first period mitigation to
    m_size : int
        number of elements in the mitigation array (nodes in the tree)

    Returns
    -------
    ndarray
        optimal mitigation array under the first-period constraint

    Note
    ----
    requires the 'scipy' package

    """
    fixed_values = np.array([first_node])
    fixed_indicies = np.array([0])
    ga_model = GeneticAlgorithm(pop_amount=400, num_generations=200, cx_prob=0.8, mut_prob=0.5, bound=1.5,
                                num_feature=m_size, utility=utility, fixed_values=fixed_values,
                                fixed_indices=fixed_indicies, print_progress=True)

    gs_model = GradientSearch(var_nums=m_size, utility=utility, accuracy=1e-7,
                              iterations=200, fixed_values=fixed_values, fixed_indices=fixed_indicies,
                              print_progress=True)

    final_pop, fitness = ga_model.run()
    sort_pop = final_pop[np.argsort(fitness)][::-1]
    new_m, new_utility = gs_model.run(initial_point_list=sort_pop, topk=1)

    print("SCC and Utility after constrained gs: {}, {}".format(new_m[0], new_utility))

    # Counter in a list so the nested function can mutate it without `global`.
    u_f_calls = [0]

    def new_iu(m):
        # Negated utility, since fmin minimizes; print progress every 500 calls.
        uu = -1.*utility.utility(m, return_trees=False)
        u_f_calls[0] += 1
        if u_f_calls[0] % 500 == 0:
            print(u_f_calls[0], uu[0], m)
        return uu

    # Polish the gradient-search result with a derivative-free simplex search.
    from scipy.optimize import fmin
    newfmin_out = fmin(new_iu, new_m, xtol=5.e-5, maxfun=10**5, maxiter=2*(10**5), full_output=True)

    new_m = newfmin_out[0]

    return new_m
def find_ir(m, utility, payment, a=0.0, b=1.0):
    """Find the price of a bond that creates equal utility at time 0 as adding
    `payment` to the value of consumption in the final period. The purpose of
    this function is to find the interest rate embedded in the `EZUtility` model.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    utility : `Utility` object
        object of utility class
    payment : float
        value added to consumption in the final period
    a : float, optional
        initial guess
    b : float, optional
        initial guess - f(b) needs to give different sign than f(a)

    Returns
    -------
    float
        bond price that equalizes the two utilities (root found by brentq)

    Note
    ----
    requires the 'scipy' package

    """
    # This utility does not depend on `price`; compute it once instead of on
    # every brentq iteration.
    utility_with_final_payment = utility.adjusted_utility(m, final_cons_eps=payment)

    def min_func(price):
        first_period_eps = payment * price
        utility_with_initial_payment = utility.adjusted_utility(m, first_period_consadj=first_period_eps)
        return utility_with_final_payment - utility_with_initial_payment

    return brentq(min_func, a, b)

def find_term_structure(m, utility, payment, a=0.0, b=1.5):
    """Find the price of a bond that creates equal utility at time 0 as adding
    `payment` to the value of consumption in the penultimate sub-period. The
    purpose of this function is to find the interest rate embedded in the
    `EZUtility` model.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    utility : `Utility` object
        object of utility class
    payment : float
        value added to consumption in the penultimate sub-period
    a : float, optional
        initial guess
    b : float, optional
        initial guess - f(b) needs to give different sign than f(a)

    Returns
    -------
    float
        bond price that equalizes the two utilities (root found by brentq)

    Note
    ----
    requires the 'scipy' package

    """
    # The utility with the future payment is invariant in `price`; hoist it
    # out of the root-finding loop.
    period_cons_eps = np.zeros(int(utility.decision_times[-1]/utility.period_len) + 1)
    period_cons_eps[-2] = payment
    utility_with_payment = utility.adjusted_utility(m, period_cons_eps=period_cons_eps)

    def min_func(price):
        first_period_eps = payment * price
        utility_with_initial_payment = utility.adjusted_utility(m, first_period_consadj=first_period_eps)
        return utility_with_payment - utility_with_initial_payment

    return brentq(min_func, a, b)

def find_bec(m, utility, constraint_cost, a=-150, b=150):
    """Used to find a value for consumption that equalizes utility at time 0 in two different solutions.

    Parameters
    ----------
    m : ndarray or list
        array of mitigation
    utility : `Utility` object
        object of utility class
    constraint_cost : float
        utility cost of constraining period 0 to zero
    a : float, optional
        initial guess
    b : float, optional
        initial guess - f(b) needs to give different sign than f(a)

    Returns
    -------
    float
        consumption adjustment that equalizes the two utilities

    Note
    ----
    requires the 'scipy' package

    """
    # Base utility is invariant in `delta_con`; compute it once. (The original
    # also printed debug output on every brentq iteration; removed.)
    base_utility = utility.utility(m)

    def min_func(delta_con):
        new_utility = utility.adjusted_utility(m, first_period_consadj=delta_con)
        return new_utility - base_utility - constraint_cost

    return brentq(min_func, a, b)
def perpetuity_yield(price, start_date, a=0.1, b=100000):
    """Find the yield of a perpetuity starting at year `start_date`.

    Parameters
    ----------
    price : float
        price of bond ending at `start_date`
    start_date : int
        start year of perpetuity
    a : float, optional
        initial guess
    b : float, optional
        initial guess - f(b) needs to give different sign than f(a)

    Returns
    -------
    float
        the perpetuity yield (root found by brentq)

    Note
    ----
    requires the 'scipy' package

    """

    def min_func(perp_yield):
        # Difference between the observed price and the discounted value of a
        # perpetuity paying from `start_date` at the candidate yield.
        return price - (100. / (perp_yield+100.))**start_date * (perp_yield + 100)/perp_yield

    return brentq(min_func, a, b)


class ClimateOutput(object):
    """Calculate and save output from the EZ-Climate model.

    Parameters
    ----------
    utility : `Utility` object
        object of utility class

    Attributes
    ----------
    utility : `Utility` object
        object of utility class
    prices : ndarray
        SCC prices
    ave_mitigations : ndarray
        average mitigations
    ave_emissions : ndarray
        average emissions
    expected_period_price : ndarray
        expected SCC for the period
    expected_period_mitigation : ndarray
        expected mitigation for the period
    expected_period_emissions : ndarray
        expected emission for the period

    """

    def __init__(self, utility):
        self.utility = utility
        # All output arrays are populated by `calculate_output`.
        self.prices = None
        self.ave_mitigations = None
        self.ave_emissions = None
        self.expected_period_price = None
        self.expected_period_mitigation = None
        self.expected_period_emissions = None
        self.ghg_levels = None

    def calculate_output(self, m):
        """Calculated values based on optimal mitigation. For every **node** the function calculates and saves

        * average mitigation
        * average emission
        * GHG level
        * SCC

        as attributes.

        For every **period** the function also calculates and saves

        * expected SCC/price
        * expected mitigation
        * expected emission

        as attributes.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation

        """

        bau = self.utility.damage.bau
        tree = self.utility.tree
        periods = tree.num_periods

        self.prices = np.zeros(len(m))
        self.ave_mitigations = np.zeros(len(m))
        self.ave_emissions = np.zeros(len(m))
        self.expected_period_price = np.zeros(periods)
        self.expected_period_mitigation = np.zeros(periods)
        self.expected_period_emissions = np.zeros(periods)
        additional_emissions = additional_ghg_emission(m, self.utility)
        self.ghg_levels = self.utility.damage.ghg_level(m)

        for period in range(0, periods):
            years = tree.decision_times[period]
            period_years = tree.decision_times[period+1] - tree.decision_times[period]
            nodes = tree.get_nodes_in_period(period)
            num_nodes_period = 1 + nodes[1] - nodes[0]
            period_lens = tree.decision_times[:period+1]

            for node in range(nodes[0], nodes[1]+1):
                path = np.array(tree.get_path(node, period))
                new_m = m[path]
                # Time-weighted average mitigation along the node's path.
                mean_mitigation = np.dot(new_m, period_lens) / years
                price = self.utility.cost.price(years, m[node], mean_mitigation)
                self.prices[node] = price
                self.ave_mitigations[node] = self.utility.damage.average_mitigation_node(m, node, period)
                self.ave_emissions[node] = additional_emissions[node] / (period_years*bau.emission_to_bau)

            # Probability-weighted expectations across the period's nodes.
            probs = tree.get_probs_in_period(period)
            self.expected_period_price[period] = np.dot(self.prices[nodes[0]:nodes[1]+1], probs)
            self.expected_period_mitigation[period] = np.dot(self.ave_mitigations[nodes[0]:nodes[1]+1], probs)
            self.expected_period_emissions[period] = np.dot(self.ave_emissions[nodes[0]:nodes[1]+1], probs)

    def save_output(self, m, prefix=None):
        """Function to save calculated values in `calculate_output` in the file `prefix` + 'node_period_output'
        in the 'data' directory in the current working directory.

        The function also saves the values calculated in the utility function in the file
        `prefix` + 'tree' in the 'data' directory in the current working directory.

        If there is no 'data' directory, one is created.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        prefix : str, optional
            prefix to be added to file_name

        """
        if prefix is not None:
            prefix += "_"
        else:
            prefix = ""

        write_columns_csv([m, self.prices, self.ave_mitigations, self.ave_emissions, self.ghg_levels],
                          prefix+"node_period_output", ["Node", "Mitigation", "Prices", "Average Mitigation",
                          "Average Emission", "GHG Level"], [list(range(len(m)))])

        append_to_existing([self.expected_period_price, self.expected_period_mitigation, self.expected_period_emissions],
                           prefix+"node_period_output", header=["Period", "Expected Price", "Expected Mitigation",
                           "Expected Emission"], index=[list(range(self.utility.tree.num_periods))], start_char='\n')

        tree_dict = self.utility.utility(m, return_trees=True)
        store_trees(prefix=prefix, tree_dict=tree_dict)
class RiskDecomposition(object):
    """Calculate and save analysis of output from the EZ-Climate model.

    Parameters
    ----------
    utility : `Utility` object
        object of utility class

    Attributes
    ----------
    utility : `Utility` object
        object of utility class
    sdf_tree : `BaseStorageTree` object
        SDF for each node
    expected_damages : ndarray
        expected damages in each period
    risk_premiums : ndarray
        risk premium in each period
    expected_sdf : ndarray
        expected SDF in each period
    cross_sdf_damages : ndarray
        cross term between the SDF and damages
    discounted_expected_damages : ndarray
        expected discounted damages for each period
    net_discount_damages : ndarray
        net discount damage, i.e. when cost is also accounted for
    cov_term : ndarray
        covariance between SDF and damages

    """

    def __init__(self, utility):
        self.utility = utility
        self.sdf_tree = BigStorageTree(utility.period_len, utility.decision_times)
        self.sdf_tree.set_value(0, np.array([1.0]))

        # One slot per period in the SDF tree.
        n = len(self.sdf_tree)
        self.expected_damages = np.zeros(n)
        self.risk_premiums = np.zeros(n)
        self.expected_sdf = np.zeros(n)
        self.cross_sdf_damages = np.zeros(n)
        self.discounted_expected_damages = np.zeros(n)
        self.net_discount_damages = np.zeros(n)
        self.cov_term = np.zeros(n)

        # The SDF at time 0 is 1 by definition.
        self.expected_sdf[0] = 1.0

    def save_output(self, m, prefix=None):
        """Save attributes calculated in `sensitivity_analysis` into the file prefix + `sensitivity_output`
        in the `data` directory in the current working directory.

        Furthermore, the perpetuity yield, the discount factor for the last period is calculated, and SCC,
        expected damage and risk premium for the first period is calculated and saved in into the file
        prefix + `tree` in the `data` directory in the current working directory. If there is no `data` directory,
        one is created.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        prefix : str, optional
            prefix to be added to file_name

        Note
        ----
        NOTE(review): uses `self.delta_cons_tree`, which is not created in
        `__init__` — presumably set by a `sensitivity_analysis` step before
        this method is called; confirm the expected call order.

        """
        end_price = find_term_structure(m, self.utility, 0.01)
        perp_yield = perpetuity_yield(end_price, self.sdf_tree.periods[-2])

        # Scale damages so their total matches the period-0 SCC.
        damage_scale = self.utility.cost.price(0, m[0], 0) / (self.net_discount_damages.sum()+self.risk_premiums.sum())
        scaled_discounted_ed = self.net_discount_damages * damage_scale
        scaled_risk_premiums = self.risk_premiums * damage_scale

        if prefix is not None:
            prefix += "_"
        else:
            prefix = ""

        write_columns_csv([self.expected_sdf, self.net_discount_damages, self.expected_damages, self.risk_premiums,
                           self.cross_sdf_damages, self.discounted_expected_damages, self.cov_term,
                           scaled_discounted_ed, scaled_risk_premiums], prefix + "sensitivity_output",
                          ["Year", "Discount Prices", "Net Expected Damages", "Expected Damages", "Risk Premium",
                           "Cross SDF & Damages", "Discounted Expected Damages", "Cov Term", "Scaled Net Expected Damages",
                           "Scaled Risk Premiums"], [self.sdf_tree.periods.astype(int)+2015])

        append_to_existing([[end_price], [perp_yield], [scaled_discounted_ed.sum()], [scaled_risk_premiums.sum()],
                            [self.utility.cost.price(0, m[0], 0)]], prefix+"sensitivity_output",
                           header=["Zero Bound Price", "Perp Yield", "Expected Damages", "Risk Premium",
                                   "SCC"], start_char='\n')

        store_trees(prefix=prefix, tree_dict={'SDF': self.sdf_tree, 'DeltaConsumption': self.delta_cons_tree})


class ConstraintAnalysis(object):
    """Analysis of the cost of constraining period-0 mitigation to a fixed value.

    Parameters
    ----------
    run_name : str
        name of the run; used to locate the optimal-mitigation output file
    utility : `Utility` object
        object of utility class
    const_value : float
        value to constrain first-period mitigation to
    opt_m : ndarray, optional
        unconstrained optimal mitigation; read from file when not given

    """

    def __init__(self, run_name, utility, const_value, opt_m=None):
        self.run_name = run_name
        self.utility = utility
        self.cfp_m = constraint_first_period(utility, const_value, utility.tree.num_decision_nodes)
        self.opt_m = opt_m
        if self.opt_m is None:
            self.opt_m = self._get_optimal_m()

        self.con_cost = self._constraint_cost()
        self.delta_u = self._first_period_delta_udiff()

        self.delta_c = self._delta_consumption()
        self.delta_c_billions = self.delta_c * self.utility.cost.cons_per_ton \
                                * self.utility.damage.bau.emit_level[0]
        self.delta_emission_gton = self.opt_m[0]*self.utility.damage.bau.emit_level[0]
        self.deadweight = self.delta_c*self.utility.cost.cons_per_ton / self.opt_m[0]

        self.delta_u2 = self._first_period_delta_udiff2()
        self.marginal_benefit = (self.delta_u2 / self.delta_u) * self.utility.cost.cons_per_ton
        self.marginal_cost = self.utility.cost.price(0, self.cfp_m[0], 0)

    def _get_optimal_m(self):
        """Read the optimal mitigation from the run's node/period output file."""
        try:
            header, index, data = import_csv(self.run_name+"_node_period_output")
        except Exception:
            # The original bare `except` swallowed the error and then crashed
            # with UnboundLocalError on `data`; re-raise after the message.
            print("No such file for the optimal mitigation..")
            raise
        return data[:, 0]

    def _constraint_cost(self):
        """Utility lost by imposing the first-period constraint."""
        opt_u = self.utility.utility(self.opt_m)
        cfp_u = self.utility.utility(self.cfp_m)
        return opt_u - cfp_u

    def _delta_consumption(self):
        """Consumption change that compensates for the constraint cost."""
        return find_bec(self.cfp_m, self.utility, self.con_cost)

    def _first_period_delta_udiff(self):
        """Utility change from a 0.01 increase in first-period consumption."""
        u_given_delta_con = self.utility.adjusted_utility(self.cfp_m, first_period_consadj=0.01)
        cfp_u = self.utility.utility(self.cfp_m)
        return u_given_delta_con - cfp_u

    def _first_period_delta_udiff2(self):
        """Utility change from a 0.01 increase in first-period mitigation."""
        m = self.cfp_m.copy()
        m[0] += 0.01
        u = self.utility.utility(m)
        cfp_u = self.utility.utility(self.cfp_m)
        return u - cfp_u

    def save_output(self, prefix=None):
        """Save the analysis into `prefix` + run_name + '_constraint_output'.

        Parameters
        ----------
        prefix : str, optional
            prefix to be added to file_name

        """
        if prefix is not None:
            prefix += "_"
        else:
            prefix = ""

        write_columns_csv([self.con_cost, [self.delta_c], [self.delta_c_billions], [self.delta_emission_gton],
                           [self.deadweight], self.delta_u, self.marginal_benefit, [self.marginal_cost]],
                          prefix + self.run_name + "_constraint_output",
                          ["Constraint Cost", "Delta Consumption", "Delta Consumption $b",
                           "Delta Emission Gton", "Deadweight Cost", "Marginal Impact Utility",
                           "Marginal Benefit Emissions Reduction", "Marginal Cost Emission Reduction"])
import numpy as np
import multiprocessing
from ezclimate.tools import _pickle_method, _unpickle_method
try:
    import copyreg
except ImportError:
    # Python 2 fallback; narrowed from the original bare `except`.
    import copy_reg as copyreg
import types

# Register a pickler for bound methods so they can be shipped to
# multiprocessing worker processes.
copyreg.pickle(types.MethodType, _pickle_method, _unpickle_method)

class GeneticAlgorithm(object):
    """Optimization algorithm for the EZ-Climate model.

    Parameters
    ----------
    pop_amount : int
        number of individuals in the population
    num_feature : int
        number of elements in each individual, i.e. number of nodes in tree-model
    num_generations : int
        number of generations of the populations to be evaluated
    bound : float
        upper bound of mitigation in each node
    cx_prob : float
        probability of mating
    mut_prob : float
        probability of mutation
    utility : `Utility` object
        object of utility class
    fixed_values : ndarray, optional
        nodes to keep fixed
    fixed_indices : ndarray, optional
        indices of nodes to keep fixed
    print_progress : bool, optional
        if the progress of the evolution should be printed

    Attributes
    ----------
    pop_amount : int
        number of individuals in the population
    num_feature : int
        number of elements in each individual, i.e. number of nodes in tree-model
    num_gen : int
        number of generations of the populations to be evaluated
    bound : float
        upper bound of mitigation in each node
    cx_prob : float
        probability of mating
    mut_prob : float
        probability of mutation
    u : `Utility` object
        object of utility class
    fixed_values : ndarray, optional
        nodes to keep fixed
    fixed_indices : ndarray, optional
        indices of nodes to keep fixed
    print_progress : bool, optional
        if the progress of the evolution should be printed

    """
    def __init__(self, pop_amount, num_generations, cx_prob, mut_prob, bound, num_feature, utility,
                 fixed_values=None, fixed_indices=None, print_progress=False):
        self.num_feature = num_feature
        self.pop_amount = pop_amount
        self.num_gen = num_generations
        self.cx_prob = cx_prob
        self.mut_prob = mut_prob
        self.u = utility
        self.bound = bound
        self.fixed_values = fixed_values
        self.fixed_indices = fixed_indices
        self.print_progress = print_progress

    def _generate_population(self, size):
        """Return a 2D-array of random values in the given bound as the initial population."""
        pop = np.random.random([size, self.num_feature])*self.bound
        if self.fixed_values is not None:
            self.fixed_values = self.fixed_values.flatten()
            for ind in pop:
                ind[self.fixed_indices] = self.fixed_values[self.fixed_indices]
        return pop

    def _evaluate(self, individual):
        """Returns the utility of the given individual."""
        return self.u.utility(individual)

    def _select(self, pop, rate):
        """Returns a 2D-array of randomly selected individuals.

        Parameters
        ----------
        pop : ndarray
            population given by 2D-array with shape ('pop_amount', 'num_feature')
        rate : float
            the probability of an individual being selected

        Returns
        -------
        ndarray
            selected individuals

        """
        index = np.random.choice(self.pop_amount, int(rate*self.pop_amount), replace=False)
        return pop[index, :]

    def _random_index(self, individuals, size):
        """Generate `size` random indices into `individuals`.

        Parameters
        ----------
        individuals : ndarray or list
            2D-array of individuals
        size : int
            number of indices to generate

        Returns
        -------
        ndarray
            1D-array of indices

        """
        inds_size = len(individuals)
        return np.random.choice(inds_size, size)

    def _selection_tournament(self, pop, k, tournsize, fitness):
        """Select `k` individuals from `pop` using `k` tournaments of
        `tournsize` randomly drawn individuals.

        Parameters
        ----------
        pop : ndarray or list
            2D-array of individuals to select from
        k : int
            number of individuals to select
        tournsize : int
            number of individuals participating in each tournament
        fitness : ndarray
            fitness of each individual in `pop`

        Returns
        -------
        ndarray
            selected individuals

        """
        chosen = []
        for i in range(k):
            index = self._random_index(pop, tournsize)
            aspirants = pop[index]
            aspirants_fitness = fitness[index]
            # The tournament winner is the aspirant with maximal fitness.
            chosen_index = np.where(aspirants_fitness == np.max(aspirants_fitness))[0]
            if len(chosen_index) != 0:
                chosen_index = chosen_index[0]
                chosen.append(aspirants[chosen_index])
        return np.array(chosen)

    def _two_point_cross_over(self, pop):
        """Performs a two-point cross-over of the population in place.

        Parameters
        ----------
        pop : ndarray
            population given by 2D-array with shape ('pop_amount', 'num_feature')

        """
        child_group1 = pop[::2]
        child_group2 = pop[1::2]
        for child1, child2 in zip(child_group1, child_group2):
            if np.random.random() <= self.cx_prob:
                cxpoint1 = np.random.randint(1, self.num_feature)
                cxpoint2 = np.random.randint(1, self.num_feature - 1)
                if cxpoint2 >= cxpoint1:
                    cxpoint2 += 1
                else:  # Swap the two cx points
                    cxpoint1, cxpoint2 = cxpoint2, cxpoint1
                child1[cxpoint1:cxpoint2], child2[cxpoint1:cxpoint2] \
                    = child2[cxpoint1:cxpoint2].copy(), child1[cxpoint1:cxpoint2].copy()
            if self.fixed_values is not None:
                # Restore the constrained nodes after the crossover.
                child1[self.fixed_indices] = self.fixed_values
                child2[self.fixed_indices] = self.fixed_values

    def _uniform_cross_over(self, pop, ind_prob):
        """Performs a uniform cross-over of the population in place.

        Parameters
        ----------
        pop : ndarray
            population given by 2D-array with shape ('pop_amount', 'num_feature')
        ind_prob : float
            probability of feature cross-over

        """
        child_group1 = pop[::2]
        child_group2 = pop[1::2]
        for child1, child2 in zip(child_group1, child_group2):
            size = min(len(child1), len(child2))
            for i in range(size):
                if np.random.random() < ind_prob:
                    child1[i], child2[i] = child2[i], child1[i]

    def _mutate(self, pop, ind_prob, scale=2.0):
        """Mutates individuals' elements in place. The individual has a probability
        of `mut_prob` of being selected and every element in this individual has a
        probability `ind_prob` of being mutated. The mutated value is the current
        value shifted by a scaled random number, floored at zero.

        Parameters
        ----------
        pop : ndarray
            population given by 2D-array with shape ('pop_amount', 'num_feature')
        ind_prob : float
            probability of feature mutation
        scale : float
            scaling constant of the random generated number for mutation

        """
        mutate_index = np.random.choice(self.pop_amount, int(self.mut_prob * self.pop_amount), replace=False)
        for i in mutate_index:
            feature_index = np.random.choice(self.num_feature, int(ind_prob * self.num_feature), replace=False)
            for j in feature_index:
                # Never mutate the constrained (fixed) features.
                if self.fixed_indices is not None and j in self.fixed_indices:
                    continue
                pop[i][j] = max(0.0, pop[i][j]+(np.random.random()-0.5)*scale)

    def _uniform_mutation(self, pop, ind_prob, scale=2.0):
        """Mutates individuals' elements in place. The individual has a probability
        of `mut_prob` of being selected and every element in this individual has a
        probability `ind_prob` of being mutated. The mutated value is the current
        value plus a scaled uniform [-0.5, 0.5] random value.

        Parameters
        ----------
        pop : ndarray
            population given by 2D-array with shape ('pop_amount', 'num_feature')
        ind_prob : float
            probability of feature mutation
        scale : float
            scaling constant of the random generated number for mutation

        """
        pop_len = len(pop)
        mutate_index = np.random.choice(pop_len, int(self.mut_prob * pop_len), replace=False)
        for i in mutate_index:
            prob = np.random.random(self.num_feature)
            inc = (np.random.random(self.num_feature) - 0.5)*scale
            pop[i] += (prob > (1.0-ind_prob)).astype(int)*inc
            # Keep mitigation strictly positive.
            pop[i] = np.maximum(1e-5, pop[i])
            if self.fixed_values is not None:
                self.fixed_values = self.fixed_values.flatten()
                pop[i][self.fixed_indices] = self.fixed_values[self.fixed_indices]

    def _show_evolution(self, fits, pop):
        """Print statistics of the evolution of the population."""
        length = len(pop)
        mean = fits.mean()
        std = fits.std()
        min_val = fits.min()
        max_val = fits.max()
        print (" Min {} \n Max {} \n Avg {}".format(min_val, max_val, mean))
        print (" Std {} \n Population Size {}".format(std, length))
        print (" Best Individual: ", pop[np.argmax(fits)])

    def _survive(self, pop_tmp, fitness_tmp):
        """The 80 percent of the individuals with best fitness survives to
        the next generation.

        Parameters
        ----------
        pop_tmp : ndarray
            population
        fitness_tmp : ndarray
            fitness values of `pop_tmp`

        Returns
        -------
        ndarray
            individuals that survived

        """
        # Sort by descending fitness and keep the top 80%.
        index_fits = np.argsort(fitness_tmp)[::-1]
        fitness = fitness_tmp[index_fits]
        pop = pop_tmp[index_fits]
        num_survive = int(0.8*self.pop_amount)
        survive_pop = np.copy(pop[:num_survive])
        survive_fitness = np.copy(fitness[:num_survive])
        return np.copy(survive_pop), np.copy(survive_fitness)

    def run(self):
        """Start the evolution process.

        The evolution steps are:
            1. Select the individuals to perform cross-over and mutation.
            2. Cross over among the selected candidate.
            3. Mutate result as offspring.
            4. Combine the result of offspring and parent together. And selected the top
               80 percent of original population amount.
            5. Random Generate 20 percent of original population amount new individuals
               and combine the above new population.

        Returns
        -------
        tuple
            final population and the fitness for the final population

        Note
        ----
        Uses the :mod:`~multiprocessing` package.

        """
        print("----------------Genetic Evolution Starting----------------")
        pop = self._generate_population(self.pop_amount)
        pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
        try:
            # Pool.map preserves input order, so fitness[i] matches pop[i].
            fitness = pool.map(self._evaluate, pop)
            fitness = np.array([val[0] for val in fitness])
            u_hist = np.zeros(self.num_gen)
            for g in range(0, self.num_gen):
                print ("-- Generation {} --".format(g+1))
                pop_select = self._select(np.copy(pop), rate=1)

                self._uniform_cross_over(pop_select, 0.50)
                # Mutation magnitude decays as the evolution progresses.
                self._uniform_mutation(pop_select, 0.25, np.exp(-float(g)/self.num_gen)**2)

                fitness_select = pool.map(self._evaluate, pop_select)
                fitness_select = np.array([val[0] for val in fitness_select])

                pop_tmp = np.append(pop, pop_select, axis=0)
                fitness_tmp = np.append(fitness, fitness_select, axis=0)

                pop_survive, fitness_survive = self._survive(pop_tmp, fitness_tmp)

                # Refill the population with fresh random individuals.
                pop_new = self._generate_population(self.pop_amount - len(pop_survive))
                fitness_new = pool.map(self._evaluate, pop_new)
                fitness_new = np.array([val[0] for val in fitness_new])

                pop = np.append(pop_survive, pop_new, axis=0)
                fitness = np.append(fitness_survive, fitness_new, axis=0)
                if self.print_progress:
                    self._show_evolution(fitness, pop)
                u_hist[g] = fitness[0]

            fitness = pool.map(self._evaluate, pop)
            fitness = np.array([val[0] for val in fitness])
        finally:
            # The original leaked the worker pool; shut it down deterministically.
            pool.close()
            pool.join()
        return pop, fitness
class GradientSearch(object):
    """Gradient search optimization algorithm for the EZ-Climate model.

    Parameters
    ----------
    utility : `Utility` object
        object of utility class
    var_nums : int
        number of elements in array to optimize
    accuracy : float
        stop value for the gradient descent
    iterations : int
        maximum number of iterations
    fixed_values : ndarray, optional
        nodes to keep fixed
    fixed_indices : ndarray, optional
        indices of nodes to keep fixed
    print_progress : bool, optional
        if the progress of the evolution should be printed
    scale_alpha : ndarray, optional
        array to scale the learning rate

    Attributes
    ----------
    u : `Utility` object
        object of utility class
    var_nums : int
        number of elements in array to optimize
    accuracy : float
        stop value for the gradient descent
    iterations : int
        maximum number of iterations
    fixed_values : ndarray, optional
        nodes to keep fixed
    fixed_indices : ndarray, optional
        indices of nodes to keep fixed
    print_progress : bool, optional
        if the progress of the evolution should be printed
    scale_alpha : ndarray, optional
        array to scale the learning rate

    """

    def __init__(self, utility, var_nums, accuracy=1e-06, iterations=100, fixed_values=None,
                 fixed_indices=None, print_progress=False, scale_alpha=None):
        self.u = utility
        self.var_nums = var_nums
        self.accuracy = accuracy
        self.iterations = iterations
        self.fixed_values = fixed_values
        self.fixed_indices = fixed_indices
        self.print_progress = print_progress
        self.scale_alpha = scale_alpha
        if scale_alpha is None:
            self.scale_alpha = np.exp(np.linspace(0.0, 3.0, var_nums))

    def _partial_grad(self, i):
        """Calculate the ith element of the gradient vector by central difference."""
        m_copy = self.m.copy()
        # Clamp the backward step at zero: mitigation cannot be negative.
        m_copy[i] = m_copy[i] - self.delta if (m_copy[i] - self.delta) >= 0 else 0.0
        minus_utility = self.u.utility(m_copy)
        m_copy[i] += 2*self.delta
        plus_utility = self.u.utility(m_copy)
        grad = (plus_utility-minus_utility) / (2*self.delta)
        return grad, i

    def numerical_gradient(self, m, delta=1e-08, fixed_indices=None):
        """Calculate utility gradient numerically.

        Parameters
        ----------
        m : ndarray or list
            array of mitigation
        delta : float, optional
            change in mitigation
        fixed_indices : ndarray or list, optional
            indices of gradient that should not be calculated

        Returns
        -------
        ndarray
            gradient

        """
        self.delta = delta
        self.m = m
        if fixed_indices is None:
            fixed_indices = []
        grad = np.zeros(len(m))
        if not isinstance(m, np.ndarray):
            self.m = np.array(m)
        pool = multiprocessing.Pool()
        # Skip the fixed (constrained) elements; their gradient stays zero.
        indices = np.delete(list(range(len(m))), fixed_indices)
        res = pool.map(self._partial_grad, indices)
        for g, i in res:
            grad[i] = g
        pool.close()
        pool.join()
        del self.m
        del self.delta
        return grad

    def _accelerate_scale(self, accelerator, prev_grad, grad):
        """Grow the per-element step accelerator by 10% where the gradient kept
        its sign, and reset it to 1 where the sign flipped."""
        sign_vector = np.sign(prev_grad * grad)
        scale_vector = np.ones(self.var_nums) * (1 + 0.10)
        accelerator[sign_vector <= 0] = 1
        accelerator *= scale_vector
        return accelerator

    def gradient_descent(self, initial_point, return_last=False):
        """Gradient descent algorithm. The `initial_point` is updated using the
        Adam algorithm. Adam uses the history of the gradient to compute individual
        step sizes for each element in the mitigation vector. The vector of step
        sizes are calculated using estimates of the first and second moments of
        the gradient.

        Parameters
        ----------
        initial_point : ndarray
            initial guess of the mitigation
        return_last : bool, optional
            if True the function returns the last point, else the point
            with highest utility

        Returns
        -------
        tuple
            (best point, best utility)

        """
        num_decision_nodes = initial_point.shape[0]
        x_hist = np.zeros((self.iterations+1, num_decision_nodes))
        u_hist = np.zeros(self.iterations+1)
        # utility() returns a length-1 array (its [0] element is used below).
        u_hist[0] = self.u.utility(initial_point)[0]
        x_hist[0] = initial_point

        if self.iterations == 0:
            # Guard: with zero iterations the loop never runs and the loop
            # variable referenced afterwards would be unbound.
            return x_hist[0], u_hist[0]

        beta1, beta2 = 0.90, 0.90
        eta = 0.0015
        eps = 1e-3
        m_t, v_t = 0, 0

        prev_grad = 0.0
        accelerator = np.ones(self.var_nums)

        for i in range(self.iterations):
            grad = self.numerical_gradient(x_hist[i], fixed_indices=self.fixed_indices)
            # Adam-style first/second moment estimates with bias correction.
            m_t = beta1*m_t + (1-beta1)*grad
            v_t = beta2*v_t + (1-beta2)*np.power(grad, 2)
            m_hat = m_t / (1-beta1**(i+1))
            v_hat = v_t / (1-beta2**(i+1))
            if i != 0:
                accelerator = self._accelerate_scale(accelerator, prev_grad, grad)

            # NOTE(review): textbook Adam divides by np.sqrt(v_hat); this code
            # uses np.square(v_hat). Preserved as-is for reproducibility —
            # confirm whether this deviation is intentional.
            new_x = x_hist[i] + ((eta*m_hat)/(np.square(v_hat)+eps)) * accelerator
            new_x[new_x < 0] = 0.0

            if self.fixed_values is not None:
                self.fixed_values = self.fixed_values.flatten()
                new_x[self.fixed_indices] = self.fixed_values[self.fixed_indices]

            x_hist[i+1] = new_x
            u_hist[i+1] = self.u.utility(new_x)[0]
            prev_grad = grad.copy()

            if self.print_progress:
                print("-- Iteration {} -- \n Current Utility: {}".format(i+1, u_hist[i+1]))
                print(new_x)

        if return_last:
            return x_hist[i+1], u_hist[i+1]
        best_index = np.argmax(u_hist)
        return x_hist[best_index], u_hist[best_index]

    def run(self, initial_point_list, topk=4):
        """Initiate the gradient search algorithm.

        Parameters
        ----------
        initial_point_list : list
            list of initial points to select from
        topk : int, optional
            select and run gradient descent on the `topk` first points of
            `initial_point_list`

        Returns
        -------
        tuple
            best mitigation point and the utility of the best mitigation point

        Raises
        ------
        ValueError
            If `topk` is larger than the length of `initial_point_list`.

        Note
        ----
        Uses the :mod:`~multiprocessing` package.

        """
        print("----------------Gradient Search Starting----------------")

        if topk > len(initial_point_list):
            raise ValueError("topk {} > number of initial points {}".format(topk, len(initial_point_list)))

        candidate_points = initial_point_list[:topk]
        mitigations = []
        utilities = np.zeros(topk)
        for count, cp in enumerate(candidate_points):
            if not isinstance(cp, np.ndarray):
                cp = np.array(cp)
            print("Starting process {} of Gradient Descent".format(count+1))
            m, u = self.gradient_descent(cp)
            mitigations.append(m)
            utilities[count] = u
        best_index = np.argmax(utilities)
        return mitigations[best_index], utilities[best_index]
571 | 572 | Parameters 573 | ---------- 574 | utility : `Utility` object 575 | object of utility class 576 | var_nums : int 577 | number of elements in array to optimize 578 | accuracy : float 579 | stop value for the utility increase 580 | iterations : int 581 | maximum number of iterations 582 | 583 | Attributes 584 | ---------- 585 | utility : `Utility` object 586 | object of utility class 587 | var_nums : int 588 | number of elements in array to optimize 589 | accuracy : float 590 | stop value for the utility increase 591 | iterations : int 592 | maximum number of iterations 593 | 594 | """ 595 | def __init__(self, utility, var_nums, accuracy=1e-4, iterations=100): 596 | self.u = utility 597 | self.var_nums = var_nums 598 | self.accuracy = accuracy 599 | self.iterations = iterations 600 | 601 | def _min_func(self, x, m, i): 602 | m_copy = m.copy() 603 | m_copy[i] = x 604 | return -self.u.utility(m_copy)[0] 605 | 606 | def _minimize_node(self, node, m): 607 | from scipy.optimize import fmin 608 | return fmin(self._min_func, x0=m[node], args=(m, node), disp=False) 609 | 610 | def run(self, m): 611 | """Run the coordinate descent iterations. 612 | 613 | Parameters 614 | ---------- 615 | m : initial point 616 | 617 | Returns 618 | ------- 619 | tuple 620 | best mitigation point and the utility of the best mitigation point 621 | 622 | Note 623 | ---- 624 | Uses the :mod:`~scipy` package. 
625 | 626 | """ 627 | num_decision_nodes = m.shape[0] 628 | x_hist = [] 629 | u_hist = [] 630 | nodes = list(range(self.var_nums)) 631 | x_hist.append(m.copy()) 632 | u_hist.append(self.u.utility(m)[0]) 633 | print("----------------Coordinate Descent Starting----------------") 634 | print("Starting Utility: {}".format(u_hist[0])) 635 | for i in range(self.iterations): 636 | print("-- Iteration {} --".format(i+1)) 637 | node_iteration = np.random.choice(nodes, replace=False, size=len(nodes)) 638 | for node in node_iteration: 639 | m[node] = max(0.0, self._minimize_node(node, m)) 640 | x_hist.append(m.copy()) 641 | u_hist.append(self.u.utility(m)[0]) 642 | print("Current Utility: {}".format(u_hist[i+1])) 643 | if np.abs(u_hist[i+1] - u_hist[i]) < self.accuracy: 644 | break 645 | return x_hist[-1], u_hist[-1] 646 | --------------------------------------------------------------------------------