├── docs ├── Project.toml ├── src │ ├── api.md │ └── index.md └── make.jl ├── .github ├── dependabot.yml ├── workflows │ ├── ci-nightly.yml │ ├── documentation.yml │ ├── ci.yml │ └── CompatHelper.yml └── copilot-instructions.md ├── .gitignore ├── test ├── Project.toml ├── runtests.jl ├── test_cdp.jl └── test_lq_approx.jl ├── src ├── ContinuousDPs.jl ├── lq_approx.jl └── cdp.jl ├── Project.toml ├── README.md └── LICENSE /docs/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | ContinuousDPs = "c470661a-f883-5a79-ade0-a473a39194e2" 3 | Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" 4 | 5 | [compat] 6 | Documenter = "1" 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | day: "saturday" 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.cov 2 | *.jl.*.cov 3 | *.jl.mem 4 | deps/deps.jl 5 | Manifest.toml 6 | 7 | # Jupyter Notebook 8 | .ipynb_checkpoints 9 | 10 | # Documentation builds 11 | docs/build/ 12 | -------------------------------------------------------------------------------- /test/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 3 | BasisMatrices = "08854c51-b66b-5062-a90d-8e7ae4547a49" 4 | QuantEcon = "fcd29c91-0bd7-5a09-975d-7ac3f643a60c" 5 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 6 | -------------------------------------------------------------------------------- /test/runtests.jl: -------------------------------------------------------------------------------- 1 | using 
LinearAlgebra 2 | using ContinuousDPs 3 | using QuantEcon: PFI, VFI, solve, stationary_values 4 | using BasisMatrices: Basis, ChebParams, SplineParams, LinParams, nodes 5 | using Test 6 | 7 | include("test_cdp.jl") 8 | include("test_lq_approx.jl") 9 | -------------------------------------------------------------------------------- /docs/src/api.md: -------------------------------------------------------------------------------- 1 | # API Reference 2 | 3 | ```@index 4 | ``` 5 | 6 | ## Continuous Dynamic Programming 7 | 8 | ```@docs 9 | ContinuousDP 10 | ``` 11 | 12 | ## LQ Approximation 13 | 14 | ```@docs 15 | approx_lq 16 | ``` 17 | 18 | ## Simulation 19 | 20 | ```@docs 21 | simulate 22 | ``` 23 | 24 | ## Policy Evaluation 25 | 26 | ```@docs 27 | evaluate_policy! 28 | set_eval_nodes! 29 | ``` -------------------------------------------------------------------------------- /src/ContinuousDPs.jl: -------------------------------------------------------------------------------- 1 | module ContinuousDPs 2 | 3 | # stdlib 4 | using LinearAlgebra, Random 5 | 6 | using QuantEcon 7 | import QuantEcon: 8 | bellman_operator, bellman_operator!, compute_greedy!, compute_greedy, 9 | evaluate_policy, DDPAlgorithm, solve, simulate, simulate! 
10 | 11 | const DPAlgorithm = DDPAlgorithm 12 | 13 | include("cdp.jl") 14 | include("lq_approx.jl") 15 | 16 | export 17 | ContinuousDP, evaluate_policy!, set_eval_nodes!, simulate, approx_lq, LQA 18 | 19 | end # module 20 | -------------------------------------------------------------------------------- /Project.toml: -------------------------------------------------------------------------------- 1 | name = "ContinuousDPs" 2 | uuid = "c470661a-f883-5a79-ade0-a473a39194e2" 3 | version = "0.1.0" 4 | 5 | [deps] 6 | BasisMatrices = "08854c51-b66b-5062-a90d-8e7ae4547a49" 7 | FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41" 8 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 9 | Optim = "429524aa-4258-5aef-a3af-852621145aeb" 10 | QuantEcon = "fcd29c91-0bd7-5a09-975d-7ac3f643a60c" 11 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 12 | 13 | [compat] 14 | BasisMatrices = "0.8" 15 | FiniteDiff = "2" 16 | Optim = "1" 17 | QuantEcon = "0.16, 0.17" 18 | julia = "1.10" 19 | -------------------------------------------------------------------------------- /docs/make.jl: -------------------------------------------------------------------------------- 1 | using ContinuousDPs 2 | using Documenter 3 | 4 | DocMeta.setdocmeta!(ContinuousDPs, :DocTestSetup, :(using ContinuousDPs); recursive=true) 5 | 6 | makedocs(; 7 | modules=[ContinuousDPs], 8 | authors="QuantEcon", 9 | sitename="ContinuousDPs.jl", 10 | format=Documenter.HTML(; 11 | canonical="https://QuantEcon.github.io/ContinuousDPs.jl", 12 | edit_link="main", 13 | assets=String[], 14 | ), 15 | pages=[ 16 | "Home" => "index.md", 17 | "API Reference" => "api.md", 18 | ], 19 | checkdocs=:none, # Don't error on missing docs 20 | warnonly=[:missing_docs, :docs_block] # Convert errors to warnings 21 | ) 22 | 23 | deploydocs(; 24 | repo="github.com/QuantEcon/ContinuousDPs.jl", 25 | devbranch="main", 26 | ) -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # ContinuousDPs.jl 2 | 3 | [![Build Status](https://github.com/QuantEcon/ContinuousDPs.jl/actions/workflows/ci.yml/badge.svg)](https://github.com/QuantEcon/ContinuousDPs.jl/actions/workflows/ci.yml) 4 | [![codecov](https://codecov.io/gh/QuantEcon/ContinuousDPs.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/QuantEcon/ContinuousDPs.jl) 5 | [![Documentation](https://img.shields.io/badge/docs-dev-blue.svg)](https://QuantEcon.github.io/ContinuousDPs.jl/dev/) 6 | 7 | Routines for solving continuous state dynamic programs by the Bellman equation collocation method 8 | 9 | ## Installation 10 | 11 | To install the package, open the Julia package manager (Pkg) and type 12 | 13 | ``` 14 | add https://github.com/QuantEcon/ContinuousDPs.jl 15 | ``` 16 | 17 | ## Demo Notebooks 18 | 19 | * [Stochastic optimal growth model](http://nbviewer.jupyter.org/github/QuantEcon/ContinuousDPs.jl/blob/main/examples/cdp_ex_optgrowth_jl.ipynb) 20 | * [Examples from Miranda and Fackler 2002, Chapter 9](http://nbviewer.jupyter.org/github/QuantEcon/ContinuousDPs.jl/blob/main/examples/cdp_ex_MF_jl.ipynb) 21 | -------------------------------------------------------------------------------- /.github/workflows/ci-nightly.yml: -------------------------------------------------------------------------------- 1 | name: CI-nightly 2 | on: 3 | pull_request: 4 | branches: 5 | - main 6 | push: 7 | branches: 8 | - main 9 | tags: '*' 10 | jobs: 11 | test: 12 | name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | version: 18 | - 'nightly' 19 | os: 20 | - ubuntu-latest 21 | arch: 22 | - x64 23 | steps: 24 | - uses: actions/checkout@v6 25 | - uses: julia-actions/setup-julia@v2 26 | with: 27 | version: ${{ matrix.version }} 28 | arch: ${{ matrix.arch }} 29 | - uses: actions/cache@v5 30 | env: 
31 | cache-name: cache-artifacts 32 | with: 33 | path: ~/.julia/artifacts 34 | key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} 35 | restore-keys: | 36 | ${{ runner.os }}-test-${{ env.cache-name }}- 37 | ${{ runner.os }}-test- 38 | ${{ runner.os }}- 39 | - uses: julia-actions/julia-buildpkg@v1 40 | - uses: julia-actions/julia-runtest@v1 41 | -------------------------------------------------------------------------------- /.github/workflows/documentation.yml: -------------------------------------------------------------------------------- 1 | name: Documentation 2 | on: 3 | push: 4 | branches: 5 | - main 6 | tags: '*' 7 | pull_request: 8 | concurrency: 9 | # Skip intermediate builds: always. 10 | # Cancel intermediate builds: only if it is a pull request build. 11 | group: ${{ github.workflow }}-${{ github.ref }} 12 | cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | permissions: 17 | actions: read 18 | contents: write 19 | statuses: write 20 | steps: 21 | - uses: actions/checkout@v6 22 | - uses: julia-actions/setup-julia@v2 23 | with: 24 | version: '1' 25 | - name: Configure git 26 | run: | 27 | git config --global user.name "$GITHUB_ACTOR" 28 | git config --global user.email "$GITHUB_ACTOR@users.noreply.github.com" 29 | - uses: julia-actions/cache@v2 30 | - name: Install dependencies 31 | run: julia --project=docs -e 'using Pkg; Pkg.develop(PackageSpec(path=pwd())); Pkg.instantiate()' 32 | - name: Build and deploy 33 | env: 34 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 35 | DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} 36 | run: julia --project=docs docs/make.jl -------------------------------------------------------------------------------- /docs/src/index.md: -------------------------------------------------------------------------------- 1 | # ContinuousDPs.jl 2 | 3 | [![Build 
Status](https://github.com/QuantEcon/ContinuousDPs.jl/actions/workflows/ci.yml/badge.svg)](https://github.com/QuantEcon/ContinuousDPs.jl/actions/workflows/ci.yml) 4 | [![codecov](https://codecov.io/gh/QuantEcon/ContinuousDPs.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/QuantEcon/ContinuousDPs.jl) 5 | [![Documentation](https://img.shields.io/badge/docs-dev-blue.svg)](https://QuantEcon.github.io/ContinuousDPs.jl/dev/) 6 | 7 | Routines for solving continuous state dynamic programs by the Bellman equation collocation method. 8 | 9 | ## Installation 10 | 11 | To install the package, open the Julia package manager (Pkg) and type: 12 | 13 | ```julia 14 | add https://github.com/QuantEcon/ContinuousDPs.jl 15 | ``` 16 | 17 | ## Quick Start 18 | 19 | ```julia 20 | using ContinuousDPs 21 | 22 | # Create a continuous dynamic programming problem 23 | # (example code here would depend on the specific API) 24 | ``` 25 | 26 | ## Demo Notebooks 27 | 28 | * [Stochastic optimal growth model](http://nbviewer.jupyter.org/github/QuantEcon/ContinuousDPs.jl/blob/main/examples/cdp_ex_optgrowth_jl.ipynb) 29 | * [Examples from Miranda and Fackler 2002, Chapter 9](http://nbviewer.jupyter.org/github/QuantEcon/ContinuousDPs.jl/blob/main/examples/cdp_ex_MF_jl.ipynb) 30 | 31 | ## References 32 | 33 | * M. J. Miranda and P. L. Fackler, Applied Computational Economics and Finance, MIT press, 2002. -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | branches: 5 | - main 6 | push: 7 | branches: 8 | - main 9 | tags: '*' 10 | jobs: 11 | test: 12 | name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} 13 | runs-on: ${{ matrix.os }} 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | version: 18 | - '1' # Leave this line unchanged. 
'1' will automatically expand to the latest stable 1.x release of Julia. 19 | os: [ubuntu-latest, windows-latest, macOS-latest] 20 | arch: 21 | - x64 22 | steps: 23 | - uses: actions/checkout@v6 24 | - uses: julia-actions/setup-julia@v2 25 | with: 26 | version: ${{ matrix.version }} 27 | arch: ${{ matrix.arch }} 28 | - uses: actions/cache@v5 29 | env: 30 | cache-name: cache-artifacts 31 | with: 32 | path: ~/.julia/artifacts 33 | key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }} 34 | restore-keys: | 35 | ${{ runner.os }}-test-${{ env.cache-name }}- 36 | ${{ runner.os }}-test- 37 | ${{ runner.os }}- 38 | - uses: julia-actions/julia-buildpkg@v1 39 | - uses: julia-actions/julia-runtest@v1 40 | - uses: julia-actions/julia-processcoverage@v1 41 | - uses: codecov/codecov-action@v5 42 | with: 43 | file: lcov.info 44 | token: ${{ secrets.CODECOV_TOKEN }} 45 | -------------------------------------------------------------------------------- /.github/workflows/CompatHelper.yml: -------------------------------------------------------------------------------- 1 | name: CompatHelper 2 | on: 3 | schedule: 4 | - cron: 0 0 * * * 5 | workflow_dispatch: 6 | permissions: 7 | contents: write 8 | pull-requests: write 9 | jobs: 10 | CompatHelper: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Check if Julia is already available in the PATH 14 | id: julia_in_path 15 | run: which julia 16 | continue-on-error: true 17 | - name: Install Julia, but only if it is not already available in the PATH 18 | uses: julia-actions/setup-julia@v2 19 | with: 20 | version: '1' 21 | arch: ${{ runner.arch }} 22 | if: steps.julia_in_path.outcome != 'success' 23 | - name: "Add the General registry via Git" 24 | run: | 25 | import Pkg 26 | ENV["JULIA_PKG_SERVER"] = "" 27 | Pkg.Registry.add("General") 28 | shell: julia --color=yes {0} 29 | - name: "Install CompatHelper" 30 | run: | 31 | import Pkg 32 | name = "CompatHelper" 33 | uuid = 
"aa819f21-2bde-4658-8897-bab36330d9b7" 34 | version = "3" 35 | Pkg.add(; name, uuid, version) 36 | shell: julia --color=yes {0} 37 | - name: "Run CompatHelper" 38 | run: | 39 | import CompatHelper 40 | CompatHelper.main() 41 | shell: julia --color=yes {0} 42 | env: 43 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 44 | COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }} 45 | # COMPATHELPER_PRIV: ${{ secrets.COMPATHELPER_PRIV }} 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2017, QuantEcon 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | * Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /src/lq_approx.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Tools for solving dynamic programs with continuous states using LQ 3 | approximation. 4 | 5 | References 6 | ---------- 7 | * M. J. Miranda and P. L. Fackler, Applied Computational Economics and Finance, 8 | MIT press, 2002. 9 | 10 | =# 11 | import QuantEcon.LQ, QuantEcon.ScalarOrArray 12 | 13 | """ 14 | approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, 15 | discount) 16 | 17 | Return an approximating LQ instance. 
18 | 19 | # Arguments 20 | - `s_star::ScalarOrArray{T}`: State variables at the steady-state 21 | - `x_star::ScalarOrArray{T}`: Action variables at the steady-state 22 | - `f_star::Real`: Reward function evaluated at the steady-state 23 | - `Df_star::AbstractVector{T}`: Gradient of f satisfying 24 | `Df_star = [f_s', f_x']` 25 | - `DDf_star::AbstractMatrix{T}`: Hessian of f satisfying 26 | `DDf_star = [f_ss f_sx; f_xs f_xx]` 27 | - `g_star::ScalarOrArray{T}`: State transition function evaluated at the 28 | steady-state 29 | - `Dg_star::AbstractMatrix{T}`: Jacobian of g satisfying `Dg_star = [g_s, g_x]` 30 | - `discount::Real`: Discount factor 31 | 32 | """ 33 | function approx_lq(s_star::ScalarOrArray{T}, x_star::ScalarOrArray{T}, 34 | f_star::Real, Df_star::AbstractVector{T}, 35 | DDf_star::AbstractMatrix{T}, g_star::ScalarOrArray{T}, 36 | Dg_star::AbstractMatrix{T}, discount::Real) where T 37 | 38 | n = length(s_star) # Dim of state variable s 39 | nb_states = n + 1 40 | m = length(x_star) # Dim of control variable x 41 | z_star = [s_star..., x_star...] 
42 | 43 | # Unpack derivatives 44 | f_s, f_x = Df_star[1:n, :]', Df_star[n+1:end, :]' 45 | f_ss, f_xs = DDf_star[1:n, 1:n], DDf_star[n+1:end, 1:n] 46 | f_sx, f_xx = DDf_star[1:n, n+1:end], DDf_star[n+1:end, n+1:end] 47 | g_s, g_x = Dg_star[:, 1:n], Dg_star[:, n+1:end] 48 | 49 | # Initialize arrays 50 | A = Array{T}(undef, nb_states, nb_states) 51 | B = Array{T}(undef, nb_states, m) 52 | C = zeros(nb_states, 1) 53 | Q = Array{T}(undef, m, m) 54 | R = Array{T}(undef, nb_states, nb_states) 55 | N = Array{T}(undef, m, nb_states) 56 | 57 | # (1, s)' R (1, s) + 2 x' N (1, s) + x' Q x 58 | R[1, 1] = -(f_star - Df_star' * z_star + z_star' * DDf_star * z_star / 2) 59 | R[2:end, 1] = -(f_s' - (f_ss * s_star + f_sx * x_star)) / 2 60 | R[1, 2:end] = -(f_s - (s_star' * f_ss + (f_sx * x_star)')) / 2 61 | R[2:end, 2:end] = -f_ss / 2 62 | 63 | N[:, 1] = -(f_x' - (f_sx' * s_star + f_xx * x_star)) / 2 64 | N[:, 2:end] = -f_sx' / 2 65 | 66 | Q[:, :] = -f_xx / 2 67 | 68 | # A (1, s) + B x + C w 69 | A[1, 1] = 1.0 70 | A[1, 2:end] .= 0.0 71 | A[2:end, 1] .= g_star .- Dg_star * z_star # g_star may be a scalar 72 | A[2:end, 2:end] = g_s 73 | 74 | B[1, :] .= 0.0 75 | B[2:end, :] = g_x 76 | 77 | # Construct LQ instance 78 | lq = QuantEcon.LQ(Q, R, A, B, C, N, bet=discount) 79 | 80 | return lq 81 | end 82 | -------------------------------------------------------------------------------- /test/test_cdp.jl: -------------------------------------------------------------------------------- 1 | @testset "cdp.jl" begin 2 | 3 | @testset "Deterministic optimal growth" begin 4 | alpha = 0.65 5 | beta = 0.95 6 | f(s, x) = log(x) 7 | g(s, x, e) = s^alpha - x 8 | shocks = [1.] 9 | weights = [1.] 
10 | x_lb(s) = 0 11 | x_ub(s) = s 12 | 13 | # Analytical solution 14 | ab = alpha * beta 15 | c1 = (log(1 - ab) + log(ab) * ab / (1 - ab)) / (1 - beta) 16 | c2 = alpha / (1 - ab) 17 | v_star(s) = c1 + c2 * log(s) 18 | x_star(s) = (1 - ab) * s^alpha 19 | 20 | # Analytical state path 21 | s_init = 0.1 22 | ts_length = 25 23 | s_path_star = Array{Float64}(undef, ts_length) 24 | s_path_star[1] = s_init 25 | shock = 1. # Arbitrary 26 | for t in 1:ts_length-1 27 | s_path_star[t+1] = g(s_path_star[t], x_star(s_path_star[t]), shock) 28 | end 29 | 30 | # Bases 31 | bases = Basis[] 32 | 33 | # Chebyshev 34 | n = 30 35 | s_min, s_max = 0.1, 2. 36 | push!(bases, Basis(ChebParams(n, s_min, s_max))) 37 | 38 | # Spline 39 | k = 3 40 | m = 101 41 | breaks = m - (k-1) 42 | s_min, s_max = 0.1, 2. 43 | push!(bases, Basis(SplineParams(breaks, s_min, s_max, k))) 44 | 45 | methods = [PFI, VFI] 46 | 47 | for basis in bases 48 | cdp = ContinuousDP(f, g, beta, shocks, weights, x_lb, x_ub, basis) 49 | 50 | for method in methods 51 | # solve 52 | tol = sqrt(eps()) 53 | max_iter = 500 54 | res = @inferred(solve(cdp, method, tol=tol, max_iter=max_iter)) 55 | 56 | rtol = 1e-5 57 | @test isapprox(res.V, v_star.(cdp.interp.S); rtol=rtol) 58 | @test isapprox(res.X, x_star.(cdp.interp.S); rtol=rtol) 59 | 60 | # set_eval_nodes! 
61 | grid_size = 200 62 | eval_nodes = collect(range(s_min, stop=s_max, length=grid_size)) 63 | set_eval_nodes!(res, eval_nodes); 64 | 65 | # simulate 66 | s_path = @inferred(simulate(res, s_init, ts_length)) 67 | atol = 1e-5 68 | @test isapprox(s_path[end], s_path_star[end]; atol=atol) 69 | end 70 | end 71 | 72 | @testset "Test warning" begin 73 | cdp = ContinuousDP( 74 | f, g, beta, shocks, weights, x_lb, x_ub, bases[1] 75 | ) 76 | for max_iter in [0, 1] 77 | @test_logs (:warn, r".*max_iter.*") 78 | solve(cdp, max_iter=max_iter) 79 | end 80 | end 81 | end 82 | 83 | @testset "LQ control" begin 84 | using QuantEcon 85 | 86 | A = [1.0 0.0; 87 | -0.5 0.9]; 88 | 89 | R = [5.0 0.0; 90 | 0.0 0.3] 91 | 92 | N = [0.05 0.1] 93 | 94 | Q = 0.1; 95 | 96 | C = 0.0; 97 | 98 | B = [0.0; 1.5]; 99 | 100 | f(s, x) = -([1, s...]' * R * [1, s...] .+ x' * Q * x .+ 101 | 2 * x' * N * [1, s...])[1]; 102 | g(s, x, e) = (A * [1, s...] + B * x)[2]; 103 | 104 | point = (5.0, 0.0, 0.0); 105 | 106 | discount = 0.9; 107 | lq = QuantEcon.LQ(Q, R, A, B, C, N, bet=discount); 108 | P, F, d = stationary_values(lq); 109 | v_star(s) = -([1, s...]' * P * [1, s...] + d) 110 | x_star(s) = -(F * [1, s...])[1]; 111 | 112 | n = 100 113 | s_min, s_max = -5.0, 10. 114 | basis = Basis(LinParams(n, s_min, s_max)) 115 | 116 | x_lb(s) = -20.0 117 | x_ub(s) = 5.0; 118 | 119 | shocks = [0.] 120 | weights = [1.] 
121 | 122 | cdp = ContinuousDP(f, g, discount, shocks, weights, x_lb, x_ub, basis) 123 | 124 | res_lqa = @inferred(solve(cdp, LQA, point=point)); 125 | rtol = 1e-2 126 | 127 | @test isapprox(res_lqa.V, v_star.(cdp.interp.S); rtol=rtol) 128 | @test isapprox(res_lqa.X, x_star.(cdp.interp.S); rtol=rtol) 129 | 130 | end 131 | 132 | @testset "Initial value" begin 133 | # Construct optimal growth model 134 | n = 10 135 | s_min, s_max = 5, 10 136 | basis = Basis(LinParams(n, s_min, s_max)) 137 | 138 | alpha = 0.2 139 | bet = 0.5 140 | gamm = 0.9 141 | sigma = 0.1 142 | discount = 0.9; 143 | 144 | x_star = ((discount * bet) / (1 - discount * gamm))^(1 / (1 - bet)) 145 | s_star = gamm * x_star + x_star^bet 146 | s_star, x_star 147 | 148 | f(s, x) = (s - x)^(1 - alpha) / (1 - alpha) 149 | g(s, x, e) = gamm * x .+ e * x^bet; 150 | 151 | n_shocks = 3 152 | shocks, weights = zeros(3), ones(3) / 3. 153 | 154 | x_lb(s) = 0 155 | x_ub(s) = 0.99 * s; 156 | 157 | cdp = ContinuousDP(f, g, discount, shocks, weights, x_lb, x_ub, basis) 158 | 159 | # Compute coefficients once 160 | v_init = π * ones(n) 161 | res = solve(cdp, v_init=v_init, max_iter=0) 162 | 163 | # Basis is identity matrix 164 | @test isapprox(res.C, v_init) 165 | 166 | end 167 | 168 | end 169 | -------------------------------------------------------------------------------- /.github/copilot-instructions.md: -------------------------------------------------------------------------------- 1 | # ContinuousDPs.jl 2 | 3 | ContinuousDPs.jl is a Julia package that provides routines for solving continuous state dynamic programs using the Bellman equation collocation method. It is part of the QuantEcon ecosystem and offers Policy Function Iteration (PFI) and Value Function Iteration (VFI) algorithms for solving dynamic programming problems. 4 | 5 | Always reference these instructions first and fallback to search or bash commands only when you encounter unexpected information that does not match the info here. 
6 | 7 | ## Working Effectively 8 | 9 | ### Bootstrap and Setup 10 | - Julia 1.10+ is required. Check version: `julia --version` 11 | - Install package dependencies: `julia --project=. -e "using Pkg; Pkg.instantiate()"` -- takes 75 seconds. NEVER CANCEL. Set timeout to 120+ seconds. 12 | - The package uses standard Julia package structure with `Project.toml` for dependencies 13 | 14 | ### Building and Testing 15 | - Build the package: `julia --project=. -e "using Pkg; Pkg.build()"` 16 | - Run all tests: `julia --project=. -e "using Pkg; Pkg.test()"` -- takes 60 seconds. NEVER CANCEL. Set timeout to 120+ seconds. 17 | - Run basic functionality test: Create a simple script importing `ContinuousDPs`, `QuantEcon`, and `BasisMatrices` to verify the package works 18 | 19 | ### Development Workflow 20 | - Start Julia REPL in package mode: `julia --project=.` 21 | - Load the package in development: `julia --project=. -e "using ContinuousDPs"` 22 | - Import required dependencies: `using QuantEcon: PFI, VFI, solve; using BasisMatrices: Basis, ChebParams, SplineParams` 23 | - ALWAYS import `solve` from QuantEcon when using the solving functionality 24 | 25 | ## Core Functionality Testing 26 | 27 | ### Creating and Solving a Continuous DP 28 | Always test new code with this basic workflow: 29 | ```julia 30 | using ContinuousDPs 31 | using QuantEcon: PFI, VFI, solve 32 | using BasisMatrices: Basis, ChebParams 33 | 34 | # Define reward and transition functions 35 | f(s, x) = log(x) # reward function 36 | g(s, x, e) = s^0.3 * x^0.7 + e # state transition function 37 | 38 | # Setup problem parameters 39 | discount = 0.9 40 | shocks = [0.0] # shock values 41 | weights = [1.0] # shock weights 42 | x_lb(s) = 0.01 # lower bound on action 43 | x_ub(s) = s - 0.01 # upper bound on action 44 | 45 | # Create basis for approximation 46 | n = 10 47 | s_min, s_max = 0.1, 2.0 48 | basis = Basis(ChebParams(n, s_min, s_max)) 49 | 50 | # Create continuous DP 51 | cdp = ContinuousDP(f, g, discount, 
shocks, weights, x_lb, x_ub, basis) 52 | 53 | # Solve using PFI or VFI 54 | res = solve(cdp, PFI) # or VFI 55 | 56 | # Simulate solution 57 | s_init = 1.0 58 | ts_length = 10 59 | s_path = simulate(res, s_init, ts_length) 60 | ``` 61 | 62 | ### Validation Scenarios 63 | ALWAYS run these validation steps after making changes: 64 | 1. **Package Loading Test**: Verify `using ContinuousDPs` works without errors 65 | 2. **Basic Solve Test**: Create a simple DP problem and solve it using both PFI and VFI 66 | 3. **Simulation Test**: Simulate paths from solved problems to ensure results are reasonable 67 | 4. **Integration Test**: Run the full test suite to ensure no regressions 68 | 69 | ## Repository Structure 70 | 71 | ### Key Directories and Files 72 | ``` 73 | ├── src/ 74 | │ ├── ContinuousDPs.jl # Main module file 75 | │ ├── cdp.jl # Core continuous DP functionality 76 | │ └── lq_approx.jl # Linear quadratic approximation methods 77 | ├── test/ 78 | │ ├── runtests.jl # Test runner 79 | │ ├── test_cdp.jl # Tests for core CDP functionality 80 | │ └── test_lq_approx.jl # Tests for LQ approximation 81 | ├── examples/ 82 | │ ├── cdp_ex_optgrowth_jl.ipynb # Optimal growth model example 83 | │ ├── cdp_ex_MF_jl.ipynb # Miranda & Fackler examples 84 | │ └── lqapprox_jl.ipynb # LQ approximation examples 85 | └── Project.toml # Package dependencies and metadata 86 | ``` 87 | 88 | ### Important Files to Check When Making Changes 89 | - Always check `src/cdp.jl` when modifying core solving algorithms 90 | - Always check `src/lq_approx.jl` when working with linear quadratic approximations 91 | - Always run tests in `test/test_cdp.jl` when modifying CDP functionality 92 | - Always run tests in `test/test_lq_approx.jl` when modifying LQ approximation 93 | 94 | ## Common Tasks 95 | 96 | ### Running Examples 97 | - Examples are Jupyter notebooks in the `examples/` directory 98 | - Cannot directly run notebooks in command line, but code can be extracted and run in Julia REPL 99 | - 
Key examples: optimal growth model, Miranda & Fackler chapter 9 examples 100 | 101 | ### Algorithm Types 102 | - **PFI (Policy Function Iteration)**: Generally faster convergence 103 | - **VFI (Value Function Iteration)**: More robust but potentially slower 104 | - Both algorithms available through `QuantEcon.solve()` with method parameter 105 | 106 | ### Basis Types 107 | - **Chebyshev**: `ChebParams(n, s_min, s_max)` - good general purpose choice 108 | - **Spline**: `SplineParams(breaks, s_min, s_max, k)` - flexible, good for irregular functions 109 | - **Linear**: `LinParams(breaks, s_min, s_max)` - simple linear interpolation 110 | 111 | ### Debugging Common Issues 112 | - **`solve` not defined**: Import from QuantEcon: `using QuantEcon: solve` 113 | - **Method convergence issues**: Try different basis sizes or algorithm (PFI vs VFI) 114 | - **Simulation errors**: Check that state bounds are consistent with transition function 115 | - **Performance issues**: Larger basis sizes increase accuracy but slow computation 116 | 117 | ## CI and Quality Assurance 118 | 119 | ### Continuous Integration 120 | - GitHub Actions runs tests on Ubuntu, Windows, and macOS 121 | - Tests must pass on Julia 1.x (latest stable) 122 | - Nightly CI also runs tests on Julia nightly builds 123 | - CompatHelper automatically updates dependency bounds 124 | 125 | ### Before Committing Changes 126 | - ALWAYS run `julia --project=. 
-e "using Pkg; Pkg.test()"` to ensure all tests pass 127 | - Verify basic functionality with a simple CDP example 128 | - Check that examples in `examples/` directory still work if you modified core functionality 129 | - No additional linting or formatting tools required - Julia has built-in code standards 130 | 131 | ## Dependencies and Compatibility 132 | - **Core dependencies**: QuantEcon.jl, BasisMatrices.jl, Optim.jl, FiniteDiff.jl 133 | - **Julia version**: 1.10+ required (see Project.toml) 134 | - **Platform support**: Windows, macOS, Linux (all tested in CI) 135 | - Dependencies install automatically via `Pkg.instantiate()` 136 | 137 | ## Performance Notes 138 | - Basis approximation size (`n`) significantly affects both accuracy and speed 139 | - PFI generally converges faster than VFI but requires more memory 140 | - Larger shock grids increase computational complexity 141 | - Simulation is fast once the problem is solved 142 | 143 | ## Troubleshooting 144 | - **Long solve times**: Normal for complex problems. 
Increase `max_iter` if needed, but be patient 145 | - **Convergence warnings**: Try different basis size, tolerance, or algorithm 146 | - **Memory issues**: Reduce basis size or use simpler basis types 147 | - **Installation issues**: Ensure Julia 1.2+ and try `Pkg.update()` first -------------------------------------------------------------------------------- /test/test_lq_approx.jl: -------------------------------------------------------------------------------- 1 | @testset "lq_approx.jl" begin 2 | 3 | @testset "QuantEcon LQ Lecture Example" begin 4 | # Set parameters 5 | α, β, ρ1, ρ2, σ = 10.0, 0.95, 0.9, 0.0, 1.0 6 | 7 | R = 1 / β 8 | A = [1.0 0.0 0.0; 9 | α ρ1 ρ2; 10 | 0.0 1.0 0.0] 11 | A12 = zeros(3,1) 12 | ALQ_l = hcat(A, A12) 13 | ALQ_r = [0 -R 0 R] 14 | ALQ = vcat(ALQ_l, ALQ_r) 15 | 16 | RLQ = [0.0 0.0 0.0 0.0; 17 | 0.0 0.0 0.0 0.0; 18 | 0.0 0.0 0.0 0.0; 19 | 0.0 0.0 0.0 1e-9] 20 | 21 | QLQ = 1.0 22 | BLQ = [0.0; 0.0; 0.0; R] 23 | β_LQ = β 24 | 25 | f(s, x) = -([1, s...]' * RLQ * [1, s...] + x' * QLQ * x); 26 | g(s, x) = ALQ * [1, s...] + BLQ * x; 27 | 28 | s_star, x_star = [1.0, 10.9, 1.0], 0.0; 29 | 30 | f_star = f(s_star, x_star) 31 | Df_star = [0.0, 0.0, -2.0e-9, 0.0] 32 | D²f_star = [0.0 0.0 0.0 0.0; 33 | 0.0 0.0 0.0 0.0; 34 | 0.0 0.0 -2.0e-9 0.0; 35 | 0.0 0.0 0.0 -2.0] 36 | g_star = g(s_star, x_star)[2:end] 37 | Dg_star = [0.9 0.0 0.0 0.0; 38 | 1.0 0.0 0.0 0.0; 39 | -1.052631578947368 0.0 1.052631578947368 1.052631578947368] 40 | 41 | lq = approx_lq(s_star, x_star, f_star, Df_star, D²f_star, g_star, 42 | Dg_star, β) 43 | 44 | @test isapprox(lq.R, RLQ) 45 | @test isapprox(lq.Q, [QLQ]) 46 | @test isapprox(lq.A, ALQ) 47 | @test isapprox(lq.B, BLQ) 48 | end 49 | 50 | @testset "One State and One Action Variable Example" begin 51 | A = [1.0 0.0; 52 | 0.5 0.9]; 53 | 54 | R = [5.0 0.0; 55 | 0.0 0.3] 56 | 57 | N = [0.05 0.1] 58 | 59 | Q = [0.1]; 60 | 61 | B = [0.0; 1.5]; 62 | 63 | f(s, x) = -([1, s...]' * R * [1, s...] 
.+ x' * Q * x 64 | .+ 2 * x' * N * [1, s...]); 65 | g(s, x) = A * [1, s...] + B * x; 66 | 67 | s_star = 5.0; 68 | x_star = 0.0; 69 | 70 | g_star = g(s_star, x_star)[2]; 71 | f_star = f(s_star, x_star)[1]; 72 | 73 | Df_star = [-3.0; 74 | -1.1]; 75 | D²f_star = [-0.6 -0.2; 76 | -0.2 -0.2] 77 | Dg_star = [0.9 1.5]; 78 | 79 | lq = approx_lq(s_star, x_star, f_star, Df_star, D²f_star, g_star, 80 | Dg_star, 1.0) 81 | 82 | @test isapprox(lq.R, R) 83 | @test isapprox(lq.Q, Q) 84 | @test isapprox(lq.A, A) 85 | @test isapprox(lq.B, B) 86 | @test isapprox(lq.N, N) 87 | end 88 | 89 | @testset "One State and Two Action Variables Example" begin 90 | A = [1.0 0.0; 91 | 0.5 0.9]; 92 | 93 | R = [5.0 0.0; 94 | 0.0 0.3] 95 | 96 | N = [0.05 0.1; 97 | 0.01 0.02] 98 | 99 | Q = [0.1 -0.05; 100 | -0.05 0.3]; 101 | 102 | B = [0.0 0.0; 1.5 -0.9]; 103 | 104 | f(s, x) = -([1, s...]' * R * [1, s...] .+ x' * Q * x 105 | .+ 2 * x' * N * [1, s...]); 106 | g(s, x) = A * [1, s...] + B * x; 107 | 108 | x_star = [0.0, 0.0]; 109 | s_star = 5.0; 110 | 111 | g_star = g(s_star, x_star)[2]; 112 | f_star = f(s_star, x_star)[1]; 113 | 114 | Df_star = [-3.0; 115 | -1.1; 116 | -0.22]; 117 | D²f_star = [-0.6 -0.2 -0.04; 118 | -0.2 -0.2 0.1; 119 | -0.04 0.1 -0.6] 120 | Dg_star = [0.9 1.5 -0.9]; 121 | 122 | lq = approx_lq(s_star, x_star, f_star, Df_star, D²f_star, g_star, 123 | Dg_star, 1.0) 124 | 125 | @test isapprox(lq.R, R) 126 | @test isapprox(lq.Q, Q) 127 | @test isapprox(lq.A, A) 128 | @test isapprox(lq.B, B) 129 | @test isapprox(lq.N, N) 130 | 131 | end 132 | 133 | @testset "Three States and Two Action Variables Example" begin 134 | A = [1.0 0.0 0.0 0.0; 135 | -0.2 0.5 0.01 0.0; 136 | 0.05 -0.5 0.8 0.3; 137 | -0.1 -0.2 0.3 0.6]; 138 | 139 | R = [5.0 0.00 0.00 0.00; 140 | 0.0 0.15 0.21 -0.05; 141 | 0.0 0.21 0.5 -0.01; 142 | 0.0 -0.05 -0.01 0.8] 143 | 144 | N = [-0.1 0.005 -0.15 -0.2; 145 | 0.005 -0.15 -0.2 0.2] 146 | 147 | Q = [1.0 -0.5; 148 | -0.5 0.9]; 149 | 150 | B = [0.0 0.0; 151 | 0.3 0.5; 152 | 0.0 
0.2; 153 | 0.1 0.3]; 154 | 155 | f(s, x) = ([1, s...]' * R * [1, s...] + x' * Q * x 156 | + 2 * x' * N * [1, s...]); 157 | g(s, x) = A * [1, s...] + B * x; 158 | 159 | s_star = [-0.7916666666666763, -19.583333333333226, -14.541666666666579]; 160 | x_star = [0.0, 0.0]; 161 | 162 | g_star = g(s_star, x_star)[2:end]; # Drop constant 163 | f_star = f(s_star, x_star); 164 | 165 | Df_star = [-7.008333333332452; 166 | -19.62499999999752; 167 | -22.79583333333047; 168 | 11.483749999998526; 169 | 2.264166666666381]; 170 | 171 | D²f_star = [0.3 0.42 -0.1 0.01 -0.3; 172 | 0.42 1.0 -0.02 -0.3 -0.4; 173 | -0.1 -0.02 1.6 -0.4 0.4; 174 | 0.01 -0.3 -0.4 2.0 -1.0; 175 | -0.3 -0.4 0.4 -1.0 1.8]; 176 | 177 | Dg_star = [0.5 0.01 0.0 0.3 0.5; 178 | -0.5 0.8 0.3 0.0 0.2; 179 | -0.2 0.3 0.6 0.1 0.3]; 180 | 181 | lq = approx_lq(s_star, x_star, f_star, Df_star, D²f_star, g_star, 182 | Dg_star, 1.0); 183 | 184 | @test isapprox(-lq.R, R) 185 | @test isapprox(-lq.Q, Q) 186 | @test isapprox(lq.A, A) 187 | @test isapprox(lq.B, B) 188 | @test isapprox(-lq.N, N) 189 | 190 | end 191 | 192 | @testset "Monetary Policy Example" begin # See 9.4.6 193 | vlq = 1.0e+02 * [-1.516771891935916 # Obtained from Fackler's code 194 | -1.421873224803633 195 | -1.253495277719767 196 | -1.054476825045721 197 | -0.919716533913779 198 | -0.849214404323941 199 | -0.842970436276206 200 | -0.900984629770574 201 | -0.975359737401322 202 | -1.023256984807047 203 | -1.350214126639643 204 | -1.257350153002016 205 | -1.093041592907463 206 | -0.900127220717386 207 | -0.771471010069413 208 | -0.707072960963543 209 | -0.706933073399777 210 | -0.771051347378114 211 | -0.849495841998174 212 | -0.899427782898555 213 | -1.056517641369012 214 | -0.967723054720698 215 | -0.811553268604770 216 | -0.630847057382630 217 | -0.514399007702594 218 | -0.462209119564662 219 | -0.474277392968833 220 | -0.550603827915108 221 | -0.637187096513793 222 | -0.691188424403486 223 | -0.714520526767850 224 | -0.631830020603504 225 | 
-0.487868395455513 226 | -0.325474425685280 227 | -0.227338617457150 228 | -0.193460970771124 229 | -0.223841485627201 230 | -0.318480162025382 231 | -0.417271591592004 232 | -0.477376999965666 233 | -0.490780548132429 234 | -0.414194122452051 235 | -0.282440658271998 236 | -0.138358929953671 237 | -0.058535363177447 238 | -0.042969957943327 239 | -0.091662714251310 240 | -0.204613632101397 241 | -0.315613222635957 242 | -0.381822711493587 243 | -0.385297705462748 244 | -0.314815360266339 245 | -0.195270057054223 246 | -0.069500570187802 247 | -0.007989244863485 248 | -0.010736081081270 249 | -0.077741078841160 250 | -0.209004238143153 251 | -0.332211989645650 252 | -0.404525558987249 253 | -0.398071998758808 254 | -0.333693734046368 255 | -0.226356591802190 256 | -0.118899346387675 257 | -0.075700262515263 258 | -0.096759340184955 259 | -0.182076579396751 260 | -0.331651980150650 261 | -0.467067892621084 262 | -0.545485542446652 263 | -0.529103428020609 264 | -0.470829243792138 265 | -0.375700262515897 266 | -0.286555258553288 267 | -0.261668416132782 268 | -0.301039735254381 269 | -0.404669215918082 270 | -0.572556858123887 271 | -0.720180931562259 272 | -0.804702661871796 273 | -0.682156123064999 274 | -0.627951325825841 275 | -0.540961118528224 276 | -0.464024275533553 277 | -0.451345594080985 278 | -0.502925074170520 279 | -0.618762715802159 280 | -0.798858518975902 281 | -0.954621366392899 282 | -1.043212483691748 283 | -0.778391993248151 284 | -0.726221889503649 285 | -0.643301069195345 286 | -0.572468306684642 287 | -0.565893705716043 288 | -0.623577266289547 289 | -0.745518988405155 290 | -0.931718872062866 291 | -1.091551106469175 292 | -1.182176917262681] 293 | 294 | bet = [0.8 0.5; 0.2 0.6] 295 | gamm = [-0.8, 0.0] 296 | Omega = [0.3 0.0; 0.0 1.0] 297 | s_target = [0., 1.] 
298 | alpha = [0.9, 0.4] 299 | Sigma = 0.04 * Matrix(I, 2, 2) 300 | discount = 0.9; 301 | 302 | k = [3, 3]; 303 | m = [10, 10]; 304 | breaks = m - (k.-1); 305 | s_min = [-15, -10]; 306 | s_max = [15, 10]; 307 | basis = Basis(map(SplineParams, breaks, s_min, s_max, k)...); 308 | 309 | s_star = [0., 1.]; # steady-state states 310 | x_star = (s_star[1] - alpha[1] - dot(bet[1,:], s_star)) / gamm[1]; 311 | e_star = [0.0, 0.0]; 312 | 313 | f(s, x) = -(s - s_target)' * Omega * (s - s_target) / 2 314 | g(s, x, e) = alpha + bet * s + gamm * x + e 315 | 316 | f_star = f(s_star, x_star) 317 | Df_star = [0.0, 0.0, 0.0] 318 | D²f_star = [-0.3 0.0 0.0; 319 | 0.0 -1.0 0.0; 320 | 0.0 0.0 0.0] 321 | g_star = g(s_star, x_star, e_star) 322 | Dg_star = [0.8 0.5 -0.8; 323 | 0.2 0.6 0.0] 324 | 325 | lq = approx_lq(s_star, x_star, f_star, Df_star, D²f_star, g_star, 326 | Dg_star, discount); 327 | 328 | P, F, d = stationary_values(lq) 329 | 330 | states = [ones(100) nodes(basis)[1]] 331 | vlq_approx = -[states[i, 1:3]' * P * states[i, 1:3] for i ∈ 1:100] 332 | 333 | @test isapprox(vlq_approx, vlq) 334 | end 335 | 336 | end 337 | -------------------------------------------------------------------------------- /src/cdp.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Tools for representing and solving dynamic programs with continuous states. 3 | 4 | Implement the Bellman equation collocation method as described in Mirand and 5 | Fackler (2002), Chapter 9. 6 | 7 | References 8 | ---------- 9 | * M. J. Miranda and P. L. Fackler, Applied Computational Economics and Finance, 10 | MIT press, 2002. 
11 | 12 | =# 13 | using BasisMatrices 14 | import Optim 15 | using FiniteDiff 16 | import QuantEcon.ScalarOrArray 17 | 18 | 19 | #= Types and contructors =# 20 | 21 | """ 22 | Interp{N,TS,TM,TL} 23 | 24 | Type that contains information about interpolation 25 | 26 | # Fields 27 | 28 | - `basis::Basis{N}`: Object that contains interpolation basis information 29 | - `S::TS<:VecOrMat`: Vector or Matrix that contains interpolation nodes 30 | - `Scoord::NTuple{N,Vector{Float64}}` Tuple that contains transformed 31 | interpolation nodes 32 | - `length::Int`: Degree of interpolation at tensor grid 33 | - `size::NTuple{N,Int}`: Tuple that contains degree of interpolation at each 34 | dimension 35 | - `lb::NTuple{N,Float64}`: Lower bound of domain 36 | - `ub::NTuple{N,Float64}`: Upper bound of domain 37 | - `Phi::TM<:AbstractMatrix`: Interpolation basis matrix 38 | - `Phi_lu::TL<:Factorization`: LU factorized interpolation basis matrix 39 | """ 40 | struct Interp{N,TS<:VecOrMat,TM<:AbstractMatrix,TL<:Factorization} 41 | basis::Basis{N} 42 | S::TS 43 | Scoord::NTuple{N,Vector{Float64}} 44 | length::Int 45 | size::NTuple{N,Int} 46 | lb::NTuple{N,Float64} 47 | ub::NTuple{N,Float64} 48 | Phi::TM 49 | Phi_lu::TL 50 | end 51 | 52 | """ 53 | Interp(basis) 54 | 55 | Constructor for `Interp` 56 | 57 | # Arguments 58 | 59 | -`basis::Basis`: Object that contains interpolation basis information 60 | """ 61 | function Interp(basis::Basis) 62 | S, Scoord = nodes(basis) 63 | grid_length = length(basis) 64 | grid_size = size(basis) 65 | grid_lb, grid_ub = min(basis), max(basis) 66 | Phi = BasisMatrix(basis, Expanded(), S).vals[1] 67 | Phi_lu = lu(Phi) 68 | interp = Interp(basis, S, Scoord, grid_length, grid_size, grid_lb, grid_ub, 69 | Phi, Phi_lu) 70 | end 71 | 72 | 73 | """ 74 | ContinuousDP{N,TR,TS,Tf,Tg,Tlb,Tub} 75 | 76 | Type that reperesents a continuous-state dynamic program 77 | 78 | # Fields 79 | 80 | - `f::Tf<:Function`: Reward function 81 | - `g::Tg<:Function`: State transition 
function 82 | - `discount::Float64`: Discount factor 83 | - `shocks::TR<:AbstractVecOrMat`: Random variables' nodes 84 | - `weights::Vector{Float64}`: Random variables' weights 85 | - `x_lb::Tlb<:Function`: Lower bound of action variables 86 | - `x_ub::Tub<:Function`: Upper bound of action variables 87 | - `interp::Interp{N,TS<:VecOrMat}`: Object that contains information about 88 | interpolation 89 | """ 90 | mutable struct ContinuousDP{N,TR<:AbstractVecOrMat,TS<:VecOrMat, 91 | Tf<:Function,Tg<:Function, 92 | Tlb<:Function,Tub<:Function} 93 | f::Tf 94 | g::Tg 95 | discount::Float64 96 | shocks::TR 97 | weights::Vector{Float64} 98 | x_lb::Tlb 99 | x_ub::Tub 100 | interp::Interp{N,TS} 101 | end 102 | 103 | """ 104 | ContinuousDP(f, g, discount, shocks, weights, x_lb, x_ub, basis) 105 | 106 | Constructor for `ContinuousDP` 107 | 108 | # Arguments 109 | - `f::Tf<:Function`: Reward function 110 | - `g::Tg<:Function`: State transition function 111 | - `discount::Float64`: Discount factor 112 | - `shocks::TR<:AbstractVecOrMat`: Random variables' nodes 113 | - `weights::Vector{Float64}`: Random variables' weights 114 | - `x_lb::Tlb<:Function`: Lower bound of action variables 115 | - `x_ub::Tub<:Function`: Upper bound of action variables 116 | - `basis::Basis`: Object that contains interpolation basis information 117 | """ 118 | function ContinuousDP(f::Function, g::Function, discount::Float64, 119 | shocks::Array{Float64}, weights::Vector{Float64}, 120 | x_lb::Function, x_ub::Function, 121 | basis::Basis) 122 | interp = Interp(basis) 123 | cdp = ContinuousDP(f, g, discount, shocks, weights, x_lb, x_ub, interp) 124 | return cdp 125 | end 126 | 127 | 128 | """ 129 | CDPSolveResult{Algo,N,TR,TS} 130 | 131 | Type that contains the solution result of continuous-state dynamic programming 132 | 133 | # Fields 134 | 135 | - `cdp::ContinuousDP{N,TR,TS}`: Object that contains model paramers 136 | - `tol::Float64`: Convergence criteria 137 | - `max_iter::Int`: Maximum number of 
iteration 138 | - `C::Vector{Float64}`: Basis coefficients vector 139 | - `converged::Bool`: Bool that shows whether model converges 140 | - `num_iter::Int`: Number of iteration until model converges 141 | - `eval_nodes::TS<:VecOrMat`: Evaluation vector or matrix 142 | - `eval_nodes_coord::NTuple{N,Vector{Float64}}`: Tuple that contains evaluation 143 | transformed grids 144 | - `V::Vector{Float64}`: Computed value function 145 | - `X::Vector{Float64}`: Computed policy function 146 | - `resid::Vector{Float64}`: Residuals of basis coefficients 147 | """ 148 | mutable struct CDPSolveResult{Algo<:DPAlgorithm,N, 149 | TR<:AbstractVecOrMat,TS<:VecOrMat} 150 | cdp::ContinuousDP{N,TR,TS} 151 | tol::Float64 152 | max_iter::Int 153 | C::Vector{Float64} 154 | converged::Bool 155 | num_iter::Int 156 | eval_nodes::TS 157 | eval_nodes_coord::NTuple{N,Vector{Float64}} 158 | V::Vector{Float64} 159 | X::Vector{Float64} 160 | resid::Vector{Float64} 161 | 162 | function CDPSolveResult{Algo,N,TR,TS}( 163 | cdp::ContinuousDP{N,TR,TS}, tol::Float64, max_iter::Integer 164 | ) where {Algo,N,TR,TS} 165 | C = zeros(cdp.interp.length) 166 | converged = false 167 | num_iter = 0 168 | eval_nodes = cdp.interp.S 169 | eval_nodes_coord = cdp.interp.Scoord 170 | V = Float64[] 171 | X = Float64[] 172 | resid = Float64[] 173 | res = new{Algo,N,TR,TS}(cdp, tol, max_iter, C, converged, num_iter, 174 | eval_nodes, eval_nodes_coord, V, X, resid) 175 | return res 176 | end 177 | end 178 | 179 | Base.ndims(::ContinuousDP{N}) where {N} = N 180 | Base.ndims(::CDPSolveResult{Algo,N}) where {Algo,N} = N 181 | 182 | """ 183 | evaluate!(res) 184 | 185 | Evaluate the value function and the policy function at each point 186 | 187 | # arguments 188 | 189 | - `res::CDPSolveResult`: Object to store the result of dynamic programming 190 | """ 191 | function evaluate!(res::CDPSolveResult) 192 | cdp, C, s_nodes = res.cdp, res.C, res.eval_nodes 193 | res.V, res.X = s_wise_max(cdp, s_nodes, C) 194 | res.resid = res.V - 
vec(funeval(C, cdp.interp.basis, s_nodes)) 195 | return res 196 | end 197 | 198 | function set_eval_nodes!( 199 | res::CDPSolveResult{Algo,1}, s_nodes_coord::NTuple{1,Vector{Float64}} 200 | ) where {Algo} 201 | s_nodes = s_nodes_coord[1] 202 | res.eval_nodes = s_nodes 203 | res.eval_nodes_coord = s_nodes_coord 204 | evaluate!(res) 205 | end 206 | 207 | function set_eval_nodes!( 208 | res::CDPSolveResult{Algo,N}, s_nodes_coord::NTuple{N,Vector{Float64}} 209 | ) where {Algo,N} 210 | s_nodes = gridmake(s_nodes_coord...) 211 | res.eval_nodes = s_nodes 212 | res.eval_nodes_coord = s_nodes_coord 213 | evaluate!(res) 214 | end 215 | 216 | function set_eval_nodes!( 217 | res::CDPSolveResult{Algo,N}, s_nodes_coord::NTuple{N,AbstractVector} 218 | ) where {Algo,N} 219 | T = Float64 220 | s_nodes_coord_vecs = 221 | ntuple(i -> collect(T, s_nodes_coord[i]), N)::NTuple{N,Vector{T}} 222 | set_eval_nodes!(res, s_nodes_coord_vecs) 223 | end 224 | 225 | function set_eval_nodes!( 226 | res::CDPSolveResult{Algo,N}, s_nodes_coord::Vararg{AbstractVector,N} 227 | ) where {Algo,N} 228 | set_eval_nodes!(res, s_nodes_coord) 229 | end 230 | 231 | @doc """ 232 | set_eval_nodes!(res, s_nodes_coord) 233 | 234 | Set evaluation nodes 235 | 236 | # Arguments 237 | 238 | - `res::CDPSolveResult`: Object that contains the result of dynamic programming 239 | - `s_nodes_coord::NTuple{N,AbstractVector}`: Evaluation nodes 240 | """ set_eval_nodes! 
241 | 242 | function (res::CDPSolveResult)(s_nodes::AbstractArray{Float64}) 243 | cdp, C = res.cdp, res.C 244 | V, X = s_wise_max(cdp, s_nodes, C) 245 | resid = V - vec(funeval(C, cdp.interp.basis, s_nodes)) 246 | return V, X, resid 247 | end 248 | 249 | 250 | #= Methods =# 251 | 252 | """ 253 | _s_wise_max(cdp, s, C) 254 | 255 | Find optimal value and policy for each grid point 256 | 257 | # Arguments 258 | 259 | - `cdp::ContinuousDP`: Object that contains model parameters 260 | - `s::AbstractArray{Float64}`: Interpolation nodes 261 | - `C::Vector{Float64}`: Basis coefficients vector 262 | 263 | # Returns 264 | 265 | - `v::Vector{Float64}`: Updated value function vector 266 | - `x::Vector{Float64}`: Updated policy function vector 267 | """ 268 | function _s_wise_max(cdp::ContinuousDP, s, C) 269 | sp = Array{Float64}(undef, size(cdp.shocks, 1), length(s)) 270 | function objective(x) 271 | for i in 1:size(sp, 1) 272 | sp[i, :] .= cdp.g(s, x, cdp.shocks[i, axes(cdp.shocks)[2:end]...]) 273 | end 274 | Vp = funeval(C, cdp.interp.basis, sp) 275 | cont = cdp.discount * dot(cdp.weights, Vp) 276 | flow = cdp.f(s, x) 277 | -1 * (flow + cont) 278 | end 279 | res = Optim.optimize(objective, cdp.x_lb(s), cdp.x_ub(s)) 280 | v = -res.minimum::Float64 281 | x = res.minimizer::Float64 282 | return v, x 283 | end 284 | 285 | """ 286 | s_wise_max!(cdp, ss, C, Tv) 287 | 288 | Find optimal value for each grid point 289 | 290 | # Arguments 291 | 292 | - `cdp::ContinuousDP`: Object that contains model parameters 293 | - `ss::AbstractArray{Float64}`: interpolation nodes 294 | - `C::Vector{Float64}`: Basis coefficients vector 295 | - `Tv::Vector{Float64}`: A buffer array to hold the updated value function 296 | 297 | # Returns 298 | 299 | - `Tv::Vector{Float64}`: Updated value function vector 300 | """ 301 | function s_wise_max!(cdp::ContinuousDP, ss::AbstractArray{Float64}, 302 | C::Vector{Float64}, Tv::Vector{Float64}) 303 | n = size(ss, 1) 304 | t = Base.tail(axes(ss)) 305 | for i in 
1:n 306 | Tv[i], _ = _s_wise_max(cdp, ss[(i, t...)...], C) 307 | end 308 | return Tv 309 | end 310 | 311 | """ 312 | s_wise_max!(cdp, ss, C, Tv) 313 | 314 | Find optimal value and policy for each grid point 315 | 316 | # Arguments 317 | 318 | - `cdp::ContinuousDP`: Object that contains model parameters 319 | - `ss::AbstractArray{Float64}`: interpolation nodes 320 | - `C::Vector{Float64}`: Basis coefficients vector 321 | - `Tv::Vector{Float64}`: A buffer array to hold the updated value function 322 | - `X::Vector{Float64}`: A buffer array to hold the updeted policy function 323 | 324 | # Returns 325 | 326 | - `Tv::Vector{Float64}`: Updated value function vector 327 | - `X::Vector{Float64}`: Updated policy function vector 328 | """ 329 | function s_wise_max!(cdp::ContinuousDP, ss::AbstractArray{Float64}, 330 | C::Vector{Float64}, Tv::Vector{Float64}, 331 | X::Vector{Float64}) 332 | n = size(ss, 1) 333 | t = Base.tail(axes(ss)) 334 | for i in 1:n 335 | Tv[i], X[i] = _s_wise_max(cdp, ss[(i, t...)...], C) 336 | end 337 | return Tv, X 338 | end 339 | 340 | """ 341 | s_wise_max(cdp, ss, C) 342 | 343 | Find optimal value and policy for each grid point 344 | 345 | # Arguments 346 | 347 | - `cdp::ContinuousDP`: Object that contains model parameters 348 | - `ss::AbstractArray{Float64}`: Interpolation nodes 349 | - `C::Vector{Float64}`: Basis coefficients vector 350 | 351 | # Returns 352 | 353 | - `Tv::Vector{Float64}`: Value function vector 354 | - `X::Vector{Float64}`: Policy function vector 355 | """ 356 | function s_wise_max(cdp::ContinuousDP, ss::AbstractArray{Float64}, 357 | C::Vector{Float64}) 358 | n = size(ss, 1) 359 | Tv, X = Array{Float64}(undef, n), Array{Float64}(undef, n) 360 | s_wise_max!(cdp, ss, C, Tv, X) 361 | end 362 | 363 | 364 | """ 365 | bellman_operator!(cdp, C, Tv) 366 | 367 | Update basis coefficients. 
Values are stored in `Tv` 368 | 369 | # Arguments 370 | 371 | - `cdp::ContinuousDP`: Object that contains model parameters 372 | - `C::Vector{Float64}`: Basis coefficients vector 373 | - `Tv::Vector{Float64}`: Vector to store values 374 | 375 | # Returns 376 | 377 | - `C::Vector{Float64}`: Updated basis coefficients vector 378 | """ 379 | function bellman_operator!(cdp::ContinuousDP, C::Vector{Float64}, 380 | Tv::Vector{Float64}) 381 | Tv = s_wise_max!(cdp, cdp.interp.S, C, Tv) 382 | ldiv!(C, cdp.interp.Phi_lu, Tv) 383 | return C 384 | end 385 | 386 | 387 | """ 388 | compute_greedy!(cdp, C, X) 389 | compute_greedy!(cdp, ss, C, X) 390 | 391 | Updates policy function vector 392 | 393 | # Arguments 394 | 395 | - `cdp::ContinuousDP`: Object that contains model parameters 396 | - `ss::AbstractArray{Float64}`: Interpolation nodes 397 | - `C::Vector{Float64}`: Basis coefficients vector 398 | - `X::Vector{Float64}`: A buffer array to hold the updated policy function. 399 | 400 | # Returns 401 | 402 | - `X::Vector{Float64}`: Updated policy function vector 403 | """ 404 | function compute_greedy!(cdp::ContinuousDP, ss::AbstractArray{Float64}, 405 | C::Vector{Float64}, X::Vector{Float64}) 406 | n = size(ss, 1) 407 | t = Base.tail(axes(ss)) 408 | for i in 1:n 409 | _, X[i] = _s_wise_max(cdp, ss[(i, t...)...], C) 410 | end 411 | return X 412 | end 413 | 414 | compute_greedy!(cdp::ContinuousDP, C::Vector{Float64}, X::Vector{Float64}) = 415 | compute_greedy!(cdp, cdp.interp.S, C, X) 416 | 417 | """ 418 | evaluate_policy!(cdp, X, C) 419 | 420 | Update basis coefficients 421 | 422 | # Arguments 423 | 424 | - `cdp::ContinuousDP`: Object that contains model parameters 425 | - `X::Vector{Float64}`: Policy function vector 426 | - `C::Vector{Float64}`: A buffer array to hold the basis coefficients 427 | 428 | # Returns 429 | 430 | - `C::Vector{Float64}`: Updated basis coefficients vector 431 | """ 432 | function evaluate_policy!(cdp::ContinuousDP{N}, X::Vector{Float64}, 433 | 
C::Vector{Float64}) where N 434 | n = size(cdp.interp.S, 1) 435 | ts = Base.tail(axes(cdp.interp.S)) 436 | te = Base.tail(axes(cdp.shocks)) 437 | A = Array{Float64}(undef, n, n) 438 | A[:] = cdp.interp.Phi 439 | for i in 1:n 440 | s = cdp.interp.S[(i, ts...)...] 441 | for (j, w) in enumerate(cdp.weights) 442 | e = cdp.shocks[(j, te...)...] 443 | s_next = cdp.g(s, X[i], e) 444 | A[i, :] -= ckron( 445 | [vec(evalbase(cdp.interp.basis.params[k], s_next[k])) 446 | for k in N:-1:1]... 447 | ) * cdp.discount * w 448 | end 449 | end 450 | A_lu = lu(A) 451 | for i in 1:n 452 | s = cdp.interp.S[(i, ts...)...] 453 | C[i] = cdp.f(s, X[i]) 454 | end 455 | ldiv!(A_lu, C) 456 | return C 457 | end 458 | 459 | 460 | """ 461 | policy_iteration_operator!(cdp, C, X) 462 | 463 | Update basis coefficients by policy function iteration 464 | 465 | # Arguments 466 | 467 | - `cdp::ContinuousDP`: Object that contains model parameters 468 | - `C::Vector{Float64}`: Basis coefficients vector 469 | - `X::Vector{Float64}`: A buffer array to hold the updated policy function 470 | 471 | # Returns 472 | 473 | - `C::Vector{Float64}` Updated basis coefficients vector 474 | """ 475 | function policy_iteration_operator!(cdp::ContinuousDP, C::Vector{Float64}, 476 | X::Vector{Float64}) 477 | compute_greedy!(cdp, C, X) 478 | evaluate_policy!(cdp, X, C) 479 | return C 480 | end 481 | 482 | 483 | """ 484 | operator_iteration!(T, C, tol, max_iter; verbose=2, print_skip=50) 485 | 486 | Updates basis coefficients until it converges. 
487 | 488 | # Arguments 489 | 490 | - `T::Function`: Function that updates basis coefficients by VFI or PFI 491 | - `C::Vector{Float64}`: initial basis coefficients vector 492 | - `tol::Float64`: Tolerance to be used to update basis coefficients 493 | - `max_iter::Int`: The maximum number of iteration 494 | - `verbose::Int`: Level of feedback (0 for no output, 1 for warnings only, 2 for 495 | warning and convergence messages during iteration) 496 | - `print_skip::Int`: if verbose == 2, how many iterations to apply between print 497 | messages 498 | 499 | # Returns 500 | 501 | - `converged::Bool`: Bool that shows whether basis coefficients vector converges 502 | - `i::Int`: Number of iteration it took to converge 503 | """ 504 | function operator_iteration!(T::Function, C::TC, tol::Float64, max_iter; 505 | verbose::Int=2, print_skip::Int=50) where TC 506 | converged = false 507 | i = 0 508 | 509 | if max_iter <= 0 510 | if verbose >= 1 511 | @warn("No computation performed with max_iter=$max_iter") 512 | end 513 | return converged, i 514 | end 515 | 516 | err = tol + 1 517 | C_old = similar(C) 518 | while true 519 | copyto!(C_old, C) 520 | C = T(C)::TC 521 | err = maximum(abs, C - C_old) 522 | i += 1 523 | (err <= tol) && (converged = true) 524 | 525 | (converged || i >= max_iter) && break 526 | 527 | if (verbose == 2) && (i % print_skip == 0) 528 | println("Compute iterate $i with error $err") 529 | end 530 | end 531 | 532 | if verbose == 2 533 | println("Compute iterate $i with error $err") 534 | end 535 | 536 | if verbose >= 1 537 | if !converged 538 | @warn("max_iter attained") 539 | elseif verbose == 2 540 | println("Converged in $i steps") 541 | end 542 | end 543 | 544 | return converged, i 545 | end 546 | 547 | 548 | #= Solve methods =# 549 | 550 | """ 551 | 552 | solve(cdp, method=PFI; v_init=zeros(cdp.interp.length), tol=sqrt(eps()), 553 |  max_iter=500, verbose=2, print_skip=50) 554 | 555 | Solve the continuous-state dynamic program 556 | 557 | # Arguments 
558 | 559 | - `cdp::ContinuousDP`: Object that contains model parameters 560 | - `method::Type{T cdp.f(z[s_range], z[x_range]) 665 | g_vec = z -> cdp.g(z[s_range], z[x_range], e_star) 666 | 667 | Df_star = gradient(f_vec, z_star) 668 | DDf_star = hessian(f_vec, z_star) 669 | Dg_star = s_is_nb ? gradient(g_vec, z_star)' : jacobian(g_vec, z_star) 670 | 671 | # Construct LQ approximation instance 672 | lq = approx_lq(s_star, x_star, f_star, Df_star, DDf_star, g_star, Dg_star, 673 | cdp.discount) 674 | 675 | # Solve LQ problem 676 | P, F, d = stationary_values(lq) 677 | 678 | # Compute value function 679 | v(s) = -([1, s...]' * P * [1, s...] + d) 680 | v_vals = [v(cdp.interp.S[i, :]) for i in 1:length(cdp.interp.basis)] 681 | 682 | # Back out basis coefficients 683 | ldiv!(res.C, cdp.interp.Phi_lu, v_vals) 684 | 685 | res.converged = true 686 | end 687 | 688 | #= Simulate methods =# 689 | 690 | """ 691 | simulate!([rng=GLOBAL_RNG], s_path, res, s_init) 692 | 693 | Generate a sample path of state variable(s) 694 | 695 | # Arguments 696 | 697 | - `rng::AbstractRNG`: Random number generator 698 | - `s_path::VecOrMat`: Array to store the generated sample path 699 | - `res::CDPSolveResult`: Object that contains result of dynamic programming 700 | - `s_init`: Initial value of state variable(s) 701 | 702 | # Return 703 | 704 | - `s_path::VecOrMat`:: Generated sample path of state variable(s) 705 | """ 706 | function simulate!(rng::AbstractRNG, s_path::TS, 707 | res::CDPSolveResult{Algo,N,TR,TS}, 708 | s_init) where {Algo,N,TR,TS<:VecOrMat} 709 | ts_length = size(s_path)[end] 710 | cdf = cumsum(res.cdp.weights) 711 | r = rand(rng, ts_length - 1) 712 | e_ind = Array{Int}(undef, ts_length - 1) 713 | for t in 1:ts_length - 1 714 | e_ind[t] = searchsortedlast(cdf, r[t]) + 1 715 | end 716 | 717 | basis = Basis(map(LinParams, res.eval_nodes_coord, ntuple(i -> 0, N))) 718 | X_interp = Interpoland(basis, res.X) 719 | 720 | s_ind_front = Base.front(axes(s_path)) 721 | e_ind_tail = 
Base.tail(axes(res.cdp.shocks)) 722 | s_path[(s_ind_front..., 1)...] = s_init 723 | for t in 1:ts_length - 1 724 | s = s_path[(s_ind_front..., t)...] 725 | x = X_interp(s) 726 | e = res.cdp.shocks[(e_ind[t], e_ind_tail...)...] 727 | s_path[(s_ind_front..., t + 1)...] = res.cdp.g(s, x, e) 728 | end 729 | 730 | return s_path 731 | end 732 | 733 | simulate!(s_path::VecOrMat{Float64}, res::CDPSolveResult, s_init) = 734 | simulate!(Random.GLOBAL_RNG, s_path, res, s_init) 735 | 736 | """ 737 | simulate([rng=GLOBAL_RNG], res, s_init, ts_length) 738 | 739 | Generate a sample path of state variable(s) 740 | 741 | # Arguments 742 | 743 | - `rng::AbstractRNG`: Random number generator 744 | - `res::CDPSolveResult`: Object that contains result of dynamic programming 745 | - `s_init`: Initial value of state variable(s) 746 | - `ts_length::Integer`: Length of simulation 747 | 748 | # Return 749 | 750 | - `s_path::VecOrMat`:: Generated sample path of state variable(s) 751 | """ 752 | function simulate(rng::AbstractRNG, res::CDPSolveResult{Algo,1}, s_init::Real, 753 | ts_length::Integer) where {Algo<:DPAlgorithm} 754 | s_path = Array{Float64}(undef, ts_length) 755 | simulate!(rng, s_path, res, s_init) 756 | return s_path 757 | end 758 | 759 | simulate(res::CDPSolveResult{Algo,1}, s_init::Real, 760 | ts_length::Integer) where {Algo<:DPAlgorithm} = 761 | simulate(Random.GLOBAL_RNG, res, s_init, ts_length) 762 | 763 | function simulate(rng::AbstractRNG, res::CDPSolveResult, s_init::Vector, 764 | ts_length::Integer) 765 | s_path = Array{Float64}(undef, length(s_init), ts_length) 766 | simulate!(rng, s_path, res, s_init) 767 | return s_path 768 | end 769 | 770 | simulate(res::CDPSolveResult, s_init::Vector, ts_length::Integer) = 771 | simulate(Random.GLOBAL_RNG, res, s_init, ts_length) 772 | --------------------------------------------------------------------------------