├── benchmark
│   ├── .gitignore
│   ├── Project.toml
│   ├── benchmarks.jl
│   ├── generators.jl
│   └── README.md
├── .gitignore
├── docs
│   ├── src
│   │   ├── lib
│   │   │   ├── index.md
│   │   │   ├── repeated_games.md
│   │   │   ├── util.md
│   │   │   ├── base_types_and_methods.md
│   │   │   ├── learning_algorithms.md
│   │   │   ├── game_generators.md
│   │   │   └── computing_nash_equilibria.md
│   │   └── index.md
│   ├── Project.toml
│   ├── make.jl
│   └── README.md
├── test
│   ├── generators
│   │   ├── runtests.jl
│   │   └── test_bimatrix_generators.jl
│   ├── runtests.jl
│   ├── util.jl
│   ├── test_logitdyn.jl
│   ├── test_lrsnash.jl
│   ├── test_homotopy_continuation.jl
│   ├── test_brd.jl
│   ├── test_pure_nash.jl
│   ├── test_localint.jl
│   ├── test_support_enumeration.jl
│   ├── test_random.jl
│   ├── test_lemke_howson.jl
│   ├── test_fictplay.jl
│   ├── test_repeated_game.jl
│   └── test_normal_form_game.jl
├── src
│   ├── generators
│   │   ├── Generators.jl
│   │   └── bimatrix_generators.jl
│   ├── util.jl
│   ├── lrsnash.jl
│   ├── pure_nash.jl
│   ├── GameTheory.jl
│   ├── homotopy_continuation.jl
│   ├── logitdyn.jl
│   ├── random.jl
│   ├── support_enumeration.jl
│   ├── brd.jl
│   ├── localint.jl
│   ├── fictplay.jl
│   └── lemke_howson.jl
├── .github
│   ├── dependabot.yml
│   ├── workflows
│   │   ├── TagBot.yml
│   │   ├── ci-nighty.yml
│   │   ├── CompatHelper.yml
│   │   └── ci.yml
│   └── copilot-instructions.md
├── LICENSE.md
├── Project.toml
└── README.md
/benchmark/.gitignore: -------------------------------------------------------------------------------- 1 | /tune.json 2 | -------------------------------------------------------------------------------- /benchmark/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.jl.cov 2 | *.jl.*.cov 3 | *.jl.mem 4 | docs/build/ 5 | docs/site/ 6 | Manifest.toml 7 | -------------------------------------------------------------------------------- /docs/src/lib/index.md: -------------------------------------------------------------------------------- 1 | # Index 2 | 3 | ```@index 4 | Modules = [GameTheory, GameTheory.Generators] 5 | ``` 6 | -------------------------------------------------------------------------------- /docs/Project.toml: -------------------------------------------------------------------------------- 1 | [deps] 2 | Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" 3 | GameTheory = "64a4ffa8-f47c-4a47-8dad-aee7aadc3b51" 4 | -------------------------------------------------------------------------------- /test/generators/runtests.jl: -------------------------------------------------------------------------------- 1 | using GameTheory: pure_nash 2 | using QuantEcon: MVNSampler 3 | using GameTheory.Generators 4 | 5 | include("test_bimatrix_generators.jl") 6 | -------------------------------------------------------------------------------- /src/generators/Generators.jl: -------------------------------------------------------------------------------- 1 | module Generators 2 | 3 | using Random 4 | 5 | using GameTheory: Player, NormalFormGame 6 | 7 | include("bimatrix_generators.jl") 8 | export blotto_game, ranking_game, sgc_game, unit_vector_game, tournament_game 9 | 10 | end # module 11 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: 
"github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "weekly" 8 | day: "saturday" 9 | commit-message: 10 | prefix: "ci" 11 | include: "scope" 12 | -------------------------------------------------------------------------------- /docs/src/lib/repeated_games.md: -------------------------------------------------------------------------------- 1 | # [Repeated Games](@id repeated_games) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory] 6 | Pages = ["repeated_game.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory] 13 | Pages = ["repeated_game.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/src/lib/util.md: -------------------------------------------------------------------------------- 1 | # [Utilities](@id util) 2 | 3 | This is documentation for `util.jl`. 4 | 5 | ## Exported 6 | 7 | ```@autodocs 8 | Modules = [GameTheory] 9 | Pages = ["util.jl"] 10 | Private = false 11 | ``` 12 | 13 | ## Internal 14 | 15 | ```@autodocs 16 | Modules = [GameTheory] 17 | Pages = ["util.jl"] 18 | Public = false 19 | ``` 20 | -------------------------------------------------------------------------------- /src/util.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Utility functions used in GameTheory.jl 3 | =# 4 | 5 | """ 6 | clp_optimizer_silent() 7 | 8 | Function that returns a `Clp.Optimizer` instance in silent mode. 9 | """ 10 | function clp_optimizer_silent() 11 | optimizer = Clp.Optimizer() 12 | MOI.set(optimizer, MOI.Silent(), true) 13 | return optimizer 14 | end 15 | -------------------------------------------------------------------------------- /docs/src/lib/base_types_and_methods.md: -------------------------------------------------------------------------------- 1 | # [Base Types and Methods](@id base_types_and_methods) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory] 6 | Pages = ["GameTheory.jl", "normal_form_game.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory] 13 | Pages = ["GameTheory.jl", "normal_form_game.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/src/lib/learning_algorithms.md: -------------------------------------------------------------------------------- 1 | # [Learning Algorithms](@id learning_algorithms) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory] 6 | Pages = ["brd.jl", "fictplay.jl", "localint.jl", "logitdyn.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory] 13 | Pages = ["brd.jl", "fictplay.jl", "localint.jl", "logitdyn.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/src/lib/game_generators.md: -------------------------------------------------------------------------------- 1 | # [Game Generators](@id game_generators) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory, GameTheory.Generators] 6 | Pages = ["random.jl", "generators/bimatrix_generators.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory, GameTheory.Generators] 13 | Pages = ["random.jl", "generators/bimatrix_generators.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /.github/workflows/TagBot.yml: 
9 | """ 10 | function clp_optimizer_silent() 11 | optimizer = Clp.Optimizer() 12 | MOI.set(optimizer, MOI.Silent(), true) 13 | return optimizer 14 | end 15 | -------------------------------------------------------------------------------- /docs/src/lib/base_types_and_methods.md: -------------------------------------------------------------------------------- 1 | # [Base Types and Methods](@id base_types_and_methods) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory] 6 | Pages = ["GameTheory.jl", "normal_form_game.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory] 13 | Pages = ["GameTheory.jl", "normal_form_game.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/src/lib/learning_algorithms.md: -------------------------------------------------------------------------------- 1 | # [Learning Algorithms](@id learning_algorithms) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory] 6 | Pages = ["brd.jl", "fictplay.jl", "localint.jl", "logitdyn.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory] 13 | Pages = ["brd.jl", "fictplay.jl", "localint.jl", "logitdyn.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/src/lib/game_generators.md: -------------------------------------------------------------------------------- 1 | # [Game Generators](@id game_generators) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory, GameTheory.Generators] 6 | Pages = ["random.jl", "generators/bimatrix_generators.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory, GameTheory.Generators] 13 | Pages = ["random.jl", "generators/bimatrix_generators.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /.github/workflows/TagBot.yml: -------------------------------------------------------------------------------- 1 | name: TagBot 2 | on: 3 | issue_comment: 4 | types: 5 | - created 6 | workflow_dispatch: 7 | inputs: 8 | lookback: 9 | default: "3" 10 | jobs: 11 | TagBot: 12 | if: github.event_name == 'workflow_dispatch' || github.actor == 'JuliaTagBot' 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: JuliaRegistries/TagBot@v1 16 | with: 17 | token: ${{ secrets.GITHUB_TOKEN }} 18 | ssh: ${{ secrets.DOCUMENTER_KEY }} 19 | -------------------------------------------------------------------------------- /docs/src/lib/computing_nash_equilibria.md: -------------------------------------------------------------------------------- 1 | # [Computing Nash Equilibria](@id computing_nash_equilibria) 2 | 3 | ## Exported 4 | ```@autodocs 5 | Modules = [GameTheory] 6 | Pages = ["pure_nash.jl", "lemke_howson.jl", "support_enumeration.jl", "lrsnash.jl", "homotopy_continuation.jl"] 7 | Private = false 8 | ``` 9 | 10 | ## Internal 11 | ```@autodocs 12 | Modules = [GameTheory] 13 | Pages = ["pure_nash.jl", "lemke_howson.jl", "support_enumeration.jl", "lrsnash.jl", "homotopy_continuation.jl"] 14 | Public = false 15 | ``` 16 | -------------------------------------------------------------------------------- /test/runtests.jl: -------------------------------------------------------------------------------- 1 | using GameTheory 2 | using Test 3 | 4 | include("util.jl") 5 | 6 | include("test_pure_nash.jl") 7 | include("test_repeated_game.jl") 8 | include("test_normal_form_game.jl") 9 | include("test_random.jl") 10 | include("test_support_enumeration.jl") 11 | include("test_lemke_howson.jl") 12 | include("test_lrsnash.jl") 13 | include("test_homotopy_continuation.jl") 14 | include("test_fictplay.jl") 15 | include("test_localint.jl") 16 | include("test_brd.jl") 17 | include("test_logitdyn.jl") 18 | 19 | include("generators/runtests.jl") 20 | -------------------------------------------------------------------------------- /test/util.jl: -------------------------------------------------------------------------------- 1 | function isapprox_act_profs(act_prof1, act_prof2) 2 | length(act_prof1) == length(act_prof2) || return false 3 | for (action1, action2) in zip(act_prof1, act_prof2) 4 | isapprox(action1, action2) || return false 5 | end 6 | return true 7 | end 8 | 9 | function isapprox_vecs_act_profs(vec_act_profs1, vec_act_profs2) 10 | length(vec_act_profs1) == length(vec_act_profs2) || return false 11 | for act_prof in vec_act_profs2 12 | any(x -> isapprox_act_profs(x, act_prof), 13 | vec_act_profs1) || return false 14 | end 15 | return true 16 | end 17 | -------------------------------------------------------------------------------- /test/test_logitdyn.jl: -------------------------------------------------------------------------------- 1 | # ------------------------------------- # 2 | # Testing logit response dynamics model # 3 | # ------------------------------------- # 4 | 5 | using Random 6 | 7 | @testset "Testing logitdyn.jl" begin 8 | 9 | payoff_matrix = [4 0; 3 2] 10 | beta = 4.0 11 | g = NormalFormGame(payoff_matrix) 12 | lgdy = LogitDynamics(g, beta) 13 | 14 | seed = 1234 15 | init_actions = (1, 1) 16 | ts_length = 3 # the dynamics are stochastic: the checks below only verify that actions stay in the action space 17 | @test all(play(MersenneTwister(seed), lgdy, init_actions) .<= 2) 18 | @test all(play(lgdy, init_actions) .<= 2) 19 | @test all(time_series(MersenneTwister(seed), lgdy, ts_length, init_actions) .<= 2) 20 | @test all(time_series(lgdy, ts_length, init_actions) .<= 2) 21 | end 22 | 
-------------------------------------------------------------------------------- /docs/make.jl: -------------------------------------------------------------------------------- 1 | using Documenter, GameTheory 2 | 3 | makedocs( 4 | modules = [GameTheory], 5 | format = Documenter.HTML(prettyurls = false), 6 | sitename = "GameTheory.jl", 7 | pages = [ 8 | "Home" => "index.md", 9 | "Library" => [ 10 | "lib/base_types_and_methods.md", 11 | "lib/game_generators.md", 12 | "lib/computing_nash_equilibria.md", 13 | "lib/learning_algorithms.md", 14 | "lib/repeated_games.md", 15 | "lib/util.md", 16 | "lib/index.md" 17 | ] 18 | ], 19 | ) 20 | 21 | deploydocs( 22 | repo = "github.com/QuantEcon/GameTheory.jl.git", 23 | branch = "gh-pages", 24 | target = "build", 25 | deps = nothing, 26 | make = nothing, 27 | ) 28 | -------------------------------------------------------------------------------- /.github/workflows/ci-nighty.yml: -------------------------------------------------------------------------------- 1 | name: CI-nightly 2 | on: 3 | pull_request: 4 | branches: 5 | - main 6 | push: 7 | branches: 8 | - main 9 | tags: '*' 10 | # needed to allow julia-actions/cache to delete old caches that it has created 11 | permissions: 12 | actions: write 13 | contents: read 14 | jobs: 15 | test: 16 | name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ github.event_name }} 17 | runs-on: ${{ matrix.os }} 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | version: 22 | - 'nightly' 23 | os: 24 | - ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v6 27 | - uses: julia-actions/setup-julia@v2 28 | with: 29 | version: ${{ matrix.version }} 30 | - uses: julia-actions/cache@v2 31 | - uses: julia-actions/julia-buildpkg@v1 32 | - uses: julia-actions/julia-runtest@v1 33 | -------------------------------------------------------------------------------- /benchmark/benchmarks.jl: -------------------------------------------------------------------------------- 1 | using GameTheory 2 | using BenchmarkTools 3 | using Random 4 | using LinearAlgebra 5 | 6 | const SUITE = BenchmarkGroup() 7 | 8 | SUITE["support_enumeration"] = BenchmarkGroup(["support_enumeration"]) 9 | SUITE["support_enumeration"]["Float"] = BenchmarkGroup() 10 | seed = 0 11 | rng = MersenneTwister(seed) 12 | ns = [10, 11] 13 | for n in ns 14 | sz = (n, n) 15 | g = random_game(rng, sz) 16 | SUITE["support_enumeration"]["Float"][sz] = 17 | @benchmarkable support_enumeration($g) 18 | end 19 | SUITE["support_enumeration"]["Rational"] = BenchmarkGroup() 20 | T = Rational{Int} 21 | ns = [7, 8] 22 | for n in ns 23 | sz = (n, n) 24 | g = NormalFormGame(Matrix{T}(I, n, n)) 25 | SUITE["support_enumeration"]["Rational"][sz] = 26 | @benchmarkable support_enumeration($g) 27 | end 28 | 29 | 30 | SUITE["repeated_game"] = BenchmarkGroup(["repeated_game"]) 31 | pd_payoffs = [9.0 1.0; 10.0 3.0] 32 | g = NormalFormGame(pd_payoffs) 33 | rpd = RepeatedGame(g, 0.75) 34 | SUITE["repeated_game"]["outerapproximation"] = 35 | @benchmarkable outerapproximation($rpd, nH=64, tol=1e-9) 36 | -------------------------------------------------------------------------------- /benchmark/generators.jl: -------------------------------------------------------------------------------- 1 | using GameTheory.Generators 2 | using BenchmarkTools 3 | using Random 4 | 5 | const SUITE = BenchmarkGroup() 6 | 7 | SUITE["bimatrix_generators"] = BenchmarkGroup(["bimatrix_generators"]) 8 | seed = 0 9 | rng = MersenneTwister(seed) 10 | 11 | # blotto_game 12 | hts = [(3, 62), (4, 21)] # (h, t) 13 | 
rho = 0.5 14 | SUITE["bimatrix_generators"]["blotto_game"] = BenchmarkGroup() 15 | for ht in hts 16 | SUITE["bimatrix_generators"]["blotto_game"][ht] = 17 | @benchmarkable blotto_game($rng, ($ht)..., $rho) 18 | end 19 | 20 | # ranking_game 21 | n = 2000 22 | SUITE["bimatrix_generators"]["ranking_game"] = 23 | @benchmarkable ranking_game($rng, $n) 24 | 25 | # sgc_game 26 | k = 500 27 | SUITE["bimatrix_generators"]["sgc_game"] = @benchmarkable sgc_game($k) 28 | 29 | # tournament_game 30 | n, k = 200, 2 31 | SUITE["bimatrix_generators"]["tournament_game"] = 32 | @benchmarkable tournament_game($rng, $n, $k) 33 | 34 | # unit_vector_game 35 | n = 2000 36 | bools = [true, false] 37 | SUITE["bimatrix_generators"]["unit_vector_game"] = BenchmarkGroup() 38 | for b in bools 39 | SUITE["bimatrix_generators"]["unit_vector_game"][b] = 40 | @benchmarkable unit_vector_game($rng, $n; avoid_pure_nash=$b) 41 | end 42 | -------------------------------------------------------------------------------- /test/test_lrsnash.jl: -------------------------------------------------------------------------------- 1 | @testset "lrsnash.jl" begin 2 | 3 | @testset "test 3 by 2 non-degenerate normal form game(Int)" begin 4 | g = NormalFormGame(Player([3 3; 2 5; 0 6]), 5 | Player([3 2 3; 2 6 1])) 6 | NEs = [([1//1, 0//1, 0//1], [1//1, 0//1]), 7 | ([4//5, 1//5, 0//1], [2//3, 1//3]), 8 | ([0//1, 1//3, 2//3], [1//3, 2//3])] 9 | NEs_computed = @inferred(lrsnash(g)) 10 | 11 | @test sort(NEs_computed) == sort(NEs) 12 | end 13 | 14 | @testset "test 3 by 2 non-degenerate normal form game(Rational)" begin 15 | g = NormalFormGame(Player([3//1 3//1; 2//1 5//1; 0//1 6//1]), 16 | Player([3//1 2//1 3//1; 2//1 6//1 1//1])) 17 | NEs = [([1//1, 0//1, 0//1], [1//1, 0//1]), 18 | ([4//5, 1//5, 0//1], [2//3, 1//3]), 19 | ([0//1, 1//3, 2//3], [1//3, 2//3])] 20 | NEs_computed = @inferred(lrsnash(g)) 21 | 22 | @test sort(NEs_computed) == sort(NEs) 23 | end 24 | 25 | @testset "test 3 by 2 degenerate normal form game(Int)" begin 26 | g = NormalFormGame(Player([1 -1; -1 1; 0 0]), 27 | Player([1 0 0; 0 0 0])) 28 | NEs = [([1, 0, 0], [1, 0]), 29 | ([0, 1, 0], [0, 1]), 30 | ([0, 1, 0], [1//2, 1//2]), 31 | ([0, 0, 1], [1//2, 1//2])] 32 | NEs_computed = @inferred(lrsnash(g)) 33 | 34 | @test sort(NEs_computed) == sort(NEs) 35 | end 36 | 37 | end 38 | -------------------------------------------------------------------------------- /.github/workflows/CompatHelper.yml: -------------------------------------------------------------------------------- 1 | name: CompatHelper 2 | on: 3 | schedule: 4 | - cron: 0 0 * * * 5 | workflow_dispatch: 6 | permissions: 7 | contents: write 8 | pull-requests: write 9 | jobs: 10 | CompatHelper: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Check if Julia is already available in the PATH 14 | id: julia_in_path 15 | run: which julia 16 | continue-on-error: true 17 | - name: Install Julia, but only if it is not already available in the PATH 18 | uses: julia-actions/setup-julia@v2 19 | with: 20 | version: '1' 21 | arch: ${{ runner.arch }} 22 | if: steps.julia_in_path.outcome != 'success' 23 | - name: "Add the General registry via Git" 24 | run: | 25 | import Pkg 26 | ENV["JULIA_PKG_SERVER"] = "" 27 | Pkg.Registry.add("General") 28 | shell: julia --color=yes {0} 29 | - name: "Install CompatHelper" 30 | run: | 31 | import Pkg 32 | name = "CompatHelper" 33 | uuid = "aa819f21-2bde-4658-8897-bab36330d9b7" 34 | version = "3" 35 | Pkg.add(; name, uuid, version) 36 | shell: julia --color=yes {0} 37 | - name: "Run CompatHelper" 38 | 
run: | 39 | import CompatHelper 40 | CompatHelper.main() 41 | shell: julia --color=yes {0} 42 | env: 43 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 44 | COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }} 45 | # COMPATHELPER_PRIV: ${{ secrets.COMPATHELPER_PRIV }} 46 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The GameTheory.jl package is licensed under the BSD-3 License. All rights reserved. 2 | 3 | > Copyright (c) 2016: The QuantEcon team 4 | > 5 | > Redistribution and use in source and binary forms, with or without 6 | > modification, are permitted provided that the following conditions are met: 7 | > 8 | > 1. Redistributions of source code must retain the above copyright notice, this 9 | > list of conditions and the following disclaimer. 10 | > 11 | > 2. Redistributions in binary form must reproduce the above copyright 12 | > notice, this list of conditions and the following disclaimer in the 13 | > documentation and/or other materials provided with the distribution. 14 | > 15 | > 3. Neither the name of the copyright holder nor the names of its 16 | > contributors may be used to endorse or promote products derived from 17 | > this software without specific prior written permission. 18 | > 19 | > THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 20 | > "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 21 | > LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 22 | > A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 23 | > HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 24 | > INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 25 | > BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 26 | > OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 27 | > AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 | > LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 29 | > WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 | > POSSIBILITY OF SUCH DAMAGE. 
31 | -------------------------------------------------------------------------------- /Project.toml: -------------------------------------------------------------------------------- 1 | name = "GameTheory" 2 | uuid = "64a4ffa8-f47c-4a47-8dad-aee7aadc3b51" 3 | repo = "https://github.com/QuantEcon/GameTheory.jl.git" 4 | version = "0.4.0" 5 | 6 | [deps] 7 | CDDLib = "3391f64e-dcde-5f30-b752-e11513730f60" 8 | Clp = "e2554f3b-3117-50c0-817c-e040a3ddf72d" 9 | Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" 10 | Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" 11 | Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" 12 | HomotopyContinuation = "f213a82b-91d6-5c5d-acf7-10f1c761b327" 13 | LRSLib = "262c1cb6-76e2-5873-868b-19ece3183cc5" 14 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 15 | MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee" 16 | Parameters = "d96e819e-fc66-5662-9728-84c9c7592b0a" 17 | Polyhedra = "67491407-f73d-577b-9b50-8179a7c68029" 18 | QuantEcon = "fcd29c91-0bd7-5a09-975d-7ac3f643a60c" 19 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 20 | SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" 21 | StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" 22 | 23 | [compat] 24 | CDDLib = "0.9, 0.10" 25 | Clp = "1.3" 26 | Combinatorics = "1" 27 | Distributions = "0.25" 28 | Graphs = "1" 29 | HomotopyContinuation = "2" 30 | LRSLib = "0.8" 31 | MathOptInterface = "1" 32 | Parameters = "0.12" 33 | Polyhedra = "0.7, 0.8" 34 | QuantEcon = "0.17" 35 | StatsBase = "0.33, 0.34" 36 | julia = "1.10" 37 | 38 | [extras] 39 | Combinatorics = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" 40 | DelimitedFiles = "8bb1440f-4735-579b-a4ab-409b98df4dab" 41 | LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" 42 | Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" 43 | Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" 44 | Unicode = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" 45 | 46 | [targets] 47 | test = ["Combinatorics", "DelimitedFiles", "LinearAlgebra", "Test", "Random", "Unicode"] 48 | -------------------------------------------------------------------------------- /src/lrsnash.jl: -------------------------------------------------------------------------------- 1 | using LRSLib: buildrep, solve_nash 2 | 3 | """ 4 | lrsnash(g) 5 | 6 | Compute in exact arithmetic all extreme mixed-action Nash equilibria of 7 | a 2-player normal form game with Integer or Rational payoffs. This 8 | function calls the Nash equilibrium computation routine in `lrslib` 9 | (through its Julia wrapper `LRSLib.jl`) which is based on the 10 | "lexicographic reverse search" vertex enumeration algorithm. 11 | 12 | # Arguments 13 | 14 | - `g::NormalFormGame{2,<:RatOrInt}`: 2-player NormalFormGame instance 15 | with Integer or Rational payoffs. 16 | 17 | # Returns 18 | 19 | - `::Vector{NTuple{2,Vector{Rational{BigInt}}}}`: Vector of mixed-action 20 | Nash equilibria. 
21 | 22 | # Examples 23 | 24 | A degenerate game example: 25 | 26 | ```julia 27 | julia> player1 = Player([3 3; 2 5; 0 6]); 28 | 29 | julia> player2 = Player([3 2 3; 3 6 1]); 30 | 31 | julia> g = NormalFormGame(player1, player2); 32 | 33 | julia> println(g) 34 | 3×2 NormalFormGame{2, Int64}: 35 | [3, 3] [3, 3] 36 | [2, 2] [5, 6] 37 | [0, 3] [6, 1] 38 | 39 | julia> lrsnash(g) 40 | 3-element Vector{Tuple{Vector{Rational{BigInt}}, Vector{Rational{BigInt}}}}: 41 | ([1//1, 0//1, 0//1], [1//1, 0//1]) 42 | ([1//1, 0//1, 0//1], [2//3, 1//3]) 43 | ([0//1, 1//3, 2//3], [1//3, 2//3]) 44 | ``` 45 | 46 | The set of Nash equilibria of this degenerate game consists of an 47 | isolated equilibrium, the third output, and a non-singleton equilibrium 48 | component, the extreme points of which are given by the first 49 | two outputs. 50 | 51 | # References 52 | 53 | - D. Avis, G. Rosenberg, R. Savani, and B. von Stengel, "Enumeration of 54 | Nash Equilibria for Two-Player Games," Economic Theory (2010), 9-37. 55 | """ 56 | function lrsnash(g::NormalFormGame{2,<:RatOrInt}) 57 | hrs = [buildrep(i, g.players[3-i].payoff_array) for i in 1:2] 58 | NEs = solve_nash(hrs...) 59 | return NEs 60 | end 61 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | pull_request: 4 | branches: 5 | - main 6 | push: 7 | branches: 8 | - main 9 | tags: '*' 10 | # needed to allow julia-actions/cache to delete old caches that it has created 11 | permissions: 12 | actions: write 13 | contents: read 14 | jobs: 15 | test: 16 | name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ github.event_name }} 17 | runs-on: ${{ matrix.os }} 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | version: 22 | - '1' # Leave this line unchanged. '1' will automatically expand to the latest stable 1.x release of Julia. 23 | os: [ubuntu-latest, windows-latest, macOS-latest] 24 | steps: 25 | - uses: actions/checkout@v6 26 | - uses: julia-actions/setup-julia@v2 27 | with: 28 | version: ${{ matrix.version }} 29 | - uses: julia-actions/cache@v2 30 | - uses: julia-actions/julia-buildpkg@v1 31 | - uses: julia-actions/julia-runtest@v1 32 | - uses: julia-actions/julia-processcoverage@v1 33 | - uses: codecov/codecov-action@v5 34 | with: 35 | files: lcov.info 36 | token: ${{ secrets.CODECOV_TOKEN }} 37 | fail_ci_if_error: false 38 | docs: 39 | name: Documentation 40 | runs-on: ubuntu-latest 41 | steps: 42 | - uses: actions/checkout@v6 43 | - uses: julia-actions/setup-julia@v2 44 | with: 45 | version: '1' 46 | - run: | 47 | julia --project=docs -e ' 48 | using Pkg 49 | Pkg.develop(PackageSpec(path=pwd())) 50 | Pkg.instantiate()' 51 | - run: | 52 | julia --project=docs -e ' 53 | using Documenter: doctest 54 | using GameTheory 55 | doctest(GameTheory)' 56 | - run: julia --project=docs docs/make.jl 57 | env: 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | DOCUMENTER_KEY: ${{ secrets.DOCUMENTER_KEY }} 60 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # GameTheory.jl Documentation 2 | 3 | This directory contains the documentation for GameTheory.jl, built using [Documenter.jl](https://documenter.juliadocs.org/). 
4 | 5 | ## Documentation Structure 6 | 7 | The documentation uses a static structure with manually maintained pages: 8 | 9 | - `src/index.md` - Main homepage with installation, usage examples, and library outline 10 | - `src/lib/` - Library documentation pages organized by topic 11 | 12 | ## Local Build Instructions 13 | 14 | To build the documentation locally from the repository root: 15 | 16 | ```bash 17 | julia --project=docs -e 'using Pkg; Pkg.instantiate(); include("docs/make.jl")' 18 | ``` 19 | 20 | The generated documentation will be available in `docs/build/index.html`. 21 | 22 | ## Contributing to Documentation 23 | 24 | ### Adding New Content 25 | 26 | To add documentation for new functionality: 27 | 28 | 1. **For new library functions/types**: Add docstrings to the source code and ensure they appear in the appropriate `docs/src/lib/*.md` file by including the source file in the `@autodocs` block. 29 | 30 | 2. **For new major features**: Create a new page in `docs/src/lib/` and add it to the `pages` array in `make.jl`. 31 | 32 | 3. **For examples or tutorials**: Add to the main `docs/src/index.md` or create dedicated pages as needed. 33 | 34 | ### Editing Existing Pages 35 | 36 | - **Homepage**: Edit `docs/src/index.md` for installation instructions, basic usage, and overview 37 | - **Library pages**: Edit files in `docs/src/lib/` to modify section organization or add manual content 38 | - **Structure**: Modify the `pages` array in `make.jl` to change page ordering or add new sections 39 | 40 | ### Documentation Standards 41 | 42 | - Use `@autodocs` blocks to automatically include docstrings from source files (see the sketch below) 43 | - Maintain consistent section structure across library pages (Exported/Internal) 44 | - Include practical examples in docstrings where helpful 45 | - Keep the library outline in `index.md` synchronized with actual pages
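For example, a sketch of a new library page following the Exported/Internal pattern (the page `lib/new_feature.md` and source file `new_feature.jl` below are illustrative placeholders, not actual files in this repository; the page would also be added to the `pages` array in `make.jl`):

````markdown
# [New Feature](@id new_feature)

## Exported
```@autodocs
Modules = [GameTheory]
Pages = ["new_feature.jl"]
Private = false
```

## Internal
```@autodocs
Modules = [GameTheory]
Pages = ["new_feature.jl"]
Public = false
```
````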
-------------------------------------------------------------------------------- /docs/src/index.md: -------------------------------------------------------------------------------- 1 | # GameTheory.jl 2 | 3 | [*GameTheory.jl*](https://github.com/QuantEcon/GameTheory.jl) is a [Julia](http://www.julialang.org) package providing algorithms and data structures for game theory. 4 | 5 | ## Installation 6 | 7 | To install the package, enter the Pkg mode by pressing `]` and run 8 | 9 | ```julia 10 | add GameTheory 11 | ``` 12 | 13 | ## Usage 14 | 15 | Once installed, the `GameTheory` package can be used by typing 16 | 17 | ```@example 1 18 | using GameTheory 19 | ``` 20 | 21 | The base type `Player` can be created by passing a payoff matrix: 22 | 23 | ```@example 1 24 | player1 = Player([3 1; 0 2]) 25 | ``` 26 | 27 | A 2-player `NormalFormGame` can be created either by passing `Player` instances, 28 | 29 | ```@example 1 30 | player2 = Player([2 0; 1 3]) 31 | g = NormalFormGame((player1, player2)) 32 | print(g) 33 | ``` 34 | 35 | or by passing a payoff matrix directly: 36 | 37 | ```@example 1 38 | payoff_bimatrix = Array{Int}(undef, 2, 2, 2) 39 | payoff_bimatrix[1, 1, :] = [3, 2] 40 | payoff_bimatrix[1, 2, :] = [1, 1] 41 | payoff_bimatrix[2, 1, :] = [0, 0] 42 | payoff_bimatrix[2, 2, :] = [2, 3] 43 | g = NormalFormGame(payoff_bimatrix) 44 | print(g) 45 | ``` 46 | 47 | After constructing a `NormalFormGame`, we can compute its Nash equilibria with the methods provided by `GameTheory`. For example, `pure_nash` finds all pure action Nash equilibria by brute-force enumeration: 48 | 49 | ```@example 1 50 | pure_nash(g) 51 | ``` 52 | 53 | Please see the [notebooks](@ref notebooks) on QuantEcon for more details. 54 | 55 | ## [Notebooks](@id notebooks) 56 | 57 | Some notebooks for demonstration are available: 58 | 59 | * [Tools for Game Theory](https://nbviewer.jupyter.org/github/QuantEcon/game-theory-notebooks/blob/main/game_theory_jl.ipynb) 60 | * [A Recursive Formulation of Repeated Games](https://nbviewer.jupyter.org/github/QuantEcon/QuantEcon.notebooks/blob/main/recursive_repeated_games.ipynb) 61 | 62 | ## Library Outline 63 | 64 | * [Base Types and Methods](@ref base_types_and_methods) 65 | 66 | * [Game Generators](@ref game_generators) 67 | 68 | * [Computing Nash Equilibria](@ref computing_nash_equilibria) 69 | 70 | * [Learning Algorithms](@ref learning_algorithms) 71 | 72 | * [Repeated Games](@ref repeated_games) 73 | 74 | * [Utilities](@ref util) 75 | 76 | -------------------------------------------------------------------------------- /src/pure_nash.jl: -------------------------------------------------------------------------------- 1 | """ 2 | pure_nash(nfg; ntofind=prod(nfg.nums_actions), tol=1e-8) 3 | 4 | Find all pure action Nash equilibria of a normal form 5 | game. Return an empty vector if there is no pure 6 | action Nash equilibrium. 7 | 8 | Currently uses a brute force algorithm; a more efficient 9 | implementation may replace it in the future. 10 | 11 | # Arguments 12 | 13 | - `nfg::NormalFormGame`: Instance of N-player NormalFormGame. 14 | - `ntofind`: Maximal number of pure action Nash equilibria to be 15 | found; defaults to `prod(nfg.nums_actions)`, i.e., find all. 16 | - `tol::Real` : Tolerance to be used to determine best response actions. 17 | 18 | # Returns 19 | - `ne::Vector{NTuple{N,Int}}`: Vector of pure action Nash equilibria.
20 | 21 | # Examples 22 | 23 | A 4-player unanimity game example: 24 | 25 | ```julia 26 | julia> g = NormalFormGame((2, 2, 2, 2)); 27 | 28 | julia> g[1, 1, 1, 1] = 3, 3, 3, 3; 29 | 30 | julia> g[2, 2, 2, 2] = 4, 4, 4, 4; 31 | 32 | julia> println(g) 33 | 2×2×2×2 NormalFormGame{4, Float64}: 34 | [:, :, 1, 1] = 35 | [3.0, 3.0, 3.0, 3.0] [0.0, 0.0, 0.0, 0.0] 36 | [0.0, 0.0, 0.0, 0.0] [0.0, 0.0, 0.0, 0.0] 37 | 38 | [:, :, 2, 1] = 39 | [0.0, 0.0, 0.0, 0.0] [0.0, 0.0, 0.0, 0.0] 40 | [0.0, 0.0, 0.0, 0.0] [0.0, 0.0, 0.0, 0.0] 41 | 42 | [:, :, 1, 2] = 43 | [0.0, 0.0, 0.0, 0.0] [0.0, 0.0, 0.0, 0.0] 44 | [0.0, 0.0, 0.0, 0.0] [0.0, 0.0, 0.0, 0.0] 45 | 46 | [:, :, 2, 2] = 47 | [0.0, 0.0, 0.0, 0.0] [0.0, 0.0, 0.0, 0.0] 48 | [0.0, 0.0, 0.0, 0.0] [4.0, 4.0, 4.0, 4.0] 49 | 50 | julia> pure_nash(g) 51 | 8-element Vector{NTuple{4, Int64}}: 52 | (1, 1, 1, 1) 53 | (2, 2, 1, 1) 54 | (2, 1, 2, 1) 55 | (1, 2, 2, 1) 56 | (2, 1, 1, 2) 57 | (1, 2, 1, 2) 58 | (1, 1, 2, 2) 59 | (2, 2, 2, 2) 60 | ``` 61 | """ 62 | function pure_nash(nfg::NormalFormGame; ntofind=prod(nfg.nums_actions), 63 | tol::Real=1e-8) 64 | # Get number of players and their actions 65 | np = num_players(nfg) 66 | na = nfg.nums_actions 67 | 68 | # Holder for all NE 69 | ne = Array{PureActionProfile{np,Int}}(undef, 0) 70 | 71 | # Create counter for how many to find 72 | nfound = 0 73 | 74 | for _a in CartesianIndices(na) 75 | if is_nash(nfg, _a.I, tol=tol) 76 | push!(ne, _a.I) 77 | nfound = nfound + 1 78 | end 79 | nfound >= ntofind && break 80 | end 81 | 82 | return ne 83 | end 84 | -------------------------------------------------------------------------------- /test/test_homotopy_continuation.jl: -------------------------------------------------------------------------------- 1 | @testset "homotopy_continuation.jl" begin 2 | 3 | @testset "3x2 game" begin 4 | g = NormalFormGame(Player([3 3; 2 5; 0 6]), 5 | Player([3 2 3; 2 6 1])) 6 | NEs = [([1//1, 0//1, 0//1], [1//1, 0//1]), 7 | ([4//5, 1//5, 0//1], [2//3, 1//3]), 8 | ([0//1, 1//3, 2//3], [1//3, 2//3])] 9 | 10 | NEs_computed = @inferred hc_solve(g, show_progress=false) 11 | @test isapprox_vecs_act_profs(NEs_computed, NEs) 12 | end 13 | 14 | @testset "2x2x2 game from McKelvey and McLennan" begin 15 | g = NormalFormGame((2, 2, 2)) 16 | g[1, 1, 1] = 9, 8, 12 17 | g[2, 2, 1] = 9, 8, 2 18 | g[1, 2, 2] = 3, 4, 6 19 | g[2, 1, 2] = 3, 4, 4 20 | NEs = [ 21 | ([1, 0], [1, 0], [1, 0]), 22 | ([0, 1], [0, 1], [1, 0]), 23 | ([1, 0], [0, 1], [0, 1]), 24 | ([0, 1], [1, 0], [0, 1]), 25 | ([0//1, 1//1], [1//3, 2//3], [1//3, 2//3]), 26 | ([1//4, 3//4], [1//1, 0//1], [1//4, 3//4]), 27 | ([1//2, 1//2], [1//2, 1//2], [1//1, 0//1]), 28 | ([1//4, 3//4], [1//2, 1//2], [1//3, 2//3]), 29 | ([1//2, 1//2], [1//3, 2//3], [1//4, 3//4]) 30 | ] 31 | 32 | NEs_computed = @inferred hc_solve(g, show_progress=false) 33 | @test isapprox_vecs_act_profs(NEs_computed, NEs) 34 | 35 | ntofind = 1 36 | NEs_computed = 37 | @inferred hc_solve(g, ntofind=ntofind, show_progress=false) 38 | @test length(NEs_computed) == ntofind 39 | for i in 1:ntofind 40 | @test is_nash(g, NEs_computed[i]) 41 | end 42 | end 43 | 44 | @testset "2x2x2 game from Nau, Canovas, and Hansen" begin 45 | payoff_profiles = [[3, 0, 2], 46 | [0, 1, 0], 47 | [0, 2, 0], 48 | [1, 0, 0], 49 | [1, 0, 0], 50 | [0, 3, 0], 51 | [0, 1, 0], 52 | [2, 0, 3]] 53 | g = NormalFormGame(reshape(payoff_profiles, (2, 2, 2))) 54 | q = (-13 + sqrt(601)) / 24 55 | p = (9q - 1) / (7q + 2) 56 | r = (-3q + 2) / (q + 1) 57 | NEs = [([p, 1-p], [q, 1-q], [r, 1-r])] 58 | 59 | NEs_computed = @inferred 
hc_solve(g, show_progress=false) 60 | @test isapprox_vecs_act_profs(NEs_computed, NEs) 61 | end 62 | 63 | @testset "1-player game" begin 64 | g = NormalFormGame([[1], [2], [3]]) 65 | @test_throws ArgumentError hc_solve(g) 66 | @test_throws ArgumentError hc_solve(g, ntofind=1) 67 | end 68 | 69 | end 70 | -------------------------------------------------------------------------------- /test/test_brd.jl: -------------------------------------------------------------------------------- 1 | # ------------------------------- # 2 | # Testing best response dynamics # 3 | # ------------------------------- # 4 | 5 | using Random 6 | 7 | @testset "Testing brd.jl" begin 8 | 9 | payoff_matrix = [4 0; 3 2] 10 | N = 4 11 | ts_length = 3 12 | init_action_dist = [4, 0] 13 | 14 | @testset "Testing best response dynamics model" begin 15 | 16 | brd = BRD(payoff_matrix, N) 17 | @test @inferred(play(brd, init_action_dist, num_reps=ts_length)) == 18 | [4, 0] 19 | @test @inferred(time_series(brd, ts_length, init_action_dist)) == 20 | [4 4 4; 0 0 0] 21 | @test all(time_series(brd, ts_length) .<= N) 22 | end 23 | 24 | @testset "Testing KMR model" begin 25 | 26 | epsilon = 0.1 27 | seed = 1234 28 | kmr = KMR(payoff_matrix, N, epsilon) 29 | @test all(play(MersenneTwister(seed), kmr, init_action_dist, 30 | num_reps=ts_length) .<= N) 31 | @test all(time_series(MersenneTwister(seed), kmr, ts_length, 32 | init_action_dist) .<= N) 33 | @test all(time_series(MersenneTwister(seed), kmr, ts_length) .<= N) 34 | end 35 | 36 | @testset "Testing sampling best response dynamics model" begin 37 | 38 | k = 2 39 | seed = 1234 40 | sbrd = SamplingBRD(payoff_matrix, N, k) 41 | @test all(play(MersenneTwister(seed), sbrd, init_action_dist, 42 | num_reps=ts_length) .<= N) 43 | @test all(time_series(MersenneTwister(seed), sbrd, ts_length, 44 | init_action_dist) .<= N) 45 | @test all(time_series(MersenneTwister(seed), sbrd, ts_length) .<= N) 46 | end 47 | 48 | @testset "Testing argument errors" begin 49 | 50 | @testset "Non square payoff matrix" begin 51 | non_square_pay = ones((2, 3)) 52 | 53 | @test_throws ArgumentError brd = BRD(non_square_pay, N) 54 | @test_throws ArgumentError kmr = KMR(non_square_pay, N, 0.1) 55 | @test_throws ArgumentError sbrd = SamplingBRD(non_square_pay, N, 2) 56 | end 57 | 58 | @testset "Invalid initial action distribution" begin 59 | invalid_action_dist_1 = [4, 0, 0] 60 | invalid_action_dist_2 = [3, 0] 61 | brd = BRD(payoff_matrix, N) 62 | 63 | @test_throws ArgumentError x = play(brd, invalid_action_dist_1) 64 | @test_throws ArgumentError x = play(brd, invalid_action_dist_2) 65 | @test_throws ArgumentError x = time_series(brd, 3, invalid_action_dist_1) 66 | @test_throws ArgumentError x = time_series(brd, 3, invalid_action_dist_2) 67 | end 68 | end 69 | end -------------------------------------------------------------------------------- /test/test_pure_nash.jl: -------------------------------------------------------------------------------- 1 | using Combinatorics 2 | 3 | 4 | @testset "Testing Pure Nash Equilibrium Routines" begin 5 | 6 | # Pure Action Nash equilibrium 7 | @testset "2x2 Game with 1 Pure Action Nash equilibrium" begin 8 | A = [9.0 1.0 9 | 10.0 3.0] 10 | 11 | nfg = NormalFormGame(A) 12 | ne = pure_nash(nfg) 13 | 14 | @test ne == [(2, 2)] 15 | end 16 | 17 | @testset "Matching Pennies Game with 0 Pure Action Nash equilibrium" begin 18 | MP = [1.0 -1.0 19 | -1.0 1.0] 20 | p1 = Player(MP) 21 | p2 = Player(-MP) 22 | 23 | g_MP = NormalFormGame(p1, p2) 24 | ne = pure_nash(g_MP) 25 | 26 | @test ne == [] 27 
| end 28 | 29 | @testset "Coordination game with 2 Pure Action Nash equilibria" begin 30 | Coo = [4.0 0.0 31 | 3.0 2.0] 32 | 33 | g_Coo = NormalFormGame(Coo) 34 | ne = pure_nash(g_Coo) 35 | 36 | @test sort(ne) == sort([(1,1); (2,2)]) 37 | end 38 | 39 | @testset "Coordination game with 2 Pure Action Nash equilibria but only find 1" begin 40 | Coo = [4.0 0.0 41 | 3.0 2.0] 42 | 43 | g_Coo = NormalFormGame(Coo) 44 | ne = pure_nash(g_Coo; ntofind=1) 45 | 46 | @test length(ne) == 1 47 | end 48 | 49 | @testset "Unanimity Game with more than two players" begin 50 | N = 4 51 | a, b = 1, 2 52 | g_Unanimity = NormalFormGame(tuple(fill(2, N)...)) 53 | g_Unanimity[fill(1, N)...] = fill(a, N) 54 | g_Unanimity[fill(2, N)...] = fill(b, N) 55 | 56 | Unanimity_NE = [tuple(fill(1, N)...)] 57 | for k in 2:N-2 58 | for ind in combinations(1:N, k) 59 | a = fill(1, N) 60 | a[ind] .= 2 61 | push!(Unanimity_NE, tuple(a...)) 62 | end 63 | end 64 | push!(Unanimity_NE, tuple(fill(2, N)...)) 65 | 66 | ne = pure_nash(g_Unanimity) 67 | 68 | @test sort(ne) == sort(Unanimity_NE) 69 | end 70 | 71 | @testset "Tolerance" begin 72 | epsilon = 1e-08 73 | 74 | g = NormalFormGame((2, 2)) 75 | g[1, 1] = [1, 1] 76 | g[1, 2] = [-2, 1 + epsilon] 77 | g[2, 1] = [1 + epsilon, -2] 78 | g[2, 2] = [0, 0]; 79 | 80 | NEs = [[(2, 2)]] 81 | epsilon_NEs = [[(1, 1); (2, 2)]] 82 | 83 | for (tol, answer) in zip([0 epsilon], [NEs epsilon_NEs]) 84 | @test sort(pure_nash(g, tol=tol)) == sort(answer) 85 | end 86 | end 87 | 88 | @testset "Trivial game with 1 player" begin 89 | n = 3 90 | g1 = NormalFormGame(Player(collect(1:n))) 91 | @test pure_nash(g1) == [(n,)] 92 | @test sort(pure_nash(g1, tol=1.)) == [(n-1,), (n,)] 93 | end 94 | 95 | end 96 | -------------------------------------------------------------------------------- /benchmark/README.md: -------------------------------------------------------------------------------- 1 | # GameTheory.jl Benchmarks 2 | 3 | ## Running the benchmark suite 4 | 5 | Use [PkgBenchmark.jl](https://github.com/JuliaCI/PkgBenchmark.jl): 6 | 7 | ```jl 8 | using PkgBenchmark 9 | ``` 10 | 11 | As an example, let us run the benchmarks (defined in `benchmarks.jl`) and compare the performance changes for the two commits 12 | [`32e6090`](https://github.com/QuantEcon/GameTheory.jl/commit/32e60906bdf34f39bc535fc1235e5da5e261d1c4) (target) 13 | and 14 | [`b5031c3`](https://github.com/QuantEcon/GameTheory.jl/commit/b5031c3cf24e41b124e91f4881c9a543eed19ecc) (baseline): 15 | 16 | ```jl 17 | jud = judge("GameTheory", "32e6090", "b5031c3") 18 | ``` 19 | 20 | To show the results: 21 | 22 | ```jl 23 | julia> show(PkgBenchmark.benchmarkgroup(jud)) 24 | 2-element BenchmarkTools.BenchmarkGroup: 25 | tags: [] 26 | "support_enumeration" => 2-element BenchmarkTools.BenchmarkGroup: 27 | tags: ["support_enumeration"] 28 | "Rational" => 2-element BenchmarkTools.BenchmarkGroup: 29 | tags: [] 30 | (8, 8) => TrialJudgement(+5.42% => regression) 31 | (7, 7) => TrialJudgement(+5.27% => regression) 32 | "Float" => 2-element BenchmarkTools.BenchmarkGroup: 33 | tags: [] 34 | (10, 10) => TrialJudgement(-1.00% => invariant) 35 | (11, 11) => TrialJudgement(-0.25% => invariant) 36 | ... 
37 | ``` 38 | 39 | To show the timing estimates for the baseline: 40 | 41 | ```jl 42 | julia> show(jud.baseline_results.benchmarkgroup) 43 | 2-element BenchmarkTools.BenchmarkGroup: 44 | tags: [] 45 | "support_enumeration" => 2-element BenchmarkTools.BenchmarkGroup: 46 | tags: ["support_enumeration"] 47 | "Rational" => 2-element BenchmarkTools.BenchmarkGroup: 48 | tags: [] 49 | (8, 8) => Trial(128.530 ms) 50 | (7, 7) => Trial(29.616 ms) 51 | "Float" => 2-element BenchmarkTools.BenchmarkGroup: 52 | tags: [] 53 | (10, 10) => Trial(96.379 ms) 54 | (11, 11) => Trial(410.365 ms) 55 | ... 56 | ``` 57 | 58 | and for the target: 59 | 60 | ```jl 61 | julia> show(jud.target_results.benchmarkgroup) 62 | 2-element BenchmarkTools.BenchmarkGroup: 63 | tags: [] 64 | "support_enumeration" => 2-element BenchmarkTools.BenchmarkGroup: 65 | tags: ["support_enumeration"] 66 | "Rational" => 2-element BenchmarkTools.BenchmarkGroup: 67 | tags: [] 68 | (8, 8) => Trial(135.496 ms) 69 | (7, 7) => Trial(31.177 ms) 70 | "Float" => 2-element BenchmarkTools.BenchmarkGroup: 71 | tags: [] 72 | (10, 10) => Trial(95.419 ms) 73 | (11, 11) => Trial(409.337 ms) 74 | ... 75 | ``` 76 | 77 | To run a script file other than `benchmarks.jl`: 78 | 79 | ```jl 80 | using GameTheory 81 | results = benchmarkpkg( 82 | "GameTheory", 83 | script="$(dirname(pathof(GameTheory)))/../benchmark/generators.jl" 84 | ) 85 | ``` 86 | 87 | For more usage information, see the [PkgBenchmark documentation](https://juliaci.github.io/PkgBenchmark.jl/stable). 88 | -------------------------------------------------------------------------------- /test/test_localint.jl: -------------------------------------------------------------------------------- 1 | # ------------------------- # 2 | # Testing local interaction # 3 | # ------------------------- # 4 | 5 | 6 | @testset "Testing localint.jl" begin 7 | payoff_matrix = [4 0; 2 3] 8 | adj_matrix = [0 1 3; 2 0 1; 3 2 0] 9 | init_actions = (1, 1, 2) 10 | 11 | @testset "LocalInteraction from NormalFormGame" begin 12 | game = NormalFormGame(payoff_matrix) 13 | li = LocalInteraction(game, adj_matrix) 14 | 15 | @test li.players == ntuple(i -> Player(payoff_matrix), 3) 16 | @test li.num_actions == 2 17 | end 18 | 19 | @testset "Testing local interaction with simultaneous revision" begin 20 | li = LocalInteraction(payoff_matrix, adj_matrix) 21 | 22 | @test @inferred(play(li, init_actions) == (2, 1, 1)) 23 | @test @inferred(play(li, init_actions, num_reps=2) == (1, 2, 2)) 24 | @test @inferred(time_series(li, 3, init_actions) == [1 2 1; 1 1 2; 2 1 2]) 25 | @test all(time_series(li, 3) .<= li.num_actions) 26 | end 27 | 28 | @testset "Testing local interaction with asynchronous revision" begin 29 | seed = 1234 30 | li = LocalInteraction(payoff_matrix, adj_matrix, AsynchronousRevision()) 31 | 32 | @test @inferred(play(li, init_actions, 1) == (2, 1, 2)) 33 | @test @inferred(play(li, init_actions, [1,2]) == (2, 1, 2)) 34 | @test @inferred(play(li, init_actions, [1,2], num_reps=2) == (2, 2, 2)) 35 | @test all(time_series(MersenneTwister(seed), li, 3, init_actions) 36 | .<= li.num_actions) 37 | @test all(time_series(MersenneTwister(seed), li, 3, [1,2]) 38 | .<= li.num_actions) 39 | @test all(time_series(MersenneTwister(seed), li, 3) 40 | .<= li.num_actions) 41 | @test all(time_series(li, 3, init_actions) .<= li.num_actions) 42 | @test all(time_series(li, 3, [1,2]) .<= li.num_actions) 43 | @test @inferred(time_series(li, 3, init_actions, [1,2]) == 44 | [1 2 2; 1 1 2; 2 2 2]) 45 | @test_throws ArgumentError series = 
time_series(li, 5, init_actions, [1, 1, 1]) 46 | end 47 | 48 | @testset "Testing invalid local interaction instance" begin 49 | 50 | @testset "Non square adjacency matrix" begin 51 | non_square_adj = ones((2, 3)) 52 | game = NormalFormGame(payoff_matrix) 53 | 54 | @test_throws ArgumentError li = LocalInteraction(game, non_square_adj) 55 | @test_throws ArgumentError li = LocalInteraction(payoff_matrix, non_square_adj) 56 | end 57 | 58 | @testset "Non square payoff matrix" begin 59 | non_square_pay = ones((2, 3)) 60 | game = NormalFormGame((Player(ones((2, 3))), Player(ones((3, 2))))) 61 | 62 | @test_throws ArgumentError li = LocalInteraction(game, adj_matrix) 63 | @test_throws ArgumentError li = LocalInteraction(non_square_pay, adj_matrix) 64 | end 65 | end 66 | end -------------------------------------------------------------------------------- /test/test_support_enumeration.jl: -------------------------------------------------------------------------------- 1 | @testset "Testing Support Enumeration" begin 2 | 3 | function NEs_approx_equal(NEs1::Vector{NTuple{2,Vector{T1}}}, 4 | NEs2::Vector{NTuple{2,Vector{T2}}}) where {T1,T2} 5 | @test length(NEs1) == length(NEs2) 6 | @test T1 == T2 7 | for (actions1, actions2) in zip(NEs1, NEs2) 8 | for (action1, action2) in zip(actions1, actions2) 9 | @test action1 ≈ action2 10 | end 11 | end 12 | end 13 | 14 | @testset "test 3 by 2 non-degenerate normal form game(Float)" begin 15 | g = NormalFormGame(Player([3.0 3.0; 2.0 5.0; 0.0 6.0]), 16 | Player([3.0 2.0 3.0; 2.0 6.0 1.0])) 17 | NEs = [([1.0, 0.0, 0.0], [1.0, 0.0]), 18 | ([0.8, 0.2, 0.0], [2/3, 1/3]), 19 | ([0.0, 1/3, 2/3], [1/3, 2/3])] 20 | NEs_computed = @inferred(support_enumeration(g)) 21 | 22 | NEs_approx_equal(NEs_computed, NEs) 23 | end 24 | 25 | @testset "test 3 by 2 non-degenerate normal form game(Int)" begin 26 | g = NormalFormGame(Player([3 3; 2 5; 0 6]), 27 | Player([3 2 3; 2 6 1])) 28 | NEs = [([1.0, 0.0, 0.0], [1.0, 0.0]), 29 | ([0.8, 0.2, 0.0], [2/3, 1/3]), 30 | ([0.0, 1/3, 2/3], [1/3, 2/3])] 31 | NEs_computed = @inferred(support_enumeration(g)) 32 | 33 | NEs_approx_equal(NEs_computed, NEs) 34 | end 35 | 36 | @testset "test 3 by 2 non-degenerate normal form game(Rational)" begin 37 | g = NormalFormGame(Player([3//1 3//1; 2//1 5//1; 0//1 6//1]), 38 | Player([3//1 2//1 3//1; 2//1 6//1 1//1])) 39 | NEs = [([1//1, 0//1, 0//1], [1//1, 0//1]), 40 | ([4//5, 1//5, 0//1], [2//3, 1//3]), 41 | ([0//1, 1//3, 2//3], [1//3, 2//3])] 42 | NEs_computed = @inferred(support_enumeration(g)) 43 | 44 | NEs_approx_equal(NEs_computed, NEs) 45 | end 46 | 47 | @testset "test 3 by 2 degenerate normal form game(Float)" begin 48 | g = NormalFormGame(Player([1.0 -1.0; -1.0 1.0; 0.0 0.0]), 49 | Player([1.0 0.0 0.0; 0.0 0.0 0.0])) 50 | NEs = [([1.0, 0.0, 0.0], [1.0, 0.0]), 51 | ([0.0, 1.0, 0.0], [0.0, 1.0])] 52 | NEs_computed = @inferred(support_enumeration(g)) 53 | 54 | NEs_approx_equal(NEs_computed, NEs) 55 | end 56 | 57 | @testset "test 3 by 2 degenerate normal form game(Int)" begin 58 | g = NormalFormGame(Player([1 -1; -1 1; 0 0]), 59 | Player([1 0 0; 0 0 0])) 60 | NEs = [([1.0, 0.0, 0.0], [1.0, 0.0]), 61 | ([0.0, 1.0, 0.0], [0.0, 1.0])] 62 | NEs_computed = @inferred(support_enumeration(g)) 63 | 64 | NEs_approx_equal(NEs_computed, NEs) 65 | end 66 | 67 | @testset "test 3 by 2 degenerate normal form game(Rational)" begin 68 | g = NormalFormGame(Player([1//1 -1//1; -1//1 1//1; 0//1 0//1]), 69 | Player([1//1 0//1 0//1; 0//1 0//1 0//1])) 70 | NEs = [([1//1, 0//1, 0//1], [1//1, 0//1]), 71 | ([0//1, 1//1, 0//1], 
[0//1, 1//1])] 72 | NEs_computed = @inferred(support_enumeration(g)) 73 | 74 | NEs_approx_equal(NEs_computed, NEs) 75 | end 76 | 77 | end 78 | -------------------------------------------------------------------------------- /test/test_random.jl: -------------------------------------------------------------------------------- 1 | using Random 2 | 3 | @testset "Testing Random Games Generating" begin 4 | 5 | @testset "test random game" begin 6 | nums_actions = (2, 3, 4) 7 | g = @inferred random_game(nums_actions) 8 | @test g.nums_actions == nums_actions 9 | 10 | nums_actions = (4, 3) 11 | N = length(nums_actions) 12 | seed = 1234 13 | rngs = [MersenneTwister(seed) for i in 1:2] 14 | gs_Float = [random_game(rng, nums_actions) for rng in rngs] 15 | gs_Range = [random_game(rng, 0:10, nums_actions) for rng in rngs] 16 | for gs in [gs_Float, gs_Range] 17 | for i in 1:N 18 | @test gs[1].players[i].payoff_array == 19 | gs[2].players[i].payoff_array 20 | end 21 | end 22 | end 23 | 24 | @testset "test covariance game" begin 25 | nums_actions = (2, 3, 4) 26 | N = length(nums_actions) 27 | 28 | rho = 0.5 29 | g = covariance_game(nums_actions, rho) 30 | @test g.nums_actions == nums_actions 31 | 32 | rho = 1 33 | g = covariance_game(nums_actions, rho) 34 | for a in CartesianIndices(nums_actions) 35 | payoff_profile = g[a] 36 | for i in 1:(N-1) 37 | @test payoff_profile[i] ≈ payoff_profile[end] 38 | end 39 | end 40 | 41 | rho = -1/(N-1) 42 | g = covariance_game(nums_actions, rho) 43 | for a in CartesianIndices(nums_actions) 44 | payoff_profile = g[a] 45 | @test sum(payoff_profile) ≈ 0 atol=1e-10 46 | end 47 | 48 | end 49 | 50 | @testset "test random game value error" begin 51 | nums_actions = () 52 | 53 | @test_throws ArgumentError random_game(nums_actions) 54 | 55 | end 56 | 57 | @testset "test covariance game value error" begin 58 | nums_actions = () #empty 59 | 60 | @test_throws ArgumentError covariance_game(nums_actions, 0.5) 61 | 62 | nums_actions = (2,) #length one 63 | 64 | @test_throws ArgumentError covariance_game(nums_actions, 0.5) 65 | 66 | nums_actions = (2, 3, 4) 67 | rho = 1.1 # > 1 68 | 69 | @test_throws ArgumentError covariance_game(nums_actions, rho) 70 | 71 | rho = -1. 
# < -1/(N-1) 72 | 73 | @test_throws ArgumentError covariance_game(nums_actions, rho) 74 | 75 | end 76 | 77 | @testset "random_pure_actions" begin 78 | nums_actions = (2, 3, 4) 79 | seed = 1234 80 | action_profiles = 81 | [random_pure_actions(MersenneTwister(seed), nums_actions) 82 | for i in 1:2] 83 | @test action_profiles[1] <= nums_actions 84 | @test action_profiles[2] == action_profiles[1] 85 | end 86 | 87 | @testset "random_mixed_actions" begin 88 | nums_actions = (2, 3, 4) 89 | seed = 1234 90 | action_profiles = 91 | [random_mixed_actions(MersenneTwister(seed), nums_actions) 92 | for i in 1:2] 93 | @test length.(action_profiles[1]) == nums_actions 94 | for i in 1:length(nums_actions) 95 | @test action_profiles[2][i] == action_profiles[1][i] 96 | end 97 | end 98 | 99 | end 100 | -------------------------------------------------------------------------------- /src/GameTheory.jl: -------------------------------------------------------------------------------- 1 | module GameTheory 2 | 3 | # stdlib 4 | using LinearAlgebra, Random 5 | 6 | # Packages 7 | using QuantEcon 8 | using Combinatorics 9 | using Parameters 10 | using Distributions 11 | 12 | # Optimization packages 13 | using MathOptInterface 14 | const MOI = MathOptInterface 15 | const MOIU = MOI.Utilities 16 | using Clp 17 | 18 | # Geometry packages 19 | using Polyhedra 20 | using CDDLib 21 | using LRSLib 22 | 23 | # Type aliases # 24 | 25 | """ 26 | PureAction 27 | 28 | Alias for `Integer`. 29 | """ 30 | const PureAction = Integer 31 | 32 | """ 33 | MixedAction{T} 34 | 35 | Alias for `Vector{T}` where `T<:Real`. 36 | """ 37 | const MixedAction{T<:Real} = Vector{T} 38 | 39 | """ 40 | Action{T} 41 | 42 | Alias for `Union{PureAction,MixedAction{T}}` where `T<:Real`. 43 | """ 44 | const Action{T<:Real} = Union{PureAction,MixedAction{T}} 45 | 46 | """ 47 | PureActionProfile{N,T} 48 | 49 | Alias for `NTuple{N,T}` where `T<:PureAction`. 50 | """ 51 | const PureActionProfile{N,T<:PureAction} = NTuple{N,T} 52 | 53 | """ 54 | MixedActionProfile{N,T} 55 | 56 | Alias for `NTuple{N,MixedAction{T}}` where `T<:Real`. 57 | """ 58 | const MixedActionProfile{N,T<:Real} = NTuple{N,MixedAction{T}} 59 | 60 | """ 61 | ActionProfile{N,T} 62 | 63 | Alias for `Union{PureActionProfile{N,T},MixedActionProfile{N,T}}`. 
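For illustration (the particular values are arbitrary), in a 2-player game:

```julia
(1, 2)                        # a PureActionProfile{2,Int}
([1//2, 1//2], [1//3, 2//3])  # a MixedActionProfile{2,Rational{Int}}
```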
64 | """ 65 | const ActionProfile{N,T} = Union{PureActionProfile{N,T},MixedActionProfile{N,T}} 66 | 67 | const RatOrInt = Union{Rational,Int} 68 | 69 | # package code goes here 70 | include("normal_form_game.jl") 71 | include("homotopy_continuation.jl") 72 | include("lrsnash.jl") 73 | include("pure_nash.jl") 74 | include("repeated_game.jl") 75 | include("random.jl") 76 | include("lemke_howson.jl") 77 | include("support_enumeration.jl") 78 | include("util.jl") 79 | include("generators/Generators.jl") 80 | 81 | include("fictplay.jl") 82 | include("localint.jl") 83 | include("brd.jl") 84 | include("logitdyn.jl") 85 | 86 | export 87 | # Types 88 | Player, NormalFormGame, 89 | 90 | # Type aliases 91 | Action, MixedAction, PureAction, 92 | ActionProfile, MixedActionProfile, PureActionProfile, 93 | 94 | # Normal form game functions 95 | best_response, best_responses, is_best_response, payoff_vector, 96 | is_nash, pure2mixed, pure_strategy_NE, is_pareto_efficient, 97 | is_pareto_dominant, is_dominated, dominated_actions, delete_action, 98 | payoff_profile_array, 99 | 100 | # General functions 101 | num_players, num_actions, num_opponents, 102 | 103 | # Utilities 104 | BROptions, 105 | 106 | # Nash Equilibrium 107 | pure_nash, 108 | 109 | # Repeated Games 110 | RepeatedGame, unpack, flow_u_1, flow_u_2, flow_u, best_dev_i, 111 | best_dev_1, best_dev_2, best_dev_payoff_i, best_dev_payoff_1, 112 | best_dev_payoff_2, worst_value_i, worst_value_1, worst_value_2, 113 | worst_values, outerapproximation, AS, uniquetolrows, 114 | 115 | # Random Games 116 | random_game, covariance_game, 117 | random_pure_actions, random_mixed_actions, 118 | 119 | # Lemke-Howson 120 | lemke_howson, 121 | 122 | # Support Enumeration 123 | support_enumeration, support_enumeration_task, 124 | 125 | # LRS 126 | lrsnash, 127 | 128 | # Homotopy Continuation 129 | hc_solve, 130 | 131 | # Learning algorithms 132 | play!, play, time_series, 133 | AbstractGain, DecreasingGain, ConstantGain, 134 | AbstractFictitiousPlay, FictitiousPlay, StochasticFictitiousPlay, 135 | AbstractRevision, SimultaneousRevision, AsynchronousRevision, 136 | LocalInteraction, 137 | AbstractBRD, BRD, KMR, SamplingBRD, 138 | LogitDynamics 139 | 140 | end # module 141 | -------------------------------------------------------------------------------- /test/test_lemke_howson.jl: -------------------------------------------------------------------------------- 1 | @testset "lemke_howson.jl" begin 2 | 3 | function test_lh_case(case) 4 | g = case[:g] 5 | NEs_dict = case[:NEs_dict] 6 | converged = case[:converged] 7 | 8 | for k in keys(NEs_dict) 9 | NE_compupted, res = 10 | lemke_howson(g, init_pivot=k, full_output=Val(true)) 11 | @test isapprox_act_profs(NE_compupted, NEs_dict[k]) 12 | @test res.converged == converged 13 | end 14 | end 15 | 16 | @testset "Basic" begin 17 | # From von Stengel 2007 in Algorithmic Game Theory 18 | A = [3 3; 2 5; 0 6] 19 | B = [3 2 3; 2 6 1] 20 | g = NormalFormGame(Player(A), Player(B)) 21 | NEs_dict = Dict( 22 | 1 => ([1., 0., 0.], [1., 0.]), # init_pivot => NE 23 | 2 => ([0., 1/3, 2/3], [1/3, 2/3]) 24 | ) 25 | 26 | NE = @inferred lemke_howson(g) 27 | @test is_nash(g, NE) 28 | NE = @inferred lemke_howson(g, init_pivot=5) 29 | @test is_nash(g, NE) 30 | 31 | NE, res = @inferred lemke_howson(g, full_output=Val(true)) 32 | @test is_nash(g, res.NE) 33 | @test res.converged 34 | 35 | T1 = Rational{BigInt} 36 | T2 = BigFloat 37 | g_1 = NormalFormGame(T1, g) 38 | NE_1 = @inferred lemke_howson(g_1) 39 | @test is_nash(g_1, NE_1) 40 | @test 
eltype(NE_1[1]) == T2 41 | 42 | case = (g = g, NEs_dict = NEs_dict, converged = true) 43 | test_lh_case(case) 44 | end 45 | 46 | @testset "Degenerate games" begin 47 | cases = [] 48 | 49 | # From von Stengel 2007 in Algorithmic Game Theory 50 | A = [3 3; 2 5; 0 6] 51 | B = [3 2 3; 3 6 1] 52 | push!(cases, ( 53 | g = NormalFormGame(Player(A), Player(B)), 54 | NEs_dict = Dict(1 => ([0., 1/3, 2/3], [1/3, 2/3])), 55 | converged = true 56 | )) 57 | 58 | # == Examples of cycles by "ad hoc" tie breaking rules == # 59 | 60 | # Example where tie breaking that picks the variable with 61 | # the smallest row index in the tableau leads to cycling 62 | A = [0 0 0; 63 | 0 1 1; 64 | 1 1 0] 65 | B = [1 0 1; 66 | 1 1 0; 67 | 0 0 2] 68 | push!(cases, ( 69 | g = NormalFormGame(Player(A), Player(B)), 70 | NEs_dict = Dict(1 => ([0., 2/3, 1/3], [0., 1., 0.])), 71 | converged = true 72 | )) 73 | 74 | # Example where tie breaking that picks the variable with 75 | # the smallest variable index in the tableau leads to cycling 76 | perm = [3, 1, 2] 77 | C = A[:, perm] 78 | D = B[perm, :] 79 | push!(cases, ( 80 | g = NormalFormGame(Player(C), Player(D)), 81 | NEs_dict = Dict(1 => ([0., 2/3, 1/3], [0., 0., 1.])), 82 | converged = true 83 | )) 84 | 85 | test_lh_case.(cases) 86 | end 87 | 88 | @testset "Capping" begin 89 | A = [3 3; 2 5; 0 6] 90 | B = [3 2 3; 2 6 1] 91 | g = NormalFormGame(Player(A), Player(B)) 92 | m, n = g.nums_actions 93 | max_iter = 10^6 # big number 94 | 95 | for k in 1:m+n 96 | (NE1, res1) = @inferred( 97 | lemke_howson(g; init_pivot=k, max_iter=max_iter, 98 | capping=nothing, full_output = Val(true)) 99 | ) 100 | (NE2, res2) = @inferred( 101 | lemke_howson(g; init_pivot=k, max_iter = max_iter, 102 | capping=max_iter, full_output = Val(true)) 103 | ) 104 | @test isapprox(NE1[1], NE2[1]) 105 | @test isapprox(NE1[2], NE2[2]) 106 | @test res1.init == res2.init 107 | end 108 | 109 | init_pivot = 2 110 | max_iter = m+n 111 | (NE, res) = lemke_howson(g; init_pivot=init_pivot, max_iter=max_iter, 112 | capping = 1, full_output=Val(true)) 113 | @test res.num_iter == max_iter 114 | @test res.init == init_pivot-1 115 | end 116 | 117 | @testset "Invalid init_pivot" begin 118 | A = [3 3; 2 5; 0 6] 119 | B = [3 2 3; 2 6 1] 120 | g = NormalFormGame(Player(A), Player(B)) 121 | m, n = g.nums_actions 122 | 123 | @test_throws ArgumentError lemke_howson(g; init_pivot=-1) 124 | @test_throws ArgumentError lemke_howson(g; init_pivot=0) 125 | @test_throws ArgumentError lemke_howson(g; init_pivot=m+n+1) 126 | end 127 | 128 | end 129 | -------------------------------------------------------------------------------- /src/homotopy_continuation.jl: -------------------------------------------------------------------------------- 1 | using HomotopyContinuation 2 | 3 | """ 4 | hc_solve(g; ntofind=Inf, options...) 5 | 6 | Compute all isolated mixed-action Nash equilibria of an N-player normal form 7 | game. 8 | 9 | This function solves a system of polynomial equations arising from the 10 | nonlinear complementarity problem representation of Nash equilibrium, by using 11 | `HomotopyContinuation.jl`. 12 | 13 | # Arguments 14 | 15 | - `g::NormalFormGame`: N-player NormalFormGame instance. 16 | - `ntofind=Inf`: Number of Nash equilibria to find. 17 | - `options...`: Optional arguments to pass to `HomotopyContinuation.solve`. For 18 | example, the option `seed::UInt32` can set the random seed used during the 19 | computations.
See the [documentation] 20 | (https://www.juliahomotopycontinuation.org/HomotopyContinuation.jl/stable/solve/) 21 | for `HomotopyContinuation.solve` for details. 22 | 23 | # Returns 24 | 25 | - `::Vector{NTuple{N,Vector{Float64}}}`: Vector of mixed-action Nash 26 | equilibria. 27 | 28 | # Examples 29 | 30 | Consider the 3-player 2-action game with 9 Nash equilibria in McKelvey and 31 | McLennan (1996) "Computation of Equilibria in Finite Games": 32 | 33 | ```julia 34 | julia> Base.active_repl.options.iocontext[:compact] = true; # Reduce digits to display 35 | 36 | julia> g = NormalFormGame((2, 2, 2)); 37 | 38 | julia> g[1, 1, 1] = [9, 8, 12]; 39 | 40 | julia> g[2, 2, 1] = [9, 8, 2]; 41 | 42 | julia> g[1, 2, 2] = [3, 4, 6]; 43 | 44 | julia> g[2, 1, 2] = [3, 4, 4]; 45 | 46 | julia> println(g) 47 | 2×2×2 NormalFormGame{3, Float64}: 48 | [:, :, 1] = 49 | [9.0, 8.0, 12.0] [0.0, 0.0, 0.0] 50 | [0.0, 0.0, 0.0] [9.0, 8.0, 2.0] 51 | 52 | [:, :, 2] = 53 | [0.0, 0.0, 0.0] [3.0, 4.0, 6.0] 54 | [3.0, 4.0, 4.0] [0.0, 0.0, 0.0] 55 | 56 | julia> NEs = hc_solve(g, show_progress=false) 57 | 9-element Vector{Tuple{Vector{Float64}, Vector{Float64}, Vector{Float64}}}: 58 | ([0.0, 1.0], [0.333333, 0.666667], [0.333333, 0.666667]) 59 | ([1.0, 0.0], [0.0, 1.0], [0.0, 1.0]) 60 | ([0.25, 0.75], [0.5, 0.5], [0.333333, 0.666667]) 61 | ([0.5, 0.5], [0.5, 0.5], [1.0, -7.34684e-40]) 62 | ([0.0, 1.0], [1.0, 0.0], [-2.93874e-39, 1.0]) 63 | ([0.0, 1.0], [0.0, 1.0], [1.0, 0.0]) 64 | ([0.5, 0.5], [0.333333, 0.666667], [0.25, 0.75]) 65 | ([1.0, 4.48416e-44], [1.0, -7.17465e-43], [1.0, -4.48416e-44]) 66 | ([0.25, 0.75], [1.0, 0.0], [0.25, 0.75]) 67 | 68 | julia> all([is_nash(g, NE) for NE in NEs]) 69 | true 70 | ``` 71 | """ 72 | function hc_solve(g::NormalFormGame{N}; ntofind=Inf, options...) where N 73 | f = construct_hc_system(g) 74 | 75 | stop_fn = isfinite(ntofind) ? r -> _is_nash(r, N) : _ -> false 76 | res = let n = 0 77 | HomotopyContinuation.solve( 78 | f, 79 | stop_early_cb = r -> stop_fn(r) && ((n += 1) >= ntofind); 80 | options... 81 | )::HomotopyContinuation.Result 82 | end 83 | 84 | NEs = [_get_action_profile(r, g.nums_actions) 85 | for r in res.path_results if _is_nash(r, N)] 86 | return NEs 87 | end 88 | 89 | function hc_solve(g::NormalFormGame{1}; args...) 90 | throw(ArgumentError("not implemented for 1-player games")) 91 | end 92 | 93 | 94 | function construct_hc_system(g::NormalFormGame{N}) where N 95 | na = g.nums_actions 96 | indptr = [0, cumsum(na)...] 97 | M = indptr[end] 98 | 99 | @var u[1:N] v[1:M] x[1:M] 100 | exs = Vector{Expression}(undef, 2M+N) 101 | 102 | for i in 1:N 103 | na_js = Base.tail((na[i:end]..., na[1:i-1]...)::NTuple{N,Int}) 104 | for a_i in 1:na[i] 105 | exs[indptr[i]+a_i] = u[i] - sum( 106 | g.players[i].payoff_array[a_i, a_js] * 107 | prod([x[indptr[i+k <= N ? 
i+k : i+k-N]+a_js.I[k]] 108 | for k in 1:N-1]) 109 | for a_js in CartesianIndices(na_js) 110 | ) - v[indptr[i]+a_i] 111 | end 112 | end 113 | 114 | for i in 1:N 115 | for a_i in 1:na[i] 116 | exs[M+indptr[i]+a_i] = x[indptr[i]+a_i] * v[indptr[i]+a_i] 117 | end 118 | end 119 | 120 | for i in 1:N 121 | exs[2M+i] = 1 - sum(x[indptr[i]+a_i] for a_i in 1:na[i]) 122 | end 123 | 124 | return System(exs) 125 | end 126 | 127 | 128 | function _is_nash(r::PathResult, N; nonneg_tol=1e-14) 129 | is_success(r) || return false 130 | is_real(r) || return false 131 | for i in N+1:length(r.solution) 132 | r.solution[i].re > -nonneg_tol || return false 133 | end 134 | return true 135 | end 136 | 137 | 138 | function _get_action_profile(r::PathResult, nums_actions::NTuple{N}) where N 139 | out = ntuple(i -> Array{Float64}(undef, nums_actions[i]), N) 140 | ind = N + sum(nums_actions) 141 | for i in 1:N 142 | for j in 1:nums_actions[i] 143 | ind += 1 144 | out[i][j] = r.solution[ind].re 145 | end 146 | end 147 | return out 148 | end 149 | -------------------------------------------------------------------------------- /src/logitdyn.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Tools for Logit Response Dynamics 3 | =# 4 | 5 | # LogitDynamics # 6 | 7 | """ 8 | LogitDynamics{N, T, S} 9 | 10 | Type representing the Logit-Dynamics model. 11 | 12 | # Fields 13 | 14 | - `players::NTuple{N,Player{N,T}}` : Tuple of `Player` instances. 15 | - `nums_actions::NTuple{N,Int}` : Tuple of the numbers of actions, one for each 16 | player. 17 | - `beta::S` : The level of noise in the players' decisions. 18 | - `choice_probs::Vector{Array}` : The cumulative choice probabilities over actions, one 19 | array for each player. 20 | """ 21 | struct LogitDynamics{N,T<:Real,S<:Real} 22 | players::NTuple{N,Player{N,T}} 23 | nums_actions::NTuple{N,Int} 24 | beta::S 25 | choice_probs::Vector{Array} 26 | end 27 | 28 | """ 29 | LogitDynamics(g, beta) 30 | 31 | Construct a `LogitDynamics` instance. 32 | 33 | # Arguments 34 | 35 | - `g::NormalFormGame{N,T}` : `NormalFormGame` instance. 36 | - `beta::S` : The level of noise in the players' decisions. 37 | 38 | # Returns 39 | 40 | - `::LogitDynamics` : The Logit-Dynamics model. 41 | """ 42 | function LogitDynamics(g::NormalFormGame{N,T}, beta::S) where {N,T<:Real,S<:Real} 43 | choice_probs = Vector{Array}(undef, N) 44 | for (i, player) in enumerate(g.players) 45 | payoff_array = permutedims(player.payoff_array, vcat(2:N, 1)) 46 | payoff_array_normalized = payoff_array .- maximum(payoff_array, dims=N) 47 | choice_probs[i] = cumsum(exp.(payoff_array_normalized .* beta), 48 | dims=N) 49 | end 50 | return LogitDynamics(g.players, g.nums_actions, beta, choice_probs) 51 | end 52 | 53 | """ 54 | play!(rng, ld, player_ind, actions) 55 | 56 | Return a new action for the player indexed by `player_ind`, given the players' choice 57 | probabilities. 58 | 59 | # Arguments 60 | 61 | - `rng::AbstractRNG` : Random number generator used. 62 | - `ld::LogitDynamics{N}` : `LogitDynamics` instance. 63 | - `player_ind::Integer` : Index of the player who takes an action. 64 | - `actions::Vector{<:Integer}` : The action profile. 65 | 66 | # Returns 67 | 68 | - `::Integer` : The new action of the player indexed by `player_ind`. 69 | """ 70 | function play!(rng::AbstractRNG, ld::LogitDynamics{N}, player_ind::Integer, 71 | actions::Vector{<:Integer}) where N 72 | opponent_actions = [actions[player_ind+1:N]..., actions[1:player_ind-1]...]
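    # choice_probs[player_ind] is indexed by the opponents' actions in cyclic order (i+1, ..., N, 1, ..., i-1), with player i's own actions in the last dimension, stored as cumulative sums (see the constructor above)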
73 | cdf = ld.choice_probs[player_ind][opponent_actions..., :] 74 | random_value = rand(rng) 75 | next_action = searchsortedfirst(cdf, random_value*cdf[end]) 76 | return next_action 77 | end 78 | 79 | """ 80 | play([rng=Random.GLOBAL_RNG,] ld, init_actions[; num_reps=1]) 81 | 82 | Return the action profile after `num_reps` iterations. 83 | 84 | # Arguments 85 | 86 | - `rng::AbstractRNG` : Random number generator used. 87 | - `ld::LogitDynamics{N}` : `LogitDynamics` instance. 88 | - `init_actions::PureActionProfile` : Initial action profile. 89 | - `num_reps::Integer` : The number of iterations. 90 | 91 | # Returns 92 | 93 | - `::Vector{<:Integer}` : The new action profile. 94 | """ 95 | function play(rng::AbstractRNG, 96 | ld::LogitDynamics{N}, 97 | init_actions::PureActionProfile; 98 | num_reps::Integer=1) where N 99 | actions = [m for m in init_actions] 100 | player_ind_seq = rand(rng, 1:N, num_reps) 101 | for player_ind in player_ind_seq 102 | actions[player_ind] = play!(rng, ld, player_ind, actions) 103 | end 104 | return actions 105 | end 106 | 107 | play(ld::LogitDynamics, init_actions::PureActionProfile; 108 | num_reps::Integer=1) = 109 | play(Random.GLOBAL_RNG, ld, init_actions, num_reps=num_reps) 110 | 111 | """ 112 | time_series!(rng, ld, out, player_ind_seq) 113 | 114 | Update the matrix `out` (used in the `time_series` method) given a player 115 | index sequence. 116 | 117 | # Arguments 118 | 119 | - `rng::AbstractRNG` : Random number generator used. 120 | - `ld::LogitDynamics{N}` : `LogitDynamics` instance. 121 | - `out::Matrix{<:Integer}` : Matrix representing the time series of action 122 | profiles. 123 | - `player_ind_seq::Vector{<:Integer}` : The sequence of player indices, which is 124 | determined randomly. 125 | 126 | # Returns 127 | 128 | - `::Matrix{<:Integer}` : Updated `out`. 129 | """ 130 | function time_series!(rng::AbstractRNG, 131 | ld::LogitDynamics{N}, 132 | out::Matrix{<:Integer}, 133 | player_ind_seq::Vector{<:Integer}) where N 134 | ts_length = size(out, 2) 135 | current_actions = [out[i, 1] for i in 1:N] 136 | for t in 1:ts_length-1 137 | current_actions[player_ind_seq[t]] = play!(rng, ld, player_ind_seq[t], 138 | current_actions) 139 | for i in 1:N 140 | out[i, t+1] = current_actions[i] 141 | end 142 | end 143 | return out 144 | end 145 | 146 | """ 147 | time_series([rng=Random.GLOBAL_RNG,] ld, ts_length, init_actions) 148 | 149 | Return a time series of action profiles. 150 | 151 | # Arguments 152 | 153 | - `rng::AbstractRNG` : Random number generator used. 154 | - `ld::LogitDynamics{N}` : `LogitDynamics` instance. 155 | - `ts_length::Integer` : The length of the time series. 156 | - `init_actions::PureActionProfile` : Initial action profile. 157 | 158 | # Returns 159 | 160 | - `::Matrix{<:Integer}` : The time series of action profiles.
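# Examples

A minimal usage sketch (the payoff matrix is illustrative, and the realized
path depends on the RNG):

```julia
julia> using Random

julia> player = Player([4 0; 3 2]);

julia> g = NormalFormGame(player, player);

julia> ld = LogitDynamics(g, 4.0);

julia> time_series(MersenneTwister(1234), ld, 5, (1, 1));  # 2×5 Matrix{Int} of actions
```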
161 | """ 162 | function time_series(rng::AbstractRNG, 163 | ld::LogitDynamics{N}, 164 | ts_length::Integer, 165 | init_actions::PureActionProfile) where N 166 | player_ind_seq = rand(rng, 1:N, ts_length-1) 167 | out = Matrix{Int}(undef, N, ts_length) 168 | for i in 1:N 169 | out[i, 1] = init_actions[i] 170 | end 171 | time_series!(rng, ld, out, player_ind_seq) 172 | end 173 | 174 | time_series(ld::LogitDynamics, ts_length::Integer, 175 | init_actions::PureActionProfile) = 176 | time_series(Random.GLOBAL_RNG, ld, ts_length, init_actions) 177 | -------------------------------------------------------------------------------- /test/generators/test_bimatrix_generators.jl: -------------------------------------------------------------------------------- 1 | using LinearAlgebra, Random, Unicode, DelimitedFiles 2 | using Combinatorics: binomial 3 | 4 | @testset "bimatrix_generators.jl" begin 5 | 6 | @testset "blotto_game" begin 7 | h = 3 8 | t = 7 9 | rho = 0.5 10 | 11 | g = @inferred(blotto_game(h, t, rho)) 12 | 13 | @testset "test_nb_actions" begin 14 | nb_actions = (factorial(t+h-1)/ 15 | (factorial(h-1)*factorial(t))) 16 | 17 | @test g.nums_actions == (nb_actions, nb_actions) 18 | end 19 | 20 | @testset "test_constant_diagonal" begin 21 | for i=1:2 22 | d = diag(g.players[i].payoff_array) 23 | @test length(unique(d)) == 1 24 | end 25 | end 26 | 27 | @testset "test_seed" begin 28 | g1 = blotto_game(MersenneTwister(32), h, t, rho) 29 | g2 = blotto_game(MersenneTwister(32), h, t, rho) 30 | for i in 1:2 31 | @test g1.players[i].payoff_array == g2.players[i].payoff_array 32 | end 33 | end 34 | 35 | end 36 | 37 | @testset "ranking_game" begin 38 | g = @inferred(ranking_game(100)) 39 | p1_array = g.players[1].payoff_array 40 | p2_array = g.players[2].payoff_array 41 | 42 | @testset "test_weakly_decreasing_rowise_payoffs" begin 43 | @test all(p1_array[:, 1:(end-1)] - p1_array[:, 2:end] .>=0) 44 | @test all(p2_array[:, 1:(end-1)] - p2_array[:, 2:end] .>=0) 45 | end 46 | 47 | @testset "test_elements_first_row" begin 48 | @test sum(g[1, 1]) == 1 49 | possible_elements = [0, 1, 0.5] 50 | @test all([value in possible_elements for value in p1_array[1, :]]) 51 | @test all([value in possible_elements for value in p2_array[1, :]]) 52 | end 53 | 54 | @testset "test_seed" begin 55 | g1 = ranking_game(MersenneTwister(32), 20) 56 | g2 = ranking_game(MersenneTwister(32), 20) 57 | for i in 1:2 58 | @test g1.players[i].payoff_array == g2.players[i].payoff_array 59 | end 60 | end 61 | 62 | end 63 | 64 | @testset "sgc_game" begin 65 | k = 2 66 | n = 4*k - 1 67 | g = @inferred(sgc_game(k)) 68 | 69 | s = " 70 | 0.750 0.750 1.000 0.500 0.500 1.000 0.000 0.500 0.000 0.500 0.000 71 | 0.500 0.000 0.500 0.500 1.000 0.750 0.750 1.000 0.500 0.000 0.500 72 | 0.000 0.500 0.000 0.500 0.000 0.500 1.000 0.500 0.500 1.000 0.750 73 | 0.750 0.000 0.500 0.000 0.500 0.000 0.500 0.000 0.500 0.500 0.000 74 | 0.500 0.000 0.500 0.000 0.750 0.000 0.000 0.750 0.000 0.000 0.000 75 | 0.000 0.500 0.000 0.500 0.000 0.500 0.000 0.000 0.750 0.750 0.000 76 | 0.000 0.000 0.000 0.000 0.500 0.000 0.500 0.000 0.500 0.000 0.000 77 | 0.000 0.000 0.000 0.750 0.000 0.000 0.750 0.500 0.000 0.500 0.000 78 | 0.500 0.000 0.000 0.000 0.000 0.000 0.000 0.750 0.750 0.000" 79 | payoffs = readdlm(IOBuffer(Unicode.normalize(s, stripcc=true))) 80 | payoffs = reshape(payoffs, (2, n^2)) 81 | payoff_matrices = [reshape(payoffs[i, :], (n, n)) for i in 1:2] 82 | payoff_matrices[2] = transpose(payoff_matrices[2]) 83 | 84 | for i in 1:2 85 | @test g.players[i].payoff_array == 
payoff_matrices[i] 86 | end 87 | end 88 | 89 | @testset "tournament_game" begin 90 | n, k = 6, 4 91 | m = binomial(n, k) 92 | g = @inferred(tournament_game(n, k)) 93 | 94 | @testset "test_size" begin 95 | @test g.nums_actions == (n, m) 96 | end 97 | 98 | @testset "test_payoff_values" begin 99 | possible_values = [0., 1.] 100 | for player in g.players 101 | @test issubset(player.payoff_array, possible_values) 102 | end 103 | 104 | max_num_dominated_subsets = sum([binomial(i, k) for i in k:(n-1)]) 105 | @test sum(g.players[1].payoff_array) <= max_num_dominated_subsets 106 | @test all(sum(g.players[2].payoff_array, dims=2) .== k) 107 | end 108 | 109 | @testset "test_seed" begin 110 | seed = 0 111 | g1 = tournament_game(MersenneTwister(seed), n, k) 112 | g2 = tournament_game(MersenneTwister(seed), n, k) 113 | 114 | for i in 1:2 115 | @test g1.players[i].payoff_array == g2.players[i].payoff_array 116 | end 117 | end 118 | 119 | @testset "test_throws_argument_error_too_large_inputs" begin 120 | n, k = 100, 50 121 | @test_throws ArgumentError tournament_game(n, k) 122 | end 123 | end 124 | 125 | @testset "unit_vector_game" begin 126 | n = 100 127 | g = @inferred(unit_vector_game(n)) 128 | 129 | @testset "test_size" begin 130 | @test g.nums_actions == (n, n) 131 | end 132 | 133 | @testset "test_payoff_values" begin 134 | @test all(sum(g.players[1].payoff_array, dims=1) .== 1.) 135 | end 136 | 137 | @testset "test_avoid_pure_nash" begin 138 | NEs = pure_nash(unit_vector_game(n, avoid_pure_nash=true), tol=0.) 139 | @test length(NEs) == 0 140 | end 141 | 142 | @testset "test_seed" begin 143 | seed = 0 144 | n = 100 145 | g1 = unit_vector_game(MersenneTwister(seed), n) 146 | g2 = unit_vector_game(MersenneTwister(seed), n) 147 | for i in 1:2 148 | @test g1.players[i].payoff_array == g2.players[i].payoff_array 149 | end 150 | end 151 | 152 | @testset "test_redraw" begin 153 | seed = 6 154 | rng = MersenneTwister(seed) 155 | n = 2 156 | g = unit_vector_game(rng, n, avoid_pure_nash=true) 157 | NEs = pure_nash(g, tol=0.) 
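        # avoid_pure_nash=true keeps redrawing payoffs until the game has no pure-action NE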
158 | @test length(NEs) == 0 159 | end 160 | 161 | @testset "test_throws_argument_error_avoid_pure_nash_n_1" begin 162 | n = 1 163 | @test_throws ArgumentError unit_vector_game(n, avoid_pure_nash=true) 164 | end 165 | end 166 | 167 | end 168 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GameTheory.jl 2 | 3 | [![Build Status](https://github.com/QuantEcon/GameTheory.jl/workflows/CI/badge.svg)](https://github.com/QuantEcon/GameTheory.jl/actions/workflows/ci.yml) 4 | [![codecov](https://codecov.io/gh/QuantEcon/GameTheory.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/QuantEcon/GameTheory.jl) 5 | [![](https://img.shields.io/badge/docs-stable-blue.svg)](https://QuantEcon.github.io/GameTheory.jl/stable) 6 | [![](https://img.shields.io/badge/docs-dev-blue.svg)](https://QuantEcon.github.io/GameTheory.jl/dev) 7 | 8 | Algorithms and data structures for game theory in Julia 9 | 10 | ## Example usage 11 | 12 | Create a `NormalFormGame`: 13 | 14 | ```julia 15 | using GameTheory 16 | player1 = Player([3 3; 2 5; 0 6]) 17 | player2 = Player([3 2 3; 2 6 1]) 18 | g = NormalFormGame(player1, player2) 19 | println(g) 20 | ``` 21 | ``` 22 | 3×2 NormalFormGame{2, Int64}: 23 | [3, 3] [3, 2] 24 | [2, 2] [5, 6] 25 | [0, 3] [6, 1] 26 | ``` 27 | 28 | `lrsnash` calls the Nash equilibrium computation routine in [lrslib](http://cgm.cs.mcgill.ca/~avis/C/lrs.html) 29 | (through its Julia wrapper [LRSLib.jl](https://github.com/JuliaPolyhedra/LRSLib.jl)): 30 | 31 | ```julia 32 | lrsnash(g) 33 | ``` 34 | ``` 35 | 3-element Vector{Tuple{Vector{Rational{BigInt}}, Vector{Rational{BigInt}}}}: 36 | ([4//5, 1//5, 0//1], [2//3, 1//3]) 37 | ([0//1, 1//3, 2//3], [1//3, 2//3]) 38 | ([1//1, 0//1, 0//1], [1//1, 0//1]) 39 | ``` 40 | 41 | A 2x2x2 `NormalFormGame`: 42 | 43 | ```julia 44 | g = NormalFormGame((2, 2, 2)) 45 | g[1, 1, 1] = [9, 8, 12] 46 | g[2, 2, 1] = [9, 8, 2] 47 | g[1, 2, 2] = [3, 4, 6] 48 | g[2, 1, 2] = [3, 4, 4] 49 | println(g) 50 | ``` 51 | ``` 52 | 2×2×2 NormalFormGame{3, Float64}: 53 | [:, :, 1] = 54 | [9.0, 8.0, 12.0] [0.0, 0.0, 0.0] 55 | [0.0, 0.0, 0.0] [9.0, 8.0, 2.0] 56 | 57 | [:, :, 2] = 58 | [0.0, 0.0, 0.0] [3.0, 4.0, 6.0] 59 | [3.0, 4.0, 4.0] [0.0, 0.0, 0.0] 60 | ``` 61 | 62 | `hc_solve` computes all isolated Nash equilibria of an N-player game by using 63 | [HomotopyContinuation.jl](https://github.com/JuliaHomotopyContinuation/HomotopyContinuation.jl): 64 | 65 | ```julia 66 | NEs = hc_solve(g) 67 | ``` 68 | ``` 69 | 9-element Vector{Tuple{Vector{Float64}, Vector{Float64}, Vector{Float64}}}: 70 | ([2.63311e-36, 1.0], [0.333333, 0.666667], [0.333333, 0.666667]) 71 | ([0.25, 0.75], [1.0, 0.0], [0.25, 0.75]) 72 | ([0.0, 1.0], [0.0, 1.0], [1.0, 0.0]) 73 | ([0.25, 0.75], [0.5, 0.5], [0.333333, 0.666667]) 74 | ([0.5, 0.5], [0.5, 0.5], [1.0, 1.37753e-40]) 75 | ([1.0, 0.0], [0.0, 1.0], [0.0, 1.0]) 76 | ([0.5, 0.5], [0.333333, 0.666667], [0.25, 0.75]) 77 | ([1.0, 0.0], [1.0, 9.40395e-38], [1.0, -9.40395e-38]) 78 | ([0.0, 1.0], [1.0, 0.0], [0.0, 1.0]) 79 | ``` 80 | 81 | See the tutorials for further examples. 
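`support_enumeration` computes the mixed-action Nash equilibria of a two-player
game by checking all equal-size support pairs; for the 3×2 game built from
`player1` and `player2` above, it returns the same three equilibria as `lrsnash`,
in floating point (a quick sketch; the output shown is taken from the
`support_enumeration` docstring):

```julia
support_enumeration(NormalFormGame(player1, player2))
```
```
3-element Vector{Tuple{Vector{Float64}, Vector{Float64}}}:
 ([1.0, 0.0, 0.0], [1.0, 0.0])
 ([0.8, 0.2, 0.0], [0.666667, 0.333333])
 ([0.0, 0.333333, 0.666667], [0.333333, 0.666667])
```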
82 | 83 | ## Implemented algorithms 84 | 85 | ### Nash equilibrium computation 86 | 87 | * [`pure_nash`](https://quantecon.github.io/GameTheory.jl/stable/lib/computing_nash_equilibria.html#GameTheory.pure_nash-Tuple{NormalFormGame}): 88 | Find all pure-action Nash equilibria of an N-player game (if any) 89 | * [`lemke_howson`](https://quantecon.github.io/GameTheory.jl/stable/lib/computing_nash_equilibria.html#GameTheory.lemke_howson-Union{Tuple{NormalFormGame{2,%20T}},%20Tuple{T}}%20where%20T) 90 | Find one mixed-action Nash equilibrium of a two-player normal-form game 91 | * [`support_enumeration`](https://quantecon.github.io/GameTheory.jl/stable/lib/computing_nash_equilibria.html#GameTheory.support_enumeration-Union{Tuple{NormalFormGame{2,%20T}},%20Tuple{T}}%20where%20T): 92 | Find all mixed-action Nash equilibria of a two-player nondegenerate game 93 | * [`lrsnash`](https://quantecon.github.io/GameTheory.jl/stable/lib/computing_nash_equilibria.html#GameTheory.lrsnash-Tuple{NormalFormGame{2,%20%3C:Union{Int64,%20Rational}}}): 94 | Find all mixed-action Nash equilibria (or equilibrium components) of a two-player game 95 | * [`hc_solve`](https://quantecon.github.io/GameTheory.jl/stable/lib/computing_nash_equilibria.html#GameTheory.hc_solve-Union{Tuple{NormalFormGame{N}},%20Tuple{N}}%20where%20N): 96 | Find all isolated mixed-action Nash equilibria of an N-player game 97 | 98 | ### Learning/evolutionary dynamics 99 | 100 | * [`BRD`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.BRD): 101 | Best response dynamics 102 | * [`KMR`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.KMR): 103 | Best response with mutations dynamics of Kandori-Mailath-Rob 104 | * [`SamplingBRD`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.SamplingBRD): 105 | Sampling best response dynamics 106 | * [`FictitiousPlay`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.FictitiousPlay): 107 | Fictitious play 108 | * [`StochasticFictitiousPlay`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.StochasticFictitiousPlay): 109 | Stochastic fictitious play 110 | * [`LocalInteraction`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.LocalInteraction): 111 | Local interaction dynamics 112 | * [`LogitDynamics`](https://quantecon.github.io/GameTheory.jl/stable/lib/learning_algorithms.html#GameTheory.LogitDynamics): 113 | Logit dynamics 114 | 115 | ### Repeated games 116 | 117 | * [`outerapproximation`](https://quantecon.github.io/GameTheory.jl/stable/lib/repeated_games.html#GameTheory.outerapproximation-Tuple{RepeatedGame{2}}): 118 | Equilibrium payoff computation algorithm by Judd-Yeltekin-Conklin 119 | * [`AS`](https://quantecon.github.io/GameTheory.jl/stable/lib/repeated_games.html#GameTheory.AS-Union{Tuple{RepeatedGame{2,%20T,%20TD}},%20Tuple{TD},%20Tuple{T}}%20where%20{T,%20TD}): 120 | Equilibrium payoff computation algorithm by Abreu-Sannikov 121 | 122 | ## Tutorials 123 | 124 | * [Tools for Game Theory in GameTheory.jl](https://nbviewer.org/github/QuantEcon/game-theory-notebooks/blob/main/game_theory_jl.ipynb) 125 | * [A Recursive Formulation of Repeated Games](https://nbviewer.org/github/QuantEcon/QuantEcon.notebooks/blob/master/recursive_repeated_games.ipynb) 126 | * [Abreu-Sannikov Algorithm for Repeated Two-player 
Games](https://nbviewer.jupyter.org/github/QuantEcon/game-theory-notebooks/blob/main/as_jl.ipynb) 127 | 128 | See also the [`game_theory`](https://quanteconpy.readthedocs.io/en/latest/game_theory.html) submodule of 129 | [`QuantEcon.py`](https://github.com/QuantEcon/QuantEcon.py), 130 | the Python counterpart of this package. 131 | -------------------------------------------------------------------------------- /test/test_fictplay.jl: -------------------------------------------------------------------------------- 1 | # ------------------------- # 2 | # Testing fictitious play # 3 | # ------------------------- # 4 | 5 | 6 | using Distributions 7 | using Random 8 | 9 | 10 | @testset "Testing fictplay.jl" begin 11 | 12 | matching_pennies_bimatrix = Array{Float64}(undef, 2, 2, 2) 13 | matching_pennies_bimatrix[:, 1, 1] = [1, -1] 14 | matching_pennies_bimatrix[:, 1, 2] = [-1, 1] 15 | matching_pennies_bimatrix[:, 2, 1] = [-1, 1] 16 | matching_pennies_bimatrix[:, 2, 2] = [1, -1] 17 | g = NormalFormGame(matching_pennies_bimatrix) 18 | 19 | gain = 0.1 20 | init_actions = (1,1) 21 | init_actions_mixed = ([1.0, 0.0], [1.0, 0.0]) 22 | 23 | function vector_approximate_equal(vec1::NTuple{2,Vector{T1}}, 24 | vec2::NTuple{2,Vector{T2}}) where {T1,T2} 25 | @test T1 == T2 26 | for (x1, x2) in zip(vec1, vec2) 27 | @test length(x1) == length(x2) 28 | for (xx1, xx2) in zip(x1, x2) 29 | @test xx1 ≈ xx2 30 | end 31 | end 32 | end 33 | 34 | function matrix_approximate_equal(mat1::NTuple{2,Matrix{T1}}, 35 | mat2::NTuple{2,Matrix{T2}}) where {T1,T2} 36 | @test T1 == T2 37 | for (x1, x2) in zip(mat1, mat2) 38 | @test size(x1) == size(x2) 39 | row, col = size(x1) 40 | for i in 1:row 41 | for j in 1:col 42 | @test x1[i, j] ≈ x2[i, j] 43 | end 44 | end 45 | end 46 | end 47 | 48 | @testset "AbstractFictitiousPlay from AbstractFictitiousPlay" begin 49 | fp = FictitiousPlay(g) 50 | fp_g = FictitiousPlay(fp) 51 | 52 | @test fp_g.players == fp.players 53 | @test fp_g.nums_actions == fp.nums_actions 54 | @test fp_g.gain == fp.gain 55 | 56 | normal = Normal() 57 | sfp = StochasticFictitiousPlay(g, normal) 58 | sfp_g1 = StochasticFictitiousPlay(sfp, normal) 59 | sfp_g2 = StochasticFictitiousPlay(sfp) 60 | 61 | @test sfp_g1.players == sfp.players 62 | @test sfp_g2.players == sfp.players 63 | @test sfp_g1.nums_actions == sfp.nums_actions 64 | @test sfp_g2.nums_actions == sfp.nums_actions 65 | @test sfp_g2.d == sfp.d 66 | @test sfp_g1.gain == sfp.gain 67 | @test sfp_g2.gain == sfp.gain 68 | end 69 | 70 | @testset "Testing fictitious play" begin 71 | 72 | fp_dec = FictitiousPlay(g) 73 | x = play(fp_dec, init_actions) 74 | x_mixed = play(fp_dec, init_actions_mixed) 75 | x_series = time_series(fp_dec, 3, init_actions) 76 | x_series_mixed = time_series(fp_dec, 3, init_actions_mixed) 77 | x_des = ([1.0, 0.0], [0.5, 0.5]) 78 | x_series_des = ([1.0 1.0 1.0; 0.0 0.0 0.0], [1.0 0.5 1/3; 0.0 0.5 2/3]) 79 | 80 | fp_con = FictitiousPlay(g, ConstantGain(gain)) 81 | y = play(fp_con, init_actions) 82 | y_mixed = play(fp_con, init_actions_mixed) 83 | y_series = time_series(fp_con, 3, init_actions) 84 | y_series_mixed = time_series(fp_con, 3, init_actions_mixed) 85 | y_des = ([1.0, 0.0], [0.9, 0.1]) 86 | y_series_des = ( 87 | [1.0 1.0 1.0; 0.0 0.0 0.0], [1.0 0.9 0.81; 0.0 0.1 0.19] 88 | ) 89 | 90 | vector_approximate_equal(x, x_des) 91 | vector_approximate_equal(x_mixed, x_des) 92 | vector_approximate_equal(y, y_des) 93 | vector_approximate_equal(y_mixed, y_des) 94 | matrix_approximate_equal(x_series, x_series_des) 95 | 
matrix_approximate_equal(x_series_mixed, x_series_des) 96 | matrix_approximate_equal(y_series, y_series_des) 97 | matrix_approximate_equal(y_series_mixed, y_series_des) 98 | 99 | seed = 1234 100 | for i in 1:2 101 | @test all(play(MersenneTwister(seed), fp_dec)[i] .<= 1.0) 102 | @test all(play(fp_dec)[i] .<= 1.0) 103 | @test all(play(MersenneTwister(seed), fp_con)[i] .<= 1.0) 104 | @test all(play(fp_con)[i] .<= 1.0) 105 | @test all(time_series(MersenneTwister(seed), fp_dec, 3)[i] .<= 1.0) 106 | @test all(time_series(fp_dec, 3)[i] .<= 1.0) 107 | @test all(time_series(MersenneTwister(seed), fp_con, 3)[i] .<= 1.0) 108 | @test all(time_series(fp_con, 3)[i] .<= 1.0) 109 | end 110 | end 111 | 112 | @testset "Testing stochastic fictitious play" begin 113 | 114 | normal = Normal() #standard normal distribution 115 | seed = 1234 116 | 117 | sfp_dec = StochasticFictitiousPlay(g, normal) 118 | x = [play(MersenneTwister(seed), sfp_dec, init_actions) for i in 1:2] 119 | x_mixed = [play(MersenneTwister(seed), sfp_dec, init_actions_mixed) for i in 1:2] 120 | x_series = [time_series(MersenneTwister(seed), sfp_dec, 3, init_actions) 121 | for i in 1:2] 122 | x_series_mixed = [time_series(MersenneTwister(seed), sfp_dec, 3, 123 | init_actions_mixed) for i in 1:2] 124 | 125 | sfp_con = StochasticFictitiousPlay(g, normal, ConstantGain(gain)) 126 | y = [play(MersenneTwister(seed), sfp_con, init_actions) for i in 1:2] 127 | y_mixed = [play(MersenneTwister(seed), sfp_con, init_actions_mixed) for i in 1:2] 128 | y_series = [time_series(MersenneTwister(seed), sfp_con, 3, init_actions) 129 | for i in 1:2] 130 | y_series_mixed = [time_series(MersenneTwister(seed), sfp_con, 3, 131 | init_actions_mixed) for i in 1:2] 132 | 133 | vector_approximate_equal(x[1], x[2]) 134 | vector_approximate_equal(x_mixed[1], x_mixed[2]) 135 | vector_approximate_equal(y[1], y[2]) 136 | vector_approximate_equal(y_mixed[1], y_mixed[2]) 137 | matrix_approximate_equal(x_series[1], x_series[2]) 138 | matrix_approximate_equal(x_series_mixed[1], x_series_mixed[2]) 139 | matrix_approximate_equal(y_series[1], y_series[2]) 140 | matrix_approximate_equal(y_series_mixed[1], y_series_mixed[2]) 141 | 142 | for i in 1:2 143 | @test all(play(MersenneTwister(seed), sfp_dec)[i] .<= 1.0) 144 | @test all(play(MersenneTwister(seed), sfp_con)[i] .<= 1.0) 145 | @test all(time_series(MersenneTwister(seed), sfp_dec, 3)[i] .<= 1.0) 146 | @test all(time_series(MersenneTwister(seed), sfp_con, 3)[i] .<= 1.0) 147 | end 148 | end 149 | end -------------------------------------------------------------------------------- /src/random.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Generate random NormalFormGame instances. 3 | =# 4 | 5 | # 6 | # Random Games Generating 7 | # 8 | """ 9 | random_game([rng=GLOBAL_RNG], [S=Float64], nums_actions) 10 | 11 | Return a random N-player NormalFormGame instance where the payoffs are drawn 12 | independently from the uniform distribution on the set as determined by `S`. 13 | `S` is a range (such as `0:9`) or a subtype of `Integer` or `AbstractFloat`; 14 | in the latter case, the set is [0, 1) for floats and `typemin(S):typemax(S)` 15 | for integers. 16 | 17 | # Arguments 18 | 19 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 20 | - `S::Union{Type,AbstractRange}`: Set of values from which payoffs are drawn. 21 | - `nums_actions::NTuple{N,Int}`: Tuple of the numbers of actions, 22 | one for each player. 
23 | 24 | # Returns 25 | 26 | - `::NormalFormGame`: The generated random N-player NormalFormGame. 27 | 28 | # Examples 29 | 30 | ```julia 31 | julia> using GameTheory, Random 32 | 33 | julia> rng = MersenneTwister(12345); 34 | 35 | julia> g = random_game(rng, (4, 3)); 36 | 37 | julia> println(g) 38 | 4×3 NormalFormGame{2, Float64}: 39 | [0.562714, 0.586598] [0.381128, 0.0501668] [0.922317, 0.61179] 40 | [0.849939, 0.620099] [0.365801, 0.215712] [0.0404417, 0.569955] 41 | [0.371605, 0.965631] [0.835014, 0.364706] [0.573382, 0.923602] 42 | [0.283365, 0.754047] [0.260024, 0.696476] [0.981364, 0.0311643] 43 | 44 | julia> g = random_game(rng, 0:9, (4, 3)); 45 | 46 | julia> println(g) 47 | 4×3 NormalFormGame{2, Int64}: 48 | [1, 5] [1, 2] [6, 2] 49 | [2, 5] [0, 2] [1, 0] 50 | [0, 5] [3, 9] [1, 1] 51 | [9, 5] [2, 9] [0, 6] 52 | ``` 53 | """ 54 | function random_game(rng::AbstractRNG, S::Union{Type{T},AbstractRange{T}}, 55 | nums_actions::NTuple{N,Int}) where {N,T<:Real} 56 | if N == 0 57 | throw(ArgumentError("nums_actions must be non-empty")) 58 | end 59 | 60 | players::NTuple{N,Player{N,T}} = 61 | ntuple(i -> Player(rand(rng, S, tuple(nums_actions[i:end]..., 62 | nums_actions[1:i-1]...))), 63 | N) 64 | 65 | return NormalFormGame(players) 66 | end 67 | 68 | random_game(rng::AbstractRNG, nums_actions::NTuple{N,Int}) where {N} = 69 | random_game(rng, Float64, nums_actions) 70 | 71 | random_game(nums_actions::NTuple{N,Int}) where {N} = 72 | random_game(Random.GLOBAL_RNG, nums_actions) 73 | 74 | random_game(S::Union{Type{T},AbstractRange{T}}, 75 | nums_actions::NTuple{N,Int}) where {N,T<:Real} = 76 | random_game(Random.GLOBAL_RNG, S, nums_actions) 77 | 78 | # 79 | # Covariance Games Generating 80 | # 81 | """ 82 | covariance_game([rng=GLOBAL_RNG], nums_actions, rho) 83 | 84 | Return a random N-player NormalFormGame instance with N>=2 where 85 | the payoff profiles are drawn independently from the standard 86 | multi-normal with the covariance of any pair of payoffs equal to 87 | `rho`, as studied in Rinott and Scarsini (2000). 88 | 89 | # Arguments 90 | 91 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 92 | - `nums_actions::NTuple{N,Int}`: Tuple of the numbers of actions, 93 | one for each player. 94 | - `rho::Real`: Covariance of a pair of payoff values. Must be in 95 | [-1/(N-1), 1], where N is the number of players. 96 | 97 | # Returns 98 | 99 | - `::NormalFormGame`: The generated random N-player NormalFormGame. 100 | 101 | # Examples 102 | 103 | ```julia 104 | julia> using GameTheory, Random 105 | 106 | julia> rng = MersenneTwister(12345); 107 | 108 | julia> g = covariance_game(rng, (4, 3), -0.7); 109 | 110 | julia> println(g) 111 | 4×3 NormalFormGame{2, Float64}: 112 | [1.17236, -0.211696] [1.46647, -1.13947] [0.378353, 0.603951] 113 | [0.415565, 0.0779055] [0.606808, 1.00812] [1.12871, -1.03399] 114 | [0.685759, -0.278449] [-0.588508, 0.464548] [-0.970332, -0.0319236] 115 | [-1.47708, 1.12447] [1.92585, -2.27959] [-2.1476, 1.53569] 116 | ``` 117 | 118 | # References 119 | 120 | - Y. Rinott and M. Scarsini, "On the Number of Pure Strategy 121 | Nash Equilibria in Random Games," Games and Economic Behavior 122 | (2000), 274-293. 123 | """ 124 | function covariance_game(rng::AbstractRNG, nums_actions::NTuple{N,Int}, 125 | rho::Real) where N 126 | if N <= 1 127 | throw(ArgumentError("length of nums_actions must be at least 2")) 128 | end 129 | 130 | if !(-1 / (N - 1) <= rho <= 1) 131 | lb = (N == 2) ? 
"-1" : "-1/$(N-1)" 132 | throw(ArgumentError("rho must be in [$lb, 1]")) 133 | end 134 | 135 | mu = zeros(N) 136 | Sigma = fill(rho, (N, N)) 137 | Sigma[diagind(Sigma)] = ones(N) 138 | 139 | d = MVNSampler(mu, Sigma) 140 | x = rand(rng, d, prod(nums_actions)) 141 | 142 | x_T = Matrix{eltype(x)}(undef, prod(nums_actions), N) 143 | transpose!(x_T, x) 144 | payoff_profile_array = 145 | reshape(x_T, (nums_actions..., N)) 146 | 147 | return NormalFormGame(payoff_profile_array) 148 | end 149 | 150 | covariance_game(nums_actions::NTuple{N,Int}, rho::Real) where {N} = 151 | covariance_game(Random.GLOBAL_RNG, nums_actions, rho) 152 | 153 | # 154 | # Random action profile 155 | # 156 | """ 157 | random_pure_actions([rng=GLOBAL_RNG], nums_actions) 158 | 159 | Return a tuple of random pure actions (integers). 160 | 161 | # Arguments 162 | 163 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 164 | - `nums_actions::NTuple{N,Int}`: N-tuple of the numbers of actions, 165 | one for each player. 166 | 167 | # Returns 168 | 169 | - `::NTuple{N,Int}`: N-tuple of random pure actions. 170 | 171 | """ 172 | random_pure_actions(rng::AbstractRNG, nums_actions::NTuple{N,Int}) where {N} = 173 | ntuple(i -> rand(rng, 1:nums_actions[i]), Val(N)) 174 | 175 | random_pure_actions(nums_actions::NTuple{N,Int}) where {N} = 176 | random_pure_actions(Random.GLOBAL_RNG, nums_actions) 177 | 178 | """ 179 | random_mixed_actions([rng=GLOBAL_RNG], nums_actions) 180 | 181 | Return a tuple of random mixed actions (vectors of floats). 182 | 183 | # Arguments 184 | 185 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 186 | - `nums_actions::NTuple{N,Int}`: N-tuple of the numbers of actions, 187 | one for each player. 188 | 189 | # Returns 190 | 191 | - `::NTuple{N,Vector{Float64}}`: N-tuple of random mixed actions. 
192 | """ 193 | random_mixed_actions(rng::AbstractRNG, nums_actions::NTuple{N,Int}) where {N} = 194 | ntuple(i -> QuantEcon.random_probvec(rng, nums_actions[i]), Val(N)) 195 | 196 | random_mixed_actions(nums_actions::NTuple{N,Int}) where {N} = 197 | random_mixed_actions(Random.GLOBAL_RNG, nums_actions) 198 | -------------------------------------------------------------------------------- /test/test_repeated_game.jl: -------------------------------------------------------------------------------- 1 | @testset "Testing Repeated Game functionality" begin 2 | 3 | pd_payoff = [9.0 1.0 4 | 10.0 3.0] 5 | 6 | A = Player(pd_payoff) 7 | B = Player(pd_payoff) 8 | nfg = NormalFormGame((A, B)) 9 | 10 | # Tests construction of repeated game 11 | rpd = RepeatedGame(nfg, 0.75) 12 | C, H, Z = GameTheory.initialize_sg_hpl(4, [0.0, 0.0], 1.0) 13 | 14 | # 15 | # Test various helper functions 16 | # 17 | @testset "Testing flow utility computations" begin 18 | @test abs(flow_u_1(rpd, 1, 1) - 9.0) < 1e-14 19 | @test abs(flow_u_2(rpd, 2, 2) - 3.0) < 1e-14 20 | @test maximum(abs, flow_u(rpd, 2, 2) - [3.0, 3.0]) < 1e-14 21 | end 22 | 23 | @testset "Testing best deviation computations" begin 24 | @test best_dev_i(rpd, 1, 1) == 2 25 | end 26 | 27 | @testset "Testing unit circle function" begin 28 | H = GameTheory.unitcircle(4) 29 | points = [1.0 0.0 30 | 0.0 1.0 31 | -1.0 0.0 32 | 0.0 -1.0] 33 | 34 | @test maximum(abs, H - points) < 1e-12 35 | end 36 | 37 | @testset "Testing subgradient and hyperplane level initialize" begin 38 | C, H, Z = GameTheory.initialize_sg_hpl(4, [0.0, 0.0], 1.0) 39 | 40 | @test maximum(abs, C - ones(4)) < 1e-12 41 | @test maximum(abs, H - Z') < 1e-12 42 | end 43 | 44 | @testset "Testing worst value computation" begin 45 | @test abs(worst_value_i(rpd, H, C, 1) + 1.0) < 1e-12 46 | end 47 | 48 | # 49 | # Test the actual computation 50 | # 51 | @testset "Testing outer approximation" begin 52 | kwargs = Dict(:nH=>64, :maxiter=>150, :tol=>1e-9) 53 | vertices = @inferred(outerapproximation(rpd; kwargs...)) 54 | p_in_v = [vertices[i, :] for i in 1:size(vertices, 1)] 55 | 56 | mybools = [all(isapprox.([3.0, 3.0], p)) for p in p_in_v] 57 | @test any(mybools) 58 | end 59 | 60 | # 61 | # Test AS algorithm 62 | # 63 | @testset "Testing AS algorithm" begin 64 | # Helper function to test if each row of vertices approximately matches some row in expected 65 | function vertices_match_expected(vertices, expected; tol=1e-4) 66 | # Convert to Float64 for comparison if needed 67 | vertices_float = eltype(vertices) <: AbstractFloat ? vertices : Float64.(vertices) 68 | expected_float = eltype(expected) <: AbstractFloat ? 
expected : Float64.(expected) 69 | 70 | # Check that each row in vertices is approximately equal to some row in expected 71 | for i in 1:size(vertices_float, 1) 72 | found_match = false 73 | for j in 1:size(expected_float, 1) 74 | if maximum(abs, vertices_float[i, :] - expected_float[j, :]) < tol 75 | found_match = true 76 | break 77 | end 78 | end 79 | if !found_match 80 | return false 81 | end 82 | end 83 | return true 84 | end 85 | 86 | vertices = @inferred(AS(rpd; tol=1e-9)) 87 | 88 | pts_sorted = [3.0 3.0; 89 | 3.0 9.75; 90 | 9.0 9.0; 91 | 9.75 3.0] 92 | 93 | # Test that each row of vertices is approximately equal to some row of pts_sorted 94 | @test vertices_match_expected(vertices, pts_sorted) 95 | 96 | @testset "AS with Int payoffs" begin 97 | nfg_int = NormalFormGame(Int, nfg) 98 | rpd_int = RepeatedGame(nfg_int, 0.75) 99 | vertices = @inferred(AS(rpd_int; tol=1e-9)) 100 | @test vertices_match_expected(vertices, pts_sorted) 101 | 102 | vertices_u = @inferred(AS(rpd_int; tol=1e-9, u=[0, 0])) 103 | @test vertices_match_expected(vertices_u, pts_sorted) 104 | end 105 | 106 | @testset "AS with Rational payoffs" begin 107 | nfg_rat = NormalFormGame(Rational{Int}, nfg) 108 | rpd_rat = RepeatedGame(nfg_rat, 0.75) 109 | vertices = @inferred(AS(rpd_rat; tol=1e-9)) 110 | @test vertices_match_expected(vertices, pts_sorted) 111 | end 112 | 113 | @testset "AS with rational payoffs and rational delta" begin 114 | # Test the case described in the issue with both rational payoffs and delta 115 | nfg_rat = NormalFormGame(Rational{Int}, nfg) 116 | rpd_rat = RepeatedGame(nfg_rat, 3//4) # Rational delta 117 | vertices = @inferred(AS(rpd_rat; tol=1e-9)) 118 | @test vertices_match_expected(vertices, pts_sorted) 119 | # For rational case, the result should be Matrix{Rational{BigInt}} 120 | @test eltype(vertices) == Rational{BigInt} 121 | end 122 | 123 | @testset "AS with Int payoffs and rational delta" begin 124 | # Test the case with Int payoffs but rational delta (should use exact arithmetic) 125 | nfg_int = NormalFormGame(Int, nfg) 126 | rpd_int_rat = RepeatedGame(nfg_int, 3//4) # Rational delta with Int payoffs 127 | vertices = @inferred(AS(rpd_int_rat; tol=1e-9)) 128 | @test vertices_match_expected(vertices, pts_sorted) 129 | # Should also use exact arithmetic and return Rational{BigInt} 130 | @test eltype(vertices) == Rational{BigInt} 131 | end 132 | 133 | @testset "AS with verbose output" begin 134 | # Test verbose parameter 135 | rpd_test = RepeatedGame(nfg, 0.75) 136 | # Should run without error and print convergence message 137 | vertices = @inferred(AS(rpd_test; tol=1e-9, verbose=true)) 138 | @test size(vertices) == size(pts_sorted) 139 | end 140 | 141 | @testset "uniquetolrows function" begin 142 | # Test the uniquetolrows utility function 143 | V = [1.0001 2.0002; 1.0 2.0; 3.0 4.0; 1.00009 2.00008] 144 | tol = 1e-3 145 | V_unique = uniquetolrows(V, tol) 146 | # Should remove the near-duplicate rows (rows 1, 2, 4 are all within tolerance) 147 | @test size(V_unique, 1) == 2 # Two duplicates should be removed 148 | 149 | # Test with the example from the issue 150 | tol = 1e-9 151 | rpd_test = RepeatedGame(nfg, 0.75) 152 | V_test = AS(rpd_test; tol=tol) 153 | V_approx = uniquetolrows(V_test, tol) 154 | @test size(V_approx, 1) <= size(V_test, 1) # Should not increase size 155 | end 156 | end 157 | 158 | end 159 | -------------------------------------------------------------------------------- /.github/copilot-instructions.md: 
-------------------------------------------------------------------------------- 1 | # Copilot Instructions for GameTheory.jl 2 | 3 | ## Repository Overview 4 | 5 | GameTheory.jl is a Julia package that implements algorithms and data structures for game theory. The package provides tools for: 6 | 7 | - **Normal Form Games**: Creating and analyzing strategic games with multiple players 8 | - **Nash Equilibrium Computation**: Finding pure and mixed strategy Nash equilibria 9 | - **Learning/Evolutionary Dynamics**: Simulating how players' strategies evolve over time 10 | - **Repeated Games**: Analyzing games played repeatedly over time 11 | 12 | ## Key Concepts and Types 13 | 14 | ### Core Types 15 | - `Player{N,T}`: Represents a player with an N-dimensional payoff array of type T 16 | - `NormalFormGame{N,T}`: Represents an N-player normal form game 17 | - `RepeatedGame`: For repeated games analysis 18 | 19 | ### Type Aliases 20 | - `PureAction = Integer`: A pure strategy (single action choice) 21 | - `MixedAction{T} = Vector{T}`: A mixed strategy (probability distribution over actions) 22 | - `Action{T} = Union{PureAction,MixedAction{T}}`: Either pure or mixed action 23 | - `ActionProfile{N,T}`: A tuple of N actions (one per player) 24 | 25 | ### Important Constants 26 | - `RatOrInt = Union{Rational,Int}`: Used for exact arithmetic in Nash equilibrium computations 27 | 28 | ## Code Organization 29 | 30 | ### Core Modules (`src/`) 31 | - `normal_form_game.jl`: Main game representation and basic operations 32 | - `pure_nash.jl`: Pure strategy Nash equilibrium computation 33 | - `support_enumeration.jl`: Mixed strategy Nash equilibria via support enumeration 34 | - `lrsnash.jl`: Nash equilibria using LRS library (vertex enumeration) 35 | - `homotopy_continuation.jl`: Nash equilibria using polynomial homotopy continuation 36 | - `repeated_game.jl`: Tools for repeated games analysis 37 | - `random.jl`: Random game generation utilities 38 | - `util.jl`: General utility functions 39 | 40 | ### Learning Algorithms (`src/`) 41 | - `fictplay.jl`: Fictitious play dynamics 42 | - `localint.jl`: Local interaction dynamics on networks 43 | - `brd.jl`: Best response dynamics and variants (BRD, KMR, SamplingBRD) 44 | - `logitdyn.jl`: Logit choice dynamics 45 | 46 | ### Generators (`src/generators/`) 47 | - `Generators.jl`: Game generation utilities 48 | - Various specialized game generators (bimatrix, coordination, etc.) 
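For example, the generators are accessed through the submodule (a minimal sketch):

```julia
using GameTheory
using GameTheory.Generators

g = sgc_game(2)  # SGC game with parameter k=2, a (4k-1)×(4k-1) = 7×7 bimatrix game
@assert g.nums_actions == (7, 7)
```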
49 | 50 | ## Development Guidelines 51 | 52 | ### Julia Conventions 53 | - Follow standard Julia naming conventions (lowercase with underscores for functions, CamelCase for types) 54 | - Use `!` suffix for mutating functions (e.g., `play!`) 55 | - Provide both mutating and non-mutating versions where appropriate 56 | - Use multiple dispatch effectively for different input types 57 | 58 | ### Mathematical Precision 59 | - Use `Rational` types for exact computations when needed (especially Nash equilibria) 60 | - Be careful with floating-point comparisons; use appropriate tolerances 61 | - Prefer numerically stable algorithms for matrix operations 62 | 63 | ### Performance Considerations 64 | - Use type-stable functions with proper type annotations 65 | - Leverage Julia's multiple dispatch for efficient specialization 66 | - Consider using `@inbounds` for performance-critical loops (with proper bounds checking) 67 | - Use views (`@view`) instead of creating temporary arrays when possible 68 | 69 | ### Testing Patterns 70 | - Each source file has a corresponding test file (e.g., `src/normal_form_game.jl` → `test/test_normal_form_game.jl`) 71 | - Use `@testset` to group related tests 72 | - Include edge cases and error conditions in tests 73 | - Test both pure and mixed strategy scenarios 74 | - Verify mathematical properties (e.g., Nash equilibrium conditions) 75 | 76 | ### Documentation Standards 77 | - Use comprehensive docstrings with examples 78 | - Include mathematical background for algorithms 79 | - Document function arguments with types and descriptions 80 | - Provide usage examples for complex functions 81 | - Reference academic papers for algorithm implementations 82 | 83 | ## Key Dependencies and Their Uses 84 | 85 | ### Mathematical Libraries 86 | - `LinearAlgebra`: Matrix operations for payoff computations 87 | - `Distributions`: Probability distributions for stochastic processes 88 | - `Combinatorics`: Generating combinations/permutations of strategies 89 | - `StaticArrays`: High-performance small arrays (where applicable) 90 | 91 | ### Optimization Libraries 92 | - `MathOptInterface` (MOI): Abstract interface for optimization problems 93 | - `Clp`: High-performance linear programming solver 94 | - `Polyhedra`: Geometric computations with polytopes 95 | - `CDDLib`: Convex hull and vertex enumeration 96 | - `LRSLib`: Linear reverse search for vertex/facet enumeration 97 | 98 | ### Specialized Libraries 99 | - `HomotopyContinuation`: Polynomial system solving for Nash equilibria 100 | - `QuantEcon`: Economic and quantitative modeling utilities 101 | - `Graphs`: Network analysis for local interaction models 102 | 103 | ## Common Patterns and Idioms 104 | 105 | ### Game Creation 106 | ```julia 107 | # Create players with payoff matrices 108 | player1 = Player([3 0; 5 1]) # 2x2 payoff matrix 109 | player2 = Player([3 5; 0 1]) 110 | game = NormalFormGame(player1, player2) 111 | 112 | # Or create directly 113 | game = NormalFormGame([3 0; 5 1], [3 5; 0 1]) 114 | ``` 115 | 116 | ### Action Handling 117 | ```julia 118 | # Pure actions are integers (1-indexed) 119 | pure_action = 1 120 | 121 | # Mixed actions are probability vectors 122 | mixed_action = [0.6, 0.4] # 60% action 1, 40% action 2 123 | 124 | # Action profiles for multiple players 125 | profile = (1, 2) # Player 1 plays action 1, Player 2 plays action 2 126 | ``` 127 | 128 | ### Nash Equilibrium Computation 129 | ```julia 130 | # Different methods for different game types 131 | pure_equilibria = pure_nash(game) 132 | 
mixed_equilibria = support_enumeration(game) # 2-player only 133 | all_equilibria = lrsnash(game) # Exact rational arithmetic 134 | ``` 135 | 136 | ### Learning Dynamics 137 | ```julia 138 | # Set up dynamics 139 | dynamics = FictitiousPlay(game) 140 | initial_actions = (1, 1) 141 | 142 | # Simulate 143 | final_actions = play(dynamics, initial_actions, num_reps=1000) 144 | history = time_series(dynamics, 100, initial_actions) 145 | ``` 146 | 147 | ## Testing and Quality Assurance 148 | 149 | ### Running Tests 150 | - Use `julia --project=. -e "using Pkg; Pkg.test()"` to run full test suite 151 | - Individual test files can be run with `julia test/test_filename.jl` 152 | - CI runs tests on Julia 1.x across Linux, Windows, and macOS 153 | 154 | ### Code Quality 155 | - Follow the existing code style in the repository 156 | - Add tests for new functionality 157 | - Update documentation for user-facing changes 158 | - Consider numerical stability and edge cases 159 | - Test with both small and large games where applicable 160 | 161 | ## Mathematical Background 162 | 163 | When implementing new algorithms, ensure understanding of: 164 | - **Game Theory**: Nash equilibria, dominant strategies, Pareto efficiency 165 | - **Linear Algebra**: Matrix operations, eigenvalues, linear systems 166 | - **Optimization**: Linear programming, convex optimization 167 | - **Probability**: Mixed strategies as probability distributions 168 | - **Numerical Methods**: Stability, convergence, precision issues 169 | 170 | ## Common Gotchas 171 | 172 | 1. **Indexing**: Julia uses 1-based indexing, not 0-based 173 | 2. **Mutability**: Be careful with shared references to arrays 174 | 3. **Type Stability**: Avoid changing variable types within functions 175 | 4. **Broadcasting**: Use `.` for element-wise operations (`.*`, `.+`, etc.) 176 | 5. **Rational Arithmetic**: When using `Rational` types, operations may be slower but exact 177 | 6. **Memory Management**: Large games can consume significant memory; consider algorithmic complexity 178 | 179 | ## Resources 180 | 181 | - [Julia Documentation](https://docs.julialang.org/) 182 | - [Game Theory Textbooks]: Fudenberg & Tirole, Myerson, etc. 183 | - [Package Documentation](https://quantecon.github.io/GameTheory.jl/stable/) 184 | - [QuantEcon Lectures](https://julia.quantecon.org/) for economic applications 185 | - [Python version (QuantEcon.py game_theory submodule)](https://quanteconpy.readthedocs.io/en/latest/game_theory.html) -------------------------------------------------------------------------------- /src/support_enumeration.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Compute all mixed Nash equilibria of a 2-player (non-degenerate) normal 3 | form game by support enumeration. 4 | 5 | Julia version of QuantEcon.py/support_enumeration.py 6 | 7 | References 8 | ---------- 9 | B. von Stengel, "Equilibrium Computation for Two-Player Games in 10 | Strategic and Extensive Form," Chapter 3, N. Nisan, T. Roughgarden, E. 11 | Tardos, and V. Vazirani eds., Algorithmic Game Theory, 2007. 12 | =# 13 | 14 | using LinearAlgebra: LAPACKException, SingularException 15 | using QuantEcon: next_k_array! 16 | 17 | """ 18 | support_enumeration(g) 19 | 20 | Compute mixed-action Nash equilibria with equal support size 21 | for a 2-player normal form game by support enumeration. For a 22 | non-degenerate game input, these are all the Nash equilibria. 
23 | 24 | The algorithm checks all the equal-size support pairs; if the 25 | players have the same number n of actions, there are 2n choose n 26 | minus 1 such pairs. This should thus be used only for small games. 27 | 28 | # Arguments 29 | 30 | - `g::NormalFormGame{2,T}`: 2-player NormalFormGame instance. 31 | 32 | # Returns 33 | 34 | - `::Vector{NTuple{2,Vector{S}}}`: Mixed-action Nash equilibria that are found, 35 | where `S` is Float if `T` is Int or Float, and Rational if `T` is Rational. 36 | 37 | # Examples 38 | 39 | ```julia 40 | julia> Base.active_repl.options.iocontext[:compact] = true; # Reduce digits to display 41 | 42 | julia> player1 = Player([3 3; 2 5; 0 6]); 43 | 44 | julia> player2 = Player([3 2 3; 2 6 1]); 45 | 46 | julia> g = NormalFormGame(player1, player2); 47 | 48 | julia> println(g) 49 | 3×2 NormalFormGame{2, Int64}: 50 | [3, 3] [3, 2] 51 | [2, 2] [5, 6] 52 | [0, 3] [6, 1] 53 | 54 | julia> support_enumeration(g) 55 | 3-element Vector{Tuple{Vector{Float64}, Vector{Float64}}}: 56 | ([1.0, 0.0, 0.0], [1.0, 0.0]) 57 | ([0.8, 0.2, 0.0], [0.666667, 0.333333]) 58 | ([0.0, 0.333333, 0.666667], [0.333333, 0.666667]) 59 | ``` 60 | """ 61 | function support_enumeration(g::NormalFormGame{2,T}) where T 62 | S = typeof(zero(T)/one(T)) 63 | c = Channel{Tuple{Vector{S},Vector{S}}}(0) 64 | task = support_enumeration_task(c, g) 65 | bind(c, task) 66 | schedule(task) 67 | NEs = collect(c) 68 | 69 | return NEs 70 | 71 | end 72 | 73 | """ 74 | support_enumeration_task(c, g) 75 | 76 | Task version of `support_enumeration`. 77 | 78 | # Arguments 79 | 80 | - `c::Channel`: Channel to be bound to the support enumeration task. 81 | - `g::NormalFormGame{2}`: 2-player NormalFormGame instance. 82 | 83 | # Returns 84 | 85 | - `::Task`: Runnable task for generating Nash equilibria. 86 | 87 | # Examples 88 | 89 | ```julia 90 | julia> Base.active_repl.options.iocontext[:compact] = true; # Reduce digits to display 91 | 92 | julia> player1 = Player([3 3; 2 5; 0 6]); 93 | 94 | julia> player2 = Player([3 2 3; 2 6 1]); 95 | 96 | julia> g = NormalFormGame(player1, player2); 97 | 98 | julia> println(g) 99 | 3×2 NormalFormGame{2, Int64}: 100 | [3, 3] [3, 2] 101 | [2, 2] [5, 6] 102 | [0, 3] [6, 1] 103 | 104 | julia> c = Channel{Tuple{Vector{Float64},Vector{Float64}}}(0); 105 | 106 | julia> t = support_enumeration_task(c, g); 107 | 108 | julia> bind(c, t); schedule(t); 109 | 110 | julia> for NE in c 111 | display(NE) 112 | end 113 | ([1.0, 0.0, 0.0], [1.0, 0.0]) 114 | ([0.8, 0.2, 0.0], [0.666667, 0.333333]) 115 | ([0.0, 0.333333, 0.666667], [0.333333, 0.666667]) 116 | ``` 117 | """ 118 | function support_enumeration_task(c::Channel, 119 | g::NormalFormGame{2}) 120 | 121 | task = Task( 122 | () -> _support_enumeration_producer(c, 123 | (g.players[1].payoff_array, 124 | g.players[2].payoff_array)) 125 | ) 126 | 127 | return task 128 | end 129 | 130 | """ 131 | _support_enumeration_producer(c, payoff_matrices) 132 | 133 | Main body of `support_enumeration_task`. 134 | 135 | # Arguments 136 | 137 | - `c::Channel`: Channel to be bound to the support enumeration task. 138 | - `payoff_matrices::NTuple{2,Matrix{T}}`: Payoff matrices of player 1 and 139 | player 2, where `T<:Real`. 140 | 141 | # Puts 142 | 143 | - `NTuple{2,Vector{S}}`: Tuple of Nash equilibrium mixed actions, where `S` is 144 | Float if `T` is Int or Float, and Rational if `T` is Rational.
145 | """ 146 | function _support_enumeration_producer(c::Channel, 147 | payoff_matrices 148 | ::NTuple{2,Matrix{T}}) where T<:Real 149 | 150 | nums_actions = size(payoff_matrices[1], 1), size(payoff_matrices[2], 1) 151 | n_min = min(nums_actions...) 152 | flags_vecs = Tuple(BitVector(undef, n) for n in nums_actions) 153 | S = typeof(zero(T)/one(T)) 154 | 155 | for k = 1:n_min 156 | supps = (collect(1:k), Vector{Int}(undef, k)) 157 | actions = (Vector{S}(undef, k), Vector{S}(undef, k)) 158 | A = Matrix{S}(undef, k+1, k+1) 159 | b = Vector{S}(undef, k+1) 160 | while supps[1][end] <= nums_actions[1] 161 | @inbounds for i in 1:k 162 | supps[2][i] = i 163 | end 164 | while supps[2][end] <= nums_actions[2] 165 | if _indiff_mixed_action!(A, b, flags_vecs[1], actions[2], 166 | payoff_matrices[1], 167 | supps[1], supps[2]) 168 | if _indiff_mixed_action!(A, b, flags_vecs[2], actions[1], 169 | payoff_matrices[2], 170 | supps[2], supps[1]) 171 | out = (zeros(S, nums_actions[1]), 172 | zeros(S, nums_actions[2])) 173 | for (p, (supp, action)) in enumerate(zip(supps, 174 | actions)) 175 | out[p][supp] = action 176 | end 177 | put!(c, out) 178 | end 179 | end 180 | next_k_array!(supps[2]) 181 | end 182 | next_k_array!(supps[1]) 183 | end 184 | end 185 | 186 | end 187 | 188 | function _solve!(A::Matrix{T}, b::Vector{T}) where T <: Union{Float64,Float32} 189 | r = 0 190 | try 191 | LAPACK.gesv!(A, b) 192 | catch LAPACKException 193 | r = 1 194 | end 195 | return r 196 | end 197 | 198 | @inline function _solve!(A::Matrix{Rational{T}}, 199 | b::Vector{Rational{T}}) where T <: Integer 200 | r = 0 201 | try 202 | b[:] = ldiv!(lu!(A), b) 203 | catch SingularException 204 | r = 1 205 | end 206 | return r 207 | end 208 | 209 | """ 210 | _indiff_mixed_action!(A, b, own_supp_flags, out, 211 | payoff_matrix, own_supp, opp_supp) 212 | 213 | Given a player's payoff matrix `payoff_matrix`, an array `own_supp` 214 | of this player's actions, and an array `opp_supp` of the opponent's 215 | actions, each of length k, compute the opponent's mixed action whose 216 | support equals `opp_supp` and for which the player is indifferent 217 | among the actions in `own_supp`, if any such exists. Return `true` 218 | if such a mixed action exists and actions in `own_supp` are indeed 219 | best responses to it, in which case the outcome is stored in `out`; 220 | `false` otherwise. Arrays `A`, `b`, `own_supp_flags` are used in intermediate 221 | steps. 222 | 223 | # Arguments 224 | 225 | - `A::Matrix{T}`: Matrix of shape (k+1, k+1) used in intermediate steps, where 226 | `T<:Real`. 227 | - `b::Vector{T}`: Vector of length k+1 used in intermediate steps, where 228 | `T<:Real`. 229 | - `own_supp_flags::BitVector`: BitVector of length m used in intermediate 230 | steps. 231 | - `out::Vector{T}`: Vector of length k to store the nonzero values of the 232 | desired mixed action, where `T<:Real`. 233 | - `payoff_matrix::Matrix`: The player's payoff matrix, of shape (m, n). 234 | - `own_supp::Vector{Int}`: Vector containing the player's action indices, of 235 | length k. 236 | - `opp_supp::Vector{Int}`: Vector containing the opponent's action indices, of 237 | length k. 238 | 239 | # Returns 240 | 241 | - `::Bool`: `true` if a desired mixed action exists and `false` otherwise. 
242 | """ 243 | function _indiff_mixed_action!(A::Matrix{T}, b::Vector{T}, 244 | own_supp_flags::BitVector, 245 | out::Vector{T}, 246 | payoff_matrix::Matrix, 247 | own_supp::Vector{Int}, 248 | opp_supp::Vector{Int}) where T<:Real 249 | 250 | m = size(payoff_matrix, 1) 251 | k = length(own_supp) 252 | 253 | for j in 1:k, i in 1:k 254 | A[i, j] = payoff_matrix[own_supp[i], opp_supp[j]] 255 | end 256 | A[1:end-1, end] .= -one(T) 257 | A[end, 1:end-1] .= one(T) 258 | A[end, end] = zero(T) 259 | b[1:end-1] .= zero(T) 260 | b[end] = one(T) 261 | 262 | r = _solve!(A, b) 263 | r == 0 || return false # A: singular 264 | 265 | for i in 1:k 266 | b[i] <= zero(T) && return false 267 | end 268 | 269 | out[:] = b[1:end-1] 270 | val = b[end] 271 | 272 | if k == m 273 | return true 274 | end 275 | 276 | own_supp_flags[:] .= false 277 | own_supp_flags[own_supp] .= true 278 | 279 | for i = 1:m 280 | if !own_supp_flags[i] 281 | payoff = zero(T) 282 | for j = 1:k 283 | payoff += payoff_matrix[i, opp_supp[j]] * out[j] 284 | end 285 | if payoff > val 286 | return false 287 | end 288 | end 289 | end 290 | 291 | return true 292 | end 293 | -------------------------------------------------------------------------------- /src/brd.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Tools for best response dynamics 3 | =# 4 | 5 | using StatsBase 6 | 7 | 8 | # AbstractBRD 9 | 10 | """ 11 | AbstractBRD 12 | 13 | Abstract type representing the best response dynamics model. 14 | """ 15 | abstract type AbstractBRD{T<:Real} end 16 | 17 | """ 18 | BRD 19 | 20 | Type representing the best response dynamics model. 21 | 22 | # Fields 23 | 24 | - `N::Int` : The number of players. 25 | - `player::Player{2,T}` : `Player` instance in the model. 26 | - `num_actions::Int` : The number of actions for players. 27 | """ 28 | struct BRD{T<:Real} <: AbstractBRD{T} 29 | N::Int 30 | player::Player{2,T} 31 | num_actions::Int 32 | end 33 | 34 | """ 35 | BRD(N, payoff_array) 36 | 37 | Create a new BRD instance. 38 | 39 | # Arguments 40 | 41 | - `N::Integer` : The number of players. 42 | - `payoff_array::Matrix` : Payoff array for each player. 43 | 44 | # Returns 45 | 46 | - `::BRD` : The best response dynamics model. 47 | """ 48 | function BRD(payoff_array::Matrix{T}, N::Integer) where {T<:Real} 49 | num_actions = size(payoff_array, 1) 50 | if num_actions != size(payoff_array, 2) 51 | throw(ArgumentError("Payoff array must be square")) 52 | end 53 | return BRD(N, Player(payoff_array), num_actions) 54 | end 55 | 56 | """ 57 | KMR 58 | 59 | Type representing the Kandori Mailath Rob model. 60 | 61 | # Fields 62 | 63 | - `N::Int` : The number of players. 64 | - `player::Player` : `Player` instance in the model. 65 | - `num_actions::Int` : The number of actions for players. 66 | - `epsilon::Float64` : The probability of strategy flips. 67 | """ 68 | struct KMR{T<:Real} <: AbstractBRD{T} 69 | N::Int 70 | player::Player{2,T} 71 | num_actions::Int 72 | epsilon::Float64 73 | end 74 | 75 | """ 76 | KMR(N, payoff_array, epsilon) 77 | 78 | Create a new KMR instance. 79 | 80 | # Arguments 81 | 82 | - `N::Integer` : The number of players. 83 | - `payoff_array::Matrix` : The payoff array for each player. 84 | - `epsilon::Float64` : The probability of strategy flips. 85 | 86 | # Returns 87 | 88 | - `::KMR` : The Kandori Mailath Rob model. 
89 | """
90 | function KMR(payoff_array::Matrix{T},
91 |              N::Integer,
92 |              epsilon::Float64) where {T<:Real}
93 |     num_actions = size(payoff_array, 1)
94 |     if num_actions != size(payoff_array, 2)
95 |         throw(ArgumentError("Payoff array must be square"))
96 |     end
97 |     return KMR(N, Player(payoff_array), num_actions, epsilon)
98 | end
99 | 
100 | """
101 |     SamplingBRD
102 | 
103 | Type representing the sampling best response dynamics model.
104 | 
105 | # Fields
106 | 
107 | - `N::Int` : The number of players.
108 | - `player::Player` : `Player` instance in the model.
109 | - `num_actions::Int` : The number of actions for players.
110 | - `k::Int` : Sample size.
111 | """
112 | struct SamplingBRD{T<:Real} <: AbstractBRD{T}
113 |     N::Int
114 |     player::Player{2,T}
115 |     num_actions::Int
116 |     k::Int  # sample size
117 | end
118 | 
119 | """
120 |     SamplingBRD(payoff_array, N, k)
121 | 
122 | Create a new SamplingBRD instance.
123 | 
124 | # Arguments
125 | 
126 | - `payoff_array::Matrix` : Payoff array for a player.
127 | - `N::Integer` : The number of players.
128 | - `k::Integer` : Sample size.
129 | 
130 | # Returns
131 | 
132 | - `::SamplingBRD` : The sampling best response dynamics model.
133 | """
134 | function SamplingBRD(payoff_array::Matrix{T},
135 |                      N::Integer,
136 |                      k::Integer) where {T<:Real}
137 |     num_actions = size(payoff_array, 1)
138 |     if num_actions != size(payoff_array, 2)
139 |         throw(ArgumentError("Payoff array must be square"))
140 |     end
141 |     return SamplingBRD(N, Player(payoff_array), num_actions, k)
142 | end
143 | 
144 | 
145 | # play!
146 | 
147 | function play!(rng::AbstractRNG,
148 |                brd::BRD,
149 |                action::Integer,
150 |                action_dist::Vector{<:Integer},
151 |                options::BROptions=BROptions())
152 |     action_dist[action] -= 1
153 |     next_action = best_response(brd.player, action_dist, options)
154 |     action_dist[next_action] += 1
155 |     return action_dist
156 | end
157 | 
158 | function play!(rng::AbstractRNG,
159 |                brd::KMR,
160 |                action::Integer,
161 |                action_dist::Vector{<:Integer},
162 |                options::BROptions=BROptions())
163 |     action_dist[action] -= 1
164 |     if rand(rng) <= brd.epsilon
165 |         next_action = rand(rng, 1:brd.num_actions)
166 |     else
167 |         next_action = best_response(brd.player, action_dist, options)
168 |     end
169 |     action_dist[next_action] += 1
170 |     return action_dist
171 | end
172 | 
173 | function play!(rng::AbstractRNG,
174 |                brd::SamplingBRD,
175 |                action::Integer,
176 |                action_dist::Vector{<:Integer},
177 |                options::BROptions=BROptions())
178 |     action_dist[action] -= 1
179 |     actions = sample(rng, 1:brd.num_actions, Weights(action_dist), brd.k)  # pass rng for reproducibility
180 |     sample_action_dist = zeros(Int, brd.num_actions)
181 |     for a in actions
182 |         sample_action_dist[a] += 1
183 |     end
184 |     next_action = best_response(brd.player, sample_action_dist, options)
185 |     action_dist[next_action] += 1
186 |     return action_dist
187 | end
188 | 
189 | @doc """
190 |     play!([rng=Random.GLOBAL_RNG, ]brd, action, action_dist[, options=BROptions()])
191 | 
192 | Update an action distribution given a specified action.
193 | 
194 | # Arguments
195 | 
196 | - `rng::AbstractRNG` : Random number generator used.
197 | - `brd::AbstractBRD` : `AbstractBRD` instance.
198 | - `action::Integer` : A specified action.
199 | - `action_dist::Vector{<:Integer}` : The distribution of players' actions.
200 | - `options::BROptions` : Options for `best_response` method.
201 | 
202 | # Returns
203 | 
204 | - `action_dist::Vector{<:Integer}` : Updated `action_dist`.
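
# Examples

A sketch of a single update step (editor's example with hypothetical payoffs;
the plain `BRD` update is deterministic, so the `rng` argument is not consulted
here):

```julia
julia> using Random

julia> brd = BRD([4 0; 2 3], 4);  # 4 players, 2 actions

julia> play!(Random.GLOBAL_RNG, brd, 1, [2, 2])
2-element Vector{Int64}:
 1
 3
```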
205 | """
206 | 
207 | 
208 | # play
209 | 
210 | """
211 |     play([rng=Random.GLOBAL_RNG, ]brd, init_action_dist[, options=BROptions(); num_reps=1])
212 | 
213 | Return the action distribution after `num_reps` iterations.
214 | 
215 | # Arguments
216 | 
217 | - `rng::AbstractRNG` : Random number generator used.
218 | - `brd::AbstractBRD` : `AbstractBRD` instance.
219 | - `init_action_dist::Vector{<:Integer}` : The initial distribution of players' actions.
220 | - `options::BROptions` : Options for `best_response` method.
221 | - `num_reps::Integer` : The number of iterations.
222 | 
223 | # Returns
224 | 
225 | - `::Vector{<:Integer}` : The action distribution after iterations.
226 | """
227 | function play(rng::AbstractRNG,
228 |               brd::AbstractBRD,
229 |               init_action_dist::Vector{<:Integer},
230 |               options::BROptions=BROptions();
231 |               num_reps::Integer=1)
232 |     if length(init_action_dist) != brd.num_actions
233 |         throw(ArgumentError("The length of init_action_dist must be the number
234 |                              of actions"))
235 |     end
236 |     if sum(init_action_dist) != brd.N
237 |         throw(ArgumentError("The sum of init_action_dist must be the number of
238 |                              players"))
239 |     end
240 | 
241 |     player_ind_seq = rand(rng, 1:brd.N, num_reps)
242 |     for t in 1:num_reps
243 |         action = searchsortedfirst(accumulate(+, init_action_dist),
244 |                                    player_ind_seq[t])
245 |         init_action_dist = play!(rng, brd, action, init_action_dist, options)
246 |     end
247 |     return init_action_dist
248 | end
249 | 
250 | play(brd::AbstractBRD, init_action_dist::Vector{<:Integer},
251 |      options::BROptions=BROptions(); num_reps::Integer=1) =
252 |     play(Random.GLOBAL_RNG, brd, init_action_dist, options, num_reps=num_reps)
253 | 
254 | 
255 | # time_series!
256 | 
257 | """
258 |     time_series!(rng, brd, out, player_ind_seq, options)
259 | 
260 | Update the matrix `out` which is used in `time_series` method given a player
261 | index sequence.
262 | 
263 | # Arguments
264 | 
265 | - `rng::AbstractRNG` : Random number generator used.
266 | - `brd::AbstractBRD` : Instance of the model.
267 | - `out::Matrix{<:Integer}` : Matrix representing the time series of action
268 |   profiles.
269 | - `player_ind_seq::Vector{<:Integer}` : Vector of player indices.
270 | - `options::BROptions` : Options for `best_response` method.
271 | 
272 | # Returns
273 | 
274 | - `out::Matrix{<:Integer}` : Updated `out`.
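
# Examples

A sketch with a fixed player index sequence (editor's example with hypothetical
payoffs; deterministic here, since the plain `BRD` update never draws from
`rng`):

```julia
julia> using Random

julia> brd = BRD([4 0; 2 3], 4);

julia> out = Matrix{Int}(undef, 2, 3); out[:, 1] = [2, 2];

julia> time_series!(Random.GLOBAL_RNG, brd, out, [1, 3], BROptions())
2×3 Matrix{Int64}:
 2  1  1
 2  3  3
```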
275 | """ 276 | function time_series!(rng::AbstractRNG, 277 | brd::AbstractBRD, 278 | out::Matrix{<:Integer}, 279 | player_ind_seq::Vector{<:Integer}, 280 | options::BROptions) 281 | ts_length = size(out, 2) 282 | action_dist = [out[i,1] for i in 1:brd.num_actions] 283 | for t in 1:ts_length-1 284 | action = searchsortedfirst(accumulate(+, action_dist), player_ind_seq[t]) 285 | action_dist = play!(rng, brd, action, action_dist, options) 286 | for i in 1:brd.num_actions 287 | out[i,t+1] = action_dist[i] 288 | end 289 | end 290 | return out 291 | end 292 | 293 | 294 | # time_series 295 | 296 | function time_series(rng::AbstractRNG, 297 | brd::AbstractBRD, 298 | ts_length::Integer, 299 | init_action_dist::Vector{<:Integer}, 300 | options::BROptions=BROptions()) 301 | if length(init_action_dist) != brd.num_actions 302 | throw(ArgumentError("The length of init_action_dist must be the number 303 | of actions")) 304 | end 305 | if sum(init_action_dist) != brd.N 306 | throw(ArgumentError("The sum of init_action_dist must be the number of 307 | players")) 308 | end 309 | 310 | player_ind_seq = rand(rng, 1:brd.N, ts_length) 311 | out = Matrix{Int}(undef, brd.num_actions, ts_length) 312 | for i in 1:brd.num_actions 313 | out[i, 1] = init_action_dist[i] 314 | end 315 | time_series!(rng, brd, out, player_ind_seq, options) 316 | end 317 | 318 | time_series(brd::AbstractBRD, ts_length::Integer, 319 | init_action_dist::Vector{<:Integer}, 320 | options::BROptions=BROptions()) = 321 | time_series(Random.GLOBAL_RNG, brd, ts_length, init_action_dist, options) 322 | 323 | function time_series(rng::AbstractRNG, 324 | brd::AbstractBRD, 325 | ts_length::Integer, 326 | options::BROptions=BROptions()) 327 | player_ind_seq = rand(rng, 1:brd.N, ts_length) 328 | nums_actions = ntuple(i -> brd.num_actions, brd.N) 329 | init_actions = random_pure_actions(rng, nums_actions) 330 | action_dist = zeros(Int, brd.num_actions) 331 | for i in 1:brd.N 332 | action_dist[init_actions[i]] += 1 333 | end 334 | time_series(rng, brd, ts_length, action_dist, options) 335 | end 336 | 337 | time_series(brd::AbstractBRD, ts_length::Integer, 338 | options::BROptions=BROptions()) = 339 | time_series(Random.GLOBAL_RNG, brd, ts_length, options) 340 | 341 | @doc """ 342 | time_series([rng=Random.GLOBAL_RNG, ]brd, ts_length, init_action_dist[, options=BROptions()]) 343 | 344 | Return the time series of action distribution. 345 | 346 | # Arguments 347 | 348 | - `rng::AbstractRNG` : Random number generator used. 349 | - `brd::AbstractBRD` : `AbstractBRD` instance. 350 | - `ts_length::Integer` : The length of time series. 351 | - `init_action_dist::Vector{<:Integer}` : Initial action distribution. If not 352 | provided, it is selected randomly. 353 | - `options::BROptions` : Options for `best_response` method. 354 | 355 | # Returns 356 | 357 | - `::Matrix{<:Integer}` : The time series of action distributions. 358 | """ 359 | -------------------------------------------------------------------------------- /src/localint.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Tools for local interaction model 3 | 4 | =# 5 | 6 | using SparseArrays 7 | 8 | 9 | # AbstractRevision # 10 | 11 | """ 12 | AbstractRevision 13 | 14 | Abstract type representing revision method. 15 | """ 16 | abstract type AbstractRevision end 17 | 18 | """ 19 | AsynchronousRevision 20 | 21 | Type representing an asynchronous revision. 
22 | """ 23 | struct AsynchronousRevision <: AbstractRevision end 24 | 25 | """ 26 | SimultaneousRevision 27 | 28 | Type representing a simultaneous revision. 29 | """ 30 | struct SimultaneousRevision <: AbstractRevision end 31 | 32 | 33 | # LocalInteraction 34 | 35 | """ 36 | LocalInteraction{N, T, S, A, TR} 37 | 38 | Type representing the local interaction model with N players. 39 | 40 | # Fields 41 | 42 | - `players::NTuple{N,Player{2,T}}` : Tuple of `Player` instances. 43 | - `num_actions::Integer` : The number of actions for players. 44 | - `adj_matrix::Array{S,2}` : Adjacency matrix of the graph in the model. 45 | - `revision<:AbstractRevision` : The way to revise the action profile. 46 | """ 47 | struct LocalInteraction{N,T<:Real,S<:Real,A<:Integer,TR<:AbstractRevision} 48 | players::NTuple{N,Player{2,T}} 49 | num_actions::Int 50 | adj_matrix::SparseMatrixCSC{S,A} 51 | revision::TR 52 | end 53 | 54 | """ 55 | LocalInteraction(g, adj_matrix[, revision=SimultaneousRevision()]) 56 | 57 | Construct a `LocalInteraction` instance. 58 | 59 | # Arguments 60 | 61 | - `g::NormalFormGame` : The game used in the model. 62 | - `adj_matrix::AbstractMatrix` : Adjacency matrix of the graph in the model. 63 | - `revision::AbstractRevision` : Arguments to specify the revision method; 64 | `SimultaneousRevision()` or `AsynchronousRevision()`. 65 | 66 | # Returns 67 | 68 | - `::LocalInteraction` : The local interaction model. 69 | """ 70 | function LocalInteraction(g::NormalFormGame{2,T}, 71 | adj_matrix::AbstractMatrix{S}, 72 | revision::AbstractRevision=SimultaneousRevision() 73 | ) where {T<:Real,S<:Real} 74 | if size(adj_matrix, 1) != size(adj_matrix, 2) 75 | throw(ArgumentError("Adjacency matrix must be square")) 76 | end 77 | N = size(adj_matrix, 1) 78 | players = ntuple(i -> g.players[1], N) 79 | num_actions = g.nums_actions[1] 80 | if num_actions != g.nums_actions[2] 81 | throw(ArgumentError("Payoff matrix must be square")) 82 | end 83 | sparse_adj = sparse(adj_matrix)::SparseMatrixCSC{S} 84 | return LocalInteraction(players, num_actions, sparse_adj, revision) 85 | end 86 | 87 | """ 88 | LocalInteraction(payoff_matrix, 89 | adj_matrix[, revision=SimultaneousRevision()]) 90 | 91 | Construct a `LocalInteraction` instance. 92 | 93 | # Arguments 94 | 95 | - `payoff_matrix::Matrix` : The payoff matrix of the game. 96 | - `adj_matrix::AbstractMatrix` : Adjacency matrix of the graph in the model. 97 | - `revision::AbstractRevision` : Arguments to specify the revision method. 98 | `SimultaneousRevision()` or `AsynchronousRevision` 99 | 100 | # Returns 101 | 102 | - `::LocalInteraction` : The local interaction model. 103 | """ 104 | function LocalInteraction(payoff_matrix::Matrix{T}, 105 | adj_matrix::AbstractMatrix{S}, 106 | revision::AbstractRevision=SimultaneousRevision() 107 | ) where {T<:Real,S<:Real} 108 | N = size(adj_matrix, 1) 109 | if N != size(adj_matrix, 2) 110 | throw(ArgumentError("Adjacency matrix must be square")) 111 | end 112 | players = ntuple(i -> Player(payoff_matrix), N) 113 | num_actions = size(payoff_matrix, 1) 114 | if num_actions != size(payoff_matrix, 2) 115 | throw(ArgumentError("Payoff matrix must be square")) 116 | end 117 | sparse_adj = sparse(adj_matrix)::SparseMatrixCSC{S} 118 | return LocalInteraction(players, num_actions, sparse_adj, revision) 119 | end 120 | 121 | 122 | # play! 
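# Example usage of the `play!`/`play` methods defined below (editor's sketch
# with hypothetical values): a 2-action coordination game on a 3-cycle, where
# all players revise simultaneously.
#
#     li = LocalInteraction([4 0; 2 3], [0 1 1; 1 0 1; 1 1 0])
#     play(li, (1, 2, 2))  # simultaneous revision -> (2, 2, 2)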
123 | 
124 | function play!(li::LocalInteraction{N},
125 |                actions::Vector{<:Integer},
126 |                player_ind::AbstractVector{<:Integer},
127 |                options::BROptions) where N
128 |     actions_matrix = sparse(1:N, actions, ones(Int, N), N, li.num_actions)
129 |     opponent_action = li.adj_matrix[player_ind,:] * actions_matrix
130 |     for (k, i) in enumerate(player_ind)
131 |         actions[i] = best_response(li.players[i], Vector(opponent_action[k,:]),
132 |                                    options)
133 |     end
134 |     return actions
135 | end
136 | 
137 | play!(li::LocalInteraction, actions::Vector{<:Integer}, player_ind::Integer,
138 |       options::BROptions) = play!(li, actions, [player_ind], options)
139 | 
140 | play!(li::LocalInteraction{N}, actions::Vector{<:Integer},
141 |       options::BROptions) where {N} = play!(li, actions, 1:N, options)
142 | 
143 | function play!(li::LocalInteraction,
144 |                actions::Vector{<:Integer},
145 |                options::BROptions,
146 |                player_ind::Union{AbstractVector{<:Integer},Integer},
147 |                num_reps::Integer=1)
148 |     for t in 1:num_reps
149 |         play!(li, actions, player_ind, options)
150 |     end
151 |     return actions
152 | end
153 | 
154 | @doc """
155 |     play!(li, actions, options, player_ind[, num_reps=1])
156 | 
157 | Update an action profile `num_reps` times.
158 | 
159 | # Arguments
160 | 
161 | - `li::LocalInteraction` : `LocalInteraction` instance.
162 | - `actions::Vector{<:Integer}` : Action profile in the initial period.
163 | - `options::BROptions` : Options for `best_response` method.
164 | - `player_ind::Union{AbstractVector{<:Integer},Integer}` : Integer or vector of
165 |   integers representing the indices of the players to take an action.
166 | - `num_reps::Integer` : The number of iterations.
167 | 
168 | # Returns
169 | 
170 | - `actions::Vector{Int}` : Updated `actions`.
171 | """
172 | 
173 | 
174 | # play
175 | 
176 | function play(li::LocalInteraction{N},
177 |               actions::PureActionProfile,
178 |               player_ind::Union{AbstractVector{<:Integer},Integer},
179 |               options::BROptions=BROptions();
180 |               num_reps::Integer=1) where N
181 |     actions_vector = [i for i in actions]
182 |     actions_vector = play!(li, actions_vector, options, player_ind, num_reps)
183 |     new_actions = ntuple(i -> actions_vector[i], N)
184 |     return new_actions
185 | end
186 | 
187 | function play(li::LocalInteraction{N},
188 |               actions::PureActionProfile,
189 |               options::BROptions=BROptions();
190 |               num_reps::Integer=1) where N
191 |     play(li, actions, 1:N, options, num_reps=num_reps)
192 | end
193 | 
194 | @doc """
195 |     play(li, actions, player_ind[, options=BROptions(); num_reps=1])
196 | 
197 | Return the action profile after `num_reps` iterations.
198 | 
199 | # Arguments
200 | 
201 | - `li::LocalInteraction{N}` : `LocalInteraction` instance.
202 | - `actions::PureActionProfile` : Initial action of each player.
203 | - `player_ind::Union{AbstractVector{<:Integer},Integer}` : Integer or vector of
204 |   integers representing the indices of the players to take an action with
205 |   asynchronous revision.
206 | - `options::BROptions` : Options for `best_response` method.
207 | - `num_reps::Integer` : The number of iterations.
208 | 
209 | # Returns
210 | 
211 | - `::PureActionProfile` : Actions of the players after the iterations.
212 | """
213 | 
214 | 
215 | # time_series!
216 | 
217 | """
218 |     time_series!(li, out, options, player_ind_seq)
219 | 
220 | Update the matrix `out` which is used in `time_series` method given a player
221 | index sequence.
222 | 
223 | # Arguments
224 | 
225 | - `li::LocalInteraction{N}` : `LocalInteraction` instance.
226 | - `out::Matrix{<:Integer}` : Matrix representing a time series of action 227 | profiles. 228 | - `options::BROptions` : Options for `best_response` method. 229 | - `player_ind_seq::Vector{<:Integer}` : Vector representing the index of players 230 | to take an action. 231 | 232 | # Returns 233 | 234 | - `out::Matrix{<:Integer}` : Updated `out`. 235 | """ 236 | function time_series!(li::LocalInteraction{N}, 237 | out::Matrix{<:Integer}, 238 | options::BROptions, 239 | player_ind_seq::Vector{<:Integer}) where N 240 | ts_length = size(out, 2) 241 | if ts_length != length(player_ind_seq) + 1 242 | throw(ArgumentError("The length of `ts_length` and 243 | `player_ind_seq` are mismatched")) 244 | end 245 | 246 | actions = [out[i,1] for i in 1:N] 247 | for t in 2:ts_length 248 | play!(li, actions, options, player_ind_seq[t-1]) 249 | out[:,t] = actions 250 | end 251 | 252 | return out 253 | end 254 | 255 | """ 256 | time_series!(li, out, options) 257 | 258 | Update the matrix `out` which is used in `time_series` method. All players take 259 | their actions simultaneously. 260 | 261 | # Arguments 262 | 263 | - `li::LocalInteraction{N}` : `LocalInteraction` instance. 264 | - `out::Matrix{<:Integer}` : Matrix representing a time series of action 265 | profiles. 266 | - `options::BROptions` : Options for `best_response` method. 267 | 268 | # Returns 269 | 270 | - `out::Matrix{<:Integer}` : Updated `out`. 271 | """ 272 | function time_series!(li::LocalInteraction{N}, 273 | out::Matrix{<:Integer}, 274 | options::BROptions) where N 275 | ts_length = size(out, 2) 276 | actions = [out[i,1] for i in 1:N] 277 | for t in 2:ts_length 278 | play!(li, actions, options) 279 | out[:,t] = actions 280 | end 281 | 282 | return out 283 | end 284 | 285 | 286 | # time_series 287 | 288 | function time_series(rng::AbstractRNG, 289 | li::LocalInteraction{N}, 290 | ts_length::Integer, 291 | init_actions::PureActionProfile, 292 | player_ind_seq::Vector{<:Integer}, 293 | options::BROptions=BROptions()) where N 294 | out = Matrix{Int}(undef, N, ts_length) 295 | for i in 1:N 296 | out[i,1] = init_actions[i] 297 | end 298 | time_series!(li, out, options, player_ind_seq) 299 | end 300 | 301 | time_series(li::LocalInteraction, ts_length::Integer, 302 | init_actions::PureActionProfile, player_ind_seq::Vector{<:Integer}, 303 | options::BROptions=BROptions()) = 304 | time_series(Random.GLOBAL_RNG, li, ts_length, init_actions, player_ind_seq, 305 | options) 306 | 307 | function time_series(rng::AbstractRNG, 308 | li::LocalInteraction{N,T,S,A,TR}, 309 | ts_length::Integer, 310 | init_actions::PureActionProfile, 311 | options::BROptions=BROptions() 312 | ) where {N,T,S,A,TR<:SimultaneousRevision} 313 | out = Matrix{Int}(undef, N, ts_length) 314 | for i in 1:N 315 | out[i, 1] = init_actions[i] 316 | end 317 | time_series!(li, out, options) 318 | end 319 | 320 | time_series(li::LocalInteraction{N,T,S,A,TR}, 321 | ts_length::Integer, 322 | init_actions::PureActionProfile, 323 | options::BROptions=BROptions() 324 | ) where {N,T,S,A,TR<:SimultaneousRevision} = 325 | time_series(Random.GLOBAL_RNG, li, ts_length, init_actions, options) 326 | 327 | function time_series(rng::AbstractRNG, 328 | li::LocalInteraction{N,T,S,A,TR}, 329 | ts_length::Integer, 330 | init_actions::PureActionProfile, 331 | options::BROptions=BROptions() 332 | ) where {N,T,S,A,TR<:AsynchronousRevision} 333 | player_ind_seq = rand(rng, 1:N, ts_length-1) 334 | time_series(rng, li, ts_length, init_actions, player_ind_seq, options) 335 | end 336 | 337 | 
time_series(li::LocalInteraction{N,T,S,A,TR},
338 |             ts_length::Integer,
339 |             init_actions::PureActionProfile,
340 |             options::BROptions=BROptions()
341 |             ) where {N,T,S,A,TR<:AsynchronousRevision} =
342 |     time_series(Random.GLOBAL_RNG, li, ts_length, init_actions, options)
343 | 
344 | function time_series(rng::AbstractRNG,
345 |                      li::LocalInteraction{N},
346 |                      ts_length::Integer,
347 |                      player_ind_seq::Vector{<:Integer},
348 |                      options::BROptions=BROptions()) where N
349 |     nums_actions = ntuple(i -> li.num_actions, N)
350 |     init_actions = random_pure_actions(rng, nums_actions)
351 |     time_series(rng, li, ts_length, init_actions, player_ind_seq, options)
352 | end
353 | 
354 | time_series(li::LocalInteraction, ts_length::Integer,
355 |             player_ind_seq::Vector{<:Integer}, options::BROptions=BROptions()) =
356 |     time_series(Random.GLOBAL_RNG, li, ts_length, player_ind_seq, options)
357 | 
358 | function time_series(rng::AbstractRNG,
359 |                      li::LocalInteraction{N},
360 |                      ts_length::Integer,
361 |                      options::BROptions=BROptions()) where N
362 |     nums_actions = ntuple(i -> li.num_actions, N)
363 |     init_actions = random_pure_actions(rng, nums_actions)
364 |     time_series(rng, li, ts_length, init_actions, options)
365 | end
366 | 
367 | time_series(li::LocalInteraction, ts_length::Integer,
368 |             options::BROptions=BROptions()) =
369 |     time_series(Random.GLOBAL_RNG, li, ts_length, options)
370 | 
371 | @doc """
372 |     time_series([rng=Random.GLOBAL_RNG, ]li, ts_length, init_actions,
373 |                 player_ind_seq[, options=BROptions()])
374 | 
375 | Return the time series of action profiles.
376 | 
377 | # Arguments
378 | 
379 | - `rng::AbstractRNG` : Random number generator used.
380 | - `li::LocalInteraction{N}` : `LocalInteraction` instance.
381 | - `ts_length::Integer` : The length of time series.
382 | - `init_actions::PureActionProfile` : Action profile in the initial period. If
383 |   not provided, it is selected randomly.
384 | - `player_ind_seq::Vector{<:Integer}` : Vector of integers representing the
385 |   indices of the players to take an action with asynchronous revision. If not
386 |   provided, it is selected randomly.
387 | - `options::BROptions` : Options for `best_response` method.
388 | 
389 | # Returns
390 | 
391 | - `::Matrix{<:Integer}` : The time series of action profiles.
392 | """
393 | 
--------------------------------------------------------------------------------
/src/fictplay.jl:
--------------------------------------------------------------------------------
1 | #=
2 | Tools for fictitious play
3 | 
4 | =#
5 | 
6 | 
7 | # AbstractGain #
8 | 
9 | """
10 |     AbstractGain
11 | 
12 | Abstract type representing the gain in a fictitious play model.
13 | """
14 | abstract type AbstractGain end
15 | 
16 | """
17 |     DecreasingGain
18 | 
19 | Type representing a decreasing gain.
20 | """
21 | struct DecreasingGain <: AbstractGain end
22 | 
23 | """
24 |     ConstantGain
25 | 
26 | Type representing a constant gain.
27 | """
28 | struct ConstantGain{T<:Real} <: AbstractGain
29 |     size::T
30 | end
31 | 
32 | step_size(T::Type, gain::DecreasingGain, t::Integer) = one(T)/(t+1)
33 | step_size(T::Type, gain::ConstantGain, t::Integer) = T(gain.size)
34 | 
35 | 
36 | # AbstractFictitiousPlay #
37 | 
38 | """
39 |     AbstractFictitiousPlay
40 | 
41 | Abstract type representing a fictitious play model.
42 | """
43 | abstract type AbstractFictitiousPlay{N,T<:Real} end
44 | 
45 | """
46 |     FictitiousPlay{N, T, TG}
47 | 
48 | Type representing a fictitious play model with N players.
49 | 50 | # Fields 51 | 52 | - `players::NTuple{N,Player{N,T}}` : Tuple of `Player` instances. 53 | - `nums_actions::NTuple{N,Int}` : Tuple of the numbers of actions, one for 54 | each player. 55 | - `gain::TG<:AbstractGain` : Gain type. 56 | """ 57 | struct FictitiousPlay{N,T<:Real,TG<:AbstractGain} <: AbstractFictitiousPlay{N,T} 58 | players::NTuple{N,Player{N,T}} 59 | nums_actions::NTuple{N,Int} 60 | gain::TG 61 | end 62 | 63 | """ 64 | FictitiousPlay(g[, gain=DecreasingGain()]) 65 | 66 | Construct a `FictitiousPlay` instance from `NormalFormGame`. 67 | 68 | # Arguments 69 | 70 | - `g::NormalFormGame` : `NormalFormGame` instance. 71 | - `gain::AbstractGain` : Argument to specify the gain or step size; 72 | `DecreasingGain()` or `ConstantGain(size)`. 73 | 74 | # Returns 75 | 76 | - `::FictitiousPlay` : The fictitious play model. 77 | """ 78 | FictitiousPlay(g::NormalFormGame, gain::AbstractGain=DecreasingGain()) = 79 | FictitiousPlay(g.players, g.nums_actions, gain) 80 | 81 | """ 82 | FictitiousPlay(fp[, gain=fp.gain]) 83 | 84 | Construct a new `FictitiousPlay` instance from `fp`. 85 | 86 | # Arguments 87 | 88 | - `fp::AbstractFictitiousPlay` : `AbstractFictitiousPlay` instance. 89 | - `gain::AbstractGain` : Argument to specify the gain or step size. 90 | 91 | # Returns 92 | 93 | - `::FictitiousPlay` : The fictitious play model. 94 | """ 95 | FictitiousPlay(fp::AbstractFictitiousPlay, gain::AbstractGain=fp.gain) = 96 | FictitiousPlay(fp.players, fp.nums_actions, gain) 97 | 98 | """ 99 | StochasticFictitiousPlay{N, T, TG, TD} 100 | 101 | Type representing a stochastic fictitious play model with N players. 102 | 103 | # Fields 104 | 105 | - `players::NTuple{N,Player{N,T}}` : Tuple of `Player` instances. 106 | - `nums_actions::NTuple{N,Int}` : Tuple of the numbers of actions, one for 107 | each player. 108 | - `gain::TG<:AbstractGain` : Gain type. 109 | - `d::TD<:Distributions.Distribution` : `Distribution` instance from which 110 | payoff perturbations are drawn. 111 | """ 112 | struct StochasticFictitiousPlay{N,T<:Real,TD<:Distribution, 113 | TG<:AbstractGain} <: AbstractFictitiousPlay{N,T} 114 | players::NTuple{N,Player{N,T}} 115 | nums_actions::NTuple{N,Int} 116 | d::TD 117 | gain::TG 118 | end 119 | 120 | """ 121 | StochasticFictitiousPlay(g, d[, gain=DecreasingGain()]) 122 | 123 | Construct a `StochasticFictitiousPlay` instance. 124 | 125 | # Arguments 126 | 127 | - `g::NormalFormGame` : `NormalFormGame` instance. 128 | - `d::Distributions.Distribution` : `Distribution` instance from which payoff 129 | perturbations are drawn. 130 | - `gain::AbstractGain` : Argument to specify the gain or step size; 131 | `DecreasingGain()` or `ConstantGain(size)`. 132 | 133 | # Returns 134 | 135 | - `::StochasticFictitiousPlay` : The stochastic fictitious play model. 136 | """ 137 | StochasticFictitiousPlay(g::NormalFormGame, d::Distribution, 138 | gain::AbstractGain=DecreasingGain()) = 139 | StochasticFictitiousPlay(g.players, g.nums_actions, d, gain) 140 | 141 | """ 142 | StochasticFictitiousPlay(fp[, d=fp.d, gain=fp.gain]) 143 | 144 | Construct a new `StochasticFictitiousPlay` instance from `fp`. 145 | 146 | # Arguments 147 | 148 | - `fp::AbstractFictitiousPlay` : `AbstractFictitiousPlay` instance. 149 | - `d::Distributions.Distribution` : `Distribution` instance from which payoff 150 | perturbations are drawn. 151 | - `gain::AbstractGain` : Argument to specify the gain or step size. 152 | 153 | # Returns 154 | 155 | - `::StochasticFictitiousPlay` : The stochastic fictitious play model. 
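
# Examples

A construction sketch (editor's example; the payoffs are hypothetical, and
`Normal()` comes from Distributions.jl, which supplies the perturbation
distribution):

```julia
julia> using Distributions

julia> g = NormalFormGame([Player([4 0; 3 2]), Player([4 0; 3 2])]);

julia> sfp = StochasticFictitiousPlay(g, Normal());

julia> sfp_const = StochasticFictitiousPlay(sfp, ConstantGain(0.1));  # same model, constant gain
```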
156 | """ 157 | StochasticFictitiousPlay(fp::AbstractFictitiousPlay, d::Distribution, 158 | gain::AbstractGain=fp.gain) = 159 | StochasticFictitiousPlay(fp.players, fp.nums_actions, d, gain) 160 | StochasticFictitiousPlay(fp::StochasticFictitiousPlay, 161 | gain::AbstractGain=fp.gain) = 162 | StochasticFictitiousPlay(fp.players, fp.nums_actions, fp.d, gain) 163 | 164 | 165 | # play! 166 | 167 | function play!(rng::AbstractRNG, 168 | fp::FictitiousPlay{N}, 169 | actions::MixedActionProfile{N,TA}, 170 | options::BROptions, 171 | brs::Vector{Int}, t::Integer) where {N,TA<:Real} 172 | for i in 1:N 173 | opponents_actions = 174 | tuple(actions[i+1:end]..., actions[1:i-1]...) 175 | brs[i] = best_response(fp.players[i], opponents_actions, options) 176 | end 177 | 178 | for i in 1:N 179 | actions[i] .*= 1 - step_size(TA, fp.gain, t) 180 | actions[i][brs[i]] += step_size(TA, fp.gain, t) 181 | end 182 | 183 | return actions 184 | end 185 | 186 | function play!(rng::AbstractRNG, 187 | fp::StochasticFictitiousPlay{N}, 188 | actions::MixedActionProfile{N,TA}, 189 | options::BROptions, 190 | brs::Vector{Int}, t::Integer) where {N,TA<:Real} 191 | for i in 1:N 192 | opponents_actions = 193 | tuple(actions[i+1:end]..., actions[1:i-1]...) 194 | perturbations = rand(rng, fp.d, fp.nums_actions[i]) 195 | brs[i] = best_response(fp.players[i], opponents_actions, perturbations) 196 | end 197 | 198 | for i in 1:N 199 | actions[i] .*= 1 - step_size(TA, fp.gain, t) 200 | actions[i][brs[i]] += step_size(TA, fp.gain, t) 201 | end 202 | 203 | return actions 204 | end 205 | 206 | play!(fp::StochasticFictitiousPlay{N}, actions::MixedActionProfile{N,TA}, 207 | options::BROptions, brs::Vector{Int}, t::Integer) where {N,TA<:Real} = 208 | play!(Random.GLOBAL_RNG, fp, actions, options, brs, t) 209 | 210 | function play!(rng::AbstractRNG, 211 | fp::AbstractFictitiousPlay{N}, 212 | actions::MixedActionProfile{N,TA}, 213 | options::BROptions=BROptions(); 214 | num_reps::Integer=1, t_init::Integer=1) where {N,TA<:Real} 215 | brs = Vector{Int}(undef, N) 216 | for t in t_init:(t_init+num_reps-1) 217 | play!(rng, fp, actions, options, brs, t) 218 | end 219 | return actions 220 | end 221 | 222 | @doc """ 223 | play!(rng, fp, actions[, options=BROptions(); num_reps=1, t_init=1]) 224 | 225 | Update action profile `num_reps` times. 226 | 227 | # Arguments 228 | 229 | - `rng::AbstractRNG` : Random number generator used. 230 | - `fp::AbstractFictitiousPlay{N}` : `AbstractFictitiousPlay` instance. 231 | - `actions::MixedActionProfile{N,TA}` : Mixed action profile for each player. 232 | - `options::BROptions` : Options for `best_response` method. 233 | - `num_reps::Integer` : The number of iterations. 234 | - `t_init::Integer` : The period when the iteration starts. 235 | 236 | # Returns 237 | 238 | - `actions::MixedActionProfile` : Updated mixed action profile. 
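
# Examples

A sketch of one step from the uniform profile (editor's example with a
hypothetical symmetric coordination game; with `DecreasingGain`, the step size
at `t = 1` is `1/2`):

```julia
julia> using Random

julia> g = NormalFormGame([Player([4 0; 3 2]), Player([4 0; 3 2])]);

julia> fp = FictitiousPlay(g);

julia> play!(Random.GLOBAL_RNG, fp, ([0.5, 0.5], [0.5, 0.5]))
([0.25, 0.75], [0.25, 0.75])
```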
239 | """ 240 | 241 | 242 | # play 243 | 244 | function play(rng::AbstractRNG, 245 | fp::AbstractFictitiousPlay{N}, 246 | actions::MixedActionProfile{N,TA}, 247 | options::BROptions=BROptions(); 248 | num_reps::Integer=1, t_init::Integer=1) where {N,TA<:Real} 249 | Tout = typeof(zero(TA)/one(TA)) 250 | actions_copied::NTuple{N,Vector{Tout}} = 251 | ntuple(i -> copyto!(similar(actions[i], Tout), actions[i]), N) 252 | play!(rng, fp, actions_copied, options, num_reps=num_reps, t_init=t_init) 253 | end 254 | 255 | play(fp::AbstractFictitiousPlay, actions::MixedActionProfile, 256 | options::BROptions=BROptions(); num_reps::Integer=1, t_init::Integer=1) = 257 | play(Random.GLOBAL_RNG, fp, actions, options, 258 | num_reps=num_reps, t_init=t_init) 259 | 260 | function play(rng::AbstractRNG, 261 | fp::AbstractFictitiousPlay{N}, 262 | actions::PureActionProfile{N}, 263 | options::BROptions=BROptions(); 264 | num_reps::Integer=1, t_init::Integer=1) where {N} 265 | mixed_actions = ntuple(i -> pure2mixed(fp.nums_actions[i], actions[i]), N) 266 | play!(rng, fp, mixed_actions, options, num_reps=num_reps, t_init=t_init) 267 | end 268 | 269 | play(fp::AbstractFictitiousPlay, actions::PureActionProfile, 270 | options::BROptions=BROptions(); num_reps::Integer=1, t_init::Integer=1) = 271 | play(Random.GLOBAL_RNG, fp, actions, options, 272 | num_reps=num_reps, t_init=t_init) 273 | 274 | function play(rng::AbstractRNG, 275 | fp::AbstractFictitiousPlay{N}, 276 | options::BROptions=BROptions(); 277 | num_reps::Integer=1, t_init::Integer=1) where {N} 278 | play!(rng, fp, random_mixed_actions(rng, fp.nums_actions), options, 279 | num_reps=num_reps, t_init=t_init) 280 | end 281 | 282 | play(fp::AbstractFictitiousPlay, options::BROptions=BROptions(); 283 | num_reps::Integer=1, t_init::Integer=1) = 284 | play(Random.GLOBAL_RNG, fp, options, num_reps=num_reps, t_init=t_init) 285 | 286 | @doc """ 287 | play([rng=Random.GLOBAL_RNG, ]fp, actions[, options=BROptions(); 288 | num_reps=1, t_init=1]) 289 | 290 | Return a new action profile after `num_reps` times iterations. 291 | 292 | # Arguments 293 | 294 | - `rng::AbstractRNG` : Random number generator used. 295 | - `actions::ActionProfile` : Action profile used in the initial period; 296 | `PureActionProfile`, `MixedActionProfile`, or nothing. If nothing, mixed 297 | action profile is randomly selected. 298 | - `fp::AbstractFictitiousPlay{N}` : `AbstractFictitiousPlay` instance. 299 | - `options::BROptions` : Options for `best_response` method. 300 | - `num_reps::Integer` : The number of the iterations. 301 | - `t_init::Integer` : The period when the iteration starts. 302 | 303 | # Returns 304 | 305 | - `::MixedActionProfile` : The new action profile after iterations. 306 | """ 307 | 308 | # time_series! 309 | 310 | """ 311 | time_series!(rng, fp, out[, options=BROptions(); t_init=1]) 312 | 313 | Update the tuple of matrices `out` which is used in `time_series` method. 314 | 315 | # Arguments 316 | 317 | - `rng::AbstractRNG` : Random number generator used. 318 | - `fp::AbstractFictitiousPlay{N}` : `AbstractFictitiousPlay` instance. 319 | - `out::NTuple{N,Matrix{<:Real}}` : Tuple of matrices which represent the time 320 | series of mixed action profile. 321 | - `options::BROptions` : Options for `best_response`. 322 | - `t_init::Integer` : The period when the iteration starts. 323 | 324 | # Returns 325 | 326 | - `out::NTuple{N,Matrix{<:Real}}` : Updated `out`. 
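
# Examples

A sketch assuming the hypothetical game from the `play!` example above
(deterministic for plain `FictitiousPlay`; columns hold the mixed actions at
`t = 1, 2, 3`, rounded for display):

```julia
julia> using Random

julia> fp = FictitiousPlay(NormalFormGame([Player([4 0; 3 2]), Player([4 0; 3 2])]));

julia> out = ntuple(i -> Matrix{Float64}(undef, 2, 3), 2);

julia> for i in 1:2 out[i][:, 1] = [0.5, 0.5] end

julia> time_series!(Random.GLOBAL_RNG, fp, out);

julia> round.(out[1], digits=4)
2×3 Matrix{Float64}:
 0.5  0.25  0.1667
 0.5  0.75  0.8333
```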
327 | """ 328 | function time_series!(rng::AbstractRNG, 329 | fp::AbstractFictitiousPlay{N}, 330 | out::NTuple{N,Matrix{<:Real}}, 331 | options::BROptions=BROptions(); 332 | t_init::Integer=1) where {N} 333 | ts_length = size(out[1], 2) 334 | actions = ntuple(i -> out[i][:, 1], N) 335 | brs = Vector{Int}(undef, N) 336 | 337 | for j in 2:ts_length 338 | play!(rng, fp, actions, options, brs, t_init - 1 + j - 1) 339 | for i in 1:N 340 | out[i][:, j] = actions[i] 341 | end 342 | end 343 | 344 | return out 345 | end 346 | 347 | 348 | # time_series 349 | 350 | function _copy_action_to!(dest::AbstractVector, src::MixedAction) 351 | dest[:] = src 352 | return dest 353 | end 354 | 355 | function _copy_action_to!(dest::AbstractVector, src::PureAction) 356 | dest .= 0 357 | dest[src] = 1 358 | return dest 359 | end 360 | 361 | for (ex_TAS, ex_where, ex_T) in ( 362 | (:(MixedActionProfile{N,TA}), (:(N), :(T<:Real), :(TA<:Real)), :(TA)), 363 | (:(PureActionProfile{N}), (:(N), :(T<:Real)), :(T)) 364 | ) 365 | @eval function time_series(rng::AbstractRNG, 366 | fp::AbstractFictitiousPlay{N,T}, 367 | ts_length::Integer, 368 | init_actions::$(ex_TAS), 369 | options::BROptions=BROptions(); 370 | t_init::Integer=1) where $(ex_where...) 371 | Tout = typeof(zero($(ex_T))/one($(ex_T))) 372 | out::NTuple{N,Matrix{Tout}} = 373 | ntuple(i -> Matrix{Tout}(undef, fp.nums_actions[i], ts_length), N) 374 | for i in 1:N 375 | _copy_action_to!(@views(out[i][:, 1]), init_actions[i]) 376 | end 377 | time_series!(rng, fp, out, options, t_init=t_init) 378 | end 379 | end 380 | 381 | time_series(fp::AbstractFictitiousPlay, ts_length::Integer, 382 | init_actions::ActionProfile, options::BROptions=BROptions(); 383 | t_init::Integer=1) = 384 | time_series(Random.GLOBAL_RNG, fp, ts_length, init_actions, options, 385 | t_init=t_init) 386 | 387 | function time_series(rng::AbstractRNG, 388 | fp::AbstractFictitiousPlay{N}, 389 | ts_length::Integer, 390 | options::BROptions=BROptions(); 391 | t_init::Integer=1) where {N} 392 | time_series(rng, fp, ts_length, random_mixed_actions(rng, fp.nums_actions), 393 | options, t_init=t_init) 394 | end 395 | 396 | time_series(fp::AbstractFictitiousPlay, ts_length::Integer, 397 | options::BROptions=BROptions(); t_init::Integer=1) = 398 | time_series(Random.GLOBAL_RNG, fp, ts_length, options, t_init=t_init) 399 | 400 | @doc """ 401 | time_series([rng=Random.GLOBAL_RNG, ]fp, ts_length, init_actions 402 | [, options=BROptions(); t_init=1]) 403 | 404 | Return a time series of mixed action profiles. 405 | 406 | # Arguments 407 | 408 | - `rng::AbstractRNG` : Random number generator. 409 | - `fp::AbstractFictitiousPlay{N,T}` : `AbstractFictitiousPlay` instance. 410 | - `ts_length::Integer` : The length of the time series. 411 | - `init_actions::ActionProfile` : Action profile used in the initial period; 412 | `PureActionProfile`, `MixedActionProfile`, or nothing. If nothing, mixed 413 | action profile is randomly selected. 414 | - `options::BROptions` : Options for `best_response` method. 415 | - `t_init::Integer` : The period when the iteration starts. 416 | 417 | # Returns 418 | 419 | - `::NTuple{N,Matrix{<:Real}}` : The time series of mixed action profiles. 420 | """ 421 | -------------------------------------------------------------------------------- /src/lemke_howson.jl: -------------------------------------------------------------------------------- 1 | #= 2 | Compute mixed Nash equilibria of a 2-player normal form game by the 3 | Lemke-Howson algorithm. 
4 | =# 5 | 6 | using QuantEcon: _pivoting!, _lex_min_ratio_test! 7 | 8 | """ 9 | LHResult 10 | 11 | # Fields 12 | 13 | - `NE::NTuple{2,Vector}`: Computed Nash equilibrium. 14 | - `converged::Bool`: Whether the routine has converged. 15 | - `num_iter::Int`: Number of iterations. 16 | - `max_iter::Int`: Maximum number of iterations. 17 | - `init::Int`: Initial condition used. 18 | """ 19 | struct LHResult{T<:Real} 20 | NE::NTuple{2,Vector{T}} 21 | converged::Bool 22 | num_iter::Int 23 | max_iter::Int 24 | init::Int 25 | end 26 | 27 | 28 | """ 29 | lemke_howson(g; init_pivot=1, max_iter=10^6, capping=nothing, 30 | full_output=Val(false)) 31 | 32 | Find one mixed-action Nash equilibrium of a 2-player normal-form game by 33 | the Lemke–Howson algorithm (Lemke and Howson, 1964), implemented with 34 | "complementary pivoting" (see, e.g., von Stengel, 2007 for details). 35 | 36 | # Arguments 37 | 38 | - `g::NormalFormGame{2,T}`: 2-player NormalFormGame instance. 39 | - `init_pivot::Int`: Initial pivot, an integer `k` such that `1 <= k <= m+n`, 40 | where integers `1, ..., m`, and `m+1, ..., m+n` correspond to the actions 41 | of players 1 and 2, respectively. 42 | - `max_iter::Int`: Maximum number of pivoting steps. 43 | - `capping::Union{Int,Nothing}`: If supplied, the routine is executed 44 | with the heuristic proposed by Codenotti et al. (2008); see Notes below 45 | for details. 46 | - `full_output::Union{Val{true},Val{false}}`: If `Val(false)`, only the 47 | computed Nash equilibrium is returned. If `Val(true)`, the return value 48 | is `(NE, res)`, where `NE` is the Nash equilibrium and `res` is a `LHResult` 49 | object. 50 | 51 | # Returns 52 | 53 | - `NE::NTuple{2,Vector{S}}`: Tuple of computed Nash equilibrium mixed 54 | actions, where the type `S` is determined by `S = float(T)`. 55 | - `res::LHResult`: Object containing information about the computation. 56 | Returned only when `full_output` is `Val(true)`. See `LHResult` for details. 57 | 58 | # Examples 59 | 60 | Consider the following game from von Stengel (2007): 61 | 62 | ```julia 63 | julia> Base.active_repl.options.iocontext[:compact] = true; # Reduce digits to display 64 | 65 | julia> player1 = Player([3 3; 2 5; 0 6]); 66 | 67 | julia> player2 = Player([3 2 3; 2 6 1]); 68 | 69 | julia> g = NormalFormGame(player1, player2); 70 | 71 | julia> println(g) 72 | 3×2 NormalFormGame{2, Int64}: 73 | [3, 3] [3, 2] 74 | [2, 2] [5, 6] 75 | [0, 3] [6, 1] 76 | ``` 77 | 78 | Obtain a Nash equilibrium of this game by `lemke_howson` with player 1's 79 | action 2 (out of the three actions 1, 2, and 3) as the initial pivot: 80 | 81 | ```julia 82 | julia> NE = lemke_howson(g, init_pivot=2) 83 | ([0.0, 0.333333, 0.666667], [0.333333, 0.666667]) 84 | 85 | julia> is_nash(g, NE) 86 | true 87 | ``` 88 | 89 | Additional information is returned if `full_output` is set `Val(true)`: 90 | 91 | ```julia 92 | julia> NE, res = lemke_howson(g, init_pivot=2, full_output=Val(true)); 93 | 94 | julia> res.converged # Whether the routine has converged 95 | true 96 | 97 | julia> res.num_iter # Number of pivoting steps performed 98 | 4 99 | ``` 100 | 101 | # Notes 102 | 103 | * This routine is implemented with floating-point arithmetic and thus is 104 | subject to numerical instability. 105 | 106 | * If `capping` is set to a positive integer, the routine is executed with 107 | the heuristic proposed by Codenotti et al. 
(2008): 108 | 109 | - For `k = init_pivot, init_pivot + 1, …, init_pivot + (m+n-2)` (wrapping 110 | modulo `m + n` within `1:m+n`), the Lemke-Howson algorithm is executed 111 | with `k` as the initial pivot and `capping` as the maximum number of 112 | pivoting steps. 113 | 114 | - Otherwise, the Lemke-Howson algorithm is executed with `init_pivot + 115 | (m+n-1)` (wrapping modulo `m + n` within `1:m+n`) as the initial pivot, 116 | with a limit `max_iter` on the total number of pivoting steps. 117 | 118 | According to the simulation results for *uniformly random games*, for 119 | medium- to large-size games this heuristic outperforms the basic 120 | Lemke-Howson algorithm with a fixed initial pivot, where Codenotti et al. 121 | suggest that `capping` be set to 10. 122 | 123 | # References 124 | 125 | * B. Codenotti, S. De Rossi, and M. Pagan, "An Experimental Analysis of 126 | Lemke-Howson Algorithm," arXiv:0811.3247, 2008. 127 | * C. E. Lemke and J. T. Howson, "Equilibrium Points of Bimatrix Games," 128 | Journal of the Society for Industrial and Applied Mathematics (1964), 129 | 413-423. 130 | * B. von Stengel, "Equilibrium Computation for Two-Player Games in Strategic 131 | and Extensive Form," Chapter 3, N. Nisan, T. Roughgarden, E. Tardos, and 132 | V. Vazirani eds., Algorithmic Game Theory, 2007. 133 | """ 134 | function lemke_howson(g::NormalFormGame{2,T}; 135 | init_pivot::Int=1, 136 | max_iter::Int=10^6, 137 | capping::Union{Int,Nothing} = nothing, 138 | full_output::Union{Val{true},Val{false}}=Val(false)) where T 139 | payoff_matrices = ntuple(i -> g.players[i].payoff_array, 2) 140 | nums_actions = g.nums_actions 141 | total_num = sum(nums_actions) 142 | 143 | if !(1 <= init_pivot <= total_num) 144 | throw(ArgumentError("`init_pivot` must satisfy 1 <= k <= $(total_num)")) 145 | end 146 | 147 | capping === nothing && (capping = max_iter) 148 | 149 | S = float(T) 150 | 151 | tableaux = ntuple(i -> Matrix{S}(undef, nums_actions[3-i], total_num+1), 2) 152 | bases = ntuple(i -> Vector{Int}(undef, nums_actions[3-i]), 2) 153 | 154 | converged, num_iter, init_pivot_used = 155 | _lemke_howson_capping!(payoff_matrices, tableaux, bases, init_pivot, 156 | max_iter, capping) 157 | NE = _get_mixed_actions(tableaux, bases) 158 | 159 | if full_output isa Val{false} 160 | return NE 161 | end 162 | 163 | res = LHResult(NE, converged, num_iter, max_iter, init_pivot_used) 164 | 165 | return NE, res 166 | end 167 | 168 | 169 | 170 | """ 171 | _lemke_howson_capping!(payoff_matrices, tableaux, bases, init_pivot, 172 | max_iter::Int, capping) 173 | 174 | Execute the Lemke–Howson algorithm with the heuristic proposed by 175 | Codenotti et al. 176 | 177 | # Arguments 178 | 179 | - `payoff_matrices::NTuple{2,Matrix}`: Tuple of two arrays representing 180 | payoff matrices, of shape `(m, n)` and `(n, m)`, respectively. 181 | - `tableaux::NTuple{2,Matrix}`: Tuple of two arrays to be used to store 182 | the tableaux, of shape `(n, m+n+1)` and `(m, m+n+1)`, respectively. 183 | Modified in place. 184 | - `bases::NTuple{2,Vector{Int}}`: Tuple of two arrays to be used to 185 | store the bases, of length `n` and `m`, respectively. Modified in 186 | place. 187 | - `init_pivot::Int`: Integer `k` such that `1 <= k <= m + n`. 188 | - `max_iter::Int`: Maximum number of pivoting steps. 189 | - `capping::Int`: Value for capping. If set equal to `max_iter`, the routine 190 | is equivalent to the standard Lemke–Howson algorithm. 
191 | 192 | # Returns 193 | 194 | - `converged::Bool`: Whether the pivoting terminated before `max_iter` was 195 | reached. 196 | - `total_num_iter::Int`: Total number of pivoting steps performed across runs. 197 | - `init_pivot_curr::Int`: The initial pivot used in the final run. 198 | """ 199 | function _lemke_howson_capping!(payoff_matrices::NTuple{2,Matrix}, 200 | tableaux::NTuple{2,Matrix{T}}, 201 | bases::NTuple{2,Vector{Int}}, 202 | init_pivot::Int, 203 | max_iter::Int, 204 | capping::Int) where {T<:AbstractFloat} 205 | total = size(tableaux[2], 1) + size(tableaux[1], 1) # m + n 206 | init_pivot_curr = init_pivot 207 | max_iter_curr = max_iter 208 | total_num_iter = 0 209 | 210 | for _ in 1:(total - 1) 211 | capping_curr = min(max_iter_curr, capping) 212 | 213 | _initialize_tableaux!(payoff_matrices, tableaux, bases) 214 | converged, num_iter = 215 | _lemke_howson_tbl!(tableaux, bases, init_pivot_curr, capping_curr) 216 | 217 | total_num_iter += num_iter 218 | 219 | if converged || total_num_iter >= max_iter 220 | return converged, total_num_iter, init_pivot_curr 221 | end 222 | 223 | init_pivot_curr += 1 224 | if init_pivot_curr > total 225 | init_pivot_curr -= total 226 | end 227 | max_iter_curr -= num_iter 228 | end 229 | 230 | _initialize_tableaux!(payoff_matrices, tableaux, bases) 231 | converged, num_iter = 232 | _lemke_howson_tbl!(tableaux, bases, init_pivot_curr, max_iter_curr) 233 | total_num_iter += num_iter 234 | 235 | return converged, total_num_iter, init_pivot_curr 236 | end 237 | 238 | 239 | """ 240 | _initialize_tableaux!(payoff_matrices, tableaux, bases) 241 | 242 | Given a tuple of payoff matrices, initialize the tableau and basis 243 | arrays in place. 244 | 245 | For each player `i`, if `minimum(payoff_matrices[i])` is non-positive, 246 | then stored in the tableau are payoff values incremented by 247 | `abs(minimum(payoff_matrices[i])) + 1` (to ensure the tableau does not 248 | have a negative entry or a column identically zero). 249 | 250 | Suppose that players 1 and 2 have `m` and `n` actions, respectively. 251 | 252 | * `tableaux[1]` has `n` rows and `m+n+1` columns, where columns `1:m` 253 | and `m+1:m+n` correspond to the non-slack and slack variables, 254 | respectively. 255 | 256 | * `tableaux[2]` has `m` rows and `m+n+1` columns, where columns `1:m` 257 | and `m+1:m+n` correspond to the slack and non-slack variables, 258 | respectively. 259 | 260 | * In each `tableaux[i]`, column `m+n+1` contains the values of the basic 261 | variables (which are initially `1`). 262 | 263 | * `bases[1]` and `bases[2]` contain basic variable indices, which are 264 | initially `m+1:m+n` and `1:m`, respectively. 265 | 266 | # Arguments 267 | 268 | - `payoff_matrices::NTuple{2,Matrix}`: Tuple of two arrays representing 269 | payoff matrices, of shape `(m, n)` and `(n, m)`, respectively. 270 | - `tableaux::NTuple{2,Matrix}`: Tuple of two arrays to be used to store 271 | the tableaux, of shape `(n, m+n+1)` and `(m, m+n+1)`, respectively. 272 | Modified in place. 273 | - `bases::NTuple{2,Vector{Int}}`: Tuple of two arrays to be used to 274 | store the bases, of length `n` and `m`, respectively. Modified in 275 | place. 
276 | 277 | # Returns 278 | 279 | - `tableaux, bases` 280 | 281 | # Examples 282 | 283 | ```julia 284 | julia> A = [3 3; 2 5; 0 6]; 285 | 286 | julia> B = [3 2 3; 2 6 1]; 287 | 288 | julia> m, n = size(A); 289 | 290 | julia> tableaux = (Matrix{Float64}(undef, (n, m+n+1)), 291 | Matrix{Float64}(undef, (m, m+n+1))); 292 | 293 | julia> bases = (Vector{Int}(undef, n), Vector{Int}(undef, m)); 294 | 295 | julia> tableaux, bases = _initialize_tableaux!((A, B), tableaux, bases); 296 | 297 | julia> tableaux[1] 298 | 2×6 Matrix{Float64}: 299 | 3.0 2.0 3.0 1.0 0.0 1.0 300 | 2.0 6.0 1.0 0.0 1.0 1.0 301 | 302 | julia> tableaux[2] 303 | 3×6 Matrix{Float64}: 304 | 1.0 0.0 0.0 4.0 4.0 1.0 305 | 0.0 1.0 0.0 3.0 6.0 1.0 306 | 0.0 0.0 1.0 1.0 7.0 1.0 307 | 308 | julia> bases 309 | ([4, 5], [1, 2, 3]) 310 | ``` 311 | """ 312 | function _initialize_tableaux!(payoff_matrices::NTuple{2,Matrix}, 313 | tableaux::NTuple{2,Matrix{T}}, 314 | bases::NTuple{2,Vector{Int}}) where T 315 | nums_actions = size(payoff_matrices[1]) 316 | 317 | consts = zeros(T, 2) # To be added to payoffs if min <= 0 318 | for pl in 1:2 319 | min_ = minimum(payoff_matrices[pl]) 320 | if min_ <= 0 321 | consts[pl] = -min_ + 1 322 | end 323 | end 324 | 325 | @inbounds for (pl, (py_start, sl_start)) in enumerate( 326 | ((0, nums_actions[1]), (nums_actions[1], 0)) 327 | ) 328 | for j in 1:nums_actions[pl] 329 | for i in 1:nums_actions[3-pl] 330 | tableaux[pl][i, py_start+j] = 331 | payoff_matrices[3-pl][i, j] + consts[3-pl] 332 | end 333 | end 334 | for j in 1:nums_actions[3-pl] 335 | for i in 1:nums_actions[3-pl] 336 | tableaux[pl][i, sl_start+j] = 0 337 | end 338 | tableaux[pl][j, sl_start+j] = 1 339 | end 340 | for i in 1:nums_actions[3-pl] 341 | tableaux[pl][i, end] = 1 342 | end 343 | 344 | for i in 1:nums_actions[3-pl] 345 | bases[pl][i] = sl_start + i 346 | end 347 | end 348 | 349 | return tableaux, bases 350 | end 351 | 352 | 353 | """ 354 | _lemke_howson_tbl!(tableaux, bases, init_pivot, max_iter) 355 | 356 | Main body of the Lemke-Howson algorithm implementation. 357 | 358 | Perform the complementary pivoting. Modify `tableaux` and `bases` in place. 359 | 360 | # Arguments 361 | 362 | - `tableaux::NTuple{2,Matrix}`: Tuple of two arrays containing the tableaux, 363 | of shape `(n, m+n+1)` and `(m, m+n+1)`, respectively. Modified in place. 364 | - `bases::NTuple{2,Vector{Int}}`: Tuple of two arrays containing the bases, 365 | of length `n` and `m`, respectively. Modified in place. 366 | - `init_pivot::Int`: Integer `k` such that `1 <= k <= m + n`. 367 | - `max_iter::Int`: Maximum number of pivoting steps. 368 | 369 | # Returns 370 | 371 | - `converged::Bool`: Whether the pivoting terminated before `max_iter` was 372 | reached. 373 | - `num_iter::Int`: Number of pivoting steps performed. 
374 | 375 | # Examples 376 | 377 | ```julia 378 | julia> A = [3 3; 2 5; 0 6]; 379 | 380 | julia> B = [3 2 3; 2 6 1]; 381 | 382 | julia> m, n = size(A); 383 | 384 | julia> tableaux = (Matrix{Float64}(undef, (n, m+n+1)), 385 | Matrix{Float64}(undef, (m, m+n+1))); 386 | 387 | julia> bases = (Vector{Int}(undef, n), Vector{Int}(undef, m)); 388 | 389 | julia> tableaux, bases = _initialize_tableaux!((A, B), tableaux, bases); 390 | 391 | julia> _lemke_howson_tbl!(tableaux, bases, 2, 10); 392 | 393 | julia> tableaux[1] 394 | 2×6 Matrix{Float64}: 395 | 0.875 0.0 1.0 0.375 -0.125 0.25 396 | 0.1875 1.0 0.0 -0.0625 0.1875 0.125 397 | 398 | julia> tableaux[2] 399 | 3×6 Matrix{Float64}: 400 | 1.0 -1.6 0.8 0.0 0.0 0.2 401 | 0.0 0.466667 -0.4 1.0 0.0 0.0666667 402 | 0.0 -0.0666667 0.2 0.0 1.0 0.133333 403 | 404 | julia> bases 405 | ([3, 2], [1, 4, 5]) 406 | ``` 407 | 408 | The outputs indicate that in the Nash equilibrium obtained, player 1's 409 | mixed action plays actions `3` and `2` with positive weights `0.25` and 410 | `0.125`, while player 2's mixed action plays actions `1` and `2` 411 | (labeled as `4` and `5`) with positive weights `0.0666667` and `0.133333`. 412 | """ 413 | function _lemke_howson_tbl!(tableaux::NTuple{2,Matrix{T}}, 414 | bases::NTuple{2,Vector{Int}}, 415 | init_pivot::Int, 416 | max_iter::Int) where {T<:AbstractFloat} 417 | init_player = 1 418 | for k in bases[1] 419 | if k == init_pivot 420 | init_player = 2 421 | break 422 | end 423 | end 424 | pls = (init_player, 3 - init_player) 425 | 426 | pivot = init_pivot 427 | 428 | m, n = (size(tableaux[2], 1), size(tableaux[1], 1)) 429 | slack_starts = (m+1, 1) 430 | 431 | # Workspaces 432 | col_bufs = ntuple(pl -> Vector{T}(undef, size(tableaux[pl], 1)), 2) 433 | argmins = Vector{Int}(undef, max(m, n)) 434 | 435 | converged = false 436 | num_iter = 0 437 | 438 | while true 439 | @inbounds for pl in pls 440 | # Determine the leaving variable 441 | _, row_min = _lex_min_ratio_test!(tableaux[pl], pivot, 442 | slack_starts[pl], argmins) 443 | 444 | # Pivoting step: modify tableau in place 445 | _pivoting!(tableaux[pl], pivot, row_min, col_bufs[pl]) 446 | 447 | # Update the basic variables and the pivot 448 | bases[pl][row_min], pivot = pivot, bases[pl][row_min] 449 | 450 | num_iter += 1 451 | 452 | if pivot == init_pivot 453 | converged = true 454 | break 455 | end 456 | if num_iter >= max_iter 457 | break 458 | end 459 | end 460 | 461 | if converged || num_iter >= max_iter 462 | break 463 | end 464 | end 465 | 466 | return converged, num_iter 467 | end 468 | 469 | 470 | """ 471 | _get_mixed_actions(tableaux, bases) 472 | 473 | From `tableaux` and `bases`, extract non-slack basic variables and 474 | return a tuple of the corresponding, normalized mixed actions. 475 | 476 | # Arguments 477 | 478 | - `tableaux::NTuple{2,Matrix{T}}`: Tuple of two arrays containing the tableaux, 479 | of shape `(n, m+n+1)` and `(m, m+n+1)`, respectively. 480 | - `bases::NTuple{2,Vector{Int}}`: Tuple of two arrays containing the bases, 481 | of length `n` and `m`, respectively. 482 | 483 | # Returns 484 | 485 | - `::NTuple{2,Vector{T}}`: Tuple of mixed actions as given by the 486 | non-slack basic variables in the tableaux. 
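
# Examples

Continuing the `_lemke_howson_tbl!` example above (editor's sketch; the exact
floating-point digits displayed may differ):

```julia
julia> _get_mixed_actions(tableaux, bases)
([0.0, 0.333333, 0.666667], [0.333333, 0.666667])
```

This agrees with the equilibrium computed by `lemke_howson` with
`init_pivot=2` in its docstring example.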
487 | """ 488 | function _get_mixed_actions(tableaux::NTuple{2,Matrix{T}}, 489 | bases::NTuple{2,Vector{Int}}) where T 490 | nums_actions = (size(tableaux[2], 1), size(tableaux[1], 1)) 491 | num = nums_actions[1] + nums_actions[2] 492 | out = zeros(T, num) 493 | 494 | @inbounds for (pl, (start, stop)) in enumerate( 495 | ((1, nums_actions[1]), (nums_actions[1]+1, num)) 496 | ) 497 | sum_ = zero(T) 498 | for i in 1:nums_actions[3-pl] 499 | k = bases[pl][i] 500 | if start <= k <= stop 501 | v = tableaux[pl][i, end] 502 | out[k] = v 503 | sum_ += v 504 | end 505 | end 506 | if !iszero(sum_) 507 | @views out[start:stop] ./= sum_ 508 | end 509 | end 510 | 511 | return out[1:nums_actions[1]], out[nums_actions[1]+1:end] 512 | end 513 | -------------------------------------------------------------------------------- /src/generators/bimatrix_generators.jl: -------------------------------------------------------------------------------- 1 | #= 2 | This module contains functions that generate NormalFormGame instances of the 3 | 2-player games studied by Fearnley, Igwe, and Savani (2015): 4 | 5 | * Colonel Blotto Games (`blotto_game`): A non-zero sum extension of the Blotto 6 | game as studied by Hortala-Vallve and Llorente-Saguer (2012), where opposing 7 | parties have asymmetric and heterogeneous battlefield valuations. 8 | 9 | * Ranking Games (`ranking_game`): In these games, as studied by Goldberg et al. 10 | (2013), each player chooses an effort level associated with a cost and a 11 | score. The players are ranked according to their scores, and the player with 12 | the higher score wins the prize. Each player's payoff is given by the value of 13 | the prize minus the cost of the effort. 14 | 15 | * SGC Games (`sgc_game`): These games were introduced by Sandholm, Gilpin, and 16 | Conitzer (2005) as a worst case scenario for support enumeration as it has a 17 | unique equilibrium where each player uses half of his actions in his support. 18 | 19 | * Tournament Games (`tournament_game`): These games are constructed by 20 | Anbalagan et al. (2013) as games that do not have interim epsilon-Nash 21 | equilibria with constant cardinality supports for epsilon smaller than a 22 | certain threshold. 23 | 24 | * Unit Vector Games (`unit_vector_game`): These games are games where the payoff 25 | matrix of one player consists of unit (column) vectors, used by Savani and von 26 | Stengel (2016) to construct instances that are hard, in terms of computational 27 | complexity, both for the Lemke-Howson and support enumeration algorithms. 28 | 29 | Large part of the code here is based on the C code available at 30 | https://github.com/bimatrix-games/bimatrix-generators distributed under BSD 31 | 3-Clause License. 32 | 33 | References 34 | ---------- 35 | * Y. Anbalagan, S. Norin, R. Savani, and A. Vetta, "Polylogarithmic Supports 36 | Are Required for Approximate Well-Supported Nash Equilibria below 2/3," WINE, 37 | 2013. 38 | 39 | * J. Fearnley, T. P. Igwe, and R. Savani, "An Empirical Study of Finding 40 | Approximate Equilibria in Bimatrix Games," International Symposium on 41 | Experimental Algorithms (SEA), 2015. 42 | 43 | * L.A. Goldberg, P.W. Goldberg, P. Krysta, and C. Ventre, "Ranking Games that 44 | have Competitiveness-based Strategies", Theoretical Computer Science, 2013 45 | 46 | * R. Hortala-Vallve and A. Llorente-Saguer, "Pure Strategy Nash Equilibria in 47 | Non-Zero Sum Colonel Blotto Games", International Journal of Game Theory, 48 | 2012. 49 | 50 | * T. Sandholm, A. Gilpin, and V. 
Conitzer, "Mixed-Integer Programming Methods
51 |   for Finding Nash Equilibria," AAAI, 2005.
52 | 
53 | * R. Savani and B. von Stengel, "Unit Vector Games," International Journal of
54 |   Economic Theory, 2016.
55 | 
56 | =#
57 | 
58 | # BSD 3-Clause License
59 | #
60 | # Copyright (c) 2015, John Fearnley, Tobenna P. Igwe, Rahul Savani
61 | # All rights reserved.
62 | #
63 | # Redistribution and use in source and binary forms, with or without
64 | # modification, are permitted provided that the following conditions are met:
65 | #
66 | # * Redistributions of source code must retain the above copyright notice, this
67 | #   list of conditions and the following disclaimer.
68 | #
69 | # * Redistributions in binary form must reproduce the above copyright notice,
70 | #   this list of conditions and the following disclaimer in the documentation
71 | #   and/or other materials provided with the distribution.
72 | #
73 | # * Neither the name of the copyright holder nor the names of its
74 | #   contributors may be used to endorse or promote products derived from
75 | #   this software without specific prior written permission.
76 | #
77 | # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
78 | # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
79 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
80 | # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
81 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
82 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
83 | # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
84 | # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
85 | # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
86 | # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
87 | 
88 | using QuantEcon: MVNSampler, simplex_grid, next_k_array!, k_array_rank
89 | using Graphs: random_tournament_digraph
90 | 
91 | 
92 | # blotto_game
93 | """
94 |     blotto_game([rng=GLOBAL_RNG], h, t, rho[, mu=0])
95 | 
96 | Return a NormalFormGame instance of a 2-player non-zero sum Colonel Blotto game
97 | (Hortala-Vallve and Llorente-Saguer, 2012), where the players have an equal
98 | number `t` of troops to assign to `h` hills (so that the number of actions for
99 | each player is equal to (t+h-1) choose (h-1) = (t+h-1)!/(t!*(h-1)!)). Each
100 | player has a value for each hill that he receives if he assigns strictly more
101 | troops to the hill than his opponent (ties are broken uniformly at random),
102 | where the values are drawn from a multivariate normal distribution with
103 | covariance `rho`. Each player’s payoff is the sum of the values of the hills won
104 | by that player.
105 | 
106 | 
107 | # Arguments
108 | 
109 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used.
110 | - `h::Integer` : Number of hills.
111 | - `t::Integer` : Number of troops.
112 | - `rho::Real` : Covariance of the players' values of each hill. Must be in
113 |   [-1, 1].
114 | - `mu::Real=0` : Mean of the players' values of each hill.
115 | 116 | # Returns 117 | 118 | - `g::NormalFormGame` 119 | 120 | # Examples 121 | 122 | ```julia 123 | julia> using GameTheory.Generators, Random 124 | 125 | julia> rng = MersenneTwister(1234); 126 | 127 | julia> g = blotto_game(rng, 2, 3, 0.5) 128 | 4×4 NormalFormGame{2, Float64} 129 | 130 | julia> g.players[1] 131 | 4×4 Player{2, Float64}: 132 | 0.186434 -0.494479 -0.494479 -0.494479 133 | 0.867347 0.186434 -0.494479 -0.494479 134 | 0.867347 0.867347 0.186434 -0.494479 135 | 0.867347 0.867347 0.867347 0.186434 136 | 137 | julia> g.players[2] 138 | 4×4 Player{2, Float64}: 139 | -0.688223 -1.02919 -1.02919 -1.02919 140 | -0.347259 -0.688223 -1.02919 -1.02919 141 | -0.347259 -0.347259 -0.688223 -1.02919 142 | -0.347259 -0.347259 -0.347259 -0.688223 143 | ``` 144 | """ 145 | function blotto_game(rng::AbstractRNG, h::Integer, t::Integer, rho::Real, 146 | mu::Real=0) 147 | d_mean = [mu; mu] 148 | d_cov = [1 rho; rho 1] 149 | dist = MVNSampler(d_mean, d_cov) 150 | values = rand(rng, dist, h) 151 | 152 | actions = simplex_grid(h, t) 153 | n = size(actions)[2] 154 | 155 | payoff_arrays = [Array{Float64}(undef, n, n) for i in 1:2] 156 | 157 | payoffs = Array{Float64}(undef, 2) 158 | @inbounds for i = 1:n, j = 1:n 159 | fill!(payoffs, 0) 160 | for k = 1:h 161 | if actions[k, i] == actions[k, j] 162 | for p = 1:2 163 | payoffs[p] += values[p, k] / 2 164 | end 165 | else 166 | winner = 1 + Int(actions[k, i] < actions[k, j]) 167 | payoffs[winner] += values[winner, k] 168 | end 169 | end 170 | payoff_arrays[1][i, j] = payoffs[1] 171 | payoff_arrays[2][j, i] = payoffs[2] 172 | end 173 | 174 | g = NormalFormGame( 175 | [Player(payoff_array) for payoff_array::Array in payoff_arrays] 176 | ) 177 | return g 178 | end 179 | 180 | blotto_game(h::Integer, t::Integer, rho::Real, mu::Real=0) = 181 | blotto_game(Random.GLOBAL_RNG, h, t, rho, mu) 182 | 183 | # ranking_game 184 | """ 185 | ranking_game([rng=GLOBAL_RNG], n[, steps=10]) 186 | 187 | Return a NormalFormGame instance of (the 2-player version of) the "ranking game" 188 | studied by Goldberg et al. (2013), where each player chooses an effort level 189 | associated with a score and a cost, both of which are increasing in the effort 190 | level with randomly generated step sizes. The player with the higher score wins the first 191 | prize, whose value is 1, and the other player obtains the "second prize" of 192 | value 0; in the case of a tie, the first prize is split and each player receives 193 | a value of 0.5. The payoff of a player is given by the value of the prize minus 194 | the cost of the effort. 195 | 196 | # Arguments 197 | 198 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 199 | - `n::Integer` : Number of actions, i.e., the number of possible effort levels. 200 | - `steps::Integer=10` : Parameter determining the upper bound for the size of 201 | the random steps for the scores and costs for each player: The step sizes for 202 | the scores are drawn from `1`, ..., `steps`, while those for the costs are 203 | multiples of `1/(n*steps)`, where the cost of effort level `1` is 0, and the 204 | maximum possible cost of effort level `n` is less than or equal to 1. 
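To see why this bound holds: the cost of effort level `n` is the cumulative sum of `n-1` random steps, each of size at most `steps`, scaled by `1/(n*steps)`, so it is at most `(n-1)*steps/(n*steps) = (n-1)/n < 1`.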
205 | 206 | # Returns 207 | 208 | - `g::NormalFormGame` 209 | 210 | # Examples 211 | 212 | ```julia 213 | julia> using GameTheory.Generators, Random 214 | 215 | julia> rng = MersenneTwister(1234); 216 | 217 | julia> g = ranking_game(rng, 5) 218 | 5×5 NormalFormGame{2, Float64} 219 | 220 | julia> g.players[1] 221 | 5×5 Player{2, Float64}: 222 | 0.5 0.0 0.0 0.0 0.0 223 | 0.92 -0.08 -0.08 -0.08 -0.08 224 | 0.88 0.88 0.88 0.88 -0.12 225 | 0.74 0.74 0.74 0.74 0.74 226 | 0.58 0.58 0.58 0.58 0.58 227 | 228 | julia> g.players[2] 229 | 5×5 Player{2, Float64}: 230 | 0.5 0.0 0.0 0.0 0.0 231 | 0.92 0.92 -0.08 -0.08 -0.08 232 | 0.76 0.76 -0.24 -0.24 -0.24 233 | 0.56 0.56 -0.44 -0.44 -0.44 234 | 0.44 0.44 0.44 -0.56 -0.56 235 | ``` 236 | """ 237 | function ranking_game(rng::AbstractRNG, n::Integer, steps::Integer=10) 238 | payoff_arrays = [Array{Float64}(undef, n, n) for i in 1:2] 239 | 240 | scores = rand(rng, 1:steps, (n, 2)) 241 | cumsum!(scores, scores, dims=1) 242 | 243 | costs = Array{Float64}(undef, n-1, 2) 244 | rand!(rng, costs, 1:steps) 245 | cumsum!(costs, costs, dims=1) 246 | costs ./= n * steps 247 | 248 | for (p, payoff_array) in enumerate(payoff_arrays) 249 | fill!(view(payoff_array, 1, :), 0) 250 | for j in 1:n, i in 2:n 251 | @inbounds payoff_array[i, j] = -costs[i-1, p] 252 | end 253 | end 254 | 255 | prize = 1. 256 | for j in 1:n, i in 1:n 257 | if scores[i, 1] > scores[j, 2] 258 | payoff_arrays[1][i, j] += prize 259 | elseif scores[i, 1] < scores[j, 2] 260 | payoff_arrays[2][j, i] += prize 261 | else 262 | payoff_arrays[1][i, j] += prize / 2 263 | payoff_arrays[2][j, i] += prize / 2 264 | end 265 | end 266 | 267 | g = NormalFormGame( 268 | [Player(payoff_array) for payoff_array::Array in payoff_arrays] 269 | ) 270 | return g 271 | end 272 | 273 | ranking_game(n::Integer, steps::Integer=10) = 274 | ranking_game(Random.GLOBAL_RNG, n, steps) 275 | 276 | # sgc_game 277 | """ 278 | sgc_game(k) 279 | 280 | Return a NormalFormGame instance of the 2-player game introduced by Sandholm, 281 | Gilpin, and Conitzer (2005), which has a unique Nash equilibrium, where each 282 | player plays half of the actions with positive probabilities. Payoffs are 283 | normalized so that the minimum and the maximum payoffs are 0 and 1, 284 | respectively. 285 | 286 | # Arguments 287 | 288 | - `k::Integer` : Positive integer determining the number of actions. The 289 | returned game will have `4*k-1` actions for each player. 
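For instance, `k = 2` gives `4*2 - 1 = 7` actions for each player, matching the 7×7 game in the Examples section below.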
290 | 291 | # Returns 292 | 293 | - `g::NormalFormGame` 294 | 295 | # Examples 296 | 297 | ```julia 298 | julia> using GameTheory.Generators 299 | 300 | julia> g = sgc_game(2) 301 | 7×7 NormalFormGame{2, Float64} 302 | 303 | julia> g.players[1] 304 | 7×7 Player{2, Float64}: 305 | 0.75 0.5 1.0 0.5 0.5 0.5 0.5 306 | 1.0 0.75 0.5 0.5 0.5 0.5 0.5 307 | 0.5 1.0 0.75 0.5 0.5 0.5 0.5 308 | 0.0 0.0 0.0 0.75 0.0 0.0 0.0 309 | 0.0 0.0 0.0 0.0 0.75 0.0 0.0 310 | 0.0 0.0 0.0 0.0 0.0 0.75 0.0 311 | 0.0 0.0 0.0 0.0 0.0 0.0 0.75 312 | 313 | julia> g.players[2] 314 | 7×7 Player{2, Float64}: 315 | 0.75 0.5 1.0 0.5 0.5 0.5 0.5 316 | 1.0 0.75 0.5 0.5 0.5 0.5 0.5 317 | 0.5 1.0 0.75 0.5 0.5 0.5 0.5 318 | 0.0 0.0 0.0 0.0 0.75 0.0 0.0 319 | 0.0 0.0 0.0 0.75 0.0 0.0 0.0 320 | 0.0 0.0 0.0 0.0 0.0 0.0 0.75 321 | 0.0 0.0 0.0 0.0 0.0 0.75 0.0 322 | ``` 323 | """ 324 | function sgc_game(k::Integer) 325 | n, m = 4*k-1, 2*k-1 326 | payoff_arrays = [Array{Float64}(undef, n, n) for i in 1:2] 327 | 328 | for payoff_array in payoff_arrays 329 | for j in 1:m 330 | for i in 1:m 331 | payoff_array[i, j] = 0.75 332 | end 333 | for i in (m+1):n 334 | payoff_array[i, j] = 0 335 | end 336 | end 337 | for j in (m+1):n 338 | for i in 1:m 339 | payoff_array[i, j] = 0.5 340 | end 341 | for i in (m+1):n 342 | payoff_array[i, j] = 0 343 | end 344 | end 345 | 346 | payoff_array[1, m] = 1 347 | payoff_array[1, 2] = 0.5 348 | for i in 2:(m-1) 349 | payoff_array[i, i-1] = 1 350 | payoff_array[i, i+1] = 0.5 351 | end 352 | payoff_array[m, m-1] = 1 353 | payoff_array[m, 1] = 0.5 354 | end 355 | 356 | for h in 0:(k-1) 357 | i, j = m + 1 + 2*h, m + 1 + 2*h 358 | payoff_arrays[1][i, j] = payoff_arrays[1][i+1, j+1] = 0.75 359 | payoff_arrays[2][j, i+1] = payoff_arrays[2][j+1, j] = 0.75 360 | end 361 | 362 | g = NormalFormGame( 363 | [Player(payoff_array) for payoff_array::Array in payoff_arrays] 364 | ) 365 | return g 366 | end 367 | 368 | # tournament_game 369 | """ 370 | tournament_game([rng=GLOBAL_RNG], n, k) 371 | 372 | Return a NormalFormGame instance of the 2-player win-lose game, whose payoffs 373 | are either 0 or 1, introduced by Anbalagan et al. (2013). Player 1 has n 374 | actions, which constitute the set of nodes {1, ..., n}, while player 2 has n 375 | choose k actions, each corresponding to a subset of k elements of the set of n 376 | nodes. Given a randomly generated tournament graph on the n nodes, the payoff 377 | for player 1 is 1 if, in the tournament, the node chosen by player 1 dominates 378 | all the nodes in the k-subset chosen by player 2. The payoff for player 2 is 1 379 | if player 2's k-subset contains player 1's chosen node. 380 | 381 | # Notes 382 | 383 | The actions of player 2 are ordered according to the 384 | [combinatorial number system](https://en.wikipedia.org/wiki/Combinatorial_number_system), 385 | which is different from the order used in the original library in C. 386 | 387 | # Arguments 388 | 389 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 390 | - `n::Integer` : Number of nodes in the tournament graph. 391 | - `k::Integer` : Size of subsets of nodes in the tournament graph. 392 | 
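For instance, with `n = 5` and `k = 2` as in the Examples section below, player 2 has binomial(5, 2) = 10 actions, corresponding (in combinatorial number system order) to the subsets {1,2}, {1,3}, {2,3}, {1,4}, {2,4}, {3,4}, {1,5}, {2,5}, {3,5}, {4,5}; this is the pattern visible in the rows of `g.players[2]` below.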
393 | 394 | # Returns 395 | 396 | - `g::NormalFormGame` 397 | 398 | # Examples 399 | 400 | ```julia 401 | julia> using GameTheory.Generators, Random 402 | 403 | julia> rng = MersenneTwister(1234); 404 | 405 | julia> g = tournament_game(rng, 5, 2) 406 | 5×10 NormalFormGame{2, Float64} 407 | 408 | julia> g.players[1] 409 | 5×10 Player{2, Float64}: 410 | 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 1.0 1.0 411 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 412 | 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 413 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 414 | 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 415 | 416 | julia> g.players[2] 417 | 10×5 Player{2, Float64}: 418 | 1.0 1.0 0.0 0.0 0.0 419 | 1.0 0.0 1.0 0.0 0.0 420 | 0.0 1.0 1.0 0.0 0.0 421 | 1.0 0.0 0.0 1.0 0.0 422 | 0.0 1.0 0.0 1.0 0.0 423 | 0.0 0.0 1.0 1.0 0.0 424 | 1.0 0.0 0.0 0.0 1.0 425 | 0.0 1.0 0.0 0.0 1.0 426 | 0.0 0.0 1.0 0.0 1.0 427 | 0.0 0.0 0.0 1.0 1.0 428 | ``` 429 | """ 430 | function tournament_game(rng::AbstractRNG, n::Integer, k::Integer) 431 | m = zero(Csize_t) 432 | try 433 | m = binomial(Csize_t(n), Csize_t(k)) 434 | catch  # a bare `catch` handles both `InexactError` (conversion) and `OverflowError` (`binomial`) 435 | throw(ArgumentError("Maximum allowed size exceeded")) 436 | end 437 | 438 | R = zeros(Float64, n, m) 439 | C = zeros(Float64, m, n) 440 | tourn_graph = random_tournament_digraph(n; rng=rng) 441 | fadjlist = tourn_graph.fadjlist 442 | 443 | # populate matrix C 444 | X = collect(1:k) 445 | for j = 1:m 446 | C[j, X] .= 1. 447 | next_k_array!(X) 448 | end 449 | 450 | # populate matrix R 451 | # continue to use array `X` to store indices 452 | a = Vector{Int}(undef, k) 453 | for i = 1:n 454 | d = length(fadjlist[i]) 455 | if d >= k 456 | for j = 1:k 457 | a[j] = j 458 | end 459 | while a[end] <= d 460 | for j in 1:k 461 | X[j] = fadjlist[i][a[j]] 462 | end 463 | R[i, k_array_rank(Csize_t, X)] = 1. 464 | next_k_array!(a) 465 | end 466 | end 467 | end 468 | 469 | g = NormalFormGame([Player(R), Player(C)]) 470 | return g 471 | end 472 | 473 | tournament_game(n::Integer, k::Integer) = 474 | tournament_game(Random.GLOBAL_RNG, n, k) 475 | 476 | # unit_vector_game 477 | """ 478 | unit_vector_game([rng=GLOBAL_RNG], n; avoid_pure_nash=false) 479 | 480 | Return a NormalFormGame instance of the 2-player game "unit vector game" (Savani 481 | and von Stengel, 2016). Payoffs for player 2 are chosen randomly from the [0, 1) 482 | range. For player 1, each column contains exactly one 1 payoff and the rest 483 | are 0. 484 | 485 | # Arguments 486 | 487 | - `rng::AbstractRNG=GLOBAL_RNG`: Random number generator used. 488 | - `n::Integer` : Number of actions. 489 | - `avoid_pure_nash::Bool=false` : If true, player 1's payoffs will be placed in 490 | order to avoid pure Nash equilibria. (If necessary, the payoffs for player 2 491 | are redrawn so as not to have a dominant action.) 
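To see why this rules out pure Nash equilibria: player 1's unique best response to an action `j` of player 2 is the row carrying the 1 payoff in column `j`, so a pure equilibrium exists exactly when that row is one against which `j` is a best response for player 2. With `avoid_pure_nash=true` each 1 is placed only in rows against which `j` is suboptimal for player 2; this requires every action of player 2 to be suboptimal against at least one action of player 1, which is why player 2's payoffs may be redrawn.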
492 | 493 | # Returns 494 | 495 | - `g::NormalFormGame` 496 | 497 | # Examples 498 | 499 | ```julia 500 | julia> using GameTheory.Generators, Random 501 | 502 | julia> rng = MersenneTwister(123456); 503 | 504 | julia> g = unit_vector_game(rng, 5) 505 | 5×5 NormalFormGame{2, Float64} 506 | 507 | julia> g.players[1] 508 | 5×5 Player{2, Float64}: 509 | 0.0 0.0 0.0 0.0 0.0 510 | 1.0 0.0 0.0 0.0 1.0 511 | 0.0 0.0 0.0 0.0 0.0 512 | 0.0 0.0 1.0 0.0 0.0 513 | 0.0 1.0 0.0 1.0 0.0 514 | 515 | julia> g.players[2] 516 | 5×5 Player{2, Float64}: 517 | 0.51521 0.574332 0.391494 0.316183 0.913325 518 | 0.74129 0.47338 0.0110828 0.986807 0.302641 519 | 0.582142 0.635053 0.7289 0.324831 0.240347 520 | 0.209969 0.00394602 0.588569 0.627509 0.692993 521 | 0.180649 0.998717 0.0955464 0.974204 0.994846 522 | 523 | julia> pure_nash(g) 524 | 1-element Vector{Tuple{Int64, Int64}}: 525 | (2, 5) 526 | ``` 527 | 528 | With `avoid_pure_nash=true`: 529 | 530 | ```julia 531 | julia> rng = MersenneTwister(123456); 532 | 533 | julia> g = unit_vector_game(rng, 5; avoid_pure_nash=true) 534 | 5×5 NormalFormGame{2, Float64} 535 | 536 | julia> g.players[1] 537 | 5×5 Player{2, Float64}: 538 | 0.0 0.0 0.0 0.0 0.0 539 | 1.0 0.0 0.0 0.0 0.0 540 | 0.0 0.0 0.0 0.0 1.0 541 | 0.0 0.0 1.0 0.0 0.0 542 | 0.0 1.0 0.0 1.0 0.0 543 | 544 | julia> g.players[2] 545 | 5×5 Player{2, Float64}: 546 | 0.51521 0.574332 0.391494 0.316183 0.913325 547 | 0.74129 0.47338 0.0110828 0.986807 0.302641 548 | 0.582142 0.635053 0.7289 0.324831 0.240347 549 | 0.209969 0.00394602 0.588569 0.627509 0.692993 550 | 0.180649 0.998717 0.0955464 0.974204 0.994846 551 | 552 | julia> pure_nash(g) 553 | Tuple{Int64, Int64}[] 554 | ``` 555 | """ 556 | function unit_vector_game(rng::AbstractRNG, n::Integer; 557 | avoid_pure_nash::Bool=false) 558 | payoff_arrays = [zeros(Float64, n, n), rand(rng, Float64, (n, n))] 559 | 560 | if !avoid_pure_nash 561 | ones_ind = rand(rng, 1:n, n) 562 | for j in 1:n 563 | payoff_arrays[1][ones_ind[j], j] = 1 564 | end 565 | else 566 | n == 1 && throw(ArgumentError("Cannot avoid pure Nash with n=1")) 567 | maxes = maximum(payoff_arrays[2], dims=1) 568 | is_suboptimal = payoff_arrays[2] .< maxes 569 | nums_suboptimal = sum(is_suboptimal, dims=2) 570 | 571 | while any(nums_suboptimal .== 0) 572 | rand!(rng, payoff_arrays[2]) 573 | maximum!(maxes, payoff_arrays[2]) 574 | for i in 1:n, j in 1:n 575 | is_suboptimal[j, i] = payoff_arrays[2][j, i] < maxes[i] 576 | end 577 | sum!(nums_suboptimal, is_suboptimal) 578 | end 579 | 580 | for j in 1:n 581 | one_ind = rand(rng, 1:n) 582 | while !is_suboptimal[j, one_ind] 583 | one_ind = rand(rng, 1:n) 584 | end 585 | payoff_arrays[1][one_ind, j] = 1 586 | end 587 | end 588 | 589 | g = NormalFormGame( 590 | [Player(payoff_array) for payoff_array::Array in payoff_arrays] 591 | ) 592 | return g 593 | end 594 | 595 | unit_vector_game(n::Integer; avoid_pure_nash::Bool=false) = 596 | unit_vector_game(Random.GLOBAL_RNG, n, avoid_pure_nash=avoid_pure_nash) 597 | -------------------------------------------------------------------------------- /test/test_normal_form_game.jl: -------------------------------------------------------------------------------- 1 | # ------------------------- # 2 | # Testing normal form games # 3 | # ------------------------- # 4 | 5 | # NOTE: We include `@inferred` at least once for each function name. We do 6 | # that multiple times for the same function if we have particular reason 7 | # to believe there might be a type instability with that function. 
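# For example, a line such as
#     @test @inferred(best_response(player, 2)) == 2
# checks two things at once: `@inferred` throws an error unless the inferred
# return type of the call matches the type of the value it actually returns,
# and the returned value is then compared with the expected result as usual.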
8 | 9 | using GameTheory: get_opponents_actions 10 | 11 | using MathOptInterface 12 | const MOI = MathOptInterface 13 | using Clp 14 | using CDDLib 15 | 16 | 17 | @testset "Testing normal_form_game.jl" begin 18 | 19 | # Player # 20 | 21 | @testset "Player with 1 opponent" begin 22 | coordination_game_matrix = [4 0; 3 2] 23 | player = Player(coordination_game_matrix) 24 | 25 | @test @inferred(best_response(player, 2)) == 2 26 | @test @inferred(best_response(player, [1/2, 1/2])) == 2 27 | @test @inferred(best_response(player, [1/2, 1/2], BROptions())) == 2 28 | @test sort(@inferred(best_responses(player, [2/3, 1/3]))) == 29 | sort([1, 2]) 30 | @test best_response( 31 | player, [2/3, 1/3], tie_breaking=:random 32 | ) in [1, 2] 33 | @test best_response( 34 | player, [2/3, 1/3], BROptions(tol=1e-5, tie_breaking=:random) 35 | ) in [1, 2] 36 | @test_throws ArgumentError best_response( 37 | player, [2/3, 1/3], tie_breaking=:invalid_symbol 38 | ) 39 | @test @inferred(is_best_response(player, 1, 1)) 40 | @test @inferred(is_best_response(player, [1/2, 1/2], [2/3, 1/3])) 41 | 42 | # Perturbed best response 43 | @test best_response(player, [2/3, 1/3], [0., 0.1]) == 2 44 | @test best_response(player, [2, 1], [0., 0.1]) == 2 45 | 46 | # Dominated actions 47 | @test !(@inferred(is_dominated(player, 1))) 48 | @test @inferred(dominated_actions(player)) == Int[] 49 | end 50 | 51 | @testset "Player with 2 opponents" begin 52 | payoffs_2opponents = Array{Int}(undef, 2, 2, 2) 53 | payoffs_2opponents[:, 1, 1] = [3, 1] 54 | payoffs_2opponents[:, 1, 2] = [6, 0] 55 | payoffs_2opponents[:, 2, 1] = [4, 5] 56 | payoffs_2opponents[:, 2, 2] = [2, 7] 57 | player = @inferred Player(payoffs_2opponents) 58 | 59 | @test @inferred(payoff_vector(player, (1, 2))) == [6, 0] 60 | @test !(@inferred(is_best_response(player, 1, (2, 1)))) 61 | @test @inferred(best_response(player, (2, 2))) == 2 62 | @test sort(@inferred(best_responses(player, ([3/7, 4/7], [1/2, 1/2])))) == 63 | sort([1, 2]) 64 | 65 | @test_throws MethodError best_response(player, (1, [1/2, 1/2])) 66 | 67 | @test !(@inferred(is_dominated(player, 1))) 68 | @test @inferred(dominated_actions(player)) == Int[] 69 | end 70 | 71 | @testset "Player construction with AbstractArray" begin 72 | A = [4 0; 3 2] 73 | A_T = transpose(A) # !(A_T isa Array) 74 | player_T = @inferred Player(A_T) 75 | @test player_T.payoff_array[1, 2] == A[2, 1] 76 | end 77 | 78 | @testset "convert for Player" begin 79 | T1 = Int 80 | T2 = Float64 81 | player = Player(T1[1 2; 3 4]) 82 | N = ndims(player.payoff_array) 83 | 84 | for T in [T1, T2] 85 | player_new = Player{N,T}(player) 86 | @test eltype(player_new.payoff_array) == T 87 | @test player_new.payoff_array == player.payoff_array 88 | 89 | # Constructor always makes a copy 90 | @test player_new.payoff_array !== player.payoff_array 91 | end 92 | 93 | for T in [T1, T2] 94 | player_new = Player(T, player) 95 | @test eltype(player_new.payoff_array) == T 96 | @test player_new.payoff_array == player.payoff_array 97 | 98 | # Constructor always makes a copy 99 | @test player_new.payoff_array !== player.payoff_array 100 | end 101 | 102 | player_new = @inferred Player(player) 103 | @test eltype(player_new.payoff_array) == T1 104 | @test player_new.payoff_array == player.payoff_array 105 | 106 | # Constructor always makes a copy 107 | @test player_new.payoff_array !== player.payoff_array 108 | 109 | for T in [T1, T2] 110 | player_new = convert(Player{N,T}, player) 111 | @test eltype(player_new.payoff_array) == T 112 | @test player_new.payoff_array == 
player.payoff_array 113 | 114 | @test (player_new.payoff_array === player.payoff_array) == 115 | (T == T1) 116 | end 117 | end 118 | 119 | @testset "repr(Player)" begin 120 | A = [1 2; 3 4] 121 | player = Player(A) 122 | r = repr("text/plain", A) 123 | @test repr("text/plain", player) == 124 | replace(r, string(typeof(A)) => 125 | split(string(typeof(player)), ".")[end]) 126 | 127 | player2 = eval(Meta.parse(repr(player))) 128 | @test player2.payoff_array == player.payoff_array 129 | end 130 | 131 | @testset "Tests on delete_action for Player" begin 132 | shapley_game = [0 1 0; 0 0 1; 1 0 0] 133 | player = Player(shapley_game) 134 | 135 | player_new_1 = @inferred delete_action(player, 1, 1) 136 | player_new_2 = @inferred delete_action(player, [1, 2], 1) 137 | @test player_new_1.payoff_array == Player([0 0 1; 1 0 0]).payoff_array 138 | @test player_new_2.payoff_array == Player([1 0 0]).payoff_array 139 | end 140 | 141 | # NormalFormGame # 142 | 143 | @testset "symmetric NormalFormGame with 2 players" begin 144 | coordination_game_matrix = [4 0; 3 2] 145 | g = @inferred(NormalFormGame(coordination_game_matrix)) 146 | 147 | # NOTE: getindex(g, 1, 2) is equivalent to `g[1, 2]`. I use the former 148 | # so we can test the type stability of the get index function 149 | @test @inferred(getindex(g, 1, 2)) == [0, 3] 150 | @test @inferred(getindex(g, CartesianIndex(1, 2))) == [0, 3] 151 | @test @inferred is_nash(g, (1, 1)) 152 | @test @inferred is_nash(g, ([2/3, 1/3], [2/3, 1/3])) 153 | end 154 | 155 | @testset "asymmetric NormalFormGame with 2 players" begin 156 | matching_pennies_bimatrix = Array{Float64}(undef, 2, 2, 2) 157 | matching_pennies_bimatrix[:, 1, 1] = [1, -1] 158 | matching_pennies_bimatrix[:, 1, 2] = [-1, 1] 159 | matching_pennies_bimatrix[:, 2, 1] = [-1, 1] 160 | matching_pennies_bimatrix[:, 2, 2] = [1, -1] 161 | g = @inferred(NormalFormGame(matching_pennies_bimatrix)) 162 | 163 | @test g[2, 1] == [-1, 1] 164 | payoff_profiles = @inferred payoff_profile_array(g) 165 | @test payoff_profiles[2, 1] == [-1, 1] 166 | @test !(is_nash(g, (1, 1))) 167 | @test is_nash(g, ([1/2, 1/2], [1/2, 1/2])) 168 | end 169 | 170 | @testset "asymmetric NormalFormGame with 3 players" begin 171 | payoffs_2opponents = Array{Int}(undef, 2, 2, 2) 172 | payoffs_2opponents[:, 1, 1] = [3, 1] 173 | payoffs_2opponents[:, 1, 2] = [6, 0] 174 | payoffs_2opponents[:, 2, 1] = [4, 5] 175 | payoffs_2opponents[:, 2, 2] = [2, 7] 176 | player = @inferred Player(payoffs_2opponents) 177 | g = @inferred NormalFormGame(tuple([player for i in 1:3]...)) 178 | 179 | @test @inferred(getindex(g, 1, 1, 2)) == [6, 4, 1] 180 | @test @inferred(getindex(g, CartesianIndex(1, 1, 2))) == [6, 4, 1] 181 | payoff_profiles = @inferred payoff_profile_array(g) 182 | @test payoff_profiles[1, 1, 2] == [6, 4, 1] 183 | @test @inferred is_nash(g, (1, 1, 1)) 184 | @test @inferred !(is_nash(g, (1, 1, 2))) 185 | 186 | p = (1 + sqrt(65)) / 16 187 | @test is_nash(g, ([1-p, p], [1-p, p], [1-p, p])) 188 | end 189 | 190 | @testset "NormalFormGame input action sizes" begin 191 | g = @inferred NormalFormGame((2, 3, 4)) 192 | 193 | @test @inferred(num_players(g)) == 3 194 | @test g.players[1].payoff_array == zeros((2, 3, 4)) 195 | @test g.players[2].payoff_array == zeros((3, 4, 2)) 196 | @test g.players[3].payoff_array == zeros((4, 2, 3)) 197 | end 198 | 199 | @testset "NormalFormGame setindex" begin 200 | g = @inferred NormalFormGame((2, 2)) 201 | g[1, 1] = [0, 10] 202 | g[1, 2] = [0, 10] 203 | g[2, 1] = 3, 5 204 | g[CartesianIndex(2, 2)] = [-2, 0] 205 | 206 | 
@test g.players[1].payoff_array == [0 0; 3 -2] 207 | @test g.players[2].payoff_array == [10 5; 10 0] 208 | end 209 | 210 | @testset "NormalFormGame constant payoffs" begin 211 | g = NormalFormGame((2, 2)) 212 | 213 | @test @inferred is_nash(g, (1, 1)) 214 | @test is_nash(g, (1, 2)) 215 | @test is_nash(g, (2, 1)) 216 | @test is_nash(g, (2, 2)) 217 | end 218 | 219 | @testset "NormalFormGame payoff_profile_array constructor" begin 220 | nums_actions = (2, 3, 4) 221 | for N in 1:length(nums_actions) 222 | payoff_arrays = [ 223 | reshape(collect(1:prod(nums_actions[1:N])), 224 | (nums_actions[i:N]..., nums_actions[1:(i-1)]...)) 225 | for i in 1:N 226 | ] 227 | players = [Player(payoff_array) for payoff_array in payoff_arrays] 228 | g = NormalFormGame(players) 229 | g_new = NormalFormGame(payoff_profile_array(g)) 230 | for (player_new, payoff_array) in zip(g_new.players, payoff_arrays) 231 | @test player_new.payoff_array == payoff_array 232 | end 233 | end 234 | end 235 | 236 | @testset "convert for NormalFormGame" begin 237 | T1 = Int 238 | T2 = Float64 239 | players = (Player(T1[1 2 3; 4 5 6]), Player(T1[7 8; 9 10; 11 12])) 240 | g = NormalFormGame(players) 241 | N = num_players(g) 242 | 243 | for T in [T1, T2] 244 | g_new = @inferred NormalFormGame{N,T}(g) 245 | for (player_new, player) in zip(g_new.players, players) 246 | @test eltype(player_new.payoff_array) == T 247 | @test player_new.payoff_array == player.payoff_array 248 | 249 | # Constructor always makes a copy 250 | @test player_new.payoff_array !== player.payoff_array 251 | end 252 | end 253 | 254 | for T in [T1, T2] 255 | g_new = @inferred NormalFormGame(T, g) 256 | for (player_new, player) in zip(g_new.players, players) 257 | @test eltype(player_new.payoff_array) == T 258 | @test player_new.payoff_array == player.payoff_array 259 | 260 | # Constructor always makes a copy 261 | @test player_new.payoff_array !== player.payoff_array 262 | end 263 | end 264 | 265 | g_new = @inferred NormalFormGame(g) 266 | for (player_new, player) in zip(g_new.players, players) 267 | @test eltype(player_new.payoff_array) == T1 268 | @test player_new.payoff_array == player.payoff_array 269 | 270 | # Constructor always makes a copy 271 | @test player_new.payoff_array !== player.payoff_array 272 | end 273 | 274 | for T in [T1, T2] 275 | g_new = @inferred convert(NormalFormGame{N,T}, g) 276 | for (player_new, player) in zip(g_new.players, players) 277 | @test eltype(player_new.payoff_array) == T 278 | @test player_new.payoff_array == player.payoff_array 279 | 280 | @test (player_new.payoff_array === player.payoff_array) === 281 | (T == T1) 282 | end 283 | end 284 | end 285 | 286 | @testset "Test repr/print for NormalFormGame" begin 287 | a = reshape([[1, 2], [3, 4], [5, 6], [7, 8]], (2, 2)) 288 | g = NormalFormGame(a) 289 | @test occursin(string(typeof(g)), repr(g)) 290 | @test occursin(sprint(Base.print_array, a), sprint(print, g)) 291 | end 292 | 293 | @testset "Tests on delete_action for NormalFormGame" begin 294 | shapley_game = Array{Int}(undef, 3, 3, 2) 295 | shapley_game[:, 1, 1] = [0, 0, 1] 296 | shapley_game[:, 2, 1] = [1, 0, 0] 297 | shapley_game[:, 3, 1] = [0, 1, 0] 298 | shapley_game[:, 1, 2] = [0, 1, 0] 299 | shapley_game[:, 2, 2] = [0, 0, 1] 300 | shapley_game[:, 3, 2] = [1, 0, 0] 301 | g = NormalFormGame(shapley_game) 302 | 303 | deleted_game_1 = Array{Int}(undef, 2, 3, 2) 304 | deleted_game_1[:, 1, 1] = [0, 1] 305 | deleted_game_1[:, 2, 1] = [0, 0] 306 | deleted_game_1[:, 3, 1] = [1, 0] 307 | deleted_game_1[:, 1, 2] = [1, 0] 308 | 
deleted_game_1[:, 2, 2] = [0, 1] 309 | deleted_game_1[:, 3, 2] = [0, 0] 310 | 311 | deleted_game_2 = Array{Int}(undef, 1, 3, 2) 312 | deleted_game_2[1, 1, 1] = 1 313 | deleted_game_2[1, 2, 1] = 0 314 | deleted_game_2[1, 3, 1] = 0 315 | deleted_game_2[1, 1, 2] = 0 316 | deleted_game_2[1, 2, 2] = 1 317 | deleted_game_2[1, 3, 2] = 0 318 | 319 | g_new_1 = @inferred delete_action(g, 1, 1) 320 | g_new_2 = @inferred delete_action(g, [1, 2], 1) 321 | @test g_new_1.players[1].payoff_array == 322 | NormalFormGame(deleted_game_1).players[1].payoff_array 323 | @test g_new_1.players[2].payoff_array == 324 | NormalFormGame(deleted_game_1).players[2].payoff_array 325 | @test g_new_2.players[1].payoff_array == 326 | NormalFormGame(deleted_game_2).players[1].payoff_array 327 | @test g_new_2.players[2].payoff_array == 328 | NormalFormGame(deleted_game_2).players[2].payoff_array 329 | end 330 | 331 | # get_opponents_actions 332 | 333 | @testset "get_opponents_actions" begin 334 | action_profiles = [(5, 6, 7), 335 | ([0.1, 0.2, 0.7], [0.2, 0.8], [0.3, 0.1, 0.1, 0.5])] 336 | for action_profile in action_profiles 337 | N = length(action_profile) 338 | for i in 1:N 339 | opponents_actions_expected = 340 | (action_profile[i+1:end]..., action_profile[1:i-1]...) 341 | opponents_actions = 342 | @inferred get_opponents_actions(action_profile, i) 343 | @test opponents_actions == opponents_actions_expected 344 | end 345 | end 346 | end 347 | 348 | # Trivial cases with one player # 349 | 350 | @testset "Player with 0 opponents" begin 351 | payoffs = [0, 1] 352 | player = Player(payoffs) 353 | 354 | @test @inferred(payoff_vector(player, nothing)) == [0, 1] 355 | @test @inferred is_best_response(player, 2, nothing) 356 | @test @inferred(best_response(player, nothing)) == 2 357 | @test @inferred is_dominated(player, 1) 358 | @test !is_dominated(player, 2) 359 | 360 | payoffs = [0, 1, -1] 361 | player = Player(payoffs) 362 | dom_actions = [1, 3] 363 | @test @inferred(dominated_actions(player)) == dom_actions 364 | 365 | end 366 | 367 | @testset "NormalFormGame with 1 player" begin 368 | payoffs = [0, 1, 1] 369 | g = @inferred NormalFormGame(Player(payoffs)) 370 | @test num_players(g) == 1 371 | @test g.players[1].payoff_array == [0, 1, 1] 372 | @test g[1] == 0 373 | payoff_profiles = @inferred payoff_profile_array(g) 374 | @test payoff_profiles[1] == [0] 375 | @test is_nash(g, 2) 376 | @test !(is_nash(g, 1)) 377 | @test is_nash(g, [0, 1/2, 1/2]) 378 | 379 | g = NormalFormGame((2,)) 380 | @test num_players(g) == 1 381 | @test g.players[1].payoff_array == zeros(2) 382 | g[1] = 10 383 | @test g.players[1].payoff_array == [10, 0] 384 | end 385 | 386 | # Invalid inputs # 387 | 388 | @testset "Player 0 actions" begin 389 | @test_throws ArgumentError p = Player(Float64[]) 390 | end 391 | 392 | @testset "NormalFormGame invalid players shape inconsistent" begin 393 | p1 = Player(zeros((2, 3))) 394 | p2 = Player(zeros((2, 3))) 395 | @test_throws ArgumentError g = NormalFormGame((p1, p2)) 396 | end 397 | 398 | @testset "NormalFormGame invalid players number inconsistent" begin 399 | p1 = Player(zeros((2, 2, 2))) 400 | p2 = Player(zeros((2, 2, 2))) 401 | @test_throws MethodError g = NormalFormGame((p1, p2)) 402 | end 403 | 404 | @testset "NormalFormGame invalid nonsquare matrix" begin 405 | @test_throws ArgumentError g = NormalFormGame(zeros((2, 3))) 406 | 407 | A = [0, 1, 1] 408 | A = reshape(A, (size(A)..., 1)) 409 | @test_throws ArgumentError g = NormalFormGame(A) 410 | end 411 | 412 | @testset "NormalFormGame invalid payoff 
profiles" begin 413 | @test_throws ArgumentError g = NormalFormGame(zeros((2, 2, 1))) 414 | end 415 | 416 | @testset "NormalFormGame empty tuple" begin 417 | @test_throws ArgumentError g = NormalFormGame(tuple()) 418 | end 419 | 420 | @testset "NormalFormGame 0 actions" begin 421 | @test_throws ArgumentError g = NormalFormGame((0, 2)) 422 | end 423 | 424 | @testset "payoff_vector empty tuple" begin 425 | p1 = Player(zeros((2, 2, 2))) 426 | @test_throws ArgumentError payoff_vector(p1, tuple()) 427 | end 428 | 429 | @testset "is_dominated linprog error" begin 430 | player = Player([1. 1.; 0. -1.; -1. 0.]) 431 | 432 | function clp_optimizer_silent_maxiter1() 433 | optimizer = Clp.Optimizer() 434 | MOI.set(optimizer, MOI.Silent(), true) 435 | MOI.set(optimizer, MOI.RawOptimizerAttribute("MaximumIterations"), 1) 436 | return optimizer 437 | end 438 | lp_solver = clp_optimizer_silent_maxiter1 439 | @test_throws ErrorException is_dominated(player, 1, 440 | lp_solver=lp_solver) 441 | end 442 | 443 | # Utility functions # 444 | 445 | @testset "pure2mixed" begin 446 | num_actions = 3 447 | pure_action = 1 448 | mixed_action = [1., 0., 0.] 449 | @test @inferred(pure2mixed(num_actions, pure_action)) == mixed_action 450 | end 451 | 452 | # Pareto efficiency & Pareto dominance # 453 | 454 | @testset "Tests on Pareto efficiency and dominance" begin 455 | coordination_game_matrix = [4 0; 456 | 3 2] 457 | 458 | equal_po_p1_bimatrix = Array{Float64}(undef, 2, 2, 2) 459 | equal_po_p1_bimatrix[1, 1, :] = [1, -1] 460 | equal_po_p1_bimatrix[1, 2, :] = [1, 1] 461 | equal_po_p1_bimatrix[2, 1, :] = [1, 1] 462 | equal_po_p1_bimatrix[2, 2, :] = [1, -1] 463 | 464 | three_p_equal_po_array = Array{Int}(undef, 2, 2, 2) 465 | three_p_equal_po_array[:, :, 1] = [2 0; 0 2] 466 | three_p_equal_po_array[:, :, 2] = [2 0; 0 2] 467 | 468 | p1 = p2 = p3 = Player(three_p_equal_po_array) 469 | 470 | games_dict = [NormalFormGame(coordination_game_matrix), 471 | NormalFormGame(equal_po_p1_bimatrix), 472 | NormalFormGame((p1, p2, p3))] 473 | 474 | act_prof_dict = [[(1, 1), (1, 2), (2, 1), (2, 2)], 475 | [(1, 1), (1, 2), (2, 1), (2, 2)], 476 | [(1, 1, 1), (2, 1, 1), (1, 2, 1), (2, 2, 1), 477 | (1, 1, 2), (2, 1, 2), (1, 2, 2), (2, 2, 2)]] 478 | 479 | @testset "Testing is_pareto_efficient" begin 480 | output_dict = [[true, false, false, false], 481 | [false, true, true, false], 482 | [true, false, false, false, false, false, false, 483 | true]] 484 | 485 | for i = 1:length(games_dict) 486 | for j in 1:length(act_prof_dict[i]) 487 | @test @inferred is_pareto_efficient(games_dict[i], 488 | act_prof_dict[i][j]) == 489 | output_dict[i][j] 490 | end 491 | end 492 | end 493 | 494 | @testset "Testing Pareto dominance" begin 495 | output_dict = [[true, false, false, false], 496 | [false, false, false, false], 497 | [false, false, false, false, false, false, false, 498 | false]] 499 | 500 | for i = 1:length(games_dict) 501 | for j in 1:length(act_prof_dict[i]) 502 | @test @inferred is_pareto_dominant(games_dict[i], 503 | act_prof_dict[i][j]) == 504 | output_dict[i][j] 505 | end 506 | end 507 | end 508 | 509 | @testset "Test is_dominated" begin 510 | coordination_game_matrix = [4 0; 3 2] 511 | player = Player(coordination_game_matrix) 512 | for action = 1:num_actions(player) 513 | @test !is_dominated(player, action) 514 | end 515 | 516 | payoffs_2opponents = Array{Int}(undef, 2, 2, 2) 517 | payoffs_2opponents[:, 1, 1] = [3, 1] 518 | payoffs_2opponents[:, 1, 2] = [6, 0] 519 | payoffs_2opponents[:, 2, 1] = [4, 5] 520 | payoffs_2opponents[:, 2, 2] = 
[2, 7] 521 | player = Player(payoffs_2opponents) 522 | 523 | for i = 1:num_actions(player) 524 | @test !is_dominated(player, i) 525 | end 526 | 527 | end 528 | 529 | @testset "Test player corner cases" begin 530 | n, m = 3, 4 531 | player = Player(zeros((n, m))) 532 | for action = 1:n 533 | @test is_best_response(player, action, ones(m) * 1/m) 534 | @test !is_dominated(player, action) 535 | end 536 | 537 | e = 1e-8 538 | player = Player([-e -e; 539 | 1 -1; 540 | -1 1]) 541 | action = 1 542 | @test is_best_response(player, action, [1/2, 1/2], tol=e) 543 | @test !is_best_response(player, action, [1/2, 1/2], tol=e/2) 544 | @test !is_dominated(player, action, tol=e+1e-16) 545 | @test dominated_actions(player, tol=e+1e-16) == Int[] 546 | @test is_dominated(player, action, tol=e/2) 547 | @test dominated_actions(player, tol=e/2) == [action] 548 | end 549 | 550 | @testset "Test rational input game" begin 551 | T = Rational{BigInt} 552 | lp_solver = CDDLib.Optimizer{T} 553 | 554 | # Corner cases 555 | e = 1//(2^25) 556 | player = Player([-e -e; 557 | 1//1 -1//1; 558 | -1//1 1//1]) 559 | 560 | action = 1 561 | @test !is_dominated(T, player, action, tol=e, lp_solver=lp_solver) 562 | @test is_dominated(T, player, action, tol=e//2, lp_solver=lp_solver) 563 | 564 | player.payoff_array[1, 1:2] .= 0; 565 | 566 | @test !is_dominated(T, player, action, tol=0, lp_solver=lp_solver) 567 | 568 | # Simple game 569 | game_matrix = [2//3 1//3; 570 | 1//3 2//3] 571 | player = Player(game_matrix) 572 | 573 | @test dominated_actions(T, player, lp_solver=lp_solver) == Int[] 574 | end 575 | end 576 | 577 | end 578 | --------------------------------------------------------------------------------