├── LICENSE
├── Makefile
├── README.md
├── add_path.m
├── definition.m
├── docs
├── combined_dynamics.tex
├── documentation.bbl
├── documentation.dvi
├── documentation.pdf
├── documentation.ps
├── documentation.tex
├── evolutionary_dynamics.tex
├── example_dr.tex
├── images
│ ├── accumulated_i.eps
│ ├── cut.sh
│ ├── evolution_i.eps
│ ├── evolution_u.eps
│ ├── final_state.eps
│ ├── maximization_a.eps
│ ├── maximization_b.eps
│ ├── running_time_pop.eps
│ ├── running_time_smith_pop.eps
│ ├── running_time_smith_str.eps
│ ├── running_time_str.eps
│ ├── test1_ev_bnn.eps
│ ├── test1_ev_logit.eps
│ ├── test1_ev_rd.eps
│ ├── test1_ev_smith.eps
│ ├── test1_simplex_bnn.eps
│ ├── test1_simplex_logit.eps
│ ├── test1_simplex_rd.eps
│ ├── test1_simplex_smith.eps
│ ├── test2_ev_bnn.eps
│ ├── test2_ev_logit.eps
│ ├── test2_ev_maynard_rd.eps
│ ├── test2_ev_rd.eps
│ ├── test2_ev_smith.eps
│ ├── test2_simplex_bnn.eps
│ ├── test2_simplex_logit.eps
│ ├── test2_simplex_maynard_rd.eps
│ ├── test2_simplex_rd.eps
│ ├── test2_simplex_smith.eps
│ ├── test_combined.eps
│ ├── test_combined_ev.eps
│ ├── test_finite_comparison2average.eps
│ ├── test_finite_comparison2average_ev.eps
│ ├── test_finite_logit_choice.eps
│ ├── test_finite_logit_choice_ev.eps
│ ├── test_finite_pairwise_comparison.eps
│ ├── test_finite_pairwise_comparison_ev.eps
│ ├── test_finite_proportional_imitation.eps
│ └── test_finite_proportional_imitation_ev.eps
├── implementation.tex
├── introduction.tex
├── maximization.tex
├── multipopulation.tex
└── references.bib
├── dynamics
├── bnn.m
├── combined_dynamics.m
├── logit.m
├── maynard_rd.m
├── rd.m
├── smith.m
├── smith_b.m
└── stopevent.m
├── graphs
├── graph_evolution.m
├── graph_fitness.m
├── graph_multi_pop.m
└── graph_simplex.m
├── revision_protocols
├── comparison2average.m
├── logit_choice.m
├── pairwise_comparison.m
├── proportional_imitation.m
└── state.m
├── run_game.m
├── run_game_finite_population.m
├── strategy.m
└── test
├── electricity grid
├── fitness_user.m
├── fitness_user_inefficient.m
├── graph_ev_dynamics.m
├── graph_final_state.m
├── graph_incentives_evolution.m
├── h_i.m
├── incentives_comb.m
├── p_x.m
├── test_electricity_system.m
├── test_electricity_system_test_boundary.m
├── utility.m
├── utility_incentives.m
└── v_x.m
├── fitness1.m
├── fitness2.m
├── fitness3.m
├── test1.m
├── test1.m.orig
├── test2.m
├── test3.m
├── test_finite_population1.m
├── test_maximization.m
└── test_maximization_b.m
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2014, Carlos Barreto carlobar@gmail.com
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
5 |
6 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
7 |
8 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
9 |
10 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
11 |
12 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | 
4 | # Remove editor backups and LaTeX auxiliary files from every directory.
5 | clean:
6 | 	rm -rf *~
7 | 	rm -rf ./docs/*~ ./docs/*.backup ./docs/*.aux ./docs/*.log ./docs/*.blg ./docs/*.toc
8 | 	rm -rf ./test/*~ ./test/*.backup
9 | 	rm -rf ./test/electricity\ grid/*~ ./test/electricity\ grid/*.backup
10 | 	rm -rf ./revision_protocols/*~ ./revision_protocols/*.backup
11 | 	rm -rf ./dynamics/*~ ./dynamics/*.backup
12 | 	rm -f ./graphs/*~ ./graphs/*.backup
13 | 
14 | 
15 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## PDToolbox
2 |
3 |
4 | PDToolbox is a matlab implementation of some evolutionary dynamics from game theory. The current version supports the implementation of replicator dynamics, Maynard Smith's replicator dynamics, Smith dynamics, logit dynamics, and Brown-von Neumann-Nash dynamics. Also, it can be used to run revision protocols, that is, population games with a small number of agents per population.
5 |
6 | This toolbox is designed to facilitate the implementation of any game, regardless of the number of populations, strategies per population, and fitness function. It has some pre-built methods to implement different dynamics, revision protocols, and to plot the evolution of the state of each population. This software is distributed under the BSD license.
7 |
8 |
9 | ## Installation
10 |
11 | 1. Download files from the 'git' repository with the following command:
12 |
13 |
14 | git clone https://github.com/carlobar/PDToolbox_matlab
15 |
16 |
17 | Or download and extract the files directly from the following link:
18 |
19 |
20 | https://github.com/carlobar/PDToolbox_matlab/archive/master.zip
21 |
22 |
23 | 2. Open matlab and add the path of the following directories:
24 |
25 | path(path, '[download directory]/PDToolbox_matlab')
26 |
27 | path(path, '[download directory]/PDToolbox_matlab/dynamics')
28 |
29 | path(path, '[download directory]/PDToolbox_matlab/graphs')
30 |
31 | path(path, '[download directory]/PDToolbox_matlab/test')
32 |
33 |
34 | The path can be verified with the command 'path'.
35 |
36 | The path can be added automatically by running 'add_path.m'.
37 |
38 |
39 |
40 | ## Running the toolbox
41 |
42 | In order to use the toolbox we must define the parameters of the game in a data structure. Some parameters that must be defined are:
43 |
44 | * Number of populations,
45 | * Strategies of each population
46 | * Evolutionary dynamics
47 | * ODE solver
48 | * Fitness function
49 |
50 |
51 | The following examples are included in the 'test' directory:
52 |
53 | 'test1.m': game with one population and three strategies per population.
54 |
55 | 'test2.m': game with two populations and two strategies per population.
56 |
57 | 'test3.m': game with one population, three strategies per population, and combined dynamics.
58 |
59 | 'test_finite_population1.m': a game with a small number of agents per population.
60 |
61 | 'test_maximization.m': application to a maximization problem using one population.
62 |
63 | 'test_maximization_b.m': application to a maximization problem using multiple populations.
64 |
65 | 'electricity grid/test_electricity_system.m': application to demand response programs.
66 |
67 |
68 |
69 | ## Documentation
70 |
71 | An introduction to evolutionary dynamics with some examples is available at:
72 |
73 | https://github.com/carlobar/PDToolbox_matlab/blob/master/docs/documentation.pdf
74 |
75 |
76 |
77 | ## License
78 |
79 |
80 | PDToolbox is released under the [BSD License](http://opensource.org/licenses/BSD-3-Clause)
81 |
82 |
--------------------------------------------------------------------------------
/add_path.m:
--------------------------------------------------------------------------------
1 | % ADD_PATH adds the toolbox's directories to the search path.
2 | %
3 | % Adds the toolbox root plus the 'revision_protocols', 'graphs',
4 | % 'dynamics', and 'test' subdirectories (the directories listed in the
5 | % README installation instructions). Run it from the toolbox root.
6 | %
7 | % SEE ALSO definition, run_game, run_game_finite_population
8 | %
9 | % For more information see: the GitHub's repository.
10 | %
11 | % Carlos Barreto, 04-11-16
12 | 
13 | directory = pwd;
14 | 
15 | addpath(directory)
16 | % fullfile builds the paths with the platform's file separator
17 | addpath(fullfile(directory, 'revision_protocols'))
18 | addpath(fullfile(directory, 'graphs'))
19 | addpath(fullfile(directory, 'dynamics'))
20 | addpath(fullfile(directory, 'test'))
21 | 
--------------------------------------------------------------------------------
/definition.m:
--------------------------------------------------------------------------------
1 | function G = definition(G)
2 | % DEFINITION Checks the parameters of the game, assigns default values, and
3 | % defines functions to run the game and to plot the evolution of
4 | % the populations
5 | %
6 | % SYNOPSIS: G2 = DEFINITION(G1)
7 | %
8 | % INPUT G1: Structure with some parameters of the game
9 | %
10 | % OUTPUT G2: Structure with all the parameters and functions required to run
11 | % the game
12 | %
13 | % REMARKS This function uses the global variables "norm_dx" and "c_error".
14 | % It is recommended to avoid their use in other files because they
15 | % might be overwritten
16 | %
17 | % SEE ALSO add_path, run_game, run_game_finite_population, bnn, logit, rd,
18 | % maynard_rd, smith, smith_b, stopevent, combined_dynamics,
19 | % comparison2average, logit_choice, pairwise_comparison,
20 | % proportional_imitation
21 | %
22 | % For more information see: the GitHub's repository.
23 | %
24 | % Carlos Barreto, 04-11-16
25 | 
26 | global norm_dx c_error
27 | 
28 | norm_dx = 1;
29 | 
30 | % find the name of the structure that represents the game
31 | G.name = inputname(1);
32 | 
33 | % check society size
34 | if isfield(G, 'P') == 0
35 | G.P = 1;
36 | else
37 | if G.P < 1
38 | error('Invalid value of G.P.'); % error() raises; exit() would close MATLAB
39 | end
40 | end
41 | 
42 | % check number of pure strategies
43 | if isfield(G,'n') ~= 0
44 | if floor(G.n) > 1
45 | G.S = ones(G.P, 1) * G.n;
46 | else
47 | error('Invalid value of G.n.')
48 | end
49 | else
50 | if isfield(G, 'S') == 0
51 | error('Number of strategies per population must be defined.');
52 | elseif (size(G.S, 1) < G.P)
53 | error('Number of strategies not defined for some populations.')
54 | end
55 | end
56 | 
57 | 
58 | % check if the initial conditions were defined (must be normalized)
59 | if isfield(G, 'x0') == 0
60 | G.x0 = zeros( G.P, max(G.S) );
61 | for i=1:G.P
62 | x = rand(1, G.S(i));
63 | G.x0( i, 1 : G.S(i) ) = x / sum(x);
64 | end
65 | else
66 | % check if the initial condition is well defined
67 | n = size(G.x0, 1);
68 | m = size(G.x0, 2);
69 | if ~( (n == G.P) && (m == max(G.S)) )
70 | if ( (m == G.P) && (n == max(G.S)) )
71 | G.x0 = G.x0';
72 | else
73 | error('Invalid initial condition. Size of G.x0 does not match G.P and G.S.')
74 | end
75 | end
76 | end
77 | 
78 | 
79 | % check the mass of each population
80 | if isfield(G,'m') == 0
81 | G.m = ones(G.P, 1);
82 | elseif size(G.m, 1) == 1
83 | m = G.m;
84 | G.m = ones(G.P, 1)*m;
85 | elseif size(G.m, 1) < G.P
86 | G.m = ones(G.P, 1);
87 | warning('Setting the mass of all populations to 1.')
88 | end
89 | 
90 | 
91 | % check if the initial conditions match with the mass of the population
92 | if any( abs(sum(G.x0, 2) - ones(G.P, 1)) >= eps(ones(G.P, 1)) ) % any(): warn if at least one population mismatches
93 | warning('Populations` initial state x0 does not match the mass m.')
94 | end
95 | 
96 | 
97 | % dynamics used by default
98 | if isfield(G, 'dynamics') == 0
99 | G.dynamics = {'rd'};
100 | end
101 | 
102 | if isfield(G, 'revision_protocol') == 0
103 | G.revision_protocol = {'proportional_imitation'};
104 | end
105 | 
106 | % check the definition of the combined dynamics
107 | if length(G.dynamics) > 1
108 | if isfield(G, 'gamma') == 0
109 | G.gamma = ones(length(G.dynamics), 1) / length(G.dynamics);
110 | elseif length(G.dynamics) ~= length(G.gamma)
111 | error('Size of G.gamma does not match the size of G.dynamics');
112 | end
113 | end
114 | 
115 | % check the ODE solver
116 | if isfield(G, 'ode') == 0
117 | G.ode = 'ode45';
118 | end
119 | 
120 | % if 'tol' is defined, then RelTol and AbsTol take its value
121 | if isfield(G, 'tol') ~= 0
122 | G.RelTol = G.tol;
123 | G.AbsTol = G.tol;
124 | else
125 | if isfield(G, 'RelTol') == 0
126 | G.RelTol = .0001;
127 | end
128 | 
129 | if isfield(G, 'AbsTol') == 0
130 | G.AbsTol = .0001;
131 | end
132 | end
133 | 
134 | % check the time to run the game
135 | if isfield(G, 'time') == 0
136 | G.time = 30;
137 | end
138 | 
139 | % check the step between time samples
140 | if isfield(G, 'step') == 0
141 | G.step = .01;
142 | end
143 | 
144 | % check if the convergence stop criteria is enabled
145 | if isfield(G, 'stop_c') == 0
146 | G.stop_c = false;
147 | else
148 | if isfield(G, 'c_error') == 0
149 | c_error = 1e-5;
150 | G.c_error = c_error;
151 | else
152 | c_error = G.c_error;
153 | end
154 | end
155 | 
156 | % define ODE solver options
157 | if isfield(G, 'options_ode') == 0
158 | if (G.stop_c == true)
159 | G.options_ode = odeset('RelTol', G.RelTol, 'AbsTol', G.AbsTol, 'Events', @stopevent);
160 | else
161 | G.options_ode = odeset('RelTol', G.RelTol, 'AbsTol', G.AbsTol);
162 | end
163 | end
164 | 
165 | % check the fitness function
166 | if isfield(G,'f') == 0
167 | G.f = @fitness;
168 | end
169 | 
170 | % decide if the fitness function returns the fitness of a single population
171 | % or the fitness of the whole society
172 | if isfield(G,'pop_wise') == 0
173 | G.pop_wise = true;
174 | end
175 | 
176 | if isfield(G, 'R') == 0
177 | G.R = 1;
178 | end
179 | 
180 | if isfield(G, 'N') == 0
181 | G.N = 100; % default number of agents (was mistakenly assigned to G.R)
182 | end
183 | 
184 | if isfield(G, 'verb') == 0
185 | G.verb = true;
186 | end
187 | 
188 | % define functions to run the game
189 | G.run = @() run_game(G.name);
190 | G.run_finite = @() run_game_finite_population(G.name);
191 | 
192 | % define functions to plot the evolution of the game
193 | G.graph = @() graph_simplex(G.name);
194 | G.graph2p = @() graph_multi_pop(G.name);
195 | G.graph_evolution = @() graph_evolution(G.name);
196 | G.graph_fitness = @() graph_fitness(G.name);
197 | 
198 | % define a function that returns the matrix of the state at time T
199 | G.state = @(T) strategy(G.name, T);
200 | 
201 | 
202 | 
--------------------------------------------------------------------------------
/docs/combined_dynamics.tex:
--------------------------------------------------------------------------------
1 | \FloatBarrier
2 | \section{Combined Dynamics} \label{sec:combined}
3 |
4 | It is possible to define a set of dynamics to run a combination of the dynamics.
5 | The resulting dynamic is defined as
6 | \begin{equation}
7 | \dot{ x } = \sum_{d\in \mathcal{D}} \gamma_d V_d( x ),
8 | \end{equation}
9 | where $\mathcal{D}=\{ Logit, RD, Smith, BNN \}$ denotes the set of available dynamics, $V_d()$ is the differential equation of the $d\th$ dynamic and $\gamma_d$ is the weight assigned to it.
10 | The dynamics should be defined in a cell array, e.g.,
11 | \begin{lstlisting}
12 | dynamics = {'bnn', 'rd'};
13 | \end{lstlisting}
14 | The combination is a linear combination of the dynamics listed in the cell array. The weight assigned to each dynamic is defined in the vector \verb|gamma|. In this case we assign
15 | \begin{lstlisting}
16 | gamma = [.25, .75];
17 | \end{lstlisting}
18 |
19 | Fig. \ref{fig:rps_combined} shows an example of the combined dynamics for the rock-paper-scissors game. Note that the evolution of the system is not confined to a limit cycle, as happened with the replicator dynamics in Fig. \ref{fig:finite1}.
20 |
21 | \begin{figure}[h]
22 | \centering
23 | \begin{subfigure}[b]{0.4\textwidth}
24 | \includegraphics[width=\textwidth]{./images/test_combined.eps}
25 | \caption{Simplex.}
26 | \label{fig:test_combined_simplex}
27 | \end{subfigure}
28 | ~
29 | \begin{subfigure}[b]{0.45\textwidth}
30 | \includegraphics[width=\textwidth]{./images/test_combined_ev.eps}
31 | \caption{Evolution of the strategies in time.}
32 | \label{fig:test_combined_ev}
33 | \end{subfigure}
34 | \caption{Evolution of the combination of replicator dynamics and BNN dynamics.}
35 | \label{fig:rps_combined}
36 | \end{figure}
37 |
38 |
39 |
40 |
--------------------------------------------------------------------------------
/docs/documentation.bbl:
--------------------------------------------------------------------------------
1 | \begin{thebibliography}{1}
2 |
3 | \bibitem{barreto2013design}
4 | Carlos Barreto, Eduardo Mojica-Nava, and Nicanor Quijano.
5 | \newblock Design of mechanisms for demand response programs.
6 | \newblock In {\em Proceedings of the 2013 IEEE 52nd Annual Conference on
7 | Decision and Control (CDC)}, pages 1828--1833, 2013.
8 |
9 | \bibitem{barreto2014incentives}
10 | Carlos Barreto, Eduardo Mojica-Nava, and Nicanor Quijano.
11 | \newblock Incentives-based mechanism for efficient demand response programs.
12 | \newblock {\em arXiv preprint arXiv:1408.5366}, 2014.
13 |
14 | \bibitem{fudenberg98}
15 | Drew Fudenberg and David~K. Levine.
16 | \newblock {\em The Theory of Learning in Games}, volume~1 of {\em MIT Press
17 | Books}.
18 | \newblock The MIT Press, April 1998.
19 |
20 | \bibitem{gal}
21 | David Gal.
22 | \newblock A psychological law of inertia and the illusion of loss aversion.
23 | \newblock {\em Judgment and Decision Making}, 1(1):23--32, July 2006.
24 |
25 | \bibitem{hofbauer2001nash}
26 | Josef Hofbauer.
27 | \newblock From nash and brown to maynard smith: equilibria, dynamics and ess.
28 | \newblock {\em Selection}, 1(1):81--88, 2001.
29 |
30 | \bibitem{Johari09}
31 | Ramesh Johari and John~N. Tsitsiklis.
32 | \newblock Efficiency of scalar-parameterized mechanisms.
33 | \newblock {\em Oper. Res.}, 57(4):823--839, July 2009.
34 |
35 | \bibitem{AlgorithmicG}
36 | Noam Nisan, Tim Roughgarden, {\'E}va Tardos, and Vijay~V. Vazirani.
37 | \newblock {\em Algorithmic Game Theory}.
38 | \newblock Cambridge University Press, 32 Avenue of the Americas, New York, NY
39 | 10013-2473, USA, 2007.
40 |
41 | \bibitem{sandholm_book}
42 | William~H. Sandholm.
43 | \newblock {\em {Population Games and Evolutionary Dynamics (Economic Learning
44 | and Social Evolution)}}.
45 | \newblock The MIT Press, 1 edition, January 2011.
46 |
47 | \end{thebibliography}
48 |
--------------------------------------------------------------------------------
/docs/documentation.dvi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/carlobar/PDToolbox_matlab/fea827a80aaa0150932e6e146907f71a83b7829b/docs/documentation.dvi
--------------------------------------------------------------------------------
/docs/documentation.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/carlobar/PDToolbox_matlab/fea827a80aaa0150932e6e146907f71a83b7829b/docs/documentation.pdf
--------------------------------------------------------------------------------
/docs/documentation.tex:
--------------------------------------------------------------------------------
1 | \documentclass[a4paper,10pt]{article}
2 |
3 | \usepackage[utf8]{inputenc}
4 | \usepackage{epsfig}
5 | \usepackage{amsmath}
6 | \usepackage{amssymb}
7 | \usepackage{array}
8 | \usepackage{float}
9 | \usepackage{ctable}
10 | \usepackage{multirow}
11 | \usepackage{graphicx}
12 | \usepackage{caption}
13 | \usepackage{subcaption}
14 | \usepackage{amsfonts}
15 | \usepackage{cite}
16 | \usepackage[]{algorithm2e}
17 |
18 | \usepackage{fullpage}
19 |
20 | \usepackage{listings}
21 | \lstset{breaklines=true}
22 |
23 |
24 | \def\th{^{th}}
25 | \newcommand{\bs}[1]{\boldsymbol{#1}}
26 | \newcommand{\pop}{\mathcal{P}}
27 |
28 | \def\th{^{th}}
29 | \def\pd{\frac{\partial}{\partial q_i^k}}
30 | \newcommand{\mcf}[1]{p\Big( \norm{\bs{#1}^k}_1 \Big)}
31 | \def\pdy{\frac{\partial}{\partial y_i^k}}
32 | \def\pdq{\frac{\partial}{\partial q_i^k}}
33 | \newcommand{\diag}{\mathop{\mathrm{diag}}}
34 | %\newcommand{\bs}[1]{\boldsymbol{#1}}
35 | \newcommand{\norm}[1]{\left\lVert#1\right\rVert}
36 |
37 | \newcommand{\normb}[1]{\left\lVert \bs{#1} \right\rVert_1}
38 |
39 |
40 | \usepackage{autonum}
41 | \usepackage{placeins}
42 |
43 | \title{Population Dynamics Toolbox}
44 | \author{Carlos Barreto}
45 |
46 | \begin{document}
47 | \lstset{language=Matlab}
48 | \maketitle
49 |
50 |
51 |
52 | \begin{abstract}
53 | The \emph{population dynamics toolbox} (PDToolbox) contains a set of functions to implement evolutionary dynamics with multiple populations. We consider both small and large populations. For finite populations, we implement some revision protocols to model random interactions between agents. On the other hand, the evolution of a society with large populations is approximated by dynamical equations.
54 |
55 | This toolbox is designed to facilitate the implementation of any game with different evolutionary dynamics or revision protocols. In particular, our attempt is to make an efficient implementation of the algorithms to compute the dynamical evolution of the society. Also, the toolbox counts with some functions to plot the state of the system and the evolution of each strategy.
56 |
57 | In Section \ref{sec:introduction} we start by introducing the notation used along the paper and we present some ideas of population games which lead to the \emph{mean dynamics} equation. In Section \ref{sec:protocols} we introduce some well known revision protocols and evolutionary dynamics. In Section \ref{sec:implementation} we introduce some details of the implementation of games with the toolbox and show an example of the implementation of the rock-paper-scissors game.
58 | Sections \ref{sec:combined} and \ref{sec:multi-pop} contain examples of the implementation of \emph{combined dynamics} and \emph{multi-population games}.
59 |
60 | \end{abstract}
61 |
62 |
63 | \tableofcontents
64 |
65 |
66 |
67 | \input{introduction}
68 |
69 | \input{evolutionary_dynamics}
70 |
71 | \input{implementation}
72 |
73 | \input{combined_dynamics}
74 |
75 | \input{multipopulation}
76 |
77 | \input{maximization}
78 |
79 | \input{example_dr}
80 |
81 |
82 |
83 | \FloatBarrier
84 | \section{Running Time Analysis}\label{sec:running_time}
85 |
86 |
87 | In this section we investigate the running time of the evolutionary dynamics as a function of either the number of populations or the number of strategies per population. We use the demand response example from Section \ref{sec:dr_example} to make the experiments.
88 | Our interest is to observe the time that takes to simulate the evolution of the dynamics during 10 seconds.
89 |
90 | \subsection{Running Time as a Function of the Number of Populations}
91 |
92 | Note that the time complexity of the algorithms, with respect to number of populations $P$, is $O(P \cdot T_f(n,P))$. The fitness function in the example satisfies $T_f(n,P)=O(nP)$. Hence, the complexity of the dynamics with respect to $P$ is $O(P^2)$.
93 |
94 | Although the dynamics have the same time complexity, the running time of the dynamics has substantial differences.
95 | Fig. \ref{fig:run_time_pop} shows the running time of the game for multiple number of populations. In this case each population has $24$ strategies and all populations have the same set of fitness functions.
96 | From the simulations we observe that the
97 | %running time increases roughly linearly for all dynamics. The
98 | fastest simulations are made with replicator dynamics.
99 | Moreover, there is a large difference between the running time of Replicator and the other dynamics.
100 | %On the other hand, Smith and Logit dynamics have almost the same running time.
101 | %
102 | One reason is that the ODE solver runs fewer iterations with Replicator dynamics, and consequently, it makes fewer calls to the dynamic's algorithm.
103 |
104 |
105 |
106 | \subsection{Running Time as a Function of the Number of Strategies}
107 |
108 |
109 | Fig. \ref{fig:run_time_str} shows the running time of the game for different numbers of strategies per population. In this case we define $25$ populations. Note that the time complexity of the dynamics with respect to the number of strategies is $O(n)$ (or $O(n \log n)$ in the case of the Smith dynamics). In this case Replicator has the lowest running time, while Smith has the largest running time. The large difference between Smith and the other dynamics is that the ODE solver performs many more iterations.
110 |
111 |
112 | Note that, except for Smith dynamics, the running time is lower when we implement games with large number of strategies. Hence, the maximization problem in Section \ref{sec:maximization} might be solved faster using the single population case.
113 |
114 |
115 |
116 | \begin{figure}
117 | \centering
118 | \begin{subfigure}[b]{0.45\textwidth}
119 | \includegraphics[width=1\textwidth]{./images/running_time_pop.eps}
120 | \caption{Run time for different population numbers and constant number of strategies.}
121 | \label{fig:run_time_pop}
122 | \end{subfigure}
123 | ~
124 | \begin{subfigure}[b]{0.45\textwidth}
125 | \includegraphics[width=1\textwidth]{./images/running_time_str.eps}
126 | %\includegraphics[width=1\textwidth]{./images/running_time_strategies.eps}
127 | \caption{Run time for different number of strategies and constant number of populations.}
128 | \label{fig:run_time_str}
129 | \end{subfigure}
130 | \caption{Run time of four evolutionary dynamics: `rd', `bnn', `smith', and `logit'.}
131 | \label{fig:run_time}
132 | \end{figure}
133 |
134 | %Which case gives the largest simulation time?
135 |
136 |
137 |
138 | \subsection{Running Time of Smith Dynamics }
139 |
140 | In the toolbox we include two algorithms for the Smith dynamics. One of them relies on matrix multiplications and has running time $O(P(T_f(n,P) + n^2))$ (`smith.m'), while the other has time complexity $O(P(T_f(n,P) + n \log n))$ (`smith\_b.m'). Even though the time complexity of the first algorithm is higher, its simulations are faster under certain conditions. The reason is that Matlab is optimized to work with matrices. From the experiments we see that `smith\_b.m' is faster only for a large number of strategies (see Fig. \ref{fig:smith_run_time}).
141 |
142 | \begin{figure}
143 | \centering
144 | \begin{subfigure}[b]{0.45\textwidth}
145 | \includegraphics[width=\textwidth]{./images/running_time_smith_pop.eps}
146 | \caption{Run time for different population numbers and constant number of strategies.}
147 | \label{fig:smith_run_time_pop}
148 | \end{subfigure}%
149 | ~
150 | \begin{subfigure}[b]{0.45\textwidth}
151 | \includegraphics[width=\textwidth]{./images/running_time_smith_str.eps}
152 | \caption{Run time for different number of strategies and constant number of populations.}
153 | \label{fig:smith_run_time_str}
154 | \end{subfigure}%
155 |
156 | \caption{Run time of two implementations of the Smith dynamics, namely `smith' and `smith\_b'.}
157 | \label{fig:smith_run_time}
158 | \end{figure}
159 |
160 |
161 | \FloatBarrier
162 |
163 |
164 |
165 |
166 |
167 | \iffalse
168 |
169 | \FloatBarrier
170 |
171 | \section{Designing games}
172 |
173 | In the previous section we show some examples of strategical situations that can be analyzed with game theory. In these cases, the structure of the game is given by the problem. However, we can modify the fitness function of each player in order to solve an optimization problem.
174 |
175 | For example, let us consider the following optimization problem:
176 |
177 | \begin{equation}\label{eq:opt_problem}
178 | \begin{aligned}
179 | & \underset{x}{\text{maximize}}
180 | & & \sum_{i=1}^N U_i(x_i) - C(|x|)\\
181 | & \text{subject to}
182 | & & 0 \leq q_i \leq m, \quad i \in \{1,\ldots, N\}.
183 | \end{aligned}
184 | \end{equation}
185 |
186 | This can be seen as a problem of allocating a finite resource to maximize a utility function.
187 |
188 | Note that there are $N$ agents that give a valuation $v_i(x_i)$ to the resource $x_i$.
189 |
190 | However, the cost of assigning the resource is $C(|x|)$ .
191 |
192 | The cost of assigning the resource might be distributed among the population.
193 |
194 | Let us consider the following example:
195 |
196 |
197 | \begin{equation}
198 | U_i(x_i) = \alpha_i log(1+x_i)
199 | \end{equation}
200 |
201 | \begin{equation}
202 | C(z) = \beta z^2 + b z
203 | \end{equation}
204 |
205 | define fitness functions as
206 |
207 | \begin{equation}
208 | f_i(x_1, x_2) = \frac{\alpha_i}{1+x_i} - 2 \beta |x| - b
209 | \end{equation}
210 |
211 | \fi
212 |
213 |
214 | --
215 |
216 |
217 |
218 | \bibliographystyle{plain}
219 | \bibliography{references}
220 |
221 |
222 |
223 | \end{document}
224 |
225 |
226 |
227 | \iffalse
228 |
229 |
230 |
231 | \begin{figure}[htb]
232 | \includegraphics[1\textwidth]{./images/}
233 | \caption{}
234 | \label{fig:}
235 | \end{figure}
236 |
237 |
238 |
239 | \fi
240 |
--------------------------------------------------------------------------------
/docs/evolutionary_dynamics.tex:
--------------------------------------------------------------------------------
1 |
2 |
3 | \section{Revision Protocols and Evolutionary Dynamics}\label{sec:protocols}
4 |
5 |
6 | In this section we introduce four revision protocols, that lead to the evolutionary dynamics \emph{logit dynamics} (Logit), \emph{replicator dynamics} (RD), \emph{Brown-von Neumann-Nash dynamics} (BNN), and \emph{Smith dynamics} (Smith). These dynamics belong to the families of \emph{perturbed optimization}, \emph{imitative dynamics}, \emph{excess payoff dynamics}, and \emph{pairwise comparison dynamics}, respectively \cite{hofbauer2001nash, sandholm_book}.
7 |
8 |
9 |
10 |
11 |
12 |
13 | \subsection{Pairwise Proportional Imitation (Replicator Dynamics)}
14 |
15 | With a revision opportunity the $i\th$ agent observes an opponent $j$ at random. Then it might change its strategy if its opponent has a greater fitness. The switch rate is
16 | %
17 | \begin{equation}
18 | \rho_{ij}^p(\pi^p, x^p) = \frac{1}{m^p} [\pi_j^p - \pi_i^p]_+,
19 | \end{equation}
20 | where $[\cdot]_+:\Re \to \Re_{\geq0}$ represents the positive part, defined as $[ x ]_+ \equiv \max\{ 0, x \}$.
21 | This protocol leads to the \emph{replicator dynamics} defined as
22 | \begin{equation}\label{eq:replicator}
23 | \dot{x}_i^p = x_i^p \, \hat{F}_i^p \left( x \right),
24 | \end{equation}
25 | where $\hat{F}_i^p$ is the excess payoff to strategy $i\in S^p$, which is defined as
26 | \begin{equation}
27 | \hat{F}_i^p (x) = F_i^p(x) - \bar{F}^p(x),
28 | \end{equation}
29 | and $\bar{F}^p(x)$ is the average payoff of the population $p$, i.e.,
30 | \begin{equation}
31 | \bar{F}^p(x) = \frac{1}{m^p} \sum_{j \in S^p} x_j^p F_j^p(x).
32 | \end{equation}
33 |
34 |
35 |
36 | \subsubsection*{Algorithm}
37 |
38 | \begin{algorithm}[H]
39 | \SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
40 |
41 | \Input{Society's state $x$}
42 | \Output{State update $\dot{x}$}
43 | \BlankLine
44 |
45 | \For{ $ p \leftarrow 1 $ \KwTo $P$ } {
46 | $ F^p \leftarrow fitness(x, p)$\;
47 | $ \bar{F}^p \leftarrow \frac{1}{m^p} (F^p)^\top x^p$\;
48 | $ \hat{F}^p \leftarrow F^p - \boldsymbol{1} \bar{F}^p$\;
49 | $ \dot{x}^p \leftarrow \hat{F}^p \odot x^p \frac{1}{m^p} $\;
50 | }
51 | \end{algorithm}
52 | %
53 | The running time of the algorithm is $T_{rd}(n, P) = O( P ( T_{f}(n,P) + n) ) $, where $T_{f}(n,P)$ is the time required to calculate the fitness vector of a population.
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 | \subsection{Comparison to the Average Payoff (Brown-von Neumann-Nash Dynamics (BNN))}
63 |
64 | With a revision opportunity the $i\th$ agent selects a strategy at random and might switch to it if that strategy has a payoff above the average. The agent switches strategy with probability proportional to the excess payoff
65 | %
66 | \begin{equation}
67 | \rho_{ij}^p(\pi^p, x^p) = \left[ \pi_j^p - \frac{1}{m^p} \sum_{k\in S^p} x_k^p \pi_k^p \right]_+,
68 | \end{equation}
69 |
70 | This protocol leads to \emph{Brown-von Neumann-Nash dynamics}, defined as
71 | \begin{equation}\label{eq:bnn}
72 | \dot{x}_i^p = \left[ \hat{F}_i^p \left( \bs{x} \right) \right]_+ - x_i^p \sum_{j \in S^p} \left[ \hat{F}_j^p \left( \bs{x} \right) \right]_+.
73 | \end{equation}
74 |
75 |
76 |
77 | \subsubsection*{Algorithm}
78 |
79 | \begin{algorithm}[H]
80 | \SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
81 |
82 | \Input{Society's state $x$}
83 | \Output{State update $\dot{x}$}
84 | \BlankLine
85 |
86 | \For{ $ p \leftarrow 1 $ \KwTo $P$ } {
87 | $ F^p \leftarrow fitness(x, p)$\;
88 | $ \bar{F}^p \leftarrow \frac{1}{m^p} (F^p)^\top x^p$\;
89 | $ \hat{F}^p \leftarrow \max\{F^p - \boldsymbol{1} \bar{F}^p, \boldsymbol{0}\}$\;
90 | $ \dot{x}^p \leftarrow \hat{F}^p - (\boldsymbol{1}^\top \hat{F}^p) \odot x^p \frac{1}{m^p} $\;
91 | }
92 | \end{algorithm}
93 | %
94 | The running time is $T_{BNN}(n,P) = O( P ( T_{f}(n,P) + n) ) $.
95 |
96 |
97 |
98 |
99 |
100 |
101 | \subsection{Pairwise Comparison (Smith Dynamics)}
102 |
103 | With a revision opportunity the $i\th$ agent selects a strategy at random. If the opponent has a higher fitness, then the agent switches strategy with probability proportional to
104 | \begin{equation}
105 | \rho_{ij}(\pi, x) = \left[ \pi_j - \pi_i \right]_+
106 | \end{equation}
107 | This protocol leads to \emph{Smith dynamics} that are defined as
108 | %
109 | \begin{equation}
110 | \dot{x}_i^p = \sum_{\gamma \in S^p} x_\gamma^p \left[ F_i^p \left( \bs{x} \right) - F_\gamma^p \left( \bs{x} \right) \right]_+
111 | %%\\
112 | - x_i^p \sum_{\gamma \in S^p} \left[ F_\gamma^p ( \bs{x}) - F_i^p( \bs{x} ) \right]_+.
113 | \label{eq:smith}
114 | \end{equation}
115 |
116 |
117 | \subsubsection*{Algorithm}
118 |
119 | Here we present two algorithms.
120 | The first one has time complexity $O(P(T_f(n, P)+ n^2 ))$. This algorithm is implemented as `smith.m'. A characteristic of this implementation is that it might be faster under some conditions, because Matlab is optimized to operate with matrices (see more in Section \ref{sec:running_time}).
121 |
122 | \begin{algorithm}[H]
123 | \SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
124 |
125 | \Input{Society's state $x$}
126 | \Output{State update $\dot{x}$}
127 | \BlankLine
128 | \For{ $ p \leftarrow 1 $ \KwTo $P$ } {
129 | $ F^p \leftarrow fitness(x, p)$\;
130 | $A \leftarrow \boldsymbol{1} {F^p}^\top$\;
131 | $M \leftarrow \max(\boldsymbol{0}_{n\times n}, A-A^\top)$\;
132 |
133 | $F_{sum}^p \leftarrow M \boldsymbol{1}$\;
134 | $F_{avg}^p \leftarrow \frac{1}{m^p} x^\top \, M$\;
135 |
136 | $ \dot{x}^p \leftarrow F_{avg}^p - F_{sum}^p \odot x^p \frac{1}{m^p}$\;
137 | }
138 | \end{algorithm}
139 |
140 |
141 | Below we present an alternative algorithm that might be faster for large number of strategies.
142 | In this case we order the strategies in increasing order of fitness and then calculate the strategy's fitness differences (only the ones that are positive). This allows us to reduce the number of operations. The running time of this algorithm is $T_{smith}(n,p) = O(P(T_f(n, P)+ n\log(n) ))$. This algorithm is implemented as `smith\_b.m'.
143 |
144 |
145 | \begin{algorithm}[H]
146 | \SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
147 |
148 | \Input{Society's state $x$}
149 | \Output{State update $\dot{x}$}
150 | \BlankLine
151 | \For{ $ p \leftarrow 1 $ \KwTo $P$ } {
152 | $ F^p \leftarrow fitness(x, p)$\;
153 | $A \leftarrow$ Fitness functions ordered in ascending order\;
154 | $B \leftarrow$ Strategies ordered in ascending order by their fitness \;
155 |
156 | $A_{sum} \leftarrow \boldsymbol{1}^\top A$\;
157 | $A_{avg} \leftarrow 0$\;
158 | $x_{ord} \leftarrow x(B)\frac{1}{m^p} $\;
159 | $x_{cum} \leftarrow 0$\;
160 |
161 | \For{ $i \leftarrow 1$ \KwTo $n^p$ }{
162 | $k \leftarrow B(i)$\;
163 | $A_{sum} \leftarrow A_{sum} - A(i)$\;
164 |
165 |
166 | $\Gamma_a^p[k] \leftarrow A(i) x_{cum} - A_{avg}$\;
167 | $\Gamma_b^p[k] \leftarrow A_{sum} - A(i) (n-i) $\;
168 |
169 | $A_{avg} \leftarrow A_{avg} + A(i) x_{ord}(i)$\;
170 | $x_{cum} \leftarrow x_{cum} + x_{ord}(i)$\;
171 |
172 | }
173 | $ \dot{x}^p \leftarrow \Gamma_a^p - \Gamma_b^p \odot x^p \frac{1}{m^p}$\;
174 | }
175 | \end{algorithm}
176 |
177 |
178 |
179 |
180 | \subsection{Logit Choice}
181 |
182 | With a revision opportunity the $i\th$ agent selects a strategy at random and change its strategy with a probability proportional to
183 |
184 | \begin{equation}
185 | \rho_{ij}(\pi) = \frac{ \exp(\pi_j \eta^{-1} ) }{ \sum_{k \in S} \exp(\pi_k \eta^{-1} ) }
186 | \end{equation}
187 |
188 | This protocol belongs to the target dynamics and with a large population results in the following dynamics
189 | \begin{equation}\label{eq:logit}
190 | \dot{x}_i^p = \frac{ \exp\left(\eta^{-1} F_i^p (\bs{x}) \right) }{ \sum_{\gamma \in S^p} \exp\left(\eta^{-1} F_\gamma^p (\bs{x}) \right) } - x_i^p, \, \, \eta>0,
191 | \end{equation}
192 | known as \emph{Logit dynamics}.
193 |
194 |
195 |
196 | \subsubsection*{Algorithm}
197 |
198 | \begin{algorithm}[H]
199 | \SetKwInOut{Input}{input}\SetKwInOut{Output}{output}
200 |
201 | \Input{Society's state $x$}
202 | \Output{State update $\dot{x}$}
203 | \BlankLine
204 |
205 | \For{ $ p \leftarrow 1 $ \KwTo $P$ } {
206 | $ F^p \leftarrow fitness(x, p)$\;
207 | $ \bar{F}^p \leftarrow \frac{1}{m^p} (F^p)^\top x^p$\;
208 | $ \tilde{F}^p \leftarrow \exp( F^p \eta^{-1} )$\;
209 | $ \Gamma \leftarrow \boldsymbol{1}^\top \tilde{F}^p $\;
210 | $ \dot{x}^p \leftarrow \frac{\tilde{F}^p}{\Gamma} - x^p $\;
211 | }
212 | \end{algorithm}
213 |
214 | The running time is $T_{logit}(n,P) = O( P ( T_{f}(n,P) + n) ) $.
215 |
216 |
217 |
218 | \iffalse
219 | \subsection{Maynard Smith Replicator}
220 |
221 | \begin{equation}
222 | \dot{x}_i = \frac{ x_i F_i }{ \bar{F}(x) } - x_i
223 | \end{equation}
224 | \fi
225 |
226 |
227 |
--------------------------------------------------------------------------------
/docs/example_dr.tex:
--------------------------------------------------------------------------------
1 |
2 | \FloatBarrier
3 |
4 | \section{Example: Demand response programs}\label{sec:dr_example}
5 |
6 | This is an example of multiple populations used to implement demand response programs in smart grids \cite{barreto2013design, barreto2014incentives}. In this case, we assume that each user must decide how to distribute its electricity usage along a day. Particularly,
7 | agents might have conflicting interests because they might impose externalities on the society through the price signals, i.e., the aggregated demand might affect the profit of agents. This conflict can be seen as a game between agents, in which each agent is selfish and endeavors to maximize independently its own welfare.
8 |
9 | In this problem we model the daily electricity allocation problem as a multi-population game with nonlinear fitness functions. Particularly, each agent can implement an evolutionary dynamic to find the best distribution of resources. Note that when implemented locally by each user, the evolutionary dynamics lead to the global efficient equilibrium (In this case the fitness is equal to the marginal utility of each agent).
10 |
11 | A particular feature of this problem is that the Nash equilibrium of the system is inefficient. Hence,
12 | we introduce an incentives scheme (indirect revelation mechanism) to maximize the aggregated surplus of the population.
13 | The main feature of this mechanism is that it does not require private information from users, and employs a one dimensional message space to coordinate the demand profile of agents. These properties facilitate the distributed implementation of the mechanism. The mechanism entrusts the computation tasks among users, who should maximize their own utility function based on the aggregated demand (that is calculated and broadcasted by a central agent). Thus, users avoid revelation of private information (e.g., preferences), but are required to report the aggregated consumption of their appliances during some time periods.
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 | \subsubsection{Problem Formulation}
22 |
23 |
24 |
25 | We consider a population composed of $N$ consumers defined as $\mathcal{V} = \{1,\ldots,N\}$. Also, let us divide a period of 24 hours in a set of $T$ time intervals denoted $\tau = \{\tau_1,\ldots,\tau_T\}$.
26 | Formally, we define the set $\tau$ as a partition of $[0,24)$, where
27 | $\cup_{t\in\{1,\ldots,T\}} \tau_t = [0,24)$ and $\tau_s \cap \tau_t = \varnothing$ for all $s \neq t$.
28 | %
29 | Let $q_i^t$ be the electricity consumption of the $i\th$ user in the $t\th$ time interval.
30 | The daily electricity consumption of the $i\th$ user is represented by the vector $\bs{q}_i=[q_i^1,\ldots,q_i^T]^\top\in \Re_{\geq 0}^{T}$.
31 | The population consumption at a given time $t$ is defined by the vector $\bs{q}^t = [q_1^t, q_2^t,\ldots,q_N^t]^\top\in \Re_{\geq 0}^{N}$.
32 | On the other hand, the joint electricity consumption of the whole population is denoted by $\bs{q} = [\bs{q}_1^\top,
33 | \ldots, \bs{q}_N^\top]^\top$.
34 | Without loss of generality, we assume that the electricity consumption of the $i\th$ user satisfies $q_i^t\geq 0$, in each time instant $t$.
35 | A \emph{valuation function} $v_i^t(q_i^t)$ models the \emph{valuation} that the $i\th$ user gives to an electricity consumption of $q_i^t$ units in the $t\th$ time interval. Finally, let $p(\cdot):\Re\rightarrow\Re$ be the price of electricity charged to consumers. The aggregated consumption at a given time $t$ is defined as $||\bs{q}^t||_1 = \sum_{j=1}^N q_j^t$.
36 | Moreover, a daily valuation is
37 | $v_i(\bs{q}_i)=\sum_{t=1}^T v_i^t(q_i^t),$
38 | where $t\in\{1,\ldots,T\}$.
39 |
40 |
41 |
42 |
43 |
44 | Now, assuming that the electricity generation cost is the same for all $t$, we can express the profit function of each individual as
45 | %
46 | \begin{equation}\label{eq:u_i_}
47 | U_i(\bs{q}) = v_i(\bs{q}_i) - \sum_{t=1}^T q_i^t p\Big( \norm{\bs{q}^t}_1 \Big),
48 | \end{equation}
49 | %
50 | where
51 | $p:\Re_+ \to \Re_+$ is the unitary price function.
52 | The consumers welfare function is maximized by solving \cite{Johari09}
53 | %
54 | \begin{equation}\label{eq:opt_problem}
55 | \begin{aligned}
56 | & \underset{\bs{q}}{\text{maximize}}
57 | & & \sum_{i=1}^N U_i(\bs{q}) = \sum_{i=1}^N\left( v_i(\bs{q}_i) - \sum_{t=1}^T q_i^t p\left( \norm{\bs{q}^t}_1 \right) \right) \\
58 | & \text{subject to}
59 | & & q_i^t \geq 0, i =\{1,\ldots,N\}, t =\{1,\ldots,T\}.
60 | \end{aligned}
61 | \end{equation}
62 |
63 |
64 |
65 | \subsubsection{Incentives}
66 |
67 | The solution of the optimization problem in Eq.~(\ref{eq:opt_problem}) is inefficient in a strategic environment, i.e., when individuals are rational and selfish \cite{barreto2013design, Johari09}. In such cases, the analysis of strategic interactions among rational agents is made using game theory \cite{fudenberg98}.
68 | In particular, the Nash equilibrium (a solution concept in game theory) is sub-optimal, however, we can show that if we consider an added incentive to the individual cost function of each player, the Nash equilibrium of the game with incentives can be made efficient in the sense of Pareto \cite{barreto2013design, barreto2014incentives}.
69 |
70 | In particular, our DR scheme with incentives models the case when all agents keep their valuation of electricity to themselves, and have autonomous control of their consumption. However, in order to incentivize the agents to modify their behavior for the good of the population, the central entity sends them an incentive (e.g., a price signal or reward) to indirectly control their load.
71 |
72 | Consider the new cost function for the $i^{th}$ agent:
73 | \begin{equation}\label{eq:game2}
74 | W_i(q_i,\bs{q}_{-i})
75 | = v_i(q_i) - q_i p\left( \norm{\bs{q}^t}_1 \right) + I_i(\bs{q}),
76 | \end{equation}
77 | where incentives are of the form:
78 | \begin{equation}\label{eq:I_i}
79 | I_i(\bs{q}) = \left( \norm{\bs{q}_{-i}^t}_1\right) \left( h_i(\norm{\bs{q}_{-i}}) - p\left( \norm{\bs{q}^t}_1 \right) \right).
80 | \end{equation}
81 |
82 |
83 | The form of this incentive is inspired by the
84 | Vickrey-Clarke-Groves mechanism and the Clarke pivot rule \cite{AlgorithmicG}.
85 | %
86 | We assign incentives according to the contribution made by an agent
87 | to the society. In particular, the function $h_i:\Re \to \Re$ is a design parameter that estimates the externalities introduced by each individual.
88 | It can be shown that these incentives can lead to an optimal equilibrium in a strategic environment.
89 | In this DR approach we consider that the utility sends a two dimensional signal to each customer, namely $(q,I_i)$ and each customer responds with some consumption $q_i$.
90 | Note that the incentives modify the price paid by each user according to their relative consumption. However, two different users receive different incentives as long as their consumption are different.
91 |
92 |
93 |
94 |
95 |
96 | \subsubsection{Simulations}
97 |
98 | In this section, we illustrate some ideas of efficiency and the decentralized implementation of the incentives mechanism. We select some functions used previously in the literature. On the one hand, we define the family of valuation functions as
99 | \begin{equation}\label{eq:valuation_sim}
100 | v(\bs{q}^k,\alpha_i^k) = v_i^k (q_i^k) = \alpha_i^k \log(1+q_i^k)
101 | \end{equation}
102 | where $\alpha_i^k>0$ is the parameter that characterizes the valuation of the $i\th$ agent at the $k\th$ time instant.
103 | On the other hand, the generation cost function is defined as
104 | %
105 | \begin{equation}\label{eq:cost_sim}
106 | C(\|\bs{q}\|_1) = \beta ({\|\bs{q}\|_1})^2 + b {\|\bs{q}\|_1},
107 | \end{equation}
108 | and the unitary price function is
109 | %
110 | \begin{equation}\label{eq:p_sim}
111 | p(\|\bs{q}\|_1) = \frac{C(\|\bs{q}\|_1)}{\|\bs{q}\|_1} = \beta \|\bs{q}\|_1 + b.
112 | \end{equation}
113 | %
114 | Note that the generation cost only depends on the aggregated consumption, not on the time of the day. Furthermore,
115 | %
116 | the fitness function of the system with incentives is
117 | %
118 | \begin{equation}\label{eq:fitness_without_i_sim}
119 | F_i^k( \bs{q}^k) = \frac{\alpha_i^k }{1+q_i^k}
120 | - 2\beta \left( \sum_{j=1}^N q_j^k \right).
121 | \end{equation}
122 |
123 |
124 | The evolution of utility, demand, and incentives for different dynamics is shown in Figs.~\ref{fig:dynamics_u} and \ref{fig:dynamics_i}. Note that despite using the same initial condition, the evolution of the system is different with each dynamical model. In particular, BNN and Smith dynamics converge faster to the optimum, in contrast with the Logit and replicator dynamics.
125 | This is achieved by means of a fast decrease in the power consumption.
126 |
127 | \begin{figure}[hbt]
128 | \centering
129 | \includegraphics[width=.75\textwidth]{./images/evolution_u.eps}
130 | \caption{Evolution of profit and costs for four different dynamics.}
131 | \label{fig:dynamics_u}
132 | \end{figure}
133 |
134 |
135 | \begin{figure}[hbt]
136 | \centering
137 | \includegraphics[width=.75\textwidth]{./images/evolution_i.eps}
138 | \caption{Evolution of the incentives with four different dynamics.}
139 | \label{fig:dynamics_i}
140 | \end{figure}
141 |
142 |
143 |
144 | Incentives in Fig.~\ref{fig:dynamics_i} show that, in the long run, all dynamics converge to the same level of incentives. Particularly, Smith dynamics requires more incentives during all time, except for logit dynamics, which has a sudden increase in the incentives close to the equilibrium point.
145 |
146 | In Fig.~\ref{fig:dynamics_i} it is not clear which dynamical model moves the state of the system to the optimal equilibrium using less resources. To answer this question, we simulate the total amount of incentives used by each model.
147 | Thus, let us define the aggregated incentives in a society in a particular time $t$ as
148 | \begin{equation}
149 | I_d (t) = \sum_{i\in\mathcal{P}} \frac{1}{|S|} \sum_{k\in S} I_i \left( \bs{q}^k (t) \right).
150 | \end{equation}
151 | Now, the total accumulated incentives from $t_0$ to $t$ is defined as
152 | \begin{equation}
153 | \varPhi_d (t) = \int_{t_0}^t I_d (\tau) d\tau.
154 | \end{equation}
155 | Thus, $\varPhi_d (t)$ gives a measurement of the total amount of subsidies required by the system with dynamic $d$, in the time interval $[t_0, t]$.
156 | In this case we do not have a reference to compare the subsidies requirements of each evolutionary dynamic. Hence, we compare the subsidies requirements with the average requirements of all the dynamics implemented.
157 | %
158 | In order to see which dynamic requires more resources, we plot the cumulative resources required by each dynamic relative to the average.
159 | Hence, we define the cumulative incentives as
160 | %
161 | \begin{equation}
162 | CI_d = \frac{ \varPhi_d (t) }{ \sum_{d\in \mathcal{D}} \varPhi_d (t) }.
163 | \end{equation}
164 | %
165 | Fig.~\ref{fig:integral} shows the results of the simulation of the relative subsidies required by each model of evolutionary dynamics.
166 | %
167 |
168 |
169 |
170 | Smith dynamics requires considerably more resources over the whole time span, particularly during the first stages, while logit has the lowest incentive requirements. However, BNN has the lowest incentives in the long run.
171 |
172 | \begin{figure}[hbt]
173 | \centering
174 | \includegraphics[width=.75\textwidth]{./images/accumulated_i.eps}
175 | \caption{Accumulated incentives during the evolution of the algorithm.}
176 | \label{fig:integral}
177 | \end{figure}
178 |
179 |
180 |
181 | Fig. \ref{fig:final_state} shows the final demand profile of each agent. Note that the final state corresponds to the state of each population at the equilibrium.
182 |
183 | \begin{figure}[hbt]
184 | \centering
185 | \includegraphics[width=.75\textwidth]{./images/final_state.eps}
186 | \caption{Final demand profile of each agent.}
187 | \label{fig:final_state}
188 | \end{figure}
189 |
--------------------------------------------------------------------------------
/docs/images/cut.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Trim surrounding whitespace (tight bounding box) from each EPS file
3 | # passed as an argument, rewriting the file in place via ps2epsi.
4 |
5 | echo "############################################"
6 | echo "Cutting $# files:"
7 | echo "############################################"
8 |
9 | while [ $# -gt 0 ]
10 | do
11 |     echo "cutting $1"
12 |     # Quote expansions so filenames containing spaces survive word splitting.
13 |     ps2epsi "$1" "$1.tmp"
14 |     mv "$1.tmp" "$1"
15 |
16 |     shift
17 | done
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/docs/implementation.tex:
--------------------------------------------------------------------------------
1 | \section{Implementation} \label{sec:implementation}
2 |
3 | \subsection{Parameters of the Implementation}
4 |
5 |
6 | \begin{table}[tbh]
7 | \begin{center}
8 | \begin{tabular}{p{.2 \textwidth} | p{.6 \textwidth} | p{.2 \textwidth}}\hline
9 | Field & Description & Default value\\ \hline
10 | $P\in\mathbb{Z}$ & Number of populations. & 1 \\
11 | $n\in\mathbb{Z}$ & Maximum number of pure strategies per population. & - \\
12 | $S\in\mathbb{Z}^{P}$ & Vector of pure strategies in each population, such that $1 0$, with $p \in \pop$.
7 | Let $S^p = \{ 1, \ldots, n^p \}$ be the set of actions (or pure strategies) available for each agent of the $p\th$ population.
8 | Each agent selects a pure strategy and the resulting state of the population is the usage proportion of each strategy. The set of population states is defined as $X^p = \{ x^p \in \mathbb{R}_+^{n^p} : \sum_{i \in S^p} x_i^p = m^p \}$, where the $i\th$ component of the state, denoted by $x_i^p \in \mathbb{R}_+$, is the mass of players that select the $i\th$ strategy of the population $p$.
9 |
10 | Population games (or large games) capture some properties of the interactions of many economic agents, e.g.,
11 |
12 | \begin{enumerate}
13 | \item large number of agents.
14 | \item Continuity: The actions of an agent has small impact on the payoff of other agents.
15 | \item Anonymity: means that the utility of each agent only depends on the aggregated actions of the other agents.
16 | \end{enumerate}
17 |
18 | Game theory is useful to model decision making of agents that are rational. In game theory rationality is the ability to adopt the best actions to achieve some particular goals. This implies that agents use all the information available to make decisions. Evolutionary games relax the rationality assumption by considering myopic behavior. Thus, we assume that agents choose the actions that seem to improve their fitness; however, these actions might not be optimal (as would be the case for rational agents). Thus, evolutionary games can be useful to analyze the behavior of agents in repeated games, where rationality assumptions cannot be made.
19 |
20 |
21 | In particular, an economic agent decides whether to modify or not its strategy according to the available information. In this respect, we assume that the agent's behavior satisfies both inertia and myopia properties. On the one hand, inertia
22 | is the tendency to remain at the status-quo, unless there exist motives to do that.
23 | Also, this implies that the strategy adjustment events are rare events.
24 | On the other hand, myopia means that the information used to make decisions is limited, e.g., each user makes decisions based on the current state of the population and do not estimate future actions. These two properties are based on the population games theoretical framework \cite{sandholm_book}
25 | and behavioral economics \cite{gal}.
26 |
27 | To accomplish the inertia property, the time between two successive updates of one
28 | agent's strategy is modeled with an exponential distribution (this distribution is used to model the occurrence of rare events).
29 | Thus, strategy actualization events could be characterized by means of stochastic alarm clocks.
30 | Particularly, a rate $R_i$ Poisson alarm clock produces time among rings described by
31 | a rate $R_i$ exponential distribution.
32 | The whole actualization events in the population can be considered as a rate $R=\sum_{j\in \mathcal{V}} R_j$ Poisson alarm clock.
33 | Therefore, the average number of events in a given time interval is $R$ and the probability of selecting the $i^{th}$
34 | agent in a given time instant is
35 | $\frac{R_i}{R}$ \cite{sandholm_book}.
36 |
37 | At each update opportunity (revision opportunity), the $i\th$ agent might compare the average profit of its strategy with the average profit of other strategies. Particularly, an agent might change its strategy with rate $\rho_{ij}$.
38 |
39 | The rate of change $\rho_{ij}$ is determined by a revision protocol, which defines the procedure used by each user to decide whether or not to change its strategy. The scalar $\rho_{ij} (\pi^p, x^p)$ is the \emph{conditional switch rate} from strategy $i$ to strategy $j$ as a function of a given payoff vector $\pi^p$ and a population state $x^p$.
40 |
41 | Using the law of large numbers we can approximate the evolution of the society's state to a dynamical equation defined by
42 | \begin{equation}\label{eq:mean_dynamic}
43 | \dot{x}_i^p = \sum_{j\in S^p} x_j^p \rho_{ji} (\pi^p, x^p) - x_i^p \sum_{j\in S^p} \rho_{ij}(\pi^p, x^p).
44 | \end{equation}
45 | The previous equation is known as the \emph{mean dynamic}, which is used to define some of the dynamics in the next section.
46 |
47 |
--------------------------------------------------------------------------------
/docs/maximization.tex:
--------------------------------------------------------------------------------
1 |
2 | \FloatBarrier
3 |
4 | \section{Maximization problems}\label{sec:maximization}
5 |
6 |
7 |
8 |
9 | The evolutionary dynamics can be used to solve convex optimization problems.
10 | We can use the properties of population games to design games that maximize some function $f(\bs{z})$, where $\bs{z}\in\mathbb{R}^{n}$ is a vector of $n$ variables, i.e., $\bs{z} = [z_1, \ldots, z_k, \ldots, z_n]$. Below we show two alternatives to solve this optimization problem using either a single population or $n$ populations.
11 |
12 |
13 |
14 | \subsection{Single Population Case}
15 |
16 | First, let us consider a population where each agent can choose one of the $n+1$ strategies. In this case, each of the first $n$ strategies corresponds to one variable of the objective function and the $n+1\th$ strategy can be seen as a slack variable.
17 | Thus, $x_k$ is the proportion of agents that use the $k\th$ strategy, and it corresponds to the $k\th$ variable, i.e., $x_k = z_k$.
18 | We define the fitness function of the $k\th$ strategy $F_k$ as the derivative of the objective function with respect to the $k\th$ variable, thus, $F_k(\bs{x}) \equiv \frac{\partial }{\partial x_k} f(\bs{x})$.
19 |
20 | Note that if $f(\bs{x})$ is a concave function, then its gradient is a decreasing function.
21 | Recall that users attempt to increase their fitness by adopting the most profitable strategy in the population, say the $k\th$ strategy. This lead to an increase of $x_k$, which in turns decrease the fitness $F_k(\bs{x})$.
22 |
23 | Furthermore, the equilibrium is reached when all agents that belong to the same population have the same fitness.
24 | Thus, at the equilibrium $F_i(\bs{x}) = F_j(\bs{x})$, where $i,j\in\{1, \ldots, n \}$.
25 | If we define $F_{n+1}(\bs{x}) = 0$, then at the equilibrium we have $F_i(\bs{x}) = 0$ for every strategy $i\in\{1, \ldots, n\}$.
26 | %
27 | Since the fitness function decreases with the action of users, we can conclude that the strategy of the population evolves to make the gradient of the objective function equal to zero (or as close as possible). This resembles a gradient method to solve optimization problems.
28 |
29 | % A characteristic of this implementation is that the function of users depends on their strategy. Specifically, there are $n+1$ different strategy functions.
30 |
31 | Recall that the evolution of the strategies lies in the simplex, that is, $\sum_{i \in S^p} z_i = m$. Hence, this implementation solves the following optimization problem:
32 | \begin{equation}
33 | \begin{aligned}
34 | & \underset{\bs{z}}{\text{maximize}}
35 | & & f(\bs{z}) \\
36 | & \text{subject to}
37 | & & \sum_{i=1}^n z_i \leq m,
38 | \end{aligned}
39 | \label{eq:opt_problem}
40 | \end{equation}
41 | where $m$ is the total mass of the population.
42 | %Next we present a different implementation in which each user of the same population has the same fitness function.
43 | %
44 | Figure \ref{fig:maximization_a} shows an example of the setting described above for the function
45 | \begin{equation}\label{eq:objective_f}
46 | f(\bs{z}) = - (z_1-5)^2 - (z_2-5)^2.
47 | \end{equation}
48 | The simulation is executed during $0.6$ time units.
49 |
50 | \begin{figure}[htb]
51 | \centering
52 | \includegraphics[width=.5\textwidth]{./images/maximization_a.eps}
53 | \caption{Evolution of the maximization setting using only one population.}
54 | \label{fig:maximization_a}
55 | \end{figure}
56 |
57 |
58 |
59 | \subsection{Multi-population Case}
60 |
61 |
62 |
63 | Let us consider $n$ populations where each agent can choose one of two strategies.
64 | We define a population per each variable of the maximization problem and also $n$ additional strategies that resemble slack variables.
65 | Thus, $x_i^p$ is the proportion of agents that use the $i\th$ strategy in the $p\th$ population. In this case $x_1^k$ corresponds to the $k\th$ variable, that is, $x_1^k = z_k$, while $x_2^k$ is a slack variable.
66 | %
67 | The fitness function $F_1^k$ of the $k\th$ population is defined as the derivative of the objective function with respect to the $k\th$ variable, that is, $F_1^k(\bs{x}) \equiv \frac{\partial }{\partial x_1^k} f(\bs{x})$. On the other hand, $F_2^k(\bs{x}) = 0$.
68 | This implementation solves the following optimization problem:
69 | \begin{equation}
70 | \begin{aligned}
71 | & \underset{\bs{z}}{\text{maximize}}
72 | & & f(\bs{z}) \\
73 | & \text{subject to}
74 | & & z_i \leq m^i, i =\{1,\ldots,n\}.
75 | \end{aligned}
76 | \label{eq:opt_problem}
77 | \end{equation}
78 |
79 |
80 | Figure \ref{fig:maximization_b} shows an example of the setting described above for the function in Eq. (\ref{eq:objective_f}).
81 | The simulation is executed during $0.6$ time units. Note that the implementation using multiple populations reaches the optimal value faster than the single population implementation.
82 | \begin{figure}[htb]
83 | \centering
84 | \includegraphics[width=.5\textwidth]{./images/maximization_b.eps}
85 | \caption{Evolution of the the maximization setting using $n$ populations.}
86 | \label{fig:maximization_b}
87 | \end{figure}
88 |
89 |
90 | The speed of convergence to the optimum depends on the dynamics and their parameters. For instance, we observed that the equilibrium of the BNN dynamics might be closer to the optimal solution $\bs{z}^*$ if the mass of the population $m^p$ is close to $\sum_{i=1}^N z_i$. Note that close to the optimum $\hat{F}^p$ is small, and if $m^p$ is too large, then the slack variable, such as $x_2^k$, might be too large, making $x_1^k$ small. These conditions might hinder the convergence to the optimum because updates in the strategies are too small.
--------------------------------------------------------------------------------
/docs/multipopulation.tex:
--------------------------------------------------------------------------------
1 |
2 | \FloatBarrier
3 | \section{Multi-population Games}\label{sec:multi-pop}
4 |
5 |
6 |
7 |
8 | \subsection{Matching pennies}
9 |
10 | We implement a matching pennies game defining a society $\pop = \{p_1, p_2\}$ with two populations and two strategies per population, namely \emph{heads} and \emph{tails}. First, note that the payoff of the game in normal form is
11 | %
12 | \begin{table}[H]
13 | \centering
14 | \begin{tabular}{|c|c|} \hline
15 | 2, 1 & 1, 2 \\ \hline
16 | 1, 2 & 2, 1 \\ \hline
17 | \end{tabular}
18 | \end{table}
19 | %
20 | Now, the fitness vector of the population $p_j$ can be expressed as $F^{p_j}(x^{p_k}) = A^{p_j} x^{p_k}$, for $p_j, p_k \in \pop$ and $p_j \neq p_k$. That is, the payoff of a population is affected only by the state of the opponent population.
21 | The payoff matrices are defined as follows
22 | %
23 | \begin{equation}
24 | A^1 = \begin{pmatrix}
25 | 2 & 1 \\
26 | 1 & 2
27 | \end{pmatrix}
28 | \end{equation}
29 | %
30 | \begin{equation}
31 | A^2 = \begin{pmatrix}
32 | 1 & 2 \\
33 | 2 & 1
34 | \end{pmatrix}
35 | \end{equation}
36 | %
37 | Fig. \ref{fig:mp_game_rd} to \ref{fig:mp_game_logit} show the evolution of the social state with the evolutionary dynamics presented in Section \ref{sec:protocols}.
38 |
39 | \begin{figure}[h]
40 | \centering
41 | \begin{subfigure}[b]{0.45\textwidth}
42 | \includegraphics[width=\textwidth]{./images/test2_simplex_rd.eps}
43 | \caption{Simplex.}
44 | \label{fig:test2_simplex_rd}
45 | \end{subfigure}
46 | ~
47 | \begin{subfigure}[b]{0.45\textwidth}
48 | \includegraphics[width=\textwidth]{./images/test2_ev_rd.eps}
49 | \caption{Evolution of the strategies in time.}
50 | \label{fig:test2_ev_rd}
51 | \end{subfigure}
52 | \caption{Matching pennies game with replicator dynamics.}
53 | \label{fig:mp_game_rd}
54 | \end{figure}
55 |
56 |
57 |
58 | \begin{figure}[h]
59 | \centering
60 | \begin{subfigure}[b]{0.45\textwidth}
61 | \includegraphics[width=\textwidth]{./images/test2_simplex_maynard_rd.eps}
62 | \caption{Simplex.}
63 | \label{fig:test2_simplex_maynard_rd}
64 | \end{subfigure}
65 | ~
66 | \begin{subfigure}[b]{0.45\textwidth}
67 | \includegraphics[width=\textwidth]{./images/test2_ev_maynard_rd.eps}
68 | \caption{Evolution of the strategies in time.}
69 | \label{fig:test2_ev_maynard_rd}
70 | \end{subfigure}
71 | \caption{Matching pennies game with Maynard replicator dynamics.}
72 | \label{fig:mp_game_maynard_rd}
73 | \end{figure}
74 |
75 |
76 |
77 | \begin{figure}[h]
78 | \centering
79 | \begin{subfigure}[b]{0.45\textwidth}
80 | \includegraphics[width=\textwidth]{./images/test2_simplex_bnn.eps}
81 | \caption{Simplex.}
82 | \label{fig:test2_simplex_bnn}
83 | \end{subfigure}
84 | ~
85 | \begin{subfigure}[b]{0.45\textwidth}
86 | \includegraphics[width=\textwidth]{./images/test2_ev_bnn.eps}
87 | \caption{Evolution of the strategies in time.}
88 | \label{fig:test2_ev_bnn}
89 | \end{subfigure}
90 | \caption{Matching pennies game with BNN dynamics.}
91 | \label{fig:mp_game_bnn}
92 | \end{figure}
93 |
94 |
95 |
96 | \begin{figure}[h]
97 | \centering
98 | \begin{subfigure}[b]{0.45\textwidth}
99 | \includegraphics[width=\textwidth]{./images/test2_simplex_smith.eps}
100 | \caption{Simplex.}
101 | \label{fig:test2_simplex_smith}
102 | \end{subfigure}
103 | ~
104 | \begin{subfigure}[b]{0.45\textwidth}
105 | \includegraphics[width=\textwidth]{./images/test2_ev_smith.eps}
106 | \caption{Evolution of the strategies in time.}
107 | \label{fig:test2_ev_smith}
108 | \end{subfigure}
109 | \caption{Matching pennies game with Smith dynamics.}
110 | \label{fig:mp_game_smith}
111 | \end{figure}
112 |
113 |
114 |
115 | \begin{figure}[h]
116 | \centering
117 | \begin{subfigure}[b]{0.45\textwidth}
118 | \includegraphics[width=\textwidth]{./images/test2_simplex_logit.eps}
119 | \caption{Simplex.}
120 | \label{fig:test2_simplex_logit}
121 | \end{subfigure}
122 | ~
123 | \begin{subfigure}[b]{0.45\textwidth}
124 | \includegraphics[width=\textwidth]{./images/test2_ev_logit.eps}
125 | \caption{Evolution of the strategies in time.}
126 | \label{fig:test2_ev_logit}
127 | \end{subfigure}
128 | \caption{Matching pennies game with Logit dynamics with $\eta=0.02$.}
129 | \label{fig:mp_game_logit}
130 | \end{figure}
131 |
132 |
--------------------------------------------------------------------------------
/docs/references.bib:
--------------------------------------------------------------------------------
1 | @online{ISOCalifornia,
2 | author = {California ISO},
3 | title = {Today's Outlook},
4 | year = 2014,
5 | url = {http://www.caiso.com/Pages/Today's-Outlook-Details.aspx},
6 | urldate = {2014-04-30}
7 | }
8 |
9 | @online{xm,
10 | author = "{XM S.A. E.P.S.}",
11 | title = {Demanda de Electricidad},
12 | year = 2014,
13 | url = {http://www.xm.com.co/Pages/DemandadeElectricidad.aspx},
14 | urldate = {2014-07-25}
15 | }
16 |
17 | @article{johari2006scalable,
18 | title={A scalable network resource allocation mechanism with bounded efficiency loss},
19 | author={Johari, Ramesh and Tsitsiklis, John N},
20 | journal={IEEE Journal on Selected Areas in Communications},
21 | volume={24},
22 | number={5},
23 | pages={992--999},
24 | year={2006},
25 | publisher={IEEE}
26 | }
27 |
28 | @Book{ sandholm_book,
29 | author = "William H. Sandholm",
30 | isbn = "0262195879",
31 | keywords = "evolutionary\_games",
32 | month = jan,
33 | publisher = "The MIT Press",
34 | title = "{Population Games and Evolutionary Dynamics (Economic Learning and Social Evolution)}",
35 | year = "2011",
36 | edition = "1"
37 | }
38 |
39 | @InProceedings{ fazeli,
40 | author = "A. Fazeli and A. Jadbabaie",
41 | booktitle = "2012 IEEE 51st Annual Conference on Decision and Control (CDC)",
42 | title = "Game theoretic analysis of a strategic model of competitive contagion and product adoption in social networks",
43 | year = "2012",
44 | pages = "74--79",
45 | keywords = "budgeting, game theory, marketing, optimisation, social sciences, Nash equilibrium, competitive contagion, firm, fixed budget, game theoretic analysis, game-theoretic diffusion process, local coordination game, marketing, myopic best response dynamics, neighboring agent, network structure, optimal payoff, product adoption, product payoff, social network, strategic model, Equations, Games, Nash equilibrium, Social network services, Stochastic processes, Switches, Upper bound",
46 | doi = "10.1109/CDC.2012.6426222",
47 | ISSN = "0743-1546"
48 | }
49 |
50 | @Article{ pantoja,
51 | author = "A. Pantoja and N. Quijano",
52 | journal = "IEEE Transactions on Industrial Electronics",
53 | title = "A Population Dynamics Approach for the Dispatch of Distributed Generators",
54 | year = "2011",
55 | volume = "58",
56 | number = "10",
57 | pages = "4559--4567",
58 | keywords = "distributed power generation, multi-agent systems, power distribution control, power engineering computing, power generation dispatch, resource allocation, control strategy, distributed generator dispatch, distributed structure, dynamic resource allocation, hierarchical structure, market multiagent based scheme, microgrid, population dynamics, power distribution networks, replicator dynamics strategy, Adaptation model, Biological system modeling, Dynamic scheduling, Generators, Heuristic algorithms, Optimization, Resource management, Distributed generation, economical dispatch, market-based control (MBC), replicator dynamics (RD)",
59 | doi = "10.1109/TIE.2011.2107714",
60 | ISSN = "0278-0046"
61 | }
62 |
63 | @Article{ Honebein2011,
64 | title = "Building a Social Roadmap for the Smart Grid",
65 | journal = "The Electricity Journal",
66 | volume = "24",
67 | number = "4",
68 | pages = "78--85",
69 | year = "2011",
70 | note = "",
71 | issn = "1040-6190",
72 | doi = "10.1016/j.tej.2011.03.015",
73 | author = "Peter C. Honebein and Roy F. Cammarano and Craig Boice"
74 | }
75 |
76 | @Article{ fehr2007,
77 | type = "Accepted Paper Series",
78 | title = "{Human Motivation and Social Cooperation: Experimental and Analytical Foundations}",
79 | author = "Ernst Fehr and Herbert Gintis",
80 | journal = "Annual Review of Sociology",
81 | publisher = "SSRN",
82 | keywords = "cooperation, punishment, social exchange, social order, strong reciprocity",
83 | location = "http://ssrn.com/paper=1077628",
84 | volume = "33",
85 | month = "August",
86 | year = "2007",
87 | language = "English"
88 | }
89 |
90 | @InProceedings{ mitter_2011,
91 | author = "M. Roozbehani and M. Rinehart and M.A. Dahleh and S.K. Mitter and D. Obradovic and H. Mangesius",
92 | booktitle = "Energy Market (EEM), 2011 8th International Conference on the European",
93 | title = "Analysis of competitive electricity markets under a new model of real-time retail pricing",
94 | year = "2011",
95 | month = "may",
96 | volume = "",
97 | number = "",
98 | pages = "250--255",
99 | keywords = "closed loop system, closed loop systems, competitive electricity market, electricity pricing, ex-post price, power markets, price stability, pricing, real-time retail pricing, social welfare",
100 | doi = "10.1109/EEM.2011.5953018",
101 | ISSN = ""
102 | }
103 |
104 | @Article{ acemoglu2010,
105 | title = "Spread of (mis)information in social networks",
106 | journal = "Games and Economic Behavior",
107 | volume = "70",
108 | number = "2",
109 | pages = "194--227",
110 | year = "2010",
111 | note = "",
112 | issn = "0899-8256",
113 | doi = "10.1016/j.geb.2010.01.005",
114 | author = "Daron Acemoglu and Asuman Ozdaglar and Ali ParandehGheibi",
115 | keywords = "Information aggregation",
116 | keywords1 = "Learning",
117 | keywords2 = "Misinformation",
118 | keywords3 = "Social networks"
119 | }
120 |
121 | @Article{ dw,
122 | author = "G{\'e}rard Weisbuch and Guillaume Deffuant and Fr{\'e}d{\'e}ric Amblard and Jean-Pierre Nadal",
123 | journal = "Complexity",
124 | keywords = "agents, bounded\_confidence, opinion\_dynamics, segregation, social\_sciences",
125 | number = "3",
126 | pages = "55--63",
127 | publisher = "Wiley Subscription Services, Inc., A Wiley Company",
128 | title = "{Meet, discuss, and segregate!}",
129 | volume = "7",
130 | year = "2002"
131 | }
132 |
133 | @Article{ degroot,
134 | author = "Morris H. Degroot",
135 | journal = "Journal of the American Statistical Association",
136 | keywords = "consensus, social\_science",
137 | number = "345",
138 | pages = "118--121",
139 | publisher = "American Statistical Association",
140 | title = "{Reaching a Consensus}",
141 | volume = "69",
142 | year = "1974"
143 | }
144 |
145 | @InProceedings{ olfati,
146 | author = "Reza Olfati-saber and J. Alex Fax and Richard M. Murray",
147 | title = "Consensus and cooperation in networked multi-agent systems",
148 | booktitle = "Proceedings of the IEEE",
149 | year = "2007",
150 | pages = "2007"
151 | }
152 |
153 | @Article{ hk,
154 | author = "Rainer Hegselmann and Ulrich Krause",
155 | title = "Opinion dynamics and bounded confidence: Models, analysis and simulation",
156 | journal = "Journal of Artificial Societies and Social Simulation",
157 | year = "2002",
158 | number = 3,
159 | volume = "5",
160 | pages = "1--24"
161 | }
162 |
163 | @Article{ castellano,
164 | author = "Claudio Castellano and Santo Fortunato and Vittorio Loreto",
165 | title = "Statistical physics of social dynamics",
166 | journal = "Reviews of Modern Physics",
167 | year = "2009",
168 | month = "Apr-Jun",
169 | volume = "81",
170 | number = "2",
171 | pages = "591--646",
172 | doi = "10.1103/RevModPhys.81.591"
173 | }
174 |
175 | @InProceedings{ bakker_2010,
176 | author = "V. Bakker and M.G.C. Bosman and A. Molderink and J.L. Hurink and G.J.M. Smit",
177 | booktitle = "2010 First IEEE International Conference on Smart Grid Communications (SmartGridComm)",
178 | title = "Demand Side Load Management Using a Three Step Optimization Methodology",
179 | year = "2010",
180 | month = "oct.",
181 | volume = "",
182 | number = "",
183 | pages = "431--436",
184 | keywords = "air pollution control, building management systems, CO2 emission reduction, communication bandwidth, demand side load management, demand side management, distributed generation, electricity consumption, energy demand profile, energy efficiency, functional electricity grid, ICT, investments, optimisation, optimization methodology, power system control, renewable resources, smart grids, smart meters, smart power grids",
185 | doi = "10.1109/SMARTGRID.2010.5622082",
186 | ISSN = ""
187 | }
188 |
189 | @Article{ schweitzer,
190 | hyphenation = "american",
191 | author = "Frank Schweitzer",
192 | title = "Modelling Migration and Economic Agglomeration with Active Brownian Particles",
193 | year = "1998",
194 | journal = "Journal of Complex Systems",
195 | volume = "1",
196 | number = "1",
197 | pages = "11--37",
198 | publisher = "{\'E}ditions HERMES, Paris"
199 | }
200 |
201 | @Article{ Martins,
202 | title = "An opinion dynamics model for the diffusion of innovations",
203 | journal = "Physica A: Statistical Mechanics and its Applications",
204 | volume = "388",
205 | number = "15--16",
206 | pages = "3225--3232",
207 | year = "2009",
208 | note = "",
209 | issn = "0378-4371",
210 | doi = "10.1016/j.physa.2009.04.007",
211 | author = "Andr{\'e} C.R. Martins and Carlos de B. Pereira and Renato Vicente",
212 | keywords = "Opinion dynamics",
213 | keywords1 = "Innovation diffusion",
214 | keywords2 = "Marketing",
215 | keywords3 = "Sociophysics"
216 | }
217 |
218 | @Article{ ben-naim,
219 | author = "E. Ben-Naim",
220 | title = "Opinion dynamics: rise and fall of political parties",
221 | journal = "EPL (Europhysics Letters)",
222 | volume = "69",
223 | number = "5",
224 | pages = "671--676",
225 | year = "2005"
226 | }
227 |
228 | @Article{ santacama2010,
229 | author = "E. Santacana and G. Rackliffe and Le Tang and Xiaoming Feng",
230 | journal = "Power and Energy Magazine, IEEE",
231 | title = "Getting Smart",
232 | year = "2010",
233 | month = "march-april ",
234 | volume = "8",
235 | number = "2",
236 | pages = "41--48",
237 | keywords = "electric power industry, grid security, power control technologies, power flow, smart control algorithms, smart grid reliability, smart grid technology, smart power grid, electrical products industry, load flow, power control, smart power grids",
238 | doi = "10.1109/MPE.2009.935557",
239 | ISSN = "1540-7977"
240 | }
241 |
242 | @Book{ fudenberg-tirole,
243 | abstract = "{This advanced text introduces the principles of noncooperative game theory in a direct and uncomplicated style that will acquaint students with the broad spectrum of the field while highlighting and explaining what they need to know at any given point.}",
244 | author = "Drew Fudenberg and Jean Tirole",
245 | howpublished = "Hardcover",
246 | isbn = "0262061414",
247 | keywords = "410-3, 412-1, aardiffgames, aarmicro, empirical\_io, game\_theory, tartumicro",
248 | month = aug,
249 | publisher = "MIT Press",
250 | title = "{Game Theory}",
251 | year = "1991",
252 | citeulike-article-id = "105659",
253 | citeulike-linkout-0 = "http://www.amazon.ca/exec/obidos/redirect?tag=citeulike09-20&path=ASIN/0262061414",
254 | citeulike-linkout-1 = "http://www.amazon.de/exec/obidos/redirect?tag=citeulike01-21&path=ASIN/0262061414",
255 | citeulike-linkout-2 = "http://www.amazon.fr/exec/obidos/redirect?tag=citeulike06-21&path=ASIN/0262061414",
256 | citeulike-linkout-3 = "http://www.amazon.jp/exec/obidos/ASIN/0262061414",
257 | citeulike-linkout-4 = "http://www.amazon.co.uk/exec/obidos/ASIN/0262061414/citeulike00-21",
258 | citeulike-linkout-5 = "http://www.amazon.com/exec/obidos/redirect?tag=citeulike07-20&path=ASIN/0262061414",
259 | citeulike-linkout-6 = "http://www.worldcat.org/isbn/0262061414",
260 | citeulike-linkout-7 = "http://books.google.com/books?vid=ISBN0262061414",
261 | citeulike-linkout-8 = "http://www.amazon.com/gp/search?keywords=0262061414&index=books&linkCode=qs",
262 | citeulike-linkout-9 = "http://www.librarything.com/isbn/0262061414",
263 | day = "29",
264 | posted-at = "2005-02-27 09:51:14",
265 | priority = "3"
266 | }
267 |
268 | @Article{ gal,
269 | author = "David Gal",
270 | title = "A psychological law of inertia and the illusion of loss aversion",
271 | journal = "Judgment and Decision Making",
272 | year = 2006,
273 | volume = "1",
274 | number = "1",
275 | pages = "23--32",
276 | month = "July",
277 | keywords = "endowment effect, inertia, loss aversion, reference-dependent prefere, risky choice, status-quo bias",
278 | abstract = " The principle of loss aversion is thought to explain a wide range of anomalous phenomena involving tradeoffs between losses and gains. In this article, I show that the anomalies loss aversion was introduced to explain --- the risky bet premium, the endowment effect, and the status-quo bias --- are characterized not only by a loss/gain tradeoff, but by a tradeoff between the status-quo and change; and, that a propensity towards the status-quo in the latter tradeoff is sufficient to explain these phenomena. Moreover, I show that two basic psychological principles --- (1) that motives drive behavior; and (2) that preferences tend to be fuzzy and ill-defined --- imply the existence of a robust and fundamental propensity of this sort. Thus, a loss aversion principle is rendered superfluous to an account of the phenomena it was"
279 | }
280 |
281 | @Article{ mohsenian,
282 | author = "A. Mohsenian-Rad and V.W.S. Wong and J. Jatskevich and R. Schober and A. Leon-Garcia",
283 | journal = "IEEE Transactions on Smart Grid",
284 | title = "Autonomous Demand-Side Management Based on Game-Theoretic Energy Consumption Scheduling for the Future Smart Grid",
285 | year = "2010",
286 | month = "dec. ",
287 | volume = "1",
288 | number = "3",
289 | pages = "320--331",
290 | keywords = "autonomous demand-side management, demand-side management programs, game-theoretic energy consumption scheduling, Nash equilibrium, power consumption, power generation scheduling, smart grid, smart power grids",
291 | doi = "10.1109/TSG.2010.2089069",
292 | ISSN = "1949-3053"
293 | }
294 |
295 | @Article{ NeWa99,
296 | author = "M. E. J. Newman and D. J. Watts",
297 | title = "Renormalization Group Analysis of the Small-World Network Model",
298 | journal = "Physics Letters A",
299 | year = "1999",
300 | volume = "263",
301 | pages = "341--346"
302 | }
303 |
304 | @Article{ smallw,
305 | address = "Department of Theoretical and Applied Mechanics, Cornell University, Ithaca, New York 14853, USA. djw24@columbia.edu",
306 | author = "Duncan J. Watts and Steven H. Strogatz",
307 | day = "4",
308 | issn = "0028-0836",
309 | journal = "Nature",
310 | keywords = "networks, small-world, strogatz, watts",
311 | month = jun,
312 | number = "6684",
313 | pages = "440--442",
314 | pmid = "9623998",
315 | priority = "2",
316 | publisher = "Nature Publishing Group",
317 | title = "{Collective dynamics of `small-world' networks}",
318 | volume = "393",
319 | year = "1998"
320 | }
321 |
322 | @Article{ evans,
323 | title = "Polarization in Abortion Attitudes in U.S. Religious Traditions, 1972-1998",
324 | author = "John H. Evans",
325 | journal = "Sociological Forum",
326 | volume = "17",
327 | number = "3",
328 | pages = "397--422",
329 | ISSN = "08848971",
330 | year = "2002",
331 | publisher = "Springer",
332 | copyright = "Copyright © 2002 Springer",
333 | jstor_articletype = "research-article",
334 | language = "English"
335 | }
336 |
337 | @InProceedings{ meyn_2011,
338 | Title = "A Control Theorist's Perspective on Dynamic Competitive Equilibria in Electricity Markets",
339 | Address = "Milano, Italy",
340 | Author = "G. Wang and A. Kowli and M. Negrete-Pincetic and E. Shafieepoorfard and S. Meyn",
341 | Booktitle = "Proc. 18th World Congress of the International Federation of Automatic Control (IFAC)",
342 | Year = "2011"
343 | }
344 |
345 | @Book{ nocedal_book,
346 | author = "Jorge Nocedal and Stephen J. Wright",
347 | howpublished = "Hardcover",
348 | isbn = "978-0-387-30303-1",
349 | keywords = "optimization",
350 | publisher = "Springer-Verlag",
351 | title = "{Numerical Optimization}",
352 | edition = "2",
353 | year = "2006"
354 | }
355 |
356 |
357 | @article{johari2005efficiency,
358 | title={Efficiency loss in a network resource allocation game: the case of elastic supply},
359 | author={Johari, Ramesh and Mannor, Shie and Tsitsiklis, John N},
360 | journal={ IEEE Transactions on Automatic Control},
361 | volume={50},
362 | number={11},
363 | pages={1712--1724},
364 | year={2005},
365 | publisher={IEEE}
366 | }
367 |
368 | @article{johari2004efficiency,
369 | title={Efficiency loss in a network resource allocation game},
370 | author={Johari, Ramesh and Tsitsiklis, John N},
371 | journal={Mathematics of Operations Research},
372 | volume={29},
373 | number={3},
374 | pages={407--435},
375 | year={2004},
376 | publisher={INFORMS}
377 | }
378 |
379 |
380 | @Article{ Johari09,
381 | author = "Ramesh Johari and John N. Tsitsiklis",
382 | title = "Efficiency of Scalar-Parameterized Mechanisms",
383 | journal = "Oper. Res.",
384 | issue_date = "July 2009",
385 | volume = "57",
386 | number = "4",
387 | month = jul,
388 | year = "2009",
389 | issn = "0030-364X",
390 | pages = "823--839",
391 | numpages = "17"
392 | }
393 |
394 | @Article{ hurwicz73,
395 | title = "The Design of Mechanisms for Resource Allocation",
396 | author = "Leonid Hurwicz",
397 | journal = "The American Economic Review",
398 | volume = "63",
399 | number = "2",
400 | pages = "1--30",
401 | ISSN = "00028282",
402 | abstract = "",
403 | language = "English",
404 | year = "1973",
405 | publisher = "American Economic Association",
406 | copyright = "Copyright © 1973 American Economic Association"
407 | }
408 |
409 | @InProceedings{ meyn_2011b,
410 | author = "A.S. Kowli and S.P. Meyn",
411 | booktitle = "2011 IEEE Power and Energy Society General Meeting",
412 | title = "Supporting wind generation deployment with demand response",
413 | year = "2011",
414 | month = "july",
415 | volume = " ",
416 | number = " ",
417 | pages = "1--8",
418 | keywords = "day-ahead scheduling decisions, demand response, demand side reserve capacity, DR programs, electricity industry, integration reliability, load consumption, policy-related questions, power generation dispatch, power generation reliability, power generation scheduling, power system operations, stochastic processes, stochastic unit commitment model, two-stage stochastic program, wind generation deployment, wind generation resources, wind power plants",
419 | doi = "10.1109/PES.2011.6039780",
420 | ISSN = "1944-9925"
421 | }
422 |
423 | @Article{ saele11,
424 | author = "H. Saele and O.S. Grande",
425 | journal = " IEEE Transactions on Smart Grid",
426 | title = "Demand Response From Household Customers: Experiences From a Pilot Study in Norway",
427 | year = "2011",
428 | month = "march ",
429 | volume = "2",
430 | number = "1",
431 | pages = "102--109",
432 | keywords = "Contracts, cost-effective realization, day ahead market, day network tariff, demand response, demand side management, electrical water heaters, Electricity, Energy management, energy measurement, household customers, load management, Load management, load reduction, load shedding, metering, Monopoly, Norway, power demand, power markets, Power markets, power meters, pricing, remote load control, Resistance heating, smart metering technology, Water heating",
433 | doi = "10.1109/TSG.2010.2104165",
434 | ISSN = "1949-3053"
435 | }
436 |
437 | @InProceedings{ samadi11,
438 | author = "P. Samadi and R. Schober and V.W.S. Wong",
439 | booktitle = "2011 IEEE International Conference on Smart Grid Communications (SmartGridComm)",
440 | title = "Optimal energy consumption scheduling using mechanism design for the future smart grid",
441 | year = "2011",
442 | month = "oct.",
443 | volume = "",
444 | number = "",
445 | pages = "369--374",
446 | keywords = "demand side, demand side management, energy consumption, Energy consumption, energy provider, Home appliances, mechanism design, Nash equilibrium, optimal energy consumption scheduling, pricing, Pricing, Schedules, Simulation, smart grid, smart power grids, smart pricing, Vectors, Vickrey-Clarke-Groves based mechanism",
447 | doi = "10.1109/SmartGridComm.2011.6102349",
448 | ISSN = ""
449 | }
450 |
451 | @InProceedings{ jun12,
452 | author = "Jun Cao and Bo Yang and Cailian Chen and Xinping Guan",
453 | booktitle = "2012 31st Chinese Control Conference (CCC)",
454 | title = "Optimal demand response using mechanism design in the smart grid",
455 | year = "2012",
456 | month = "july",
457 | volume = "",
458 | number = "",
459 | pages = "2520--2525",
460 | keywords = "Bayesian mechanism design, Demand response, demand side management, Electricity, Games, Load management, mechanism design, optimal demand response, power allocations, Power demand, power usage, residential power network, Resource management, smart grid, Smart grids, smart power grids, social welfare maximization, Vectors",
461 | doi = "",
462 | ISSN = "1934-1768"
463 | }
464 |
465 | @Book{ fudenberg98,
466 | author = "Drew Fudenberg and David K. Levine",
467 | title = "The Theory of Learning in Games",
468 | publisher = "The MIT Press",
469 | year = 1998,
470 | month = "April",
471 | volume = "1",
472 | number = "0262061945",
473 | series = "MIT Press Books",
474 | edition = "",
475 | keywords = "games, equilibrium concepts",
476 | abstract = "In economics, most noncooperative game theory has focused on equilibrium in games, especially Nash equilibrium and its refinements. The traditional explanation for when and why equilibrium arises is that it results from analysis and introspection by the players in a situation where the rules of the game, the rationality of the players, and the players' payoff functions are all common knowledge. Both conceptually and empirically, this theory has many problems. In The Theory of Learning in Games Drew Fudenberg and David Levine develop an alternative explanation that equilibrium arises as the long-run outcome of a process in which less than fully rational players grope for optimality over time. The models they explore provide a foundation for equilibrium theory and suggest useful ways for economists to evaluate and modify traditional equilibrium concepts."
477 | }
478 |
479 | @Article{ milinski,
480 | author = "Manfred Milinski and Dirk Semmann and Hans-Jurgen Krambeck",
481 | title = "Reputation helps solve the `tragedy of the commons'",
482 | journal = "Nature",
483 | issue_date = "January 2002",
484 | volume = "415",
485 | number = "6870",
486 | month = jan,
487 | year = "2002",
488 | issn = "0028-0836",
489 | pages = "424--426"
490 | }
491 |
492 | @TechReport{ krupka08,
493 | author = "Erin L. Krupka and Roberto A. Weber",
494 | title = "Identifying Social Norms Using Coordination Games: Why Does Dictator Game Sharing Vary?",
495 | year = 2008,
496 | month = Nov,
497 | institution = "Institute for the Study of Labor (IZA)",
498 | type = "IZA Discussion Papers",
499 | number = "3860",
500 | abstract = "We explore the influence of social norms on behavior. To do so, we introduce a method for identifying norms, based on the property that social norms reflect social consensus regarding the appropriateness of different possible behaviors. We demonstrate that the norms we elicit, along with a simple model combining concern for norm-compliance with utility for money, predict changes in behavior across several variants of the dictator game in which behavior changes substantially following the introduction of minor contextual variations. Our findings indicate that people care not just about monetary payoffs but also care about the social appropriateness of any action they take. Our work also suggests that a social norm is not always a single action that should or should not be taken, but rather a profile of varying degrees of social appropriateness for different available actions.",
501 | keywords = "norms, matching games, dictator games"
502 | }
503 |
504 | @Article{ barreto13,
505 | author = "Carlos Barreto and Eduardo Mojica-Nava and Nicanor Quijano",
506 | title = "A Population Dynamics Model for Opinion Dynamics with Prominent Agents and Incentives ",
507 | year = "2013",
508 | month = "june",
509 | volume = "",
510 | number = "",
511 | pages = "",
512 | keywords = "Bayesian mechanism design, Demand response, demand side management, Electricity, Games, Load management, mechanism design, optimal demand response, power allocations, Power demand, power usage, residential power network, Resource management, smart grid, Smart grids, smart power grids, social welfare maximization, Vectors",
513 | doi = "",
514 | ISSN = "",
515 | journal = "in Proceedings of 2013 IEEE American Control Conference (ACC)"
516 | }
517 |
518 | @Article{ Arslan2007,
519 | title = "Autonomous Vehicle-Target Assignment: A Game-Theoretical Formulation",
520 | volume = "129",
521 | number = "5",
522 | journal = "Journal of Dynamic Systems Measurement and Control",
523 | publisher = "ASME",
524 | author = "Gürdal Arslan and Jason R Marden and Jeff S Shamma",
525 | year = "2007",
526 | pages = "584"
527 | }
528 |
529 | @Book{ AlgorithmicG,
530 | title = "Algorithmic Game Theory",
531 | author = "Noam Nisan and Tim Roughgarden and {\'E}va Tardos and Vijay V. Vazirani",
532 | publisher = "Cambridge University Press",
533 | address = "32 Avenue of the Americas, New York, NY 10013-2473, USA",
534 | year = "2007",
535 | isbn = "978-0-521-87282-9"
536 | }
537 |
538 | @Article{ Myerson83,
539 | title = "Efficient mechanisms for bilateral trading",
540 | journal = "Journal of Economic Theory",
541 | volume = "29",
542 | number = "2",
543 | pages = "265--281",
544 | year = "1983",
545 | note = "",
546 | issn = "0022-0531",
547 | author = "Roger B Myerson and Mark A Satterthwaite"
548 | }
549 |
550 | @Book{ tilman,
551 | title = "An Introduction to the Theory of Mechanism Design",
552 | author = {Tilman B{\"o}rgers},
553 | year = "2013"
554 | }
555 |
556 | @Book{ hurwicz06,
557 | author = "Leonid Hurwicz and Stanley Reiter",
558 | title = "Designing Economic Mechanisms",
559 | publisher = "Cambridge University Press",
560 | year = "2006"
561 | }
562 |
563 | @Article{ myerson83,
564 | author = "Roger B. Myerson and Mark A. Satterthwaite",
565 | title = "Efficient mechanisms for bilateral trading",
566 | journal = "Journal of Economic Theory",
567 | year = 1983,
568 | volume = "29",
569 | number = "2",
570 | pages = "265--281",
571 | month = "April"
572 | }
573 |
574 | @Misc{ ferh2003,
575 | title = "Detrimental effects of sanctions on human altruism",
576 | DOI = "10.1038/nature01474",
577 | number = "6928",
578 | journal = "Nature",
579 | author = "Ernst Fehr and Bettina Rockenbach",
580 | year = "2003",
581 | pages = "137--140"
582 | }
583 |
584 | @InProceedings{ dahleh2010,
585 | author = "M. Roozbehani and Munther Dahleh and S. Mitter",
586 | booktitle = "2010 49th IEEE Conference on Decision and Control (CDC)",
587 | title = "On the stability of wholesale electricity markets under real-time pricing",
588 | year = "2010",
589 | pages = "1911--1918",
590 | keywords = "closed loop systems, feedback, power markets, pricing, stability criteria, closed loop feedback system, mathematical model, real-time pricing mechanisms, stability criteria, static pricing function, wholesale electricity markets, Electricity, Power system dynamics, Power system stability, Pricing, Real time systems, Stability criteria",
591 | doi = "10.1109/CDC.2010.5718173",
592 | ISSN = "0743-1546"
593 | }
594 |
595 | @InProceedings{ chasparis,
596 | author = "Georgios C. Chasparis and J.S. Shamma",
597 | booktitle = "2010 49th IEEE Conference on Decision and Control (CDC) ",
598 | title = "Control of preferences in social networks",
599 | year = "2010",
600 | pages = "6651--6656",
601 | keywords = "advertising, competitive intelligence, infinite horizon, innovation management, optimal control, robust control, social networking (online), Nash solution, competitive firm, endogenous network influence, infinite horizon, innovation, network model, optimal Stackelberg solution, optimal advertising policy, preference control, robust optimal policy, social network, Advertising, Computational modeling, Monopoly, Optimal control, Optimization, Uncertainty",
602 | doi = "10.1109/CDC.2010.5717491",
603 | ISSN = "0743-1546"
604 | }
605 |
606 | @InProceedings{ anahita,
607 | author = "A. MirTabatabaei and P. Jia and F. Bullo",
608 | title = "Eulerian opinion dynamics with bounded confidence and exogenous inputs",
609 | booktitle = "IFAC Workshop on Distributed Estimation and Control in Networked Systems",
610 | year = 2012,
611 | month = sep,
612 | address = "Santa Barbara, CA, USA",
613 | pages = "270--275",
614 | keywords = "Social Networks"
615 | }
616 |
617 | @Article{ specialI,
618 | author = "E. Mojica-Nava and C. Barreto and N. Quijano",
619 | title = "Population games methods for distributed control of microgrids",
620 | journal = "IEEE Transactions on Smart Grid",
621 | year = 2013,
622 | month = apr,
623 | note = "Submitted"
624 | }
625 |
626 | @inproceedings{barreto2013design,
627 | title={Design of mechanisms for demand response programs},
628 | author={Barreto, Carlos and Mojica-Nava, Eduardo and Quijano, Nicanor},
629 | booktitle={Proceedings of the 2013 IEEE 52nd Annual Conference on Decision and Control (CDC)},
630 | pages={1828--1833},
631 | year={2013}
632 | }
633 |
634 |
635 | @article{barreto2014incentives,
636 | title={Incentives-Based Mechanism for Efficient Demand Response Programs},
637 | author={Barreto, Carlos and Mojica-Nava, Eduardo and Quijano, Nicanor},
638 | journal={arXiv preprint arXiv:1408.5366},
639 | year={2014}
640 | }
641 |
642 |
643 |
644 | @Article{ alfredo_g,
645 | author = "Mingyi Hong and Alfredo Garcia",
646 | title = "Mechanism Design for Base Station Association and Resource Allocation in Downlink OFDMA Network",
647 | journal = "IEEE Journal on Selected Areas in Communications",
648 | volume = "30",
649 | number = "11",
650 | year = "2012",
651 | pages = "2238--2250"
652 | }
653 |
654 | @InProceedings{ nedic,
655 | author = "J. Koshal and A. Nedic and U.V. Shanbhag",
656 | booktitle = " 2012 IEEE 51st Annual Conference on Decision and Control (CDC)",
657 | title = "A gossip algorithm for aggregative games on graphs",
658 | year = "2012",
659 | pages = "4840--4845",
660 | keywords = "convergence, distributed algorithms, game theory, graph theory, multi-agent systems, agent objective function, agents decisions, aggregative games, almost-sure convergence, connected network, diminishing step-size sequence, distributed multiagent networked system, gossip algorithm, gossip-based distributed algorithm, graphs, standard conditions, Aggregates, Algorithm design and analysis, Clocks, Convergence, Educational institutions, Games, Vectors",
661 | doi = "10.1109/CDC.2012.6426136",
662 | ISSN = "0743-1546"
663 | }
664 |
665 | @article{rosen1965,
666 | title={Existence and uniqueness of equilibrium points for concave n-person games},
667 | author={Rosen, J Ben},
668 | journal={Econometrica: Journal of the Econometric Society},
669 | pages={520--534},
670 | year={1965},
671 | publisher={JSTOR}
672 | }
673 |
674 |
675 |
676 | @inproceedings{papadimitriou2001algorithms,
677 | title={Algorithms, games, and the internet},
678 | author={Papadimitriou, Christos},
679 | booktitle={Proceedings of the thirty-third annual ACM symposium on Theory of computing},
680 | pages={749--753},
681 | year={2001},
682 | organization={ACM}
683 | }
684 |
685 | @article{tardos,
686 | author = {Roughgarden, Tim and Tardos, \'{E}va},
687 | title = {How Bad is Selfish Routing?},
688 | journal = {Journal of the ACM (JACM)},
689 | issue_date = {March 2002},
690 | volume = {49},
691 | number = {2},
692 | month = mar,
693 | year = {2002},
694 | issn = {0004-5411},
695 | pages = {236--259},
696 | numpages = {24},
697 | doi = {10.1145/506147.506153},
698 | acmid = {506153},
699 | publisher = {ACM},
700 | address = {New York, NY, USA},
701 | keywords = {Braess's Paradox, Nash equilibria, network flow, selfish routing},
702 | }
703 |
704 |
705 | @article{hardin1968tragedy,
706 | title={The tragedy of the commons},
707 | author={Hardin, Garrett},
708 | journal={Science},
709 | volume={162},
710 | number={3859},
711 | pages={1243--1248},
712 | year={1968}
713 | }
714 |
715 |
716 | @book{mas1995microeconomic,
717 | title={Microeconomic theory},
718 | author={Mas-Colell, Andreu and Whinston, Michael D. and Green, Jerry R.},
719 | volume={1},
720 | year={1995},
721 | publisher={Oxford university press New York}
722 | }
723 |
724 |
725 |
726 |
727 | @article{Vic61,
728 | title = {{Counterspeculation, Auctions and Competitive Sealed Tenders}},
729 | author = {W. Vickrey},
730 | journal = {Journal of Finance},
731 | year = {1961},
732 | pages = {8--37},
733 | www_important = {},
734 | www_section = {Mechanism Design},
735 | }
736 |
737 | @article{clarke71,
738 | title = {{Multipart pricing of public goods}},
739 | author = {E. H. Clarke},
740 | journal = {Public Choice},
741 | volume = {2},
742 | year = {1971},
743 | pages = {19--33},
744 | www_section = {Mechanism Design},
745 | }
746 |
747 | @article{groves73,
748 | title = {{Incentives in Teams}},
749 | author = {T. Groves},
750 | journal = {Econometrica},
751 | volume = {41},
752 | year = {1973},
753 | pages = {617--631},
754 | www_section = {Mechanism Design},
755 | }
756 |
757 | @article{monderer1996potential,
758 | title={Potential games},
759 | author={Monderer, Dov and Shapley, Lloyd S},
760 | journal={Games and economic behavior},
761 | volume={14},
762 | number={1},
763 | pages={124--143},
764 | year={1996},
765 | publisher={Elsevier}
766 | }
767 |
768 |
769 | @article{jackson2000mechanism,
770 | title={Mechanism theory},
771 | author={Jackson, Matthew O},
772 | journal={The Encyclopedia of Life Support Systems},
773 | year={2000}
774 | }
775 |
776 | @article{jain2010efficient,
777 | title={An efficient Nash-implementation mechanism for network resource allocation},
778 | author={Jain, Rahul and Walrand, Jean},
779 | journal={Automatica},
780 | volume={46},
781 | number={8},
782 | pages={1276--1283},
783 | year={2010},
784 | publisher={Elsevier}
785 | }
786 |
787 |
788 |
789 | @inproceedings{yang2006vcg,
790 | title="{VCG-Kelly mechanisms for allocation of divisible goods: Adapting VCG mechanisms to one-dimensional signals}",
791 | author={Yang, Sichao and Hajek, Bruce},
792 | booktitle={2006 40th Annual Conference on Information Sciences and Systems},
793 | pages={1391--1396},
794 | year={2006},
795 | organization={IEEE}
796 | }
797 |
798 |
Duplicate entry removed: "jackson2000mechanism" is already defined earlier in this file
with identical fields; repeated BibTeX keys trigger "Repeated entry" errors. (Plain text
outside an @entry is ignored by BibTeX.)
805 |
806 |
807 |
808 | @article{kelly1997charging,
809 | title={Charging and rate control for elastic traffic},
810 | author={Kelly, Frank},
811 | journal={European transactions on Telecommunications},
812 | volume={8},
813 | number={1},
814 | pages={33--37},
815 | year={1997},
816 | publisher={Wiley Online Library}
817 | }
818 |
819 | @article{kelly1998rate,
820 | title={Rate control for communication networks: shadow prices, proportional fairness and stability},
821 | author={Kelly, Frank P and Maulloo, Aman K and Tan, David KH},
822 | journal={Journal of the Operational Research society},
823 | pages={237--252},
824 | year={1998},
825 | publisher={JSTOR}
826 | }
827 |
828 |
829 |
830 | @inproceedings{maheswaran2004social,
831 | title={Social welfare of selfish agents: motivating efficiency for divisible resources},
832 | author={Maheswaran, Rajiv T and Basar, Tamer},
833 | booktitle={2004 43rd IEEE Conference on Decision and Control (CDC)},
834 | volume={2},
835 | pages={1550--1555},
836 | year={2004},
837 | organization={IEEE}
838 | }
839 |
840 |
Duplicate entry removed: "mas1995microeconomic" is already defined earlier in this file
(same work, slightly different author formatting); repeated BibTeX keys trigger
"Repeated entry" errors. (Plain text outside an @entry is ignored by BibTeX.)
848 |
849 |
850 | @article{spees2007demand,
851 | title={Demand response and electricity market efficiency},
852 | author={Spees, Kathleen and Lave, Lester B},
853 | journal={The Electricity Journal},
854 | volume={20},
855 | number={3},
856 | pages={69--85},
857 | year={2007},
858 | publisher={Elsevier}
859 | }
860 |
861 |
862 | @article{hofbauer2001nash,
863 | title={From Nash and Brown to Maynard Smith: equilibria, dynamics and ESS},
864 | author={Hofbauer, Josef},
865 | journal={Selection},
866 | volume={1},
867 | number={1},
868 | pages={81--88},
869 | year={2001},
870 | publisher={Akad{\'e}miai Kiad{\'o}}
871 | }
872 |
873 | @article{sandholm2007pigouvian,
874 | title={Pigouvian pricing and stochastic evolutionary implementation},
875 | author={Sandholm, William H},
876 | journal={Journal of Economic Theory},
877 | volume={132},
878 | number={1},
879 | pages={367--382},
880 | year={2007},
881 | publisher={Elsevier}
882 | }
883 |
884 |
885 |
886 | @article{sandholm2005negative,
887 | title={Negative externalities and evolutionary implementation},
888 | author={Sandholm, William H},
889 | journal={The Review of Economic Studies},
890 | volume={72},
891 | number={3},
892 | pages={885--915},
893 | year={2005},
894 | publisher={Oxford University Press}
895 | }
896 |
897 | @article{samadi2012advanced,
898 | title={Advanced demand side management for the future smart grid using mechanism design},
899 | author={Samadi, Pedram and Mohsenian-Rad, Hamed and Schober, Robert and Wong, Vincent WS},
900 | journal={Smart Grid, IEEE Transactions on},
901 | volume={3},
902 | number={3},
903 | pages={1170--1180},
904 | year={2012},
905 | publisher={IEEE}
906 | }
907 |
908 | @article{green1977characterization,
909 | title={Characterization of satisfactory mechanisms for the revelation of preferences for public goods},
910 | author={Green, Jerry and Laffont, Jean-Jacques},
911 | journal={Econometrica: Journal of the Econometric Society},
912 | pages={427--438},
913 | year={1977},
914 | publisher={JSTOR}
915 | }
916 |
917 | @inproceedings{hurwicz1975existence,
918 | title={On the existence of allocation systems whose manipulative Nash equilibria are Pareto optimal},
919 | author={Hurwicz, Leonid},
920 | booktitle={3rd World Congress of the Econometric Society},
921 | year={1975}
922 | }
923 |
924 | @ARTICLE{marden14bounds,
925 | author={Marden, J.R. and Roughgarden, T.},
926 | journal={IEEE Transactions on Automatic Control },
927 | title={Generalized Efficiency Bounds in Distributed Resource Allocation},
928 | year={2014},
929 | month={March},
930 | volume={59},
931 | number={3},
932 | pages={571-584},
933 | keywords={distributed parameter systems;game theory;multi-agent systems;resource allocation;Shapley value utility design;autonomous agents;bicriteria bound;central component;coarse correlated equilibria;distributed control;distributed resource allocation;game-theoretic environment;generalized efficiency bounds;local utility function;multiagent systems;near-optimal efficiency;optimal allocation relative;pure Nash equilibria;Games;Joints;Nash equilibrium;Resource management;Sensors;Vehicles;Cost sharing;distributed control;game theory;price of anarchy},
934 | doi={10.1109/TAC.2014.2301613},
935 | ISSN={0018-9286},}
--------------------------------------------------------------------------------
/dynamics/bnn.m:
--------------------------------------------------------------------------------
function dz = bnn(t,z)
% BNN Computes the differential equation that describes the update of the
%     populations' state following the Brown-von Neumann-Nash dynamics
%
% SYNOPSIS: DZ = BNN(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state (the n strategy proportions of
%          each population stacked consecutively, G.P * n entries)
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game
%
% SEE ALSO definition, logit, rd, maynard_rd, smith, smith_b, stopevent, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16


global G norm_dx

% extract matrix of strategies: row p holds the state of population p.
% reshape replaces vec2mat to avoid the Communications System Toolbox
% dependency (z stacks n consecutive entries per population).
n = max(G.S);
x_n = reshape(z, n, G.P).';
x = zeros(G.P, n);

% scale proportions by each population's mass to obtain absolute states
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end

F_mean = zeros(G.P, 1);    % average payoff of each population
F = zeros(G.P, n);         % payoff of each strategy

F_excess = zeros(G.P, n);  % positive part of the excess payoff
F_gamma = zeros(G.P, 1);   % total excess payoff per population

x_dot_v = zeros(G.P* n, 1);

% evaluate the fitness function, either for the whole society at once or
% population by population
if G.pop_wise == 0
    F(:, :) = G.f(x);
else
    for p = 1 : G.P
        F(p, :) = G.f(x, p);
    end
end

for p = 1 : G.P
    F_mean(p) = F(p, :) * x_n(p, :)';

    % BNN dynamics: strategies gain mass in proportion to their excess
    % payoff over the population average, and every strategy decays in
    % proportion to the total excess payoff
    F_excess(p,:) = max( F(p, :) - F_mean(p), 0 );
    F_gamma(p) = F_excess(p, :) * ones(n, 1);

    x_dot_v( (p-1)*n + 1 : p*n ) = F_excess(p, :) - F_gamma(p) * x_n(p, :);

end

dz = x_dot_v;
if G.stop_c == true
    % track the norm of the vector field so stopevent can halt the solver
    norm_dx = norm(dz);
end
--------------------------------------------------------------------------------
/dynamics/combined_dynamics.m:
--------------------------------------------------------------------------------
function dz = combined_dynamics(t,z)
% COMBINED_DYNAMICS Differential equation defined as the linear combination
%     of other existing differential equations
%
% SYNOPSIS: DZ = COMBINED_DYNAMICS(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game. G.dynamics is a cell array with the names of
%         the dynamics to combine and G.gamma their weights
%
% SEE ALSO definition, bnn, logit, rd, maynard_rd, smith_b, stopevent
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

n = max(G.S);
dz = zeros(G.P * n, 1);

% accumulate the weighted contribution of every participating dynamic.
% feval on the function name replaces the former strcat/eval construction,
% which built and evaluated a code string on every call
for i = 1 : length(G.dynamics)
    if G.gamma(i) ~= 0
        dz = dz + G.gamma(i) * feval( G.dynamics{i}, t, z );
    end
end
--------------------------------------------------------------------------------
/dynamics/logit.m:
--------------------------------------------------------------------------------
function dz = logit(t,z)
% LOGIT Computes the differential equation that describes the update of the
%     populations' state following the logit dynamics
%
% SYNOPSIS: DZ = LOGIT(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game. G.eta is the noise level of the logit choice
%
% SEE ALSO definition, bnn, rd, maynard_rd, smith, smith_b, stopevent, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G norm_dx

eta = G.eta;

% extract matrix of strategies: row p holds the state of population p.
% reshape replaces vec2mat to avoid the Communications System Toolbox
% dependency (z stacks n consecutive entries per population).
n = max(G.S);
x_n = reshape(z, n, G.P).';
x = zeros(G.P, n);

% scale proportions by each population's mass to obtain absolute states
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end

% calculate fitness of each strategy
F = zeros(G.P, n);
F_ = zeros(G.P, n);      % exponentiated payoffs
F_mean = zeros(G.P, 1);  % normalization constant per population
x_dot_v = zeros(G.P* n, 1);

% evaluate the fitness function, either for the whole society at once or
% population by population
if G.pop_wise == 0
    F(:, :) = G.f(x);
else
    for p = 1 : G.P
        F(p, :) = G.f(x, p);
    end
end

for p = 1 : G.P
    % logit choice: probabilities proportional to exp(payoff / eta)
    F_(p, :) = exp( F(p, : ) / eta );
    F_mean(p) = F_(p, :) * ones(n, 1);

    % calculate update in the strategy: drift towards the logit choice
    % distribution and away from the current state
    x_dot_v( (p-1)*n + 1 : p*n) = F_(p, :) / F_mean(p) - x_n(p, :);
end

dz = x_dot_v;
if G.stop_c == true
    % track the norm of the vector field so stopevent can halt the solver
    norm_dx = norm(dz);
end
--------------------------------------------------------------------------------
/dynamics/maynard_rd.m:
--------------------------------------------------------------------------------
function dz = maynard_rd(t, z)
% MAYNARD_RD Computes the differential equation that describes the update of
%     the populations' state following the Maynard replicator dynamics
%
% SYNOPSIS: DZ = MAYNARD_RD(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game
%
% SEE ALSO definition, bnn, logit, rd, smith, smith_b, stopevent, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G norm_dx

n = max(G.S);

F = zeros(G.P, n);       % payoff of each strategy
F_mean = zeros(G.P, 1);  % average payoff of each population
x_dot_v = zeros(G.P* n, 1);

% extract matrix of strategies: row p holds the state of population p.
% reshape replaces vec2mat to avoid the Communications System Toolbox
% dependency (z stacks n consecutive entries per population).
x_n = reshape(z, n, G.P).';
x = zeros(G.P, n);

% scale proportions by each population's mass to obtain absolute states
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end

% evaluate the fitness function, either for the whole society at once or
% population by population
if G.pop_wise == 0
    F(:, :) = G.f(x);
else
    for p = 1 : G.P
        F(p, :) = G.f(x, p);
    end
end

for p = 1 : G.P
    F_mean(p) = F(p, :) * x_n(p, :)';

    % calculate update in the strategy: replicator dynamics normalized by
    % the average payoff (Maynard Smith's form)
    F_excess = F(p, :) - ones(1, n) * F_mean(p);
    x_dot_v ( (p-1)*n + 1 : p*n ) = F_excess .* x_n(p, :) / F_mean(p);
end

dz = x_dot_v;
if G.stop_c == true
    % track the norm of the vector field so stopevent can halt the solver
    norm_dx = norm(dz);
end
--------------------------------------------------------------------------------
/dynamics/rd.m:
--------------------------------------------------------------------------------
function dz = rd(t, z)
% RD Computes the differential equation that describes the update of the
%     populations' state following the replicator dynamics
%
% SYNOPSIS: DZ = RD(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game
%
% SEE ALSO definition, bnn, logit, maynard_rd, smith, smith_b, stopevent, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G norm_dx

n = max(G.S);

F = zeros(G.P, n);       % payoff of each strategy
F_mean = zeros(G.P, 1);  % average payoff of each population
x_dot_v = zeros(G.P* n, 1);

% extract matrix of strategies: row p holds the state of population p.
% reshape replaces vec2mat to avoid the Communications System Toolbox
% dependency (z stacks n consecutive entries per population).
x_n = reshape(z, n, G.P).';
x = zeros(G.P, n);

% scale proportions by each population's mass to obtain absolute states
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end

% evaluate the fitness function, either for the whole society at once or
% population by population
if G.pop_wise == 0
    F(:, :) = G.f(x);
else
    for p = 1 : G.P
        F(p, :) = G.f(x, p);
    end
end

for p = 1 : G.P
    F_mean(p) = F(p, :) * x_n(p, :)';

    % calculate update in the strategy: each strategy grows at a rate
    % proportional to its excess payoff over the population average
    F_excess = F(p, :) - ones(1, n) * F_mean(p);
    x_dot_v ( (p-1)*n + 1 : p*n ) = F_excess .* x_n(p, :);
end

dz = x_dot_v;
if G.stop_c == true
    % track the norm of the vector field so stopevent can halt the solver
    norm_dx = norm(dz);
end
--------------------------------------------------------------------------------
/dynamics/smith.m:
--------------------------------------------------------------------------------
function dz = smith(t,z)
% SMITH Computes the differential equation that describes the update of the
%     populations' state following the Smith dynamics
%
% SYNOPSIS: DZ = SMITH(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game
%
% SEE ALSO definition, bnn, logit, rd, maynard_rd, smith_b, stopevent, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G norm_dx

n = max(G.S);

F = zeros(G.P, n);          % payoff of each strategy
F_sum = zeros(G.P, n);      % total pairwise payoff advantage over each strategy
F_mean = zeros(G.P, n);     % inflow weighted by the current state
x_dot_v = zeros(G.P * n, 1);

% extract matrix of strategies: row p holds the state of population p.
% reshape replaces vec2mat to avoid the Communications System Toolbox
% dependency (z stacks n consecutive entries per population).
x_n = reshape(z, n, G.P).';
x = zeros(G.P, n);


% scale proportions by each population's mass to obtain absolute states
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end

% evaluate the fitness function, either for the whole society at once or
% population by population
if G.pop_wise == 0
    F(:, :) = G.f(x);
else
    for p = 1 : G.P
        F(p, :) = G.f(x, p);
    end
end

for p = 1 : G.P
    % M(i,j) = max(F(j) - F(i), 0): rate of switching from i to j
    A = ones(n,1)*F(p,:);
    M = max( zeros(n,n), A - A' );

    F_sum(p, :) = M * ones(n,1);
    F_mean(p, :) = x_n(p,:) * M;

    % inflow from worse strategies minus outflow to better strategies
    x_dot_v((p-1)*n + 1 : p*n ,:) = F_mean(p,:) - x_n(p,:) .* F_sum(p,:);
end

dz = x_dot_v;

if G.stop_c == true
    % track the norm of the vector field so stopevent can halt the solver
    norm_dx = norm(dz);
end
--------------------------------------------------------------------------------
/dynamics/smith_b.m:
--------------------------------------------------------------------------------
function dz = smith_b(t,z)
% SMITH_B Computes the differential equation that describes the update of the
%     populations' state following the Smith dynamics. This is an
%     alternative implementation to smith that sorts the payoffs once
%     per population instead of building the full pairwise matrix
%
% SYNOPSIS: DZ = SMITH_B(T, Z)
%
% INPUT T: Time. Although the dynamics are time invariant, this parameter
%          might be used to calculate the fitness function
%       Z: Vector of the populations' state
%
% OUTPUT DZ: Vector with the value of the differential equation given T and Z
%
% REMARKS Its better to execute first definition and run the game using
%         G.run(). This function uses the global variables 'G' and 'norm_dx'
%         to define the game
%
% SEE ALSO definition, bnn, logit, rd, maynard_rd, smith, stopevent, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G norm_dx

% extract matrix of strategies: row p holds the state of population p.
% reshape replaces vec2mat to avoid the Communications System Toolbox
% dependency (z stacks n consecutive entries per population).
n = max(G.S);
x_n = reshape(z, n, G.P).';
x = zeros(G.P, n);

F = zeros(G.P, n);
F_excess_a = zeros(1,n);   % inflow term for each strategy
F_excess_b = zeros(1,n);   % outflow rate for each strategy
x_dot_v = zeros(G.P* n, 1);

% scale proportions by each population's mass to obtain absolute states
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end

% evaluate the fitness function, either for the whole society at once or
% population by population
if G.pop_wise == 0
    F(:, :) = G.f(x);
else
    for p = 1 : G.P
        F(p, :) = G.f(x, p);
    end
end


for p = 1 : G.P
    % order the strategies by their fitness (ascending); B maps the
    % sorted position back to the original strategy index
    [A, B] = sort( F( p, 1:G.S(p) ) );

    A_sum = A(1:n)*ones(n,1);   % running sum of payoffs above position i
    A_avg = 0;                  % mass-weighted payoff of worse strategies
    x_ordered = x_n(p, B)';
    x_cum = 0;                  % cumulative mass of worse strategies
    for i=1:G.S(p)
        k = B(i);
        A_sum = A_sum-A(i);

        % inflow from strategies with lower payoff, and total payoff
        % advantage of strategies with higher payoff
        F_excess_a(k) = A(i)*x_cum - A_avg;
        F_excess_b(k) = A_sum - A(i)*(n-i);

        A_avg = A_avg + A(i)*x_ordered(i);
        x_cum = x_cum + x_ordered(i);
    end

    % calculate update in the strategy
    x_dot_v( (p-1)*n + 1 : p*n) = F_excess_a - F_excess_b .* x_n(p, :);
end


dz = x_dot_v;

if G.stop_c == true
    % track the norm of the vector field so stopevent can halt the solver
    norm_dx = norm(dz);
end
--------------------------------------------------------------------------------
/dynamics/stopevent.m:
--------------------------------------------------------------------------------
function [x, isterm, dir] = stopevent(t, y)
% STOPEVENT Event used to stop the ODE solver when the norm of the game's
%     differential equation is smaller than C_ERROR
%
% SYNOPSIS: [X, ISTERM, DIR] = STOPEVENT(T, Y)
%
% INPUT T: Time
%       Y: State
%
% OUTPUT X: Vector of events. When X(i) reaches zero, the i-th event is
%           triggered
%        ISTERM: If equal to 1, then stops the solver with the first event.
%           If equal to 0, then stops when all events are triggered
%        DIR: (1) trigger events when X(i) is increasing; (-1) trigger events
%           when X(i) is decreasing; (0) trigger all crosses through zero
%
% SEE ALSO definition, bnn, logit, rd, maynard_rd, smith, smith_b, combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global norm_dx c_error

% Single terminal event: the event value crosses zero once the norm of the
% vector field (maintained by the dynamics functions) drops to the
% convergence tolerance c_error.
x = norm_dx - c_error;
isterm = 1;   % halt integration at the first trigger
dir = 0;      % detect crossings from either direction
--------------------------------------------------------------------------------
/graphs/graph_evolution.m:
--------------------------------------------------------------------------------
function graph_evolution(name)
% GRAPH_EVOLUTION Plots the evolution of the strategies of each population.
%     There are no restrictions in the number of populations or
%     the number of strategies per population
%
% SYNOPSIS: GRAPH_EVOLUTION(name)
%
% INPUT name: Name of the structure that represents the game
%
% REMARKS Its better to execute first definition and run_game to plot the
%         evolution of strategies with G.graph_evolution()
%
% SEE ALSO definition, run_game, graph_fitness, graph_simplex, graph_multi_pop
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16


% load the structure of the game that calls the function
G = evalin('base', name);

n = max(G.S);
colors = hsv(n);   % one distinguishable color per strategy

figure(2)
clf
for p = 1 : G.P
    % one stacked subplot per population
    subplot(G.P, 1, p);
    hold on
    labels = cell(1, G.S(p));
    for s = 1 : G.S(p)
        % strategy s of population p lives at column (p-1)*n + s of G.X
        plot(G.T, G.X(:, (p-1)*n + s), 'color', colors(s, :));
        labels{s} = [num2str(s), '-th strategy'];
    end
    ylim([0 1])
    xlim([0 G.T(end)])
    hold off

    title(['Evolution of the ', num2str(p), '-th Population'])

    h = legend(labels);
    set(h, 'Interpreter', 'none')
    xlabel('time')
end
--------------------------------------------------------------------------------
/graphs/graph_fitness.m:
--------------------------------------------------------------------------------
function graph_fitness(name)
% GRAPH_FITNESS Plots the evolution of the fitness of each population.
%     There are no restrictions in the number of populations or
%     the number of strategies per population
%
% SYNOPSIS: GRAPH_FITNESS(name)
%
% INPUT name: Name of the structure that represents the game
%
% REMARKS Its better to execute first definition and run_game to plot the
%         evolution of strategies with G.graph_fitness()
%
% SEE ALSO definition, run_game, graph_evolution, graph_simplex, graph_multi_pop
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16


% load the structure of the game that calls the function
G = evalin('base', name);

n = max(G.S);
colors = hsv(n);   % one distinguishable color per strategy

figure(5)
clf

utility = zeros( length(G.T), G.P * n );       % per-strategy fitness over time
utility_mean = zeros( length(G.T), G.P );      % weighted average per population
state = zeros(length(G.T), n);                 % trajectory of the first population

% reconstruct the fitness of every strategy at every recorded time
for idx = 1 : length(G.T)
    x = G.state(G.T(idx));
    state(idx, :) = x(1, :);
    F = zeros(G.P, n);
    if G.pop_wise == 0
        F(:, :) = G.f(x);
    else
        for p = 1 : G.P
            F(p, :) = G.f(x, p);
        end
    end

    for p = 1 : G.P
        base = (p-1) * n;
        utility(idx, base+1 : base+G.S(p)) = F(p, 1:G.S(p));
        utility_mean(idx, p) = F(p, :) * x(p, :)' / G.m(p) ;
    end

end

for p = 1 : G.P
    base = (p-1) * n;

    % one stacked subplot per population
    subplot(G.P, 1 , p);
    hold on
    labels = cell(1, G.S(p) + 1);

    for s = 1 : G.S(p)
        plot(G.T, utility(:, base + s), 'color', colors(s, :));
        labels{s} = [num2str(s), '-th strategy'];
    end
    plot(G.T, utility_mean(:, p), 'k-.');
    labels{ G.S(p)+1 } = 'Weighted Avg. Population';
    xlim([0 G.time])
    hold off

    title(['Fitness of the ', num2str(p), '-th Population'])

    h = legend(labels);
    set(h, 'Interpreter', 'none')
    xlabel('time')
end
--------------------------------------------------------------------------------
/graphs/graph_multi_pop.m:
--------------------------------------------------------------------------------
function graph_multi_pop(name)
% GRAPH_MULTI_POP Plots the evolution of games with two populations and two
%     strategies per population
%
% SYNOPSIS: GRAPH_MULTI_POP(name)
%
% INPUT name: Name of the structure that represents the game
%
% REMARKS Its better to execute first definition and run_game to plot the
%         evolution of strategies with G.graph2p()
%
% SEE ALSO definition, run_game, graph_evolution, graph_fitness, graph_simplex
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

% load the structure of the game that calls the function
G = evalin('base', name);

figure(4)
clf

% phase plot: first strategy of population 1 against the first strategy of
% population 2 (population 2's state starts at column G.S(1)+1 of G.X)
plot( G.X(:, 1), G.X(:, G.S(1)+1) )

axis([0 1 0 1])

title('Simplex')
--------------------------------------------------------------------------------
/graphs/graph_simplex.m:
--------------------------------------------------------------------------------
function graph_simplex(name)
% GRAPH_SIMPLEX Plots the evolution of games with one population and three
%     strategies per population
%
% SYNOPSIS: GRAPH_SIMPLEX(name)
%
% INPUT name: Name of the structure that represents the game
%
% REMARKS Its better to execute first definition and run_game to plot the
%         evolution of strategies with G.graph()
%
% SEE ALSO definition, run_game, graph_evolution, graph_fitness, graph_multi_pop
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16


% load the structure of the game that calls the function
G = evalin('base', name);

figure(1)
clf
hold on
% boundary of the 2-simplex (the triangle x1 + x2 + x3 = 1)
plot3([0 1 0 0]', [1 0 0 1]', [0 0 1 0]', 'k')
% trajectory of the three strategy proportions
plot3(G.X(:,1), G.X(:,2), G.X(:,3));
axis([0 1 0 1 0 1]);
view(135,0)
hold off
title('Simplex')
--------------------------------------------------------------------------------
/revision_protocols/comparison2average.m:
--------------------------------------------------------------------------------
function s_i = comparison2average(F, z, s, i, p)
% COMPARISON2AVERAGE Computes the difference equation that describes the update
%     of the populations' state following the comparison to the average
%     payoff revision protocol. This revision protocol leads to the BNN
%     dynamics with a large number of agents
%
% SYNOPSIS: S_I = COMPARISON2AVERAGE(F, z, s, i, p)
%
% INPUT F: Vector with the payoff of each strategy
%       z: Vector with the society's state (distribution of strategies)
%       s: Vector with the strategy of each agent
%       i: ID of the agent that makes an update of its strategy
%       p: Population ID. The current version only supports finite games with
%          one population
%
% OUTPUT s_i: Update of the agent's strategy
%
% SEE ALSO definition, run_game_finite_population, logit_choice, pairwise_comparison,
%          proportional_imitation
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

% candidate strategy drawn uniformly at random
j = unidrnd( G.S(p) );

% switch rate: positive excess of the candidate's payoff over the
% society's average payoff
payoff_candidate = F( j );
payoff_average = F' * z' / G.m;
rho_ij = max(payoff_candidate - payoff_average, 0);

% Bernoulli trial: the ceil expression equals 1 with probability rho_ij / G.R
if ceil(rand - 1 + rho_ij / G.R) == 1
    s_i = j;
else
    s_i = s(i);
end
--------------------------------------------------------------------------------
/revision_protocols/logit_choice.m:
--------------------------------------------------------------------------------
function s_i = logit_choice(F, z, s, i, p)
% LOGIT_CHOICE Computes the difference equation that describes the update of
%     the populations' state following the logit choice revision protocol.
%     This revision protocol leads to the logit dynamics with a large
%     number of agents
%
% SYNOPSIS: s_i = LOGIT_CHOICE(F, z, s, i, p)
%
% INPUT F: Vector with the payoff of each strategy
%       z: Vector with the society's state (distribution of strategies)
%       s: Vector with the strategy of each agent
%       i: ID of the agent that makes an update of its strategy
%       p: Population ID. The current version only supports finite games with
%          one population
%
% OUTPUT s_i: Update of the agent's strategy
%
% SEE ALSO definition, run_game_finite_population, comparison2average,
%          pairwise_comparison, proportional_imitation
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

% candidate strategy drawn uniformly at random
j = unidrnd( G.S(p) );

% logit choice rule: the candidate is adopted with probability
% proportional to exp(payoff / eta), where eta is the noise level
eta = G.eta;
F_ = exp( F(1:G.S(p) ) / eta );
F_mean = sum(F_(:));

rho_ij = F_(j) / F_mean;

% prob generator: the ceil expression equals 1 with probability rho_ij / G.R
change = ceil(rand - 1 + rho_ij / G.R);

if change == 1
    s_i = j;
else
    s_i = s(i);
end

% NOTE: a leftover debugging stub ("if j == 3, h=1; end") was removed;
% it assigned an unused variable and had no effect on the result
--------------------------------------------------------------------------------
/revision_protocols/pairwise_comparison.m:
--------------------------------------------------------------------------------
function s_i = pairwise_comparison(F, z, s, i, p)
% PAIRWISE_COMPARISON Computes the difference equation that describes the update
%     of the populations' state following the pairwise comparison revision
%     protocol. This revision protocol leads to the Smith dynamics with
%     a large number of agents
%
% SYNOPSIS: S_I = PAIRWISE_COMPARISON(F, z, s, i, p)
%
% INPUT F: Vector with the payoff of each strategy
%       z: Vector with the society's state (distribution of strategies)
%       s: Vector with the strategy of each agent
%       i: ID of the agent that makes an update of its strategy
%       p: Population ID. The current version only supports finite games with
%          one population
%
% OUTPUT s_i: Update of the agent's strategy
%
% SEE ALSO definition, run_game_finite_population, comparison2average,
%          logit_choice, proportional_imitation
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

% candidate strategy drawn uniformly at random
j = unidrnd( G.S(p) );

% switch rate: positive payoff advantage of the candidate strategy over
% the agent's current strategy
payoff_current = F( s(i) );
payoff_candidate = F( j );
rho_ij = max(payoff_candidate - payoff_current, 0);

% Bernoulli trial: the ceil expression equals 1 with probability rho_ij / G.R
if ceil(rand - 1 + rho_ij / G.R) == 1
    s_i = j;
else
    s_i = s(i);
end
--------------------------------------------------------------------------------
/revision_protocols/proportional_imitation.m:
--------------------------------------------------------------------------------
function s_i = proportional_imitation(F, z, s, i, p)
% PROPORTIONAL_IMITATION Computes the difference equation that describes the
%     update of the populations' state following the pairwise proportional
%     imitation revision protocol. With a large number of agents this
%     revision protocol leads to the replicator dynamics
%
% SYNOPSIS: S_I = PROPORTIONAL_IMITATION(F, z, s, i, p)
%
% INPUT F: Vector with the payoff of each strategy
%       z: Vector with the society's state (distribution of strategies)
%       s: Vector with the strategy of each agent
%       i: ID of the agent that makes an update of its strategy
%       p: Population ID. The current version only supports finite games with
%          one population
%
% OUTPUT s_i: Update of the agent's strategy
%
% SEE ALSO definition, run_game_finite_population, comparison2average,
%          logit_choice, pairwise_comparison
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

% opponent agent drawn uniformly at random from the whole population
opponent = unidrnd( G.N );

% the imitation rate is the positive part of the payoff difference
gain = F( s(opponent) ) - F( s(i) );
imitation_rate = max(gain, 0);

% Bernoulli trial with success probability imitation_rate / G.R
if ceil(rand - 1 + imitation_rate / G.R) == 1
    s_i = s(opponent);
else
    s_i = s(i);
end
--------------------------------------------------------------------------------
/revision_protocols/state.m:
--------------------------------------------------------------------------------
function z = state(s)
% STATE Computes the strategy's proportion (social state)
%
% SYNOPSIS: z = STATE(s)
%
% INPUT s: Vector with the strategy of each agent
%
% OUTPUT z: Vector with the social state
%
% SEE ALSO definition, run_game_finite_population, comparison2average,
%          logit_choice, pairwise_comparison, proportional_imitation
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16


global G

% fraction of agents that currently play each pure strategy
num_strategies = G.S(1);
z = zeros(1, num_strategies);

for k = 1 : num_strategies
    z(k) = sum(s == k) / G.N;
end
--------------------------------------------------------------------------------
/run_game.m:
--------------------------------------------------------------------------------
function run_game(name)
% RUN_GAME Solves the differential equation of a population game
%
% SYNOPSIS: RUN_GAME(name)
%
% INPUT name: Name of the structure that represents the game
%
% The solution of the game, evolution of strategies X and the time T, is
% attached to the game's structure
%
% REMARKS Its better to execute first definition and run the game using G.run().
%         This function uses the global variable 'G' to define the game
%
% SEE ALSO add_path, definition, run_game_finite_population,
%          combined_dynamics
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

% load the structure of the game that calls the function
G = evalin('base', name);

% pick the dynamics function: a single named dynamic, or the combination
% of several dynamics. Function handles are used instead of building and
% eval-ing a command string, which was fragile and hard to debug.
if length(G.dynamics) == 1
    if G.verb == true
        disp (['Running ', G.dynamics{1}, ' dynamics']);
    end
    dynamics_fh = str2func(G.dynamics{1});
else
    if G.verb == true
        names = strcat(G.dynamics{1});
        for i = 2: length(G.dynamics)
            names = strcat(names, ', ', ' ', G.dynamics{i});
        end
        disp (['Running combination of ', names, ' dynamics']);
    end
    dynamics_fh = @combined_dynamics;
end

% ODE solver selected in the game definition (e.g. 'ode45', 'ode113')
solver = str2func(G.ode);
t_span = G.step : G.step : G.time + G.step;

% solve the ODE, reporting the running time when verbose
if G.verb == true
    tic
    [T, X] = solver(dynamics_fh, t_span, transpose(G.x0), G.options_ode);
    toc

    disp([' '])
else
    [T, X] = solver(dynamics_fh, t_span, transpose(G.x0), G.options_ode);
end

G.T = T;
G.X = X;

% warn if the integration produced invalid values
if any( isnan( X(:) ) )
    warning('The solution of the game has NaN values.')
end

% save changes on the structure in the workspace
assignin('base', name, G);
clear G
--------------------------------------------------------------------------------
/run_game_finite_population.m:
--------------------------------------------------------------------------------
function run_game_finite_population(name)
% RUN_GAME_FINITE_POPULATION Solves the difference equation of a population game
%
% SYNOPSIS: RUN_GAME_FINITE_POPULATION(name)
%
% INPUT name: Name of the structure that represents the game
%
% The solution of the game, namely the evolution of strategies X in time,
% are attached to the game's structure
%
% REMARKS Its better to execute first definition and run the game using G.run_finite().
%         This function uses the global variable 'G' to define the game
%
% SEE ALSO add_path, definition, run_game
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

global G

% load the structure of the game that calls the function
G = evalin('base', name);

% set initial strategy of each agent to satisfy the initial condition
protocol = func2str(G.revision_protocol);

if G.verb == true
    disp (['Running the ', protocol, ' revision protocol']);
    tic
end

s = zeros(G.N, G.P);

% calculate the initial strategy of each agent given the proportions in x0
h = 0;
for i = 1: G.S(1)
    p = floor(G.N * G.x0(1, i));
    if ((p + h) <= G.N) && (p ~= 0)
        s(h + 1: h + p) = i;
        h = h + p;
    end

end

% choose a random strategy to complete the initial state vector
if h ~= G.N
    s(h + 1: G.N ) = unidrnd(G.S(1), 1, G.N - h);
end

% set the number of iterations
t_max = floor(G.time);

T = 1:1:t_max;
X = zeros( t_max, G.S(1) );

% Number of agents that update their strategy at each time instant
alarm = poissrnd( G.N * G.R, 1, t_max);

for t = 1: t_max

    % update society state
    x = state(s);

    % find the current payoff of each strategy
    F = G.f(x, 1);

    % select users to update their actions at random
    update_agents = unidrnd(G.N, 1, alarm(t));

    % procedure to update the strategy of each agent. Every revising agent
    % observes the same pre-update state s, and all updates are applied
    % simultaneously at the end of the time instant.
    % BUG FIX: s_update must be initialized BEFORE the loop; the previous
    % version reset it to s on every iteration, which discarded all
    % updates except the last agent's.
    s_update = s;
    for k = 1 : alarm(t)
        i = update_agents(k);
        s_update(i) = G.revision_protocol(F, x, s, i, 1);
    end

    s = s_update;

    X(t, :) = x';


end


if G.verb == true
    toc
    disp([' '])
end


G.X = X;
G.T = T;


% save changes on the structure in the workspace
assignin('base', name, G);
clear G
--------------------------------------------------------------------------------
/strategy.m:
--------------------------------------------------------------------------------
function x = strategy(name, T)
% STRATEGY Computes the strategy of all populations at time T
%
% SYNOPSIS: X = STRATEGY(name, T)
%
% INPUT name: Name of the structure that represents the game
%       T: Time. A negative value, or a value at or past the end of the
%          simulation, selects the last recorded state.
%
% OUTPUT X: Matrix with the strategies of the society at time T
%
% SEE ALSO definition, run_game
%
% For more information see: the GitHub's repository.
%
% Carlos Barreto, 04-11-16

% load the structure of the game that calls the function
G = evalin('base', name);

n = max(G.S);

% find the index t of the time instant T (both out-of-range cases pick
% the final state)
if (T < 0) || (T >= G.T(end))
    t = length(G.T);
else
    t = floor( T / G.step + 1);
end

% strategies normalized. reshape(...).' arranges the state vector into
% rows of length n, same layout as vec2mat, but without requiring the
% Communications Toolbox.
x_n = reshape(G.X(t, :), n, []).';

% scale each population's normalized state by its mass
x = zeros(G.P, n);
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end
--------------------------------------------------------------------------------
/test/electricity grid/fitness_user.m:
--------------------------------------------------------------------------------
function F = fitness_user(x, p)
% FITNESS_USER Payoff of each consumption strategy for user (population) p.
%
% SYNOPSIS: F = FITNESS_USER(x, p)
%
% INPUT x: Matrix with the power consumption of each user (rows) at each
%          hour (columns); the last column is the null strategy
%       p: Population (user) ID
%
% OUTPUT F: Column vector with the payoff of the T_+1 strategies; the
%           payoff of the null strategy (last entry) is zero
%
% Uses the global parameters alpha_ef (valuation) and beta_ef (aggregate
% cost) defined by the test scripts.

global beta_ef alpha_ef

power = x;
T_ = 24;              % number of hours (consumption strategies)
F = zeros(T_+1,1);

index = p;
n = T_ + 1;           % index of the null strategy

% T_ is used consistently instead of the previous hard-coded 24
for l = 1 : T_
    q_t = power(index, l);
    sum_q = sum( power(:, l) );     % aggregate demand at hour l
    alpha = alpha_ef(index, l);
    F( l ) = alpha / (1+q_t) - 2 * beta_ef * (sum_q) ;
end
F(n) = 0;
--------------------------------------------------------------------------------
/test/electricity grid/fitness_user_inefficient.m:
--------------------------------------------------------------------------------
function F = fitness_user_inefficient(x, p)
% FITNESS_USER_INEFFICIENT Payoff of each consumption strategy for user p,
% with the alternative (inefficient) price term beta_ef*(sum_q + q_t).
%
% SYNOPSIS: F = FITNESS_USER_INEFFICIENT(x, p)
%
% INPUT x: Matrix with the power consumption of each user (rows) at each
%          hour (columns); the last column is the null strategy
%       p: Population (user) ID
%
% OUTPUT F: Column vector with the payoff of the T_+1 strategies; the
%           payoff of the null strategy (last entry) is zero
%
% Uses the global parameters alpha_ef (valuation) and beta_ef (aggregate
% cost) defined by the test scripts.

global beta_ef alpha_ef

power = x;
T_ = 24;              % number of hours (consumption strategies)
F = zeros(T_+1,1);

index = p;
n = T_ + 1;           % index of the null strategy

% T_ is used consistently instead of the previous hard-coded 24
for l = 1 : T_
    q_t = power(index, l);
    sum_q = sum( power(:, l) );     % aggregate demand at hour l
    alpha = alpha_ef(index,l);
    F(l) = alpha/(1+q_t) - beta_ef*(sum_q + q_t) ;
end
F(n) = 0;
--------------------------------------------------------------------------------
/test/electricity grid/graph_ev_dynamics.m:
--------------------------------------------------------------------------------
% graph average behavior of the dynamics
%
% Script: plots the evolution of the society's average surplus (U_avg*),
% electricity demand (P_avg*) and incentives (I_avg*) for four dynamics:
% logit (no suffix), replicator (_rd), Smith (_s) and BNN (_bnn). It then
% accumulates the incentives over time and plots the accumulated curves
% normalized by int_avg.
% Expects the averages, T, T_ and G in the workspace (computed by
% graph_incentives_evolution / test_electricity_system).




% --- figure 15: average surplus (top) and demand (bottom) ---
figure(15)
clf
lim_ = abs(max(U_avg_s) - min(U_avg_s));
subplot(2,1,1)
%subplot('Position', [0.1 0.1 0.5 0.5] )
plot(T,U_avg, 'g--','LineWidth',2)
hold on
plot(T,U_avg_rd, '--')
plot(T,U_avg_s, 'k')
plot(T, U_avg_bnn, 'r-.')


title('Society`s Surplus')
% axis limits scaled from the Smith-dynamics range
ylim([min(U_avg_s)-lim_*.1 max(U_avg_s)+lim_*.2])
xlim([T(1) T(end)])
ylabel('Social Surplus')
xlabel('Time')


h = legend( 'Logit Dynamics', 'Replicator Dynamics', 'Smith Dynamics', 'BNN Dynamics',4);
set(h,'Interpreter','none')


set(gca, 'XScale', 'log')




% bottom subplot: average power consumption
lim_ = abs(max(P_avg_s) - min(P_avg_s));
subplot(2,1,2)
%axes('Position',[0.1 0.1 0.5 0.5])
pd = plot(T,P_avg, 'g--','LineWidth',2);
hold on
pb=plot(T,P_avg_rd, '--');
pa=plot(T,P_avg_s, 'k');
pc = plot(T, P_avg_bnn, 'r-.');
hold off

title('Electricity Demand')
ylim([min(P_avg_s)-lim_*.1 max(P_avg_s)+lim_*.2])
ylabel('Power Comsumption')
%h = legend( 'Logit Dynamics', 'Replicator Dynamics', 'Smith Dynamics', 'BNN Dynamics',1);
%set(h,'Interpreter','none')
xlabel('Time')
xlim([T(1) T(end)])


set(gca, 'XScale', 'log')




%lh = legend([pa,pb,pc, pd],{ 'Logit Dynamics', 'Replicator Dynamics', 'Smith Dynamics', 'BNN Dynamics'}); %, 'Logit Dynamics'
%gridLegend([pa,pb,pc, pd],4,'location','southoutside');
%legend('boxoff')




% --- figure 16: average incentives ---
figure(16)

lim_ = abs(max(I_avg_s) - min(I_avg_s));
%subplot(3,1,3)
p4 = plot(T,I_avg, 'g--','LineWidth',2);
hold on
p2 = plot(T,I_avg_rd, '--');
p1 = plot(T,I_avg_s, 'k');
p3 = plot(T,I_avg_bnn, 'r-.');

hold off
%plot([2 2],[min(I_avg)-lim_*.1 max(I_avg)+lim_*.2], 'k--')
%plot(4*[1 1],[min(I_avg)-lim_*.1 max(I_avg)+lim_*.2], 'k--')
%hold off
%title('Average Incentives')
%ylim([min(I_avg_s)-lim_*.1 max(I_avg_s)+lim_*.1])
ylabel('Incentives')
xlabel('Time')
h = legend( 'Logit Dynamics', 'Replicator Dynamics', 'Smith Dynamics', 'BNN Dynamics',1);
set(h,'Interpreter','none')
title('Average Incentives of the Society')
%set(h, 'FontSize', 20)
xlim([T(1) T(end)])


set(gca, 'XScale', 'log')




%sh = subplot(7,1,7)
%p = get(sh, 'position')
%lh = legend([p1,p2,p3,p4],{'Smith Dynamics','Replicator Dynamics ', 'BNN Dynamics ', 'Logit Dynamics'}); %
%gridLegend([p1,p2,p3,p4],4,'location','north');
%legend('boxoff')
%set(lh, 'position', p)
%axis(sh, 'off')

%newPosition = [0.4 0.4 0.2 0.2];
%newUnits = 'normalized';
%set(lh,'Position', newPosition,'Units', newUnits);


%print -deps2 './evolution_dynamics_attack.eps'

%set(gca, 'XTickLabel', [],'XTick',[])

%set(gca, 'LooseInset', get(gca,'TightInset'))








%%% integration calculation
% rectangle-rule accumulation of the average incentives over time for each
% dynamic; int_avg is used below to normalize the curves of figure 20.
% NOTE(review): int_avg(1) uses only the Smith value while subsequent
% entries average the four dynamics (see the commented alternative) --
% looks inconsistent; confirm the intent.
int_smith = zeros(1,length(T));
int_replicator = zeros(1,length(T));
int_bnn = zeros(1,length(T));
int_logit = zeros(1,length(T));
int_avg = zeros(1,length(T));

int_smith(1) = I_avg_s(1) * T_ * G.step + G.step;
int_replicator(1) = I_avg_rd(1) * T_ * G.step + G.step;
int_bnn(1) = I_avg_bnn(1) * T_ * G.step + G.step;
int_logit(1) = I_avg(1) * T_ * G.step + G.step;
%int_avg(1) = (int_smith(1) + int_replicator(1) + int_bnn(1) + int_logit(1))/4;
int_avg(1) = int_smith(1);

for i=2:length(T)
    int_smith(i) = int_smith(i-1) + I_avg_s(i) * T_ * G.step;
    int_replicator(i) = int_replicator(i-1) + I_avg_rd(i) * T_ * G.step;
    int_bnn(i) = int_bnn(i-1) + I_avg_bnn(i) * T_ * G.step;
    int_logit(i) = int_logit(i-1) + I_avg(i) * T_ * G.step;
    int_avg(i) = (int_smith(i) + int_replicator(i) + int_bnn(i) + int_logit(i))/4;
    %int_avg(i) = int_smith(i);
end










% --- figure 20: accumulated incentives normalized by int_avg ---
figure(20)
clf
p4=plot(T, int_logit./int_avg, 'g--','LineWidth',2);
hold on
p2=plot(T, int_replicator./int_avg, '--');
p1=plot(T, int_smith./int_avg, 'k');


p3=plot(T, int_bnn./int_avg, 'r-.');

hold off

xlim([T(1) T(end)])
%ylim([-.2 1.1])
title('Integrate Along the Time of the Incentives')
%ylim([min(I_avg_s)-lim_*.1 max(I_avg_s)+lim_*.2])
ylabel('Accumulated Incentives')
xlabel('Time')
h = legend( 'Logit Dynamics', 'Replicator Dynamics', 'Smith Dynamics', 'BNN Dynamics',4);
set(h,'Interpreter','none')




set(gca, 'XScale', 'log')
%set(gca, 'YScale', 'log')


%lh = legend([p1,p2,p3,p4],{'Smith Dynamics','Replicator Dynamics ', 'BNN Dynamics ', 'Logit Dynamics'}); %
%gridLegend([p1,p2,p3,p4],2,'location','northoutside');
--------------------------------------------------------------------------------
/test/electricity grid/graph_final_state.m:
--------------------------------------------------------------------------------
function graph_final_state(G)
% GRAPH_FINAL_STATE Plots the final state of every population of the game.
%
% SYNOPSIS: GRAPH_FINAL_STATE(G)
%
% INPUT G: Structure of the game; the trajectory G.X must already have
%          been computed (e.g. by run_game)
%
% Plots, for each population, the mass assigned to each of its non-null
% strategies at the last recorded time instant.

cc = hsv(G.P);      % one distinguishable color per population
n = max(G.S);
figure(3)
clf
hold on

% final state arranged with one row per population (row-wise fill, same
% layout as vec2mat but without the Communications Toolbox dependency)
x = reshape(G.X(end, :), n, []).';

% BUG FIX: the loop bound was the undefined variable P; it must be G.P.
% The column range also uses G.S(p)-1 instead of a hard-coded 24 (these
% are identical when S(p) = 25, as in the electricity-grid tests).
for p = 1 : G.P
    plot(1 : 1 : G.S(p)-1, x(p, 1:G.S(p)-1), 'color', cc(p, :))
end

hold off

title('Final State of the Society')
--------------------------------------------------------------------------------
/test/electricity grid/graph_incentives_evolution.m:
--------------------------------------------------------------------------------
% graph dynamics of the system for the smith dynamics and the replicator
% dynamics
%
% Script: for each simulated dynamic (Smith, replicator, BNN, logit) it
% recomputes, at every recorded time instant, the society's average
% utility (U_avg*), incentives (I_avg*) and power consumption (P_avg*),
% then calls graph_ev_dynamics to plot them. Expects the trajectories
% T_smith/X_smith, T_rd/X_rd, T_bnn/X_bnn, T_logit/X_logit and the
% workspace of test_electricity_system (N, P, mp, T_, alpha_ef, beta_ef,
% b).

% valuation: alpha-scaled logarithmic utility of own consumption
valuation = @(q_i, i, t) alpha_ef(i,t) * log(1+q_i);
% price of the aggregate demand
% NOTE(review): this price is quadratic in q while p_x.m uses a linear
% price -- confirm which cost model the figures are meant to use.
price = @(q) beta_ef * q .^ 2 + b*q;
% price evaluated at the other users' demand scaled by N/(N-1)
h_f =@(q_i) price( q_i * N/(N-1) );


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% smith dynamics
T = T_smith;
X = X_smith;
n = T_ + 1;

U_avg_s = zeros(1,length(T));
I_avg_s = zeros(1,length(T));
P_avg_s = zeros(1,length(T));


for t = 1:length(T)

    % state at time t, one row per population, scaled by the mass mp
    x = X(t,:)';
    x_ = vec2mat(x,n);

    power = x_*mp;

    U = zeros(N,T_+1);
    inc = zeros(N,T_);

    total_q = ones(1, P) * power;
    p = price(total_q);

    % calculate the fitness and incentives of the system at t
    for index=1:N
        for l=1:T_
            q_t = power(index,l);
            sum_q = total_q(l);
            sum_q_i = sum_q - q_t;
            val = valuation(q_t,index,l);

            inc(index,l) = sum_q_i * ( - p(l) + h_f( sum_q_i ) );

            U(index,l) = val - q_t*p(l);% + inc(index,l);

        end
    end

    % society-wide averages over the T_ hours
    U_avg_s(t) = sum(sum(U(:,1:T_), 2)/T_);
    I_avg_s(t) = sum(sum(inc, 2)/T_);
    P_avg_s(t) = sum(sum(power(:,1:T_),2)/T_);
end




%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% rd dynamics
T = T_rd;
X = X_rd;
n = T_ + 1;

U_avg_rd = zeros(1,length(T));
I_avg_rd = zeros(1,length(T));
P_avg_rd = zeros(1,length(T));


for t = 1:length(T)

    x = X(t,:)';
    x_ = vec2mat(x,n);

    power = x_*mp;

    U = zeros(N,T_+1);
    inc = zeros(N,T_);

    total_q = ones(1, P) * power;
    p = price(total_q);

    % calculate the fitness and incentives of the system at t
    for index=1:N
        for l=1:T_
            q_t = power(index,l);
            sum_q = total_q(l);
            sum_q_i = sum_q - q_t;
            val = valuation(q_t,index,l);

            inc(index,l) = sum_q_i * ( - p(l) + h_f( sum_q_i ) );

            U(index,l) = val - q_t*p(l);% + inc(index,l);

        end
    end

    U_avg_rd(t) = sum(sum(U(:,1:T_), 2)/T_);
    I_avg_rd(t) = sum(sum(inc, 2)/T_);
    P_avg_rd(t) = sum(sum(power(:,1:T_),2)/T_);
end



%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% bnn dynamics
T = T_bnn;
X = X_bnn;
n = T_ + 1;

U_avg_bnn = zeros(1,length(T));
I_avg_bnn = zeros(1,length(T));
P_avg_bnn = zeros(1,length(T));


for t = 1:length(T)

    x = X(t,:)';
    x_ = vec2mat(x,n);

    power = x_*mp;

    U = zeros(N,T_+1);
    inc = zeros(N,T_);

    total_q = ones(1, P) * power;
    p = price(total_q);

    % calculate the fitness and incentives of the system at t
    for index=1:N
        for l=1:T_
            q_t = power(index,l);
            sum_q = total_q(l);
            sum_q_i = sum_q - q_t;
            val = valuation(q_t,index,l);

            inc(index,l) = sum_q_i * ( - p(l) + h_f( sum_q_i ) );

            U(index,l) = val - q_t*p(l);% + inc(index,l);

        end
    end

    U_avg_bnn(t) = sum(sum(U(:,1:T_), 2)/T_);
    I_avg_bnn(t) = sum(sum(inc, 2)/T_);
    P_avg_bnn(t) = sum(sum(power(:,1:T_),2)/T_);
end


%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% logit dynamics (the original comment wrongly said "smith dynamics")
T = T_logit;
X = X_logit;
n = T_ + 1;

U_avg = zeros(1,length(T));
I_avg = zeros(1,length(T));
P_avg = zeros(1,length(T));


for t = 1:length(T)

    x = X(t,:)';
    x_ = vec2mat(x,n);

    power = x_*mp;

    U = zeros(N,T_+1);
    inc = zeros(N,T_);

    total_q = ones(1, P) * power;
    p = price(total_q);

    % calculate the fitness and incentives of the system at t
    for index=1:N
        for l=1:T_
            q_t = power(index,l);
            sum_q = total_q(l);
            sum_q_i = sum_q - q_t;
            val = valuation(q_t,index,l);

            inc(index,l) = sum_q_i * ( - p(l) + h_f( sum_q_i ) );

            U(index,l) = val - q_t*p(l);% + inc(index,l);

        end
    end

    U_avg(t) = sum(sum(U(:,1:T_), 2)/T_);
    I_avg(t) = sum(sum(inc, 2)/T_);
    P_avg(t) = sum(sum(power(:,1:T_),2)/T_);
end








% graph the evolution of the dynamics
graph_ev_dynamics
--------------------------------------------------------------------------------
/test/electricity grid/h_i.m:
--------------------------------------------------------------------------------
function s = h_i(q)
% H_I Evaluates the price p_x at the demand q scaled by N/(N-1), where N
% is the (global) number of users.

global N

scaled_q = q * N / (N - 1);
s = p_x(scaled_q, 1);
--------------------------------------------------------------------------------
/test/electricity grid/incentives_comb.m:
--------------------------------------------------------------------------------
% calculate utility over time
%
% Script: runs the combination of the four dynamics (rd, bnn, smith,
% logit) on the game G, recomputes the society's average utility
% (U_avg_c), fitness (F_avg_c), incentives (I_avg_c) and demand (P_avg_c),
% and overlays the combined-dynamics curves (magenta dashed) on the
% figures produced by graph_ev_dynamics. Expects the workspace of
% test_electricity_system (G, N, P, T_, mp, alpha_ef, beta_ef, time) and
% int_avg from graph_ev_dynamics.

global hybrid

% per-step buffer read back at the end (figure 21)
% NOTE(review): presumably filled by combined_dynamics during the run, and
% the 0.01 here assumes G.step == .01 -- confirm both.
hybrid = zeros(time/.01+1, 1);

% combine all four dynamics with equal weights
G.dynamics = {'rd', 'bnn', 'smith', 'logit'};
G.gamma = [1 1 1 1];
G.run()

T = G.T;
X = G.X;

n = T_ + 1;

U_avg_c = zeros(1,length(T));
F_avg_c = zeros(1,length(T));
I_avg_c = zeros(1,length(T));
P_avg_c = zeros(1,length(T));



for t = 1:length(T)

    % state at time t, one row per population, scaled by the mass mp
    x = X(t,:)';
    x_ = vec2mat(x,n);

    power = x_*mp;

    F = zeros(N,T_+1);
    U = zeros(N,T_+1);
    inc = zeros(N,T_);

    % calculate the fitness and incentives of the system at t
    for index=1:N
        %summ = sum(x_(i,:));
        %if (summ ~= mp)
        %    t
        %end
        for l=1:T_
            q_t = power(index,l);
            sum_q = sum(power(:,l));
            price = p_x(power,l);

            sum_q_i = sum(power(:,l)) - q_t;

            val = v_x(q_t,index,l);

            % if (T(t) < time_on) || (T(t) > time_off)
            %     F(index,l) = alpha_ef(index,l)/(1+q_t) - beta_ef*(sum_q+q_t);
            %     inc(index,l) = 0*sum_q_i*( - price + h_i(sum_q_i) );
            % else
            F(index,l) = alpha_ef(index,l)/(1+q_t) - 2*beta_ef*(sum_q) ;
            inc(index,l) = sum_q_i*( - price + h_i(sum_q_i) );
            % end


            U(index,l) = val - q_t*price + inc(index,l);

        end
        % null strategy keeps zero fitness
        F(:, T_ + 1) = max(F(:,1:T_),[], 2)*0;

    end

    %sum(power,2)

    % society-wide averages over the T_ hours
    U_avg_c(t) = sum(sum(U(:,1:T_), 2)/T_);
    F_avg_c(t) = sum(sum(F(:,1:T_), 2)/T_);
    I_avg_c(t) = sum(sum(inc,2)/T_);
    P_avg_c(t) = sum(sum(power(:,1:T_),2)/T_);



end




% rebuild the per-dynamic figures, then overlay the combined curves
graph_ev_dynamics



figure(15)
subplot(2,1,1)
hold on
plot(T, U_avg_c, 'm--')
hold off

subplot(2,1,2)
hold on
plot(T,P_avg_c, 'm--');
hold off




figure(16)
hold on
plot(T,I_avg_c, 'm--');
hold off



%%% integration calculation
% rectangle-rule accumulation of the combined-dynamics incentives,
% normalized by int_avg (computed in graph_ev_dynamics)
int_c = zeros(1,length(T));

int_c(1) = I_avg_c(1) * T_ * G.step + G.step;


for i=2:length(T)
    int_c(i) = int_c(i-1) + I_avg_c(i) * T_ * G.step;
end

figure(20)
hold on
plot(T, int_c./int_avg, 'm--');
hold off

set(gca, 'XScale', 'log')






figure(21); plot(G.T, hybrid)
--------------------------------------------------------------------------------
/test/electricity grid/p_x.m:
--------------------------------------------------------------------------------
function c = p_x(x,t)
% P_X Price of electricity at hour t, proportional to the total demand.
%
% INPUT x: Matrix with the power consumption of each user (rows) per hour
%          (columns)
%       t: Hour (column) index
%
% OUTPUT c: Price, the total demand at hour t scaled by the global beta_ef

global beta_ef

% total demand of all users at hour t, scaled by the cost parameter
c = beta_ef * sum(x(:, t));
--------------------------------------------------------------------------------
/test/electricity grid/test_electricity_system.m:
--------------------------------------------------------------------------------
% example of a game with one population, three strategies per population, and combined dynamics.
%
% NOTE(review): the header above looks stale -- the script below defines
% N populations with 25 strategies each and runs four dynamics separately.

% TODO:
% review how are the transitions between dynamics
% review why rd and logit are so similar in aggregated evolution, but
% differents in incentives

% population games tool box
clear

global G beta_ef alpha_ef time_on time_off mp N T_ hybrid b q_min

q_min=0;

% number of users (populations)
N = 5;

% Definition of the electricity variables
% hourly demand profile used to shape the valuation parameters
Dt = 15*[51.8743 50.0011 48.6104 48.6384 51.1276 58.7756 ...
61.0654 65.0167 69.6593 71.6363 75.3904 76.2807 ...
73.4635 73.3627 74.6492 75.1194 74.8689 74.1951 ...
78.2569 85.8935 83.5392 77.9073 68.6800 60.5177];
pt = Dt./max(Dt)*8;

% number of strategies
T_ = length(Dt);

% valuation parameters of all agents
alpha_ef = zeros(N,T_);
for i=1:N
    alpha_ef(i,:) = pt(1:T_)*(1+.5*i/N*0) + 0. *rand(1,T_);
end

% parameters of the aggregate cost function
beta_ef = 1;
b = 0;

% Time of the activation of either incentives or attacks
time_on = 2;
time_off = 4;


% number of populations
P =N;

% number of pure strategies per population
n = 25;

% mass of the populations
mp = 25;
m = ones(P, 1) * mp;

% simulation parameters
time = 60;

% initial condition
pot = ones(N,T_+1)/(T_+1);
x0 = pot;

% structure with the parameters of the game
%G = struct('P', P, 'n', n, 'f', @fitness_user, 'ode', 'ode113', 'time', time, 'tol', 0.00001, 'x0', x0, 'm', m);

% random initial condition
G = struct('P', P, 'n', n, 'f', @fitness_user, 'ode', 'ode45', 'time', time, 'tol', 0.000001, 'm', m);

% verify data of the game
G = definition(G);

G.step = .01;
G.eta = .02;

% run different dynamics, keeping each trajectory for the comparison plots
G.dynamics = {'rd'};
G.run()
T_rd = G.T;
X_rd = G.X;



G.dynamics = {'bnn'};
G.run()
T_bnn = G.T;
X_bnn = G.X;


G.dynamics = {'smith'};
G.run()
T_smith = G.T;
X_smith = G.X;


% % extract matrix of strategies
% %n = max(G.S);
% x_n = vec2mat(X_dyn(end, :), n);
% x = zeros(G.P, n);
%
% for p = 1 : G.P
%     x(p, :) = x_n(p, :) * G.m(p);
% end
%
% U = utility(x);
%
%pause

G.dynamics = {'logit'};
G.run()
T_logit = G.T;
X_logit = G.X;



% extract matrix of strategies (final state of the logit run)
x_n = vec2mat(G.X(end, :), n);
x = zeros(G.P, n);
for p = 1 : G.P
    x(p, :) = x_n(p, :) * G.m(p);
end
U = utility(x);

figure(3); plot(1:1:24, U(:, 1:24))
figure(4); plot(1:1:24, x(:, 1:24))





%if min(X_logit == )

%min(X_logit)

% compute and plot the averaged surplus/demand/incentive trajectories
graph_incentives_evolution
%G.graph_state()

%sum(G.x0(1:24, :))
--------------------------------------------------------------------------------
/test/electricity grid/test_electricity_system_test_boundary.m:
--------------------------------------------------------------------------------
% Script: for increasing population sizes it runs the electricity game
% twice -- once with the payoff of fitness_user and once with
% fitness_user_inefficient -- and compares the resulting utilities
% (U_pareto vs U_nash) and demands (X_pareto vs X_nash) at hour l.
path(path, '../../revision_protocols')
path(path, '../../graphs')
path(path, '../../dynamics')
path(path, '../../')


clear

% hour used for the comparison, and index of a reference user (plots)
l = 10;
ith = 1;


global G

global beta_ef alpha_ef mp N T_

% population sizes to evaluate
size_P = 2:2:200;

U_pareto = zeros(1, length(size_P));
U_nash = zeros(1, length(size_P));

X_pareto = zeros(1, length(size_P));
X_nash = zeros(1, length(size_P));

% Definition of the electricity variables
% hourly demand profile used to shape the valuation parameters
Dt = 15*[51.8743 50.0011 48.6104 48.6384 51.1276 58.7756 ...
61.0654 65.0167 69.6593 71.6363 75.3904 76.2807 ...
73.4635 73.3627 74.6492 75.1194 74.8689 74.1951 ...
78.2569 85.8935 83.5392 77.9073 68.6800 60.5177];
pt = Dt./max(Dt)*8;


T_ = length(Dt);



for k=1 : length(size_P)

    disp (['Running N=', num2str(size_P(k)), '... ']);

    N = size_P(k);

    % valuation parameters of all agents
    alpha_ef = zeros(N,T_);
    for i=1:N
        alpha_ef(i,:) = pt(1:T_)*(1 + 0 * .5*i/N) + 0. *rand(1,T_);
    end

    % parameters of the aggregate cost function
    beta_ef = 1;



    % definition of the game structure

    % number of populations
    P = N;

    % number of pure strategies per population
    n = 25;
    mp = 30;
    m = ones(P, 1) * mp;
    dyn = {'rd'};

    % simulation parameters
    time = 30;

    % uniform initial condition over the T_+1 strategies
    pot_r = ones(N,T_+1)*mp/(T_+1);
    zz = pot_r'/mp;
    x0 = zz(:);

    % structure with the parameters of the game
    G = struct('P', P, 'n', n, 'f', @fitness_user, 'ode', 'ode113', 'time', time, 'step', 0.00001, 'x0', x0, 'm', m);

    % random initial condition
    %G = struct('P', P, 'n', n, 'f', @fitness_user, 'ode', 'ode23s', 'time', time, 'step', 0.00001);

    % verify data of the game
    G = definition(G);

    G.step = .01;


    % run game with the fitness_user payoff
    G.dynamics = {'rd'};
    G.run()
    T_dyn = G.T;
    X_dyn = G.X;



    % extract matrix of strategies
    %n = max(G.S);
    x_n = vec2mat(X_dyn(end, :), n);
    x_i = zeros(G.P, n);

    for p = 1 : G.P
        x_i(p, :) = x_n(p, :) * G.m(p);
    end

    %U_i = utility_incentives(x_i);
    U_i = utility(x_i);

    U_pareto(k) = sum( sum( U_i(:, l) ) );
    X_pareto(k) = sum( sum( x_i(:, l) ) );





    % run the simulations with the inneficient case
    G.f = @fitness_user_inefficient;
    G.run()
    X_dyn = G.X;

    % extract matrix of strategies
    %n = max(G.S);
    x_n = vec2mat(X_dyn(end, :), n);
    x = zeros(G.P, n);

    for p = 1 : G.P
        x(p, :) = x_n(p, :) * G.m(p);
    end

    U = utility(x);

    U_nash(k) = sum( sum( U(:, l) ) );
    X_nash(k) = sum( sum( x(:, l) ) );

    %figure(3); plot(1:1:24, U(ith, 1:24), 1:1:24, U_i(ith, 1:24), 'r')
    %figure(4); plot(1:1:24, x(ith, 1:24), 1:1:24, x_i(ith, 1:24), 'r')

end

% ratio of the utilities of the two payoff models vs population size
figure(1)
clf
plot(size_P, U_nash ./ U_pareto )

% ratio of the demands, with the reference curve (N+1)/(2N)
figure(2)
clf
plot(size_P, X_pareto ./ X_nash )
hold on
plot(size_P, (size_P+1)./(2*size_P), '--k')
hold off
--------------------------------------------------------------------------------
/test/electricity grid/utility.m:
--------------------------------------------------------------------------------
function U = utility(x)
% UTILITY Net utility of each user at each hour: valuation minus payment,
% without incentives.
%
% SYNOPSIS: U = UTILITY(x)
%
% INPUT x: Matrix with the power consumption of each user (rows) per hour
%          (columns); the last column is the null strategy
%
% OUTPUT U: N x (T_+1) matrix with the utility of each user at each hour;
%           the null-strategy column is left at zero
%
% Uses the globals N (number of users) and T_ (number of hours), and the
% helper functions p_x (price) and v_x (valuation). The unused F
% preallocation and the unused sum_q / beta_ef / alpha_ef of the previous
% version were removed.

global N T_

power = x;

U = zeros(N,T_+1);

% calculate the utility
for index=1:N
    for l=1:T_
        q_t = power(index,l);
        price = p_x(power,l);       % price from the aggregate demand at hour l
        val = v_x(q_t,index,l);     % user's valuation of its own consumption

        U(index,l) = val - q_t*price;

    end
end
--------------------------------------------------------------------------------
/test/electricity grid/utility_incentives.m:
--------------------------------------------------------------------------------
function U = utility_incentives(x)
% UTILITY_INCENTIVES Net utility of each user at each hour, including the
% incentive payment.
%
% SYNOPSIS: U = UTILITY_INCENTIVES(x)
%
% INPUT x: Matrix with the power consumption of each user (rows) per hour
%          (columns); the last column is the null strategy
%
% OUTPUT U: N x (T_+1) matrix with the utility of each user at each hour;
%           the null-strategy column is left at zero
%
% Uses the globals N (number of users) and T_ (number of hours), and the
% helper functions p_x (price), v_x (valuation) and h_i. The unused F
% preallocation and the unused beta_ef / alpha_ef of the previous version
% were removed.

global N T_

power = x;

U = zeros(N,T_+1);
inc = zeros(N,T_);

% calculate the utility
for index=1:N
    for l=1:T_
        q_t = power(index,l);
        price = p_x(power,l);                % price from the aggregate demand
        sum_q_i = sum(power(:,l)) - q_t;     % demand of the other users

        val = v_x(q_t,index,l);
        % incentive term based on the other users' demand and the prices
        inc(index,l) = sum_q_i*( - price + h_i(sum_q_i) );

        U(index,l) = val - q_t*price + inc(index,l);

    end
end
--------------------------------------------------------------------------------
/test/electricity grid/v_x.m:
--------------------------------------------------------------------------------
function v = v_x(q,i,t)
% V_X  Valuation of user i for consuming power q in period t.
%
%   v = V_X(q,i,t) returns alpha_ef(i,t) * log(1 + q): a concave,
%   increasing valuation weighted by the user- and period-specific
%   coefficient alpha_ef(i,t) (read from a global).  alpha_mean is
%   declared but not used here.

global alpha_ef alpha_mean

weight = alpha_ef(i, t);
v = weight * log(1 + q);
--------------------------------------------------------------------------------
/test/fitness1.m:
--------------------------------------------------------------------------------
function f_i = fitness1(x, p)
% FITNESS1  Payoff vector of a (shifted) rock-paper-scissors game.
%
%   f_i = FITNESS1(x, p) returns the column vector of payoffs of the three
%   pure strategies against the population state x (a row vector).  The
%   population index p is unused: the game has a single population.

% rock-paper-scissors payoff matrix; the constant shift makes every
% payoff positive
rps = [ 0 -1  1; ...
        1  0 -1; ...
       -1  1  0];

f_i = (rps + 2) * x';
--------------------------------------------------------------------------------
/test/fitness2.m:
--------------------------------------------------------------------------------
function f_i = fitness2(x, p)
% FITNESS2  Payoff vector for population p in a two-population, two-strategy game.
%
%   f_i = FITNESS2(x, p) returns the column vector of payoffs of the pure
%   strategies of population p against the state x(q, :) of the other
%   population q.  Row p of x is not read.

% one 2x2 payoff matrix per population
payoffs = zeros(2, 2, 2);

payoffs(:, :, 1) = [2 1; ...
                    1 2];

payoffs(:, :, 2) = [1 2; ...
                    2 1];

% index of the opponent population (1 <-> 2); equivalent to the original
% p + (-1)^(p+1)
opponent = 3 - p;

f_i = squeeze(payoffs(:, :, p)) * x(opponent, :)';
--------------------------------------------------------------------------------
/test/fitness3.m:
--------------------------------------------------------------------------------
function f_i = fitness3(x, p, s)
% FITNESS3  Payoff of a single pure strategy in the two-population game.
%
%   f_i = FITNESS3(x, p, s) returns the scalar payoff of pure strategy s
%   of population p against the state x(q, :) of the other population q.
%   Same payoff matrices as fitness2, restricted to one strategy row.

% one 2x2 payoff matrix per population
payoffs = zeros(2, 2, 2);

payoffs(:, :, 1) = [2 1; ...
                    1 2];

payoffs(:, :, 2) = [1 2; ...
                    2 1];

% index of the opponent population (1 <-> 2); equivalent to the original
% p + (-1)^(p+1)
opponent = 3 - p;

Ap = squeeze(payoffs(:, :, p));
f_i = Ap(s, :) * x(opponent, :)';
--------------------------------------------------------------------------------
/test/test1.m:
--------------------------------------------------------------------------------
% example of a game with one population and three strategies per population.
%
% Runs the same rock-paper-scissors game under each of the five
% evolutionary dynamics and plots the resulting trajectories.

clear

% number of populations
P = 1;

% number of pure strategies per population
n = 3;

% mass of the population
m = 1;

% initial condition
x0 = [0.2 0.7 0.1];

% simulation parameters
time = 30;

% structure with the parameters of the game
G1 = struct('n', n, 'f', @fitness1, 'x0', x0, 'ode', 'ode113', 'time', time, 'tol', 0.000001, 'step', .01);

% verify data of the game
G1 = definition(G1);

% noise level used by the logit dynamics (the other dynamics ignore it)
G1.eta = .02;

% run the game under each of the dynamics in turn
dynamics_list = {'rd', 'maynard_rd', 'bnn', 'smith', 'logit'};

for d = 1:length(dynamics_list)
    G1.dynamics = dynamics_list(d);   % one-element cell, e.g. {'rd'}
    G1.run()
    G1.graph()
    G1.graph_evolution()

    % pause between dynamics, but not after the last one
    if d < length(dynamics_list)
        disp (['Press any key to continue...', sprintf('\n') ]);
        pause
    end
end
--------------------------------------------------------------------------------
/test/test1.m.orig:
--------------------------------------------------------------------------------
% example of a game with one population and three strategies per population.
%
% NOTE(review): this .orig file contained unresolved git merge-conflict
% markers (<<<<<<< / ======= / >>>>>>>) and was not valid MATLAB.  It has
% been resolved by taking the fe7991ee side of every conflict, which is
% consistent with the resolved test/test1.m.

clear

% number of populations
P = 1;

% number of pure strategies per population
n = 3;

% mass of the population
m = 1;

% initial condition
x0 = [0.2 .7 0.1 ]';

% simulation parameters
time = 30;

% structure with the parameters of the game
G1 = struct('n', n, 'f', @fitness1, 'x0', x0, 'ode', 'ode113', 'time', time, 'tol', 0.000001, 'step', .01);

% verify data of the game
G1 = definition(G1);


G1.dynamics = {'rd'};
G1.run()
G1.graph()
G1.graph_evolution()

disp (['Press any key to continue...', sprintf('\n') ]);
pause


G1.dynamics = {'maynard_rd'};
G1.run()
G1.graph()
G1.graph_evolution()

disp (['Press any key to continue...', sprintf('\n') ]);
pause


G1.dynamics = {'bnn'};
G1.run()
G1.graph()
G1.graph_evolution()

disp (['Press any key to continue...', sprintf('\n') ]);
pause


G1.dynamics = {'smith'};
G1.run()
G1.graph()
G1.graph_evolution()

disp (['Press any key to continue...', sprintf('\n') ]);
pause


G1.dynamics = {'logit'};
G1.eta = .02;
G1.run()
G1.graph()
G1.graph_evolution()
--------------------------------------------------------------------------------
/test/test2.m:
--------------------------------------------------------------------------------
% example of a game with two populations and two strategies per population.
%
% Runs the same two-population game under each of the five evolutionary
% dynamics and plots the resulting trajectories.

clear

% number of populations
P = 2;

% number of pure strategies per population
n = 2;

% mass of the population
m = 1;

% initial condition
x0 = [0.1 0.9; 0.2 0.8];

% simulation parameters
time = 30;

% structure with the parameters of the game
G1 = struct('P', P, 'n', n, 'f', @fitness2, 'x0', x0, 'ode', 'ode45', 'time', time);

% The following game uses a random initial condition
% G1 = struct('P', P, 'n', n, 'f', @fitness2, 'ode', 'ode45','time', time);

% verify data of the game
G1 = definition(G1);

% noise level used by the logit dynamics (the other dynamics ignore it)
G1.eta = .02;

% run the game under each of the dynamics in turn
dynamics_list = {'rd', 'maynard_rd', 'bnn', 'smith', 'logit'};

for d = 1:length(dynamics_list)
    G1.dynamics = dynamics_list(d);   % one-element cell, e.g. {'rd'}
    G1.run()
    G1.graph2p()
    G1.graph_evolution()

    % pause between dynamics, but not after the last one
    if d < length(dynamics_list)
        disp (['Press any key to continue...', sprintf('\n') ]);
        pause
    end
end
--------------------------------------------------------------------------------
/test/test3.m:
--------------------------------------------------------------------------------
% example of a game with one population, three strategies per population, and combined dynamics.
%
% Two games are defined over the same fitness function and initial
% condition, each driven by a convex combination of two dynamics weighted
% by gamma.  Both are run first, then plotted.

clear

% number of populations
P = 1;

% number of pure strategies per population
n = 3;

% Mass of the population
m = 1;

% initial condition
x0 = [.1 .75 .15];

% Dynamics that will be combined, with their mixing weights gamma
% (the alternative gammas below select a single pure dynamic)
dyn = {'bnn', 'logit'};
gamma = [.75, .25];
% gamma = [0, 1];
% gamma = [1, 0];

% simulation parameters
time = 50;

% structure with the parameters of the game.  Note the extra braces in
% 'dynamics', {dyn}: struct() would otherwise expand the cell array dyn
% into a struct array.
G1 = struct('P', P, 'n', n, 'f', @fitness1, 'x0', x0, 'dynamics', {dyn}, 'gamma', gamma, 'ode', 'ode45', 'time', time);

% verify data of the game
G1 = definition(G1);

% eta: noise level of the logit component
G1.eta = .02;
G1.run()


% second game: a different pair of combined dynamics, same weights
dyn = {'smith', 'rd'};
gamma = [.75, .25];

G2 = struct('P', P, 'n', n, 'f', @fitness1, 'x0', x0, 'dynamics', {dyn}, 'gamma', gamma, 'ode', 'ode45', 'time', time);

% verify data of the game
G2 = definition(G2);

% eta is set for parity with G1; presumably unused by smith/rd — confirm
G2.eta = .02;
G2.run()


% plot the evolution of the games
G1.graph()
G1.graph_evolution()

disp (['Press any key to continue...', sprintf('\n') ]);
pause


G2.graph()
G2.graph_evolution()
--------------------------------------------------------------------------------
/test/test_finite_population1.m:
--------------------------------------------------------------------------------
% test of a game with finite population
%
% Runs the same finite-population game under each of the four revision
% protocols and plots state evolution and fitness after each run.

clear

% number of populations
P = 1;

% number of agents
N = 200;

% number of pure strategies per population
n = 3;

% mass of the population
m = 1;

% initial condition
x0 = [0.2 0.7 0.1];

% simulation parameters
iterations = 10000;


% structure with the parameters of the game.  'eta' is the noise level of
% the logit choice protocol; the other protocols ignore it.
G = struct('N', N, 'n', n, 'f', @fitness1, 'x0', x0, 'ode', 'ode113', 'time', iterations, 'eta', 0.02, 'revision_protocol', @proportional_imitation);

% R: presumably the number of revision opportunities per iteration —
% confirm against run_game_finite_population.m
G.R = 1;

% verify data of the game
G = definition(G);

% run the game once per revision protocol
protocols = {@proportional_imitation, @comparison2average, ...
             @pairwise_comparison, @logit_choice};

for k = 1:length(protocols)
    G.revision_protocol = protocols{k};
    G.run_finite();
    G.graph()
    G.graph_evolution()
    G.graph_fitness()

    % pause between protocols, but not after the last one
    if k < length(protocols)
        disp (['Press any key to continue...', sprintf('\n') ]);
        pause
    end
end
--------------------------------------------------------------------------------
/test/test_maximization.m:
--------------------------------------------------------------------------------
% example of a maximization problem.
%
% A single population with masses m maximizes a concave quadratic
% objective; the Smith dynamics trajectory is overlaid on a contour plot
% of the objective.

clear

% number of populations
P = 1;

% number of pure strategies per population
n = 3;

% mass of the population
m = [10; 10];

% initial condition
x0 = [0.1 0.8 0.1 ];

% simulation parameters
time = 1;

% fitness function (not dependent on the population p)
fitness_f = @(x, p) - [2*(x(1) - 5); 2*(x(2) - 5); 0];

% structure with the parameters of the game
G = struct('P', P, 'n', n, 'm', m, 'f', fitness_f, 'x0', x0, 'ode', 'ode113', 'time', time, 'stop_c', true);

% verify data of the game
G = definition(G);

G.dynamics = {'smith'};
G.run()
G.graph()
G.graph_evolution()


% objective function
f = @(x) -((x(1) - 5)^2 + (x(2) - 5)^2);

% evaluate the objective on a regular grid (vectorized form of the
% original double loop; yields the same matrix M)
z = 0:0.1:10;
[Zc, Zr] = meshgrid(z, z);          % Zr(i,j) = z(i), Zc(i,j) = z(j)
M = -((Zr - 5).^2 + (Zc - 5).^2);   % M(i,j) = f([z(i); z(j)])

% contour plot of the objective with the optimum (5,5) and the
% mass-scaled strategy trajectory
figure(3)
clf
contour(z, z, M)
hold on
plot(5, 5, 'ok')
plot(G.X(:,1)*m(1), G.X(:,2)*m(2), 'k')
--------------------------------------------------------------------------------
/test/test_maximization_b.m:
--------------------------------------------------------------------------------
% example of a maximization problem.
%
% Two populations, two strategies each; the Smith dynamics trajectory is
% overlaid on a contour plot of the objective.

clear

% number of populations
P = 2;

% number of pure strategies per population
n = 2;

% mass of the population
m = [10; 10];

% initial condition
x0 = [0.1 0.9; ...
      0.8 0.2];

% simulation parameters
time = 1;

% fitness function (same form for every population index p)
fitness_f = @(x,p) - [2*(x(p,1) - 5); 0] ;

% structure with the parameters of the game
G = struct('P', P, 'n', n, 'm', m, 'f', fitness_f, 'x0', x0, 'ode', 'ode113', 'time', time, 'stop_c', true);

% verify data of the game
G = definition(G);

G.dynamics = {'smith'};
G.run()
G.graph2p()
G.graph_evolution()


% objective function
f = @(x) -((x(1) - 5)^2 + (x(2) - 5)^2);

% evaluate the objective on a regular grid (vectorized form of the
% original double loop; yields the same matrix M)
z = 0:0.1:10;
[Zc, Zr] = meshgrid(z, z);          % Zr(i,j) = z(i), Zc(i,j) = z(j)
M = -((Zr - 5).^2 + (Zc - 5).^2);   % M(i,j) = f([z(i); z(j)])

% contour plot of the objective with the optimum (5,5) and the
% mass-scaled first strategy of each population
figure(3)
clf
contour(z, z, M)
hold on
plot(5, 5, 'ok')
plot(G.X(:,1)*m(1), G.X(:,3)*m(2), 'k')
--------------------------------------------------------------------------------