├── .gitignore
├── CHANGELOG
├── CONTRIBUTING.md
├── DCO.txt
├── LICENSE
├── MANIFEST.in
├── README.md
├── macronometrics
│   ├── DulmageMendelsohnDecomposition.py
│   ├── __init__.py
│   ├── analyze.py
│   ├── bipartiteMatching.py
│   ├── equation.py
│   ├── estimation.py
│   ├── getDst.py
│   ├── graph.py
│   ├── model.py
│   ├── numsolve.py
│   ├── stronglyConnectedComponents_kosaraju.py
│   ├── symbolic.py
│   ├── tools.py
│   ├── tools_ts.py
│   └── trollparser.py
├── requirements.txt
└── setup.py

/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | **/__pycache__/
4 | *.py[cod]
5 | *$py.class
6 | .vscode/
7 | 
8 | # C extensions
9 | *.so
10 | 
11 | # Distribution / packaging
12 | .Python
13 | **/build/
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | share/python-wheels/
27 | *.egg-info/
28 | .installed.cfg
29 | *.egg
30 | MANIFEST
31 | 
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 | 
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 | 
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | .hypothesis/
53 | .pytest_cache/
54 | 
55 | # Translations
56 | *.mo
57 | *.pot
58 | 
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 | 
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 | 
68 | # Scrapy stuff:
69 | .scrapy
70 | 
71 | # Sphinx documentation
72 | docs/_build/
73 | 
74 | # PyBuilder
75 | target/
76 | 
77 | # Jupyter Notebook
78 | *.ipynb_checkpoints
79 | .ipynb_checkpoints
80 | 
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 | 
85 | # pyenv
86 | .python-version
87 | 
88 | # celery beat schedule file
89 | celerybeat-schedule
90 | 
91 | # SageMath parsed files
92 | *.sage.py
93 | 
94 | # Environments
95 | .env
96 | .venv
97 | env/
98 | venv/
99 | ENV/
100 | env.bak/
101 | venv.bak/
102 | 
103 | # Spyder project settings
104 | .spyderproject
105 | .spyproject
106 | 
107 | # Rope project settings
108 | .ropeproject
109 | 
110 | # mkdocs documentation
111 | /site
112 | 
113 | # mypy
114 | .mypy_cache/
115 | .dmypy.json
116 | dmypy.json
117 | 
118 | # Pyre type checker
119 | .pyre/
120 | 
121 | # modeles
122 | modeles/
123 | modeles_python/
124 | 
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | # Changelog
2 | All notable changes to this project will be documented in this file.
3 | 
4 | ## [0.0.1] - 2020-03-03
5 | ### Added
6 | - Initial version
7 | - Solver: scipy.optimize.root
8 | - Other solvers in numsolve.py
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## Contributions
2 | 
3 | All contributions, in whatever form, are welcome.
4 | 
5 | ### How?
6 | 
7 | * To report a bug or ask for an improvement, create an [*issue*](https://github.com/InseeFrLab/Macronometrics/issues)
8 | * To contribute to the code or the documentation directly, you can use a [*Pull Request*](https://github.com/InseeFrLab/Macronometrics/pulls).
In this case, it is best to link it to an *issue*. 9 | * The commit messages, as well as the code itself, will preferably be written in English. 10 | * The documentation will be translated into both French and English. 11 | 12 | ### Certificate of Origin 13 | 14 | By contributing to this project you agree to the Developer Certificate of Origin (DCO). This document was created by the Linux Kernel community and is a simple statement that you, as a contributor, have the legal right to make the contribution. [See the DCO file for details.](DCO.txt) 15 | 16 | Contributors sign-off that they adhere to these requirements by adding a Signed-off-by line to commit messages. For example: 17 | 18 | ``` 19 | This is my commit message 20 | 21 | Signed-off-by: Random J Developer 22 | ``` 23 | 24 | Git even has a -s command line option to append this automatically to your commit message: 25 | ``` 26 | $ git commit -s -m 'This is my commit message' 27 | ``` 28 | 29 | If you have already made a commit and forgot to include the sign-off, you can amend your last commit to add the sign-off with the following command, which can then be force pushed. 30 | ``` 31 | git commit --amend -s 32 | ``` 33 | 34 | 35 | ### Licence 36 | 37 | By contributing, you agree that your contributions will be licensed under the [CeCILL license](LICENSE). 38 | -------------------------------------------------------------------------------- /DCO.txt: -------------------------------------------------------------------------------- 1 | Developer Certificate of Origin 2 | Version 1.1 3 | 4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 5 | 1 Letterman Drive 6 | Suite D4700 7 | San Francisco, CA, 94129 8 | 9 | Everyone is permitted to copy and distribute verbatim copies of this 10 | license document, but changing it is not allowed. 11 | 12 | 13 | Developer's Certificate of Origin 1.1 14 | 15 | By making a contribution to this project, I certify that: 16 | 17 | (a) The contribution was created in whole or in part by me and I 18 | have the right to submit it under the open source license 19 | indicated in the file; or 20 | 21 | (b) The contribution is based upon previous work that, to the best 22 | of my knowledge, is covered under an appropriate open source 23 | license and I have the right under that license to submit that 24 | work with modifications, whether created in whole or in part 25 | by me, under the same open source license (unless I am 26 | permitted to submit under a different license), as indicated 27 | in the file; or 28 | 29 | (c) The contribution was provided directly to me by some other 30 | person who certified (a), (b) or (c) and I have not modified 31 | it. 32 | 33 | (d) I understand and agree that this project and the contribution 34 | are public and that a record of the contribution (including all 35 | personal information I submit with it, including my sign-off) is 36 | maintained indefinitely and may be redistributed consistent with 37 | this project or the open source license(s) involved. 
38 | 39 | --------------------------------------------------------------------- 40 | 41 | Developer Certificate of Origin 42 | Version 1.1 43 | 44 | Certificat d'origine des contributions du développeur : 45 | 46 | En faisant une contribution à ce projet, j’atteste que : 47 | 48 | (a) Ma contribution a été créée en tout ou partie par moi, et que j’ai 49 | le droit de la soumettre sous la licence applicable au projet; ou 50 | que 51 | 52 | (b) Ma contribution s’appuie sur des travaux antérieurs qui, à ma 53 | connaissance, sont couverts par une licence open source et que 54 | j’ai le droit sous cette licence de soumettre cette contribution 55 | avec mes modifications, créées en tout ou partie par moi, sous la 56 | licence applicable au projet; ou que 57 | 58 | (c) La contribution m’a été offerte par un tiers qui a certifié que 59 | les points (a), (b) ou (c) ont été respectés et que je n’ai pas 60 | modifié cette contribution. 61 | 62 | (d) Je comprends et accepte que ce projet et ma contribution sont 63 | publics, et qu’un enregistrement de cette contribution (comprenant 64 | également l’ensemble des informations à caractère personnel me 65 | concernant et notamment ma signature) soit attaché indéfiniment à 66 | ce projet et qu’il peut librement être rediffusé à des tiers 67 | conformément à la licence applicable au projet ou aux autres 68 | licences impliquées. 69 | 70 | Certificat d'origine des contributions du développeur est une 71 | traduction et a été conçu pour être compatible avec sa version 72 | anglaise (même numéro de version) qui peut être utilisée 73 | indifféremment. 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | CeCILL FREE SOFTWARE LICENSE AGREEMENT 3 | 4 | Version 2.1 dated 2013-06-21 5 | 6 | 7 | Notice 8 | 9 | This Agreement is a Free Software license agreement that is the result 10 | of discussions between its authors in order to ensure compliance with 11 | the two main principles guiding its drafting: 12 | 13 | * firstly, compliance with the principles governing the distribution 14 | of Free Software: access to source code, broad rights granted to users, 15 | * secondly, the election of a governing law, French law, with which it 16 | is conformant, both as regards the law of torts and intellectual 17 | property law, and the protection that it offers to both authors and 18 | holders of the economic rights over software. 19 | 20 | The authors of the CeCILL (for Ce[a] C[nrs] I[nria] L[ogiciel] L[ibre]) 21 | license are: 22 | 23 | Commissariat à l'énergie atomique et aux énergies alternatives - CEA, a 24 | public scientific, technical and industrial research establishment, 25 | having its principal place of business at 25 rue Leblanc, immeuble Le 26 | Ponant D, 75015 Paris, France. 27 | 28 | Centre National de la Recherche Scientifique - CNRS, a public scientific 29 | and technological establishment, having its principal place of business 30 | at 3 rue Michel-Ange, 75794 Paris cedex 16, France. 31 | 32 | Institut National de Recherche en Informatique et en Automatique - 33 | Inria, a public scientific and technological establishment, having its 34 | principal place of business at Domaine de Voluceau, Rocquencourt, BP 35 | 105, 78153 Le Chesnay cedex, France. 
36 | 37 | 38 | Preamble 39 | 40 | The purpose of this Free Software license agreement is to grant users 41 | the right to modify and redistribute the software governed by this 42 | license within the framework of an open source distribution model. 43 | 44 | The exercising of this right is conditional upon certain obligations for 45 | users so as to preserve this status for all subsequent redistributions. 46 | 47 | In consideration of access to the source code and the rights to copy, 48 | modify and redistribute granted by the license, users are provided only 49 | with a limited warranty and the software's author, the holder of the 50 | economic rights, and the successive licensors only have limited liability. 51 | 52 | In this respect, the risks associated with loading, using, modifying 53 | and/or developing or reproducing the software by the user are brought to 54 | the user's attention, given its Free Software status, which may make it 55 | complicated to use, with the result that its use is reserved for 56 | developers and experienced professionals having in-depth computer 57 | knowledge. Users are therefore encouraged to load and test the 58 | suitability of the software as regards their requirements in conditions 59 | enabling the security of their systems and/or data to be ensured and, 60 | more generally, to use and operate it in the same conditions of 61 | security. This Agreement may be freely reproduced and published, 62 | provided it is not altered, and that no provisions are either added or 63 | removed herefrom. 64 | 65 | This Agreement may apply to any or all software for which the holder of 66 | the economic rights decides to submit the use thereof to its provisions. 67 | 68 | Frequently asked questions can be found on the official website of the 69 | CeCILL licenses family (http://www.cecill.info/index.en.html) for any 70 | necessary clarification. 71 | 72 | 73 | Article 1 - DEFINITIONS 74 | 75 | For the purpose of this Agreement, when the following expressions 76 | commence with a capital letter, they shall have the following meaning: 77 | 78 | Agreement: means this license agreement, and its possible subsequent 79 | versions and annexes. 80 | 81 | Software: means the software in its Object Code and/or Source Code form 82 | and, where applicable, its documentation, "as is" when the Licensee 83 | accepts the Agreement. 84 | 85 | Initial Software: means the Software in its Source Code and possibly its 86 | Object Code form and, where applicable, its documentation, "as is" when 87 | it is first distributed under the terms and conditions of the Agreement. 88 | 89 | Modified Software: means the Software modified by at least one 90 | Contribution. 91 | 92 | Source Code: means all the Software's instructions and program lines to 93 | which access is required so as to modify the Software. 94 | 95 | Object Code: means the binary files originating from the compilation of 96 | the Source Code. 97 | 98 | Holder: means the holder(s) of the economic rights over the Initial 99 | Software. 100 | 101 | Licensee: means the Software user(s) having accepted the Agreement. 102 | 103 | Contributor: means a Licensee having made at least one Contribution. 104 | 105 | Licensor: means the Holder, or any other individual or legal entity, who 106 | distributes the Software under the Agreement. 
107 | 108 | Contribution: means any or all modifications, corrections, translations, 109 | adaptations and/or new functions integrated into the Software by any or 110 | all Contributors, as well as any or all Internal Modules. 111 | 112 | Module: means a set of sources files including their documentation that 113 | enables supplementary functions or services in addition to those offered 114 | by the Software. 115 | 116 | External Module: means any or all Modules, not derived from the 117 | Software, so that this Module and the Software run in separate address 118 | spaces, with one calling the other when they are run. 119 | 120 | Internal Module: means any or all Module, connected to the Software so 121 | that they both execute in the same address space. 122 | 123 | GNU GPL: means the GNU General Public License version 2 or any 124 | subsequent version, as published by the Free Software Foundation Inc. 125 | 126 | GNU Affero GPL: means the GNU Affero General Public License version 3 or 127 | any subsequent version, as published by the Free Software Foundation Inc. 128 | 129 | EUPL: means the European Union Public License version 1.1 or any 130 | subsequent version, as published by the European Commission. 131 | 132 | Parties: mean both the Licensee and the Licensor. 133 | 134 | These expressions may be used both in singular and plural form. 135 | 136 | 137 | Article 2 - PURPOSE 138 | 139 | The purpose of the Agreement is the grant by the Licensor to the 140 | Licensee of a non-exclusive, transferable and worldwide license for the 141 | Software as set forth in Article 5 <#scope> hereinafter for the whole 142 | term of the protection granted by the rights over said Software. 143 | 144 | 145 | Article 3 - ACCEPTANCE 146 | 147 | 3.1 The Licensee shall be deemed as having accepted the terms and 148 | conditions of this Agreement upon the occurrence of the first of the 149 | following events: 150 | 151 | * (i) loading the Software by any or all means, notably, by 152 | downloading from a remote server, or by loading from a physical medium; 153 | * (ii) the first time the Licensee exercises any of the rights granted 154 | hereunder. 155 | 156 | 3.2 One copy of the Agreement, containing a notice relating to the 157 | characteristics of the Software, to the limited warranty, and to the 158 | fact that its use is restricted to experienced users has been provided 159 | to the Licensee prior to its acceptance as set forth in Article 3.1 160 | <#accepting> hereinabove, and the Licensee hereby acknowledges that it 161 | has read and understood it. 162 | 163 | 164 | Article 4 - EFFECTIVE DATE AND TERM 165 | 166 | 167 | 4.1 EFFECTIVE DATE 168 | 169 | The Agreement shall become effective on the date when it is accepted by 170 | the Licensee as set forth in Article 3.1 <#accepting>. 171 | 172 | 173 | 4.2 TERM 174 | 175 | The Agreement shall remain in force for the entire legal term of 176 | protection of the economic rights over the Software. 177 | 178 | 179 | Article 5 - SCOPE OF RIGHTS GRANTED 180 | 181 | The Licensor hereby grants to the Licensee, who accepts, the following 182 | rights over the Software for any or all use, and for the term of the 183 | Agreement, on the basis of the terms and conditions set forth hereinafter. 
184 | 185 | Besides, if the Licensor owns or comes to own one or more patents 186 | protecting all or part of the functions of the Software or of its 187 | components, the Licensor undertakes not to enforce the rights granted by 188 | these patents against successive Licensees using, exploiting or 189 | modifying the Software. If these patents are transferred, the Licensor 190 | undertakes to have the transferees subscribe to the obligations set 191 | forth in this paragraph. 192 | 193 | 194 | 5.1 RIGHT OF USE 195 | 196 | The Licensee is authorized to use the Software, without any limitation 197 | as to its fields of application, with it being hereinafter specified 198 | that this comprises: 199 | 200 | 1. permanent or temporary reproduction of all or part of the Software 201 | by any or all means and in any or all form. 202 | 203 | 2. loading, displaying, running, or storing the Software on any or all 204 | medium. 205 | 206 | 3. entitlement to observe, study or test its operation so as to 207 | determine the ideas and principles behind any or all constituent 208 | elements of said Software. This shall apply when the Licensee 209 | carries out any or all loading, displaying, running, transmission or 210 | storage operation as regards the Software, that it is entitled to 211 | carry out hereunder. 212 | 213 | 214 | 5.2 ENTITLEMENT TO MAKE CONTRIBUTIONS 215 | 216 | The right to make Contributions includes the right to translate, adapt, 217 | arrange, or make any or all modifications to the Software, and the right 218 | to reproduce the resulting software. 219 | 220 | The Licensee is authorized to make any or all Contributions to the 221 | Software provided that it includes an explicit notice that it is the 222 | author of said Contribution and indicates the date of the creation thereof. 223 | 224 | 225 | 5.3 RIGHT OF DISTRIBUTION 226 | 227 | In particular, the right of distribution includes the right to publish, 228 | transmit and communicate the Software to the general public on any or 229 | all medium, and by any or all means, and the right to market, either in 230 | consideration of a fee, or free of charge, one or more copies of the 231 | Software by any means. 232 | 233 | The Licensee is further authorized to distribute copies of the modified 234 | or unmodified Software to third parties according to the terms and 235 | conditions set forth hereinafter. 236 | 237 | 238 | 5.3.1 DISTRIBUTION OF SOFTWARE WITHOUT MODIFICATION 239 | 240 | The Licensee is authorized to distribute true copies of the Software in 241 | Source Code or Object Code form, provided that said distribution 242 | complies with all the provisions of the Agreement and is accompanied by: 243 | 244 | 1. a copy of the Agreement, 245 | 246 | 2. a notice relating to the limitation of both the Licensor's warranty 247 | and liability as set forth in Articles 8 and 9, 248 | 249 | and that, in the event that only the Object Code of the Software is 250 | redistributed, the Licensee allows effective access to the full Source 251 | Code of the Software for a period of at least three years from the 252 | distribution of the Software, it being understood that the additional 253 | acquisition cost of the Source Code shall not exceed the cost of the 254 | data transfer. 
255 | 256 | 257 | 5.3.2 DISTRIBUTION OF MODIFIED SOFTWARE 258 | 259 | When the Licensee makes a Contribution to the Software, the terms and 260 | conditions for the distribution of the resulting Modified Software 261 | become subject to all the provisions of this Agreement. 262 | 263 | The Licensee is authorized to distribute the Modified Software, in 264 | source code or object code form, provided that said distribution 265 | complies with all the provisions of the Agreement and is accompanied by: 266 | 267 | 1. a copy of the Agreement, 268 | 269 | 2. a notice relating to the limitation of both the Licensor's warranty 270 | and liability as set forth in Articles 8 and 9, 271 | 272 | and, in the event that only the object code of the Modified Software is 273 | redistributed, 274 | 275 | 3. a note stating the conditions of effective access to the full source 276 | code of the Modified Software for a period of at least three years 277 | from the distribution of the Modified Software, it being understood 278 | that the additional acquisition cost of the source code shall not 279 | exceed the cost of the data transfer. 280 | 281 | 282 | 5.3.3 DISTRIBUTION OF EXTERNAL MODULES 283 | 284 | When the Licensee has developed an External Module, the terms and 285 | conditions of this Agreement do not apply to said External Module, that 286 | may be distributed under a separate license agreement. 287 | 288 | 289 | 5.3.4 COMPATIBILITY WITH OTHER LICENSES 290 | 291 | The Licensee can include a code that is subject to the provisions of one 292 | of the versions of the GNU GPL, GNU Affero GPL and/or EUPL in the 293 | Modified or unmodified Software, and distribute that entire code under 294 | the terms of the same version of the GNU GPL, GNU Affero GPL and/or EUPL. 295 | 296 | The Licensee can include the Modified or unmodified Software in a code 297 | that is subject to the provisions of one of the versions of the GNU GPL, 298 | GNU Affero GPL and/or EUPL and distribute that entire code under the 299 | terms of the same version of the GNU GPL, GNU Affero GPL and/or EUPL. 300 | 301 | 302 | Article 6 - INTELLECTUAL PROPERTY 303 | 304 | 305 | 6.1 OVER THE INITIAL SOFTWARE 306 | 307 | The Holder owns the economic rights over the Initial Software. Any or 308 | all use of the Initial Software is subject to compliance with the terms 309 | and conditions under which the Holder has elected to distribute its work 310 | and no one shall be entitled to modify the terms and conditions for the 311 | distribution of said Initial Software. 312 | 313 | The Holder undertakes that the Initial Software will remain ruled at 314 | least by this Agreement, for the duration set forth in Article 4.2 <#term>. 315 | 316 | 317 | 6.2 OVER THE CONTRIBUTIONS 318 | 319 | The Licensee who develops a Contribution is the owner of the 320 | intellectual property rights over this Contribution as defined by 321 | applicable law. 322 | 323 | 324 | 6.3 OVER THE EXTERNAL MODULES 325 | 326 | The Licensee who develops an External Module is the owner of the 327 | intellectual property rights over this External Module as defined by 328 | applicable law and is free to choose the type of agreement that shall 329 | govern its distribution. 330 | 331 | 332 | 6.4 JOINT PROVISIONS 333 | 334 | The Licensee expressly undertakes: 335 | 336 | 1. not to remove, or modify, in any manner, the intellectual property 337 | notices attached to the Software; 338 | 339 | 2. 
to reproduce said notices, in an identical manner, in the copies of 340 | the Software modified or not. 341 | 342 | The Licensee undertakes not to directly or indirectly infringe the 343 | intellectual property rights on the Software of the Holder and/or 344 | Contributors, and to take, where applicable, vis-�-vis its staff, any 345 | and all measures required to ensure respect of said intellectual 346 | property rights of the Holder and/or Contributors. 347 | 348 | 349 | Article 7 - RELATED SERVICES 350 | 351 | 7.1 Under no circumstances shall the Agreement oblige the Licensor to 352 | provide technical assistance or maintenance services for the Software. 353 | 354 | However, the Licensor is entitled to offer this type of services. The 355 | terms and conditions of such technical assistance, and/or such 356 | maintenance, shall be set forth in a separate instrument. Only the 357 | Licensor offering said maintenance and/or technical assistance services 358 | shall incur liability therefor. 359 | 360 | 7.2 Similarly, any Licensor is entitled to offer to its licensees, under 361 | its sole responsibility, a warranty, that shall only be binding upon 362 | itself, for the redistribution of the Software and/or the Modified 363 | Software, under terms and conditions that it is free to decide. Said 364 | warranty, and the financial terms and conditions of its application, 365 | shall be subject of a separate instrument executed between the Licensor 366 | and the Licensee. 367 | 368 | 369 | Article 8 - LIABILITY 370 | 371 | 8.1 Subject to the provisions of Article 8.2, the Licensee shall be 372 | entitled to claim compensation for any direct loss it may have suffered 373 | from the Software as a result of a fault on the part of the relevant 374 | Licensor, subject to providing evidence thereof. 375 | 376 | 8.2 The Licensor's liability is limited to the commitments made under 377 | this Agreement and shall not be incurred as a result of in particular: 378 | (i) loss due the Licensee's total or partial failure to fulfill its 379 | obligations, (ii) direct or consequential loss that is suffered by the 380 | Licensee due to the use or performance of the Software, and (iii) more 381 | generally, any consequential loss. In particular the Parties expressly 382 | agree that any or all pecuniary or business loss (i.e. loss of data, 383 | loss of profits, operating loss, loss of customers or orders, 384 | opportunity cost, any disturbance to business activities) or any or all 385 | legal proceedings instituted against the Licensee by a third party, 386 | shall constitute consequential loss and shall not provide entitlement to 387 | any or all compensation from the Licensor. 388 | 389 | 390 | Article 9 - WARRANTY 391 | 392 | 9.1 The Licensee acknowledges that the scientific and technical 393 | state-of-the-art when the Software was distributed did not enable all 394 | possible uses to be tested and verified, nor for the presence of 395 | possible defects to be detected. In this respect, the Licensee's 396 | attention has been drawn to the risks associated with loading, using, 397 | modifying and/or developing and reproducing the Software which are 398 | reserved for experienced users. 399 | 400 | The Licensee shall be responsible for verifying, by any or all means, 401 | the suitability of the product for its requirements, its good working 402 | order, and for ensuring that it shall not cause damage to either persons 403 | or properties. 
404 | 405 | 9.2 The Licensor hereby represents, in good faith, that it is entitled 406 | to grant all the rights over the Software (including in particular the 407 | rights set forth in Article 5 <#scope>). 408 | 409 | 9.3 The Licensee acknowledges that the Software is supplied "as is" by 410 | the Licensor without any other express or tacit warranty, other than 411 | that provided for in Article 9.2 <#good-faith> and, in particular, 412 | without any warranty as to its commercial value, its secured, safe, 413 | innovative or relevant nature. 414 | 415 | Specifically, the Licensor does not warrant that the Software is free 416 | from any error, that it will operate without interruption, that it will 417 | be compatible with the Licensee's own equipment and software 418 | configuration, nor that it will meet the Licensee's requirements. 419 | 420 | 9.4 The Licensor does not either expressly or tacitly warrant that the 421 | Software does not infringe any third party intellectual property right 422 | relating to a patent, software or any other property right. Therefore, 423 | the Licensor disclaims any and all liability towards the Licensee 424 | arising out of any or all proceedings for infringement that may be 425 | instituted in respect of the use, modification and redistribution of the 426 | Software. Nevertheless, should such proceedings be instituted against 427 | the Licensee, the Licensor shall provide it with technical and legal 428 | expertise for its defense. Such technical and legal expertise shall be 429 | decided on a case-by-case basis between the relevant Licensor and the 430 | Licensee pursuant to a memorandum of understanding. The Licensor 431 | disclaims any and all liability as regards the Licensee's use of the 432 | name of the Software. No warranty is given as regards the existence of 433 | prior rights over the name of the Software or as regards the existence 434 | of a trademark. 435 | 436 | 437 | Article 10 - TERMINATION 438 | 439 | 10.1 In the event of a breach by the Licensee of its obligations 440 | hereunder, the Licensor may automatically terminate this Agreement 441 | thirty (30) days after notice has been sent to the Licensee and has 442 | remained ineffective. 443 | 444 | 10.2 A Licensee whose Agreement is terminated shall no longer be 445 | authorized to use, modify or distribute the Software. However, any 446 | licenses that it may have granted prior to termination of the Agreement 447 | shall remain valid subject to their having been granted in compliance 448 | with the terms and conditions hereof. 449 | 450 | 451 | Article 11 - MISCELLANEOUS 452 | 453 | 454 | 11.1 EXCUSABLE EVENTS 455 | 456 | Neither Party shall be liable for any or all delay, or failure to 457 | perform the Agreement, that may be attributable to an event of force 458 | majeure, an act of God or an outside cause, such as defective 459 | functioning or interruptions of the electricity or telecommunications 460 | networks, network paralysis following a virus attack, intervention by 461 | government authorities, natural disasters, water damage, earthquakes, 462 | fire, explosions, strikes and labor unrest, war, etc. 463 | 464 | 11.2 Any failure by either Party, on one or more occasions, to invoke 465 | one or more of the provisions hereof, shall under no circumstances be 466 | interpreted as being a waiver by the interested Party of its right to 467 | invoke said provision(s) subsequently. 
468 | 469 | 11.3 The Agreement cancels and replaces any or all previous agreements, 470 | whether written or oral, between the Parties and having the same 471 | purpose, and constitutes the entirety of the agreement between said 472 | Parties concerning said purpose. No supplement or modification to the 473 | terms and conditions hereof shall be effective as between the Parties 474 | unless it is made in writing and signed by their duly authorized 475 | representatives. 476 | 477 | 11.4 In the event that one or more of the provisions hereof were to 478 | conflict with a current or future applicable act or legislative text, 479 | said act or legislative text shall prevail, and the Parties shall make 480 | the necessary amendments so as to comply with said act or legislative 481 | text. All other provisions shall remain effective. Similarly, invalidity 482 | of a provision of the Agreement, for any reason whatsoever, shall not 483 | cause the Agreement as a whole to be invalid. 484 | 485 | 486 | 11.5 LANGUAGE 487 | 488 | The Agreement is drafted in both French and English and both versions 489 | are deemed authentic. 490 | 491 | 492 | Article 12 - NEW VERSIONS OF THE AGREEMENT 493 | 494 | 12.1 Any person is authorized to duplicate and distribute copies of this 495 | Agreement. 496 | 497 | 12.2 So as to ensure coherence, the wording of this Agreement is 498 | protected and may only be modified by the authors of the License, who 499 | reserve the right to periodically publish updates or new versions of the 500 | Agreement, each with a separate number. These subsequent versions may 501 | address new issues encountered by Free Software. 502 | 503 | 12.3 Any Software distributed under a given version of the Agreement may 504 | only be subsequently distributed under the same version of the Agreement 505 | or a subsequent version, subject to the provisions of Article 5.3.4 506 | <#compatibility>. 507 | 508 | 509 | Article 13 - GOVERNING LAW AND JURISDICTION 510 | 511 | 13.1 The Agreement is governed by French law. The Parties agree to 512 | endeavor to seek an amicable solution to any disagreements or disputes 513 | that may arise during the performance of the Agreement. 514 | 515 | 13.2 Failing an amicable solution within two (2) months as from their 516 | occurrence, and unless emergency proceedings are necessary, the 517 | disagreements or disputes shall be referred to the Paris Courts having 518 | jurisdiction, by the more diligent Party. 
519 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include LICENSE
3 | include *.txt
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Macronometrics
2 | 
3 | A toolbox for macroeconometric modeling
4 | 
5 | Version 0.0.1
6 | 
7 | ## Key features
8 | 
9 | * High-level language for model description (parser based on Lark)
10 | * Backward-looking modeling with AR / ECM processes
11 | * Dulmage-Mendelsohn block decomposition of the model
12 | * Symbolic computation of the Jacobian
13 | * Several choices of numerical solvers (based on SciPy or high-order Newton methods)
14 | * Time-series management based on Pandas
15 | 
16 | ## Usage
17 | 
18 | A macro model is defined by a set of static and dynamic equations that determine the evolution of economic variables (such as GDP or interest rates). The toolbox can simulate a trajectory (yearly or quarterly) of a model from a sample of time series (a training set). With this training set, the coefficients of the dynamic equations can be estimated and the residuals of the model computed.
19 | 
20 | ## Getting started
21 | 
22 | * Clone the repository
23 | ~~~
24 | git clone https://github.com/InseeFrLab/Macronometrics.git
25 | ~~~
26 | 
27 | * Install the package
28 | 
29 | ~~~
30 | cd Macronometrics
31 | python setup.py install
32 | ~~~
33 | 
34 | * Clone the repository containing an illustrative model
35 | 
36 | ~~~
37 | cd ..
38 | git clone https://github.com/InseeFrLab/Macronometrics-Notebook.git
39 | ~~~
40 | 
41 | * Run the Jupyter notebook ```Colibri.ipynb```
42 | 
43 | 
44 | ## To do
45 | 
46 | * Numba just-in-time compilation of the solving functions
47 | * Estimation of the coefficients of the model (OLS)
48 | 
49 | ## Acknowledgements
50 | 
51 | The code for the Dulmage-Mendelsohn block decomposition is implemented courtesy of the Bank of Japan research team:
52 | 
53 | Hirakata, N., K. Kazutoshi, A. Kanafuji, Y. Kido, Y. Kishaba, T. Murakoshi, and T. Shinohara (2019) "The Quarterly Japanese Economic Model (Q-JEM): 2019 version", Bank of Japan Working Paper Series, No. 19-E-7.
54 | 
55 | Some features of the toolbox are inspired by the Grocer package for Scilab, and implemented courtesy of Eric Dubois, lead developer of Grocer: http://grocer.toolbox.free.fr/
56 | 
57 | ## Credits
58 | 
59 | Institut National de la Statistique et des Etudes Economiques
60 | Direction des Etudes et Synthèses Economiques
61 | Département des Etudes Economiques
62 | Division des Etudes Macroéconomiques
63 | 
64 | Alexandre Bourgeois - Benjamin Favetto ([@BFavetto](https://github.com/BFavetto)) - Adrien Lagouge - Matthieu Lequien ([@MLequien](https://github.com/MLequien)) - Olivier Simon
65 | 
66 | ## Contributing
67 | 
68 | [Contributing](./CONTRIBUTING.md)
69 | 
70 | ## Licence
71 | 
72 | [CeCILL license](./LICENSE)
73 | 
74 | 
--------------------------------------------------------------------------------
/macronometrics/DulmageMendelsohnDecomposition.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | from .graph import Edge, reverse
3 | from .bipartiteMatching import bipartiteMatching
4 | from .stronglyConnectedComponents_kosaraju import stronglyConnectedComponents
5 | 
6 | # return reachable nodes from src
7 | def getDst(g, src):
8 |     def visit(g, u, visited):
9 |         visited[u] = True
10 |         for e in g[u]:
11 |             if not visited[e.dst]:
12 |                 visit(g, e.dst, visited)
13 | 
14 |     visited = [False]*len(g)
15 |     for u in src:
16 |         if not visited[u]:
17 |             visit(g, u, visited)
18 |     dst = [v for v in range(len(g)) if visited[v]]
19 |     return dst
20 | 
21 | 
22 | # generate bipartite graph
23 | def bipartiteGraph(g, rL, cL):
24 |     gb = [[] for n in range(rL+cL)]
25 |     for edges in g:
26 |         for e in edges:
27 |             gb[e.src].append(Edge(e.src, e.dst+rL))
28 |             gb[e.dst+rL].append(Edge(e.dst+rL, e.src))
29 |     return gb
30 | 
31 | 
32 | def DulmageMendelsohnDecomposition(g, rs, cs):
33 |     rL = len(g)
34 |     cL = max([e.dst for edges in g for e in edges])+1
35 | 
36 |     # step0: generate bipartite graph
37 |     gb = bipartiteGraph(g, rL, cL)
38 | 
39 |     # step1: find bipartiteMatching
40 |     matching = []
41 |     bipartiteMatching(gb, rL, matching)
42 | 
43 |     # step2: modify graph, i.e. bilateral M and R -> C edges
44 |     gb[rL:] = [[] for n in range(cL)]
45 |     for m in matching:
46 |         r = min(m.src, m.dst)
47 |         c = max(m.src, m.dst)
48 |         gb[c].append(Edge(c, r))
49 | 
50 |     # step3: find V0 and Vinf
51 |     matched = [m.src for m in matching]
52 |     matched += [m.dst for m in matching]
53 |     rsrc = [n for n in range(rL) if not n in matched]
54 |     cdst = [n for n in range(rL, rL+cL) if not n in matched]
55 |     Vinf = getDst(gb, rsrc)
56 |     V0 = getDst(reverse(gb), cdst)
57 | 
58 |     # step4: find scc of g without V0 and Vinf
59 |     # Kosaraju's algorithm to preserve topological order of scc
60 |     V = V0 + Vinf
61 |     gb[:] = [[e for e in edges if not (e.dst in V or e.src in V)] for edges in gb]
62 |     scc = []
63 |     stronglyConnectedComponents(gb, scc)
64 |     scc[:] = [c for c in scc if not c[0] in V]  # remove V0 and Vinf
65 |     scc[:] = [V0] + scc + [Vinf]
66 | 
67 |     rs[:] = [[n for n in c if n < rL] for c in scc]
68 |     cs[:] = [[n-rL for n in c if n >= rL] for c in scc]
69 | 
--------------------------------------------------------------------------------
/macronometrics/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | 
3 | name = "macronometrics"
4 | 
5 | __version__ = "0.0.1"
6 | __author__ = "Insee Division des Etudes Macroéconomiques"
7 | __date__ = "Février 2020"
8 | 
9 | from . import graph
10 | from . 
import getDst 11 | from . import bipartiteMatching 12 | from . import stronglyConnectedComponents_kosaraju 13 | from . import DulmageMendelsohnDecomposition 14 | 15 | 16 | 17 | from . import analyze 18 | from . import equation 19 | from . import model 20 | from . import numsolve 21 | from . import symbolic 22 | from . import tools_ts 23 | from . import tools 24 | from . import trollparser -------------------------------------------------------------------------------- /macronometrics/analyze.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | 6 | from .trollparser import parser # Parseur de syntaxe Troll 7 | from lark import Tree, Token 8 | from time import time 9 | from yaml import dump # pour l'écriture du fichier yaml 10 | 11 | 12 | def unique_list(liste): 13 | """ 14 | Return a list of unique elements 15 | """ 16 | return sorted(list(set(liste))) 17 | 18 | 19 | def analyze_model(model): 20 | """ 21 | Analyze the model before parsing : 22 | * Compute the syntaxic trees for each equation 23 | * Update the sets of variables and the dictionaries 24 | 25 | """ 26 | 27 | model.prelim() # pour avoir les variables ordonnées 28 | 29 | # unpack 30 | name_coeff_list = model.name_coeff_list 31 | name_endo_list = model.name_endo_list 32 | name_exo_list = model.name_exo_list 33 | name_policy_list = model.name_policy_list 34 | 35 | # coln_list = model.coln_list 36 | 37 | var_eq_dict = model.var_eq_dict 38 | 39 | eq_exo_dict = model.eq_exo_dict 40 | eq_policy_dict = model.eq_policy_dict 41 | eq_endo_dict = model.eq_endo_dict 42 | eq_endo_lag_dict = model.eq_endo_lag_dict 43 | eq_coeff_dict = model.eq_coeff_dict 44 | 45 | def analyse_eq(t, courant, num_eq): 46 | """ 47 | Analyse une equation : 48 | 49 | * déclaration des exogènes et des coefficients à la volée 50 | * construit les dictionnaires nom_var -> num_eq et num_eq -> nom_var 51 | 52 | """ 53 | if t.data == 'define_eq': # un seul signe = par équation 54 | leftpart, rightpart = t.children 55 | analyse_eq(leftpart, courant, num_eq) 56 | analyse_eq(rightpart, courant, num_eq) 57 | return 58 | 59 | elif t.data == 'add': 60 | leftpart, rightpart = t.children 61 | analyse_eq(leftpart, courant, num_eq) 62 | analyse_eq(rightpart, courant, num_eq) 63 | return 64 | 65 | elif t.data == 'sub': 66 | leftpart, rightpart = t.children 67 | analyse_eq(leftpart, courant, num_eq) 68 | analyse_eq(rightpart, courant, num_eq) 69 | return 70 | 71 | elif t.data == 'mul': 72 | leftpart, rightpart = t.children 73 | analyse_eq(leftpart, courant, num_eq) 74 | analyse_eq(rightpart, courant, num_eq) 75 | return 76 | 77 | elif t.data == 'div': 78 | leftpart, rightpart = t.children 79 | analyse_eq(leftpart, courant, num_eq) 80 | analyse_eq(rightpart, courant, num_eq) 81 | return 82 | 83 | elif t.data == 'pow': 84 | leftpart, rightpart = t.children 85 | analyse_eq(leftpart, courant, num_eq) 86 | analyse_eq(rightpart, courant, num_eq) 87 | return 88 | 89 | elif t.data == 'par': 90 | op = t.children[0] 91 | analyse_eq(op, courant, num_eq) 92 | return 93 | 94 | elif t.data == "delta": 95 | delay, part = t.children 96 | lag = int(delay) 97 | if lag > 0: 98 | analyse_eq(part, courant, num_eq) 99 | analyse_eq(part, courant + int(delay), num_eq) 100 | return 101 | else: 102 | raise ValueError("Illegal value for the lag !") 103 | 104 | elif t.data == "deltaone": # pour prendre en compte l'ommission en cas de lag unitaire 105 | part = t.children[0] 106 | analyse_eq(part, courant, num_eq) 
107 | analyse_eq(part, courant + 1, num_eq) 108 | return 109 | 110 | elif t.data == "lag": 111 | expr, delay = t.children 112 | lag = int(delay) 113 | if lag <= 0: 114 | analyse_eq(expr, courant + abs(lag), num_eq) 115 | return 116 | else: 117 | raise ValueError("Anticipated variables in the model !") 118 | 119 | elif t.data == "coeff": 120 | nom = str(t.children[0]) 121 | if nom not in name_coeff_list: 122 | # on ajoute le coefficient déclaré à la volée 123 | name_coeff_list.append(nom) 124 | eq_coeff_dict[num_eq] = eq_coeff_dict[num_eq] | {nom} 125 | return 126 | 127 | elif t.data == "var": 128 | nom = str(t.children[0]) 129 | 130 | if nom in name_exo_list: # cas d'une variable exogène 131 | if nom not in var_eq_dict.keys(): 132 | var_eq_dict[nom] = {num_eq} 133 | else: 134 | var_eq_dict[nom] = var_eq_dict[nom] | { 135 | num_eq} # Ajout pour décomposition D-M 136 | 137 | eq_exo_dict[num_eq] = eq_exo_dict[num_eq] | {nom} 138 | return 139 | 140 | elif nom in name_policy_list: # cas d'une variable exogène 141 | if nom not in var_eq_dict.keys(): 142 | var_eq_dict[nom] = {num_eq} 143 | else: 144 | var_eq_dict[nom] = var_eq_dict[nom] | { 145 | num_eq} # Ajout pour décomposition D-M 146 | 147 | eq_policy_dict[num_eq] = eq_policy_dict[num_eq] | {nom} 148 | return 149 | 150 | elif nom in name_endo_list: # cas d'une variable endogène 151 | if nom not in var_eq_dict.keys(): 152 | var_eq_dict[nom] = {num_eq} 153 | else: 154 | var_eq_dict[nom] = var_eq_dict[nom] | { 155 | num_eq} # Ajout pour décomposition D-M 156 | 157 | if courant == 0: # variable dont la valeur doit être déterminée 158 | eq_endo_dict[num_eq] = eq_endo_dict[num_eq] | {nom} 159 | else : # variable endogène retardée (valeur connue) 160 | eq_endo_lag_dict[num_eq] = eq_endo_lag_dict[num_eq] | {nom} 161 | 162 | return 163 | 164 | elif nom in name_coeff_list: 165 | eq_coeff_dict[num_eq] = eq_coeff_dict[num_eq] | {nom} 166 | return 167 | 168 | else: # déclaration implicite d'une exogène 169 | name_exo_list.append(nom) 170 | var_eq_dict[nom] = {num_eq} 171 | eq_exo_dict[num_eq] = eq_exo_dict[num_eq] | {nom} 172 | return 173 | 174 | elif t.data == "log": 175 | op = t.children[0] 176 | analyse_eq(op, courant, num_eq) 177 | return 178 | 179 | elif t.data == "exp": 180 | op = t.children[0] 181 | analyse_eq(op, courant, num_eq) 182 | return 183 | 184 | elif t.data == 'neg': 185 | op = t.children[0] 186 | analyse_eq(op, courant, num_eq) 187 | return 188 | 189 | elif t.data == 'pos': 190 | op = t.children[0] 191 | analyse_eq(op, courant, num_eq) 192 | return 193 | 194 | elif (t.data == "number") or (t.data == "signednumber"): 195 | return 196 | 197 | elif t.data == "diff": 198 | return 199 | 200 | else: 201 | raise SyntaxError('Unknown instruction: %s' % t.data) 202 | 203 | start_time = time() # tic 204 | 205 | # on boucle sur les équations 206 | 207 | for item in model.eq_obj_dict.keys(): 208 | 209 | # on parse l'équation 210 | eq_parse = parser.parse(model.eq_obj_dict[item].text_eq) 211 | 212 | # unpack 213 | num_eq = model.eq_obj_dict[item].num_eq 214 | 215 | analyse_eq(eq_parse, 0, num_eq) # analyse l'équation 216 | 217 | # mise à jour de l'arbre syntaxique dans l'objet équation 218 | model.eq_obj_dict[item].tree_eq = eq_parse 219 | 220 | model.eq_obj_dict[item].coeff_name_list = unique_list( 221 | eq_coeff_dict[num_eq]) 222 | model.eq_obj_dict[item].exo_name_list = unique_list( 223 | eq_exo_dict[num_eq]) 224 | model.eq_obj_dict[item].policy_name_list = unique_list( 225 | eq_policy_dict[num_eq]) 226 | model.eq_obj_dict[item].endo_name_list = 
unique_list(
227 |             eq_endo_dict[num_eq])
228 |         model.eq_obj_dict[item].endo_lag_name_list = unique_list(
229 |             eq_endo_lag_dict[num_eq])
230 | 
231 |     model.name_endo_list = unique_list(
232 |         name_endo_list)  # On a toutes les endogènes !
233 | 
234 |     model.name_exo_list = unique_list(
235 |         name_exo_list)  # on a toutes les exogènes !
236 | 
237 |     model.name_policy_list = unique_list(name_policy_list)
238 | 
239 |     model.name_coeff_list = unique_list(
240 |         name_coeff_list)  # on a tous les coefficients !
241 | 
242 |     model.coln_list = model.name_endo_list + model.name_exo_list + \
243 |         model.name_policy_list  # on a toutes les variables du modèle !
244 | 
245 |     model.dicovar = {}
246 |     for i in range(len(model.coln_list)):
247 |         # crée un dictionnaire de correspondance globale nom / indice
248 |         model.dicovar[model.coln_list[i]] = i
249 | 
250 |     model.dicoeff = {}
251 |     for i in range(len(model.name_coeff_list)):
252 |         # crée un dictionnaire de correspondance globale nom / indice
253 |         model.dicoeff[model.name_coeff_list[i]] = i
254 | 
255 |     elapsed_time = time() - start_time  # toc
256 | 
257 |     print(f"The analysis of the model took {elapsed_time:.3f} seconds.\n")
258 | 
259 |     derive_model(model)
260 | 
261 |     for item in model.name_endo_list:
262 |         model.symboles_dict[item] = "endogenous"
263 | 
264 |     for item in model.name_exo_list:
265 |         model.symboles_dict[item] = "exogenous"
266 | 
267 |     for item in model.name_policy_list:
268 |         model.symboles_dict[item] = "policy"
269 | 
270 |     for item in model.name_coeff_list:
271 |         model.symboles_dict[item] = "coefficient"
272 | 
273 |     model.is_analyzed = True  # le modèle est désormais analysé
274 | 
275 |     return
276 | 
277 | 
278 | def derive_equation(eq):
279 |     """
280 |     Compute the syntactic trees for the derivatives
281 |     """
282 | 
283 |     # unpack
284 | 
285 |     tree_eq = eq.tree_eq
286 |     endo = eq.endo_name_list
287 |     exo = eq.exo_name_list + eq.policy_name_list
288 |     coeff = eq.coeff_name_list
289 | 
290 |     eq.tree_diff = deriv_tree(tree_eq, 0, endo, exo, coeff)
291 | 
292 | 
293 | def derive_model(model, debug=False):
294 |     """
295 |     Compute the derivatives of the whole model
296 |     """
297 | 
298 |     # unpack
299 | 
300 |     eq_obj_dict = model.eq_obj_dict
301 |     dicoeff = model.dicoeff
302 |     dicovar = model.dicovar
303 | 
304 |     start_time = time()  # tic
305 | 
306 |     for item in eq_obj_dict.keys():
307 | 
308 |         if debug:
309 |             print(item)
310 | 
311 |         eq = eq_obj_dict[item]
312 | 
313 |         for c in eq.coeff_name_list:
314 |             eq.coeff_eq_dict[c] = dicoeff[c]
315 | 
316 |         for ex in eq.exo_name_list:
317 |             eq.exo_eq_dict[ex] = dicovar[ex]
318 | 
319 |         for en in eq.endo_name_list:
320 |             eq.endo_eq_dict[en] = dicovar[en]
321 | 
322 |         for enl in eq.endo_lag_name_list:
323 |             eq.endo_lag_eq_dict[enl] = dicovar[enl]
324 | 
325 |         for po in eq.policy_name_list:
326 |             eq.policy_eq_dict[po] = dicovar[po]
327 | 
328 |         derive_equation(eq)
329 | 
330 |     elapsed_time = time() - start_time  # toc
331 | 
332 |     print(f"The computation of the derivatives of the model took {elapsed_time:.3f} seconds.\n")
333 | 
334 |     return
335 | 
336 | 
337 | def deriv_tree(t, courant, endo, exo, coeff):
338 |     """
339 |     Calcul de l'arbre syntaxique dérivé d'une équation
340 | 
341 |     Version avec simplification de l'expression
342 |     """
343 | 
344 |     if courant > 0:
345 |         return Tree(data="number", children=[Token(type_="NUMBER", value="0")])
346 | 
347 |     if t.data == "define_eq":
348 |         # a = b -> a' = b'
349 |         leftpart, rightpart = t.children
350 |         leftderiv = deriv_tree(leftpart, courant, endo, exo, coeff)
351 |         rightderiv = 
deriv_tree(rightpart, courant, endo, exo, coeff) 352 | 353 | return Tree(data=t.data, children=[leftderiv, rightderiv]) 354 | 355 | elif (t.data == 'add'): 356 | # a +/- b -> a' +/- b' 357 | leftpart, rightpart = t.children 358 | leftderiv = deriv_tree(leftpart, courant, endo, exo, coeff) 359 | rightderiv = deriv_tree(rightpart, courant, endo, exo, coeff) 360 | 361 | # simplification de l'addition 362 | 363 | if (leftderiv.data == "number") and (leftderiv.children[0].value == "0"): 364 | # a' = 0 365 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 366 | # b' = 0 367 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 368 | else: 369 | return rightderiv 370 | else: 371 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 372 | # b' = 0 373 | return leftderiv 374 | else: 375 | return Tree(data=t.data, children=[leftderiv, rightderiv]) 376 | 377 | elif (t.data == "sub"): 378 | # a +/- b -> a' +/- b' 379 | leftpart, rightpart = t.children 380 | leftderiv = deriv_tree(leftpart, courant, endo, exo, coeff) 381 | rightderiv = deriv_tree(rightpart, courant, endo, exo, coeff) 382 | # simplification de la soustraction 383 | 384 | if (leftderiv.data == "number") and (leftderiv.children[0].value == "0"): 385 | # a' = 0 386 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 387 | # b' = 0 388 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 389 | else: 390 | return Tree(data="neg", children=[rightderiv]) 391 | else: 392 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 393 | # b' = 0 394 | return leftderiv 395 | else: 396 | return Tree(data=t.data, children=[leftderiv, rightderiv]) 397 | 398 | elif t.data == 'mul': 399 | # a * b -> a' * b + a * b' 400 | leftpart, rightpart = t.children 401 | leftderiv = deriv_tree(leftpart, courant, endo, exo, coeff) 402 | rightderiv = deriv_tree(rightpart, courant, endo, exo, coeff) 403 | # simplification du produit 404 | if (leftderiv.data == "number") and (leftderiv.children[0].value == "0"): 405 | # a' = 0 406 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 407 | # b' = 0 408 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 409 | else: 410 | rhs = Tree(data="mul", children=[leftpart, rightderiv]) 411 | return rhs 412 | else: 413 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 414 | # b' = 0 415 | lhs = Tree(data="mul", children=[leftderiv, rightpart]) 416 | return lhs 417 | else: 418 | lhs = Tree(data="mul", children=[leftderiv, rightpart]) 419 | rhs = Tree(data="mul", children=[leftpart, rightderiv]) 420 | return Tree(data="add", children=[lhs, rhs]) 421 | 422 | elif t.data == "div": 423 | # a / b -> a' / b - a * b' / b^2 424 | leftpart, rightpart = t.children 425 | leftderiv = deriv_tree(leftpart, courant, endo, exo, coeff) 426 | rightderiv = deriv_tree(rightpart, courant, endo, exo, coeff) 427 | # simplification de la division 428 | if (leftderiv.data == "number") and (leftderiv.children[0].value == "0"): 429 | # a' = 0 430 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 431 | # b' = 0 432 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 433 | else: 434 | mhs1 = Tree(data="div", children=[leftpart, rightpart]) 435 | mhs2 = Tree(data="div", children=[rightderiv, rightpart]) 436 | rhs = Tree(data="mul", children=[mhs1, mhs2]) 437 | return Tree(data="neg", children=[rhs]) 
438 | else: 439 | if (rightderiv.data == "number") and (rightderiv.children[0].value == "0"): 440 | # b' = 0 441 | lhs = Tree(data="div", children=[leftderiv, rightpart]) 442 | return lhs 443 | else: 444 | lhs = Tree(data="div", children=[leftderiv, rightpart]) 445 | mhs1 = Tree(data="div", children=[leftpart, rightpart]) 446 | mhs2 = Tree(data="div", children=[rightderiv, rightpart]) 447 | rhs = Tree(data="mul", children=[mhs1, mhs2]) 448 | return Tree(data="sub", children=[lhs, rhs]) 449 | 450 | elif t.data == "pow": # pas d'endogène dans la puissance !!! 451 | # a^b -> a' * b * a^(b-1) 452 | leftpart, rightpart = t.children # rightpart -> la puissance 453 | leftderiv = deriv_tree(leftpart, courant, endo, exo, coeff) # a' 454 | if (leftderiv.data == "number") and (leftderiv.children[0].value == "0"): 455 | # a' = 0 456 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 457 | else: 458 | newpow = Tree(data="sub", children=[rightpart, Tree( 459 | data="number", children=[Token(type_="NUMBER", value='1')])]) 460 | rhs = Tree(data="pow", children=[leftpart, newpow]) # a^(b-1) 461 | mhs = Tree(data="mul", children=[rightpart, rhs]) 462 | return Tree(data="mul", children=[leftderiv, mhs]) 463 | 464 | elif t.data == "delta": 465 | # delta(lag:op) -> (op') 466 | delay, part = t.children 467 | 468 | op = deriv_tree(part, courant, endo, exo, coeff) 469 | 470 | return op 471 | 472 | elif t.data == "deltaone": 473 | # delta(op) -> (op') 474 | op = deriv_tree(t.children[0], courant, endo, exo, coeff) 475 | 476 | return op 477 | 478 | elif t.data == "lag": 479 | # a(-lag) 480 | expr, delay = t.children 481 | lag = int(delay) 482 | if lag == 0: 483 | op = deriv_tree(expr, courant + abs(lag), endo, exo, coeff) 484 | return op 485 | else: 486 | return Tree(data="number", children=[Token(type_="NUMBER", value='0')]) 487 | 488 | elif t.data == "coeff": 489 | op = Token(type_="NUMBER", value="0") # coeff' = 0 490 | return Tree(data="number", children=[op]) 491 | 492 | elif (t.data == "number") or (t.data == "signednumber"): 493 | op = Token(type_="NUMBER", value="0") # number' = 0 494 | return Tree(data=t.data, children=[op]) 495 | 496 | elif (t.data == 'neg') or (t.data == 'pos') or (t.data == "par"): 497 | op = deriv_tree(t.children[0], courant, endo, exo, coeff) 498 | if (op.data == "number") and (op.children[0].value == "0"): 499 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 500 | else: 501 | return Tree(data=t.data, children=[op]) 502 | 503 | elif t.data == "log": 504 | # log(a) -> a' / a 505 | op = deriv_tree(t.children[0], courant, endo, exo, coeff) 506 | if (op.data == "number") and (op.children[0].value == "0"): 507 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 508 | else: 509 | return Tree(data="div", children=[op, t.children[0]]) 510 | 511 | elif t.data == "exp": 512 | # exp(a) -> a' * exp(a) 513 | op = deriv_tree(t.children[0], courant, endo, exo, coeff) 514 | if (op.data == "number") and (op.children[0].value == "0"): 515 | return Tree(data="number", children=[Token(type_="NUMBER", value="0")]) 516 | else: 517 | rightpart = Tree(data="exp", children=[t.children[0]]) 518 | return Tree(data="mul", children=[op, rightpart]) 519 | 520 | elif t.data == "var": # !!! cas d'une variable !!! 
521 | nom = str(t.children[0]) 522 | if (nom in exo) or (nom in coeff): # cas d'une variable exogène ou d'un coefficient 523 | op = Token(type_="NUMBER", value="0") 524 | return Tree(data="number", children=[op]) # exo' = 0 525 | 526 | elif nom in endo: # cas d'une variable endogène 527 | 528 | if courant == 0: # On introduit dx qui vaudra 0 ou 1 lors de l'évaluation 529 | return Tree(data="diff", children=t.children) 530 | 531 | else: # L'endogène est retardée 532 | op = Token(type_="NUMBER", value="0") 533 | return Tree(data="number", children=[op]) 534 | 535 | else: 536 | raise SyntaxError('Unknown variable') 537 | 538 | else: 539 | raise SyntaxError('Unknown instruction: %s' % t.data) 540 | 541 | 542 | def write_yaml_file(model, yaml_filename, dir="./modeles_python"): 543 | """ 544 | Write all the information about the model in a yaml file 545 | 546 | yaml_filename : name of the yaml file (with .yaml extension) 547 | """ 548 | 549 | if (not model.is_analyzed): 550 | 551 | raise ValueError("The model is not analyzed.") 552 | 553 | mod_dict = [{'name_mod': model.name_mod}, {'name_endo_list': model.name_endo_list}, 554 | {'name_exo_list': model.name_exo_list}, { 555 | 'name_policy_list': model.name_policy_list}, 556 | {'name_coeff_list': model.name_coeff_list}] 557 | 558 | mod_eq = dict() 559 | 560 | for item in model.eq_obj_dict.keys(): 561 | mod_eq[item] = {'name_eq': model.eq_obj_dict[item].name_eq, 'text_eq': model.eq_obj_dict[item].text_eq, 'num_eq': model.eq_obj_dict[item].num_eq, 562 | 'coeff_eq_dict': model.eq_obj_dict[item].coeff_eq_dict, 'coeff_name_list': model.eq_obj_dict[item].coeff_name_list, 563 | 'endo_eq_dict': model.eq_obj_dict[item].endo_eq_dict, 564 | 'endo_lag_eq_dict': model.eq_obj_dict[item].endo_lag_eq_dict, 565 | 'endo_name_list': model.eq_obj_dict[item].endo_name_list, 566 | 'endo_lag_name_list': model.eq_obj_dict[item].endo_lag_name_list, 567 | 'exo_eq_dict': model.eq_obj_dict[item].exo_eq_dict, 'exo_name_list': model.eq_obj_dict[item].exo_name_list, 568 | 'policy_eq_dict': model.eq_obj_dict[item].policy_eq_dict, 'policy_name_list': model.eq_obj_dict[item].policy_name_list} 569 | 570 | with open(dir+"/"+yaml_filename, 'w+') as f: 571 | 572 | dump(mod_dict, f) 573 | dump([{'equations': mod_eq}], f) 574 | 575 | return 576 | -------------------------------------------------------------------------------- /macronometrics/bipartiteMatching.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from .graph import Edge 3 | 4 | # Ford-Fulkerson algorithm 5 | def augment(g, u, matchTo, visited): 6 | if u < 0: return True 7 | for e in g[u]: 8 | if not visited[e.dst]: 9 | visited[e.dst] = True 10 | if augment(g, matchTo[e.dst], matchTo, visited): 11 | matchTo[e.src] = e.dst 12 | matchTo[e.dst] = e.src 13 | return True 14 | return False 15 | 16 | # g: bipartite graph 17 | # L: size of the left side 18 | def bipartiteMatching(g, L, matching): 19 | n = len(g) 20 | matchTo = [-1 for n in range(n)] 21 | match = 0 22 | for u in range(L): 23 | visited = [False]*n 24 | if augment(g, u, matchTo, visited): 25 | match+=1 26 | for u in range(L): 27 | if matchTo[u] >= 0: 28 | matching.append(Edge(u, matchTo[u])) 29 | return match 30 | 31 | -------------------------------------------------------------------------------- /macronometrics/equation.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | 6 | 7 | class Equation(): 8 | 
""" 9 | Definition of an Equation object 10 | 11 | """ 12 | 13 | def __repr__(self): 14 | texte = "Equation : {} \n".format(self.name_eq) 15 | texte += "Texte : {} \n".format(self.text_eq) 16 | texte += "Numéro : {} \n".format(self.num_eq) 17 | texte += "Coefficients : {} \n".format(self.coeff_name_list) 18 | texte += "Exogènes : {} \n".format(self.exo_name_list) 19 | texte += "Policy : {} \n".format(self.policy_name_list) 20 | texte += "Endogènes contemporaines : {} \n".format(self.endo_name_list) 21 | texte += "Endogènes retardées : {} \n".format(self.endo_lag_name_list) 22 | return texte 23 | 24 | def __init__(self, name_eq, text_eq, num_eq): 25 | """ 26 | name_eq : name of the equation (in the model text) 27 | text_eq : text of the equation 28 | num_eq = equation index 29 | """ 30 | 31 | self.name_eq = name_eq # nom de l'équation 32 | self.text_eq = text_eq # texte de l'équation 33 | self.num_eq = num_eq # numéro de l'équation 34 | 35 | # self.num_block = None # numéro du bloc contenant l'équation 36 | 37 | # dictionnaire coefficients de l'équation -> indice global 38 | self.coeff_eq_dict = dict() 39 | self.coeff_name_list = [] # nom des coefficients de l'équation 40 | 41 | # correspondance variables / données 42 | 43 | self.exo_eq_dict = dict() # dictionnaire exogènes de l'équation -> indice global 44 | self.exo_name_list = [] # nom des exogènes de l'équation 45 | 46 | # dictionnaire endogènes contemporaines de l'équation -> indice global 47 | self.endo_eq_dict = dict() 48 | self.endo_name_list = [] # nom des endogènes de l'équation 49 | 50 | # dictionnaire endogènes retardées de l'équation -> indice global 51 | self.endo_lag_eq_dict = dict() 52 | self.endo_lag_name_list = [] # nom des endogènes retardées de l'équation 53 | 54 | self.policy_eq_dict = dict() # dictionnaire policy -> indice 55 | self.policy_name_list = [] 56 | 57 | # arbres syntaxiques de l'équation et de ses dérivées 58 | self.tree_eq = None # arbre syntaxique de l'équation 59 | self.tree_diff = None # arbre syntaxique dérivé 60 | # self.tree_diff_dict = dict() # dictionnaire nom des endogènes contemporaines -> arbre dérivé 61 | 62 | def print_tree(self, deriv=False): 63 | """ 64 | Print the trees 65 | """ 66 | if deriv: 67 | print(self.tree_diff.pretty()) 68 | else: 69 | print(self.tree_eq.pretty()) 70 | return 71 | -------------------------------------------------------------------------------- /macronometrics/estimation.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | import pandas as pd 6 | 7 | def unique_list(liste): 8 | """ 9 | Return a list without duplicate entries. 10 | """ 11 | return list(set(liste)) 12 | 13 | 14 | class Estim(): 15 | """ 16 | Classe permettant l'estimation d'une équation. 17 | 18 | Le modèle doit être analysé auparavant avec les outils de la classe Analyse. 
19 | 20 | ATTENTION : pour le moment, on considère le texte des équations comme étant 21 | syntaxiquement correct (au sens de Troll) 22 | """ 23 | 24 | def __init__(self, equation, model, df_mod): 25 | """ 26 | 27 | 28 | Paramètres 29 | ========== 30 | 31 | list_eq_block : ordre des équations dans le block 32 | set_eq : ensemble d'arbres syntaxiques 33 | set_diff : ensemble d'arbres dérivés 34 | dict_eq_var : dictionnaire de correspondance equation -> nom de variable 35 | endo : nom des endogènes 36 | exo : nom des exogènes et des policy issues de la lecture du modèle 37 | dicovar : correspondance variable -> indice globale (structure de données) 38 | coeff : nom des coefficients issus de la lecture du modèle 39 | dicoeff : correspondance coefficient -> indice 40 | 41 | """ 42 | self.equation = equation 43 | 44 | self.model = model 45 | 46 | self.coeff_name_list = equation.coeff_name_list 47 | 48 | self.coeff_eq_dict_loc = dict() 49 | 50 | self.fun_text = "" 51 | 52 | self.n_coeff = len(self.coeff_name_list) 53 | 54 | self.var_list_loc = unique_list(equation.policy_name_list 55 | + equation.endo_lag_name_list 56 | + equation.endo_name_list 57 | + equation.exo_name_list) 58 | 59 | self.var_list_loc.sort() 60 | 61 | self.var_eq_dict_loc = dict() 62 | 63 | if len(self.var_list_loc) ==0 : 64 | print("Pas de variable dans l'équation !\n") 65 | else: 66 | print("Variables dans l'équation\n") 67 | print(self.var_list_loc) 68 | for i,v in enumerate(self.var_list_loc): 69 | self.var_eq_dict_loc[v] = i 70 | 71 | 72 | if self.n_coeff ==0 : 73 | print("Pas de coefficient à estimer !\n") 74 | else: 75 | print(str(self.n_coeff)+" coefficient(s) à estimer\n") 76 | print(self.coeff_name_list) 77 | for i,v in enumerate(self.coeff_name_list): 78 | self.coeff_eq_dict_loc[v] = i 79 | 80 | self.data_eq = df_mod[self.var_list_loc].copy() 81 | 82 | 83 | def create_estimfun_python(self,create_resid=False): 84 | """ 85 | Permet la traduction d'une équation en python pour l'estimation de ses coefficients. 86 | Le modèle doit être analysé avec les outils de la bibliothèque Analyse. 87 | 88 | ATTENTION : pour le moment, on considère le texte des équations comme étant 89 | syntaxiquement correct (au sens de Troll) 90 | 91 | Paramètres 92 | ========== 93 | 94 | model : un modèle de la classe Modele préalablement analysé 95 | 96 | """ 97 | dicovar = self.var_eq_dict_loc 98 | 99 | def run_instruction(t, courant, vari=None): 100 | # """ 101 | # Règles de production (evaluation) pour une équation. 102 | 103 | # On suppose que le modèle est correctement analysé. 104 | 105 | # Le but est de produire une chaine de caractères (à passer à la fonction eval de Python 106 | # ou à enregistrer dans une fonction). 107 | 108 | # Arguments : 109 | # * t : arbre syntaxique 110 | # * courant : date courante (pour les retards) 111 | # -> on démarre à courant = 0 et on doit disposer des endogènes jusqu'à l'instant donné. 
112 | # * vari : nom de variable (pour les dérivées partielles) 113 | # """ 114 | 115 | if t.data == 'define_eq': # un seul signe = par équation 116 | leftpart, rightpart = t.children 117 | 118 | t1 = run_instruction(leftpart, courant, vari) 119 | t2 = run_instruction(rightpart, courant, vari) 120 | 121 | if t1 == "0": 122 | if t2 == "0": 123 | return "0" 124 | elif rightpart.data == 'par': 125 | return "-"+t2 126 | else: 127 | return "-("+t2+")" 128 | elif t2 == "0": 129 | if leftpart.data == 'par': 130 | return t1 131 | else: 132 | return '('+t1+')' 133 | elif rightpart.data == 'par': 134 | if leftpart.data == 'par': 135 | return t1+'-'+t2 136 | else: 137 | return '('+t1+')-'+t2 138 | elif leftpart.data == "par": 139 | return t1+'-('+t2+')' 140 | else: 141 | return '('+t1+')-('+t2+')' 142 | 143 | elif t.data == 'add': 144 | leftpart, rightpart = t.children 145 | t1 = run_instruction(leftpart, courant, vari) 146 | t2 = run_instruction(rightpart, courant, vari) 147 | 148 | if t1 == "0": 149 | if t2 == "0": 150 | return "0" 151 | else: 152 | return t2 153 | elif t2 == "0": 154 | return t1 155 | else: 156 | if leftpart.children == 'par': 157 | if rightpart.children == 'par': 158 | return t1+'+'+t2 159 | else: 160 | return t1 + '('+t2+')' 161 | elif rightpart.children == 'par': 162 | return '('+t1+')'+t2 163 | else: 164 | return '('+t1+')+('+t2+')' 165 | 166 | elif t.data == 'sub': 167 | leftpart, rightpart = t.children 168 | t1 = run_instruction(leftpart, courant, vari) 169 | t2 = run_instruction(rightpart, courant, vari) 170 | 171 | if t1 == "0": 172 | if t2 == "0": 173 | return "0" 174 | elif rightpart.data == 'par': 175 | return "-"+t2 176 | else: 177 | return "-("+t2+")" 178 | elif t2 == "0": 179 | if leftpart.data == 'par': 180 | return t1 181 | else: 182 | return '('+t1+')' 183 | elif rightpart.data == 'par': 184 | if leftpart.data == 'par': 185 | return t1+'-'+t2 186 | else: 187 | return '('+t1+')-'+t2 188 | elif leftpart.data == "par": 189 | return t1+'-('+t2+')' 190 | else: 191 | return '('+t1+')-('+t2+')' 192 | 193 | elif t.data == 'mul': 194 | leftpart, rightpart = t.children 195 | t1 = run_instruction(leftpart, courant, vari) 196 | t2 = run_instruction(rightpart, courant, vari) 197 | if t1 == "0" or t2 == "0": 198 | return "0" 199 | elif t1 == "1": 200 | return t2 201 | elif t2 == "1": 202 | return t1 203 | else: 204 | return "(" + t1 + ")*(" + t2 + ")" 205 | 206 | elif t.data == 'div': 207 | leftpart, rightpart = t.children 208 | t1 = run_instruction(leftpart, courant, vari) 209 | if t1 == "0": 210 | return "0" 211 | elif t1 == "1": 212 | return "1/(" + run_instruction(rightpart, courant, vari)+")" 213 | else: 214 | return "("+t1+")" + "/(" + run_instruction(rightpart, courant, vari)+")" 215 | 216 | elif t.data == 'pow': 217 | leftpart, rightpart = t.children 218 | t1 = run_instruction(leftpart, courant, vari) 219 | if t1 == "0": 220 | return "0" 221 | elif t1 == "1": 222 | return "1" 223 | else: 224 | return "(" + run_instruction(leftpart, courant, vari)+")" + "**" + "("+run_instruction(rightpart, courant, vari)+")" 225 | 226 | elif t.data == 'par': 227 | op = t.children[0] 228 | t1 = run_instruction(op, courant, vari) 229 | 230 | if t1 == "0": 231 | return "0" 232 | elif t1 == "1": 233 | return "1" 234 | elif op.data == 'par' or op.data == 'pos' or op.data == 'number' or op.data == "diff": 235 | return t1 236 | else: 237 | return "(" + t1 + ")" 238 | 239 | elif t.data == "delta": 240 | delay, part = t.children 241 | lag = int(delay) 242 | if lag > 0: 243 | return "(" + 
run_instruction(part, courant, vari) + "- (" + run_instruction(part, courant + int(delay), vari) + "))" 244 | else: 245 | raise ValueError("Valeur incohérente du retard !") 246 | 247 | elif t.data == "deltaone": # pour prendre en compte l'ommission en cas de lag unitaire 248 | part = t.children[0] 249 | return "(" + run_instruction(part, courant, vari) + "- (" + run_instruction(part, courant + 1, vari) + "))" 250 | 251 | elif t.data == "lag": 252 | expr, delay = t.children 253 | lag = int(delay) 254 | if lag <= 0: 255 | return run_instruction(expr, courant + abs(lag), vari) 256 | else: 257 | raise ValueError( 258 | "Le modèle contient des variables anticipées !") 259 | 260 | elif t.data == "coeff": 261 | nom = str(t.children[0]) 262 | if nom not in self.coeff_name_list: 263 | raise ValueError("Valeur inconnue pour le coefficient !") 264 | # on va chercher le coefficient dans un dictionnaire 265 | return "_z[" + str(self.coeff_eq_dict_loc[nom]) + "]" 266 | 267 | elif t.data == "var": 268 | nom = str(t.children[0]) 269 | 270 | 271 | if (nom in self.var_list_loc): 272 | # on cherche dans la base de données du modèle la valeur 273 | return "_data[_t-"+str(courant)+"," + str(dicovar[nom]) + "]" 274 | 275 | elif nom in self.coeff_name_list : 276 | # on va chercher le coefficient dans un dictionnaire 277 | return "_z[" + str(self.coeff_eq_dict_loc[nom]) + "]" 278 | 279 | else: 280 | raise ValueError("Le modèle doit être analysé !") 281 | 282 | elif t.data == "log": 283 | op = t.children[0] 284 | return "log(" + run_instruction(op, courant, vari) + ")" 285 | 286 | elif t.data == "exp": 287 | op = t.children[0] 288 | return "exp(" + run_instruction(op, courant, vari) + ")" 289 | 290 | elif t.data == 'neg': 291 | op = t.children[0] 292 | 293 | t1 = run_instruction(op, courant, vari) 294 | 295 | if t1 == "0": 296 | return "0" 297 | elif t1 == "1": 298 | return "-1" 299 | elif op.data == 'par' or op.data == 'pos' or op.data == 'number' or op.data == "diff": 300 | return "-"+t1 301 | else: 302 | return "-(" + t1 + ")" 303 | 304 | elif t.data == 'pos': 305 | op = t.children[0] 306 | return run_instruction(op, courant, vari) 307 | 308 | elif (t.data == "number") or (t.data == "signednumber"): 309 | valeur = t.children[0] 310 | if valeur == 0: 311 | return "0" 312 | else: 313 | return str(valeur) 314 | 315 | elif t.data == "diff": 316 | nom = str(t.children[0]) 317 | if nom == vari: 318 | return "1" 319 | else: 320 | return "0" 321 | 322 | else: 323 | raise SyntaxError('Unknown instruction: %s' % t.data) 324 | 325 | 326 | 327 | # on récupère l'arbre syntaxique de l'équation et de sa dérivée 328 | eq_parse = self.equation.tree_eq 329 | 330 | 331 | texte_eq = run_instruction(eq_parse, 0) 332 | 333 | if create_resid : 334 | 335 | res_block = "def _f_resid(_z,_t,_data):\n" 336 | res_block += "\treturn " 337 | res_block += texte_eq 338 | 339 | else: 340 | 341 | 342 | res_block = "def _f_estim(_z,_t_start,_t_stop,_data):\n" 343 | res_block += "\t_res = 0\n" 344 | res_block += "\tfor _t in range(_t_start,_t_stop):\n" 345 | res_block += "\t\t_res += (" 346 | res_block += texte_eq # on met à jour le texte de la fonction 347 | res_block += ")**2\n" 348 | res_block += "\treturn _res\n" 349 | 350 | 351 | # # ensemble des endogènes contemporaines de l'équation 352 | # endo_name_list = eq.endo_name_list 353 | 354 | # for j, vari in enumerate(self.list_endo_block): 355 | 356 | 357 | 358 | # if vari in endo_name_list: 359 | # jac_block += "\tdf[" + str(ell) + "][" + str(j) + "] = " 360 | # partialder = 
run_instruction(jac_parse, 0, vari) 361 | # jac_block += partialder + "\n" 362 | 363 | # ell += 1 364 | 365 | self.fun_text = res_block 366 | 367 | 368 | 369 | return -------------------------------------------------------------------------------- /macronometrics/getDst.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from .graph import Edge, reverse 3 | 4 | # return reachable nodes from src 5 | def getDst(g, src): 6 | def visit(g, u, visited): 7 | visited[u] = True 8 | for e in g[u]: 9 | if not visited[e.dst]: 10 | visit(g, e.dst, visited) 11 | 12 | visited = [False]*len(g) 13 | for u in src: 14 | if not visited[u]: 15 | visit(g, u, visited) 16 | dst = [v for v in range(len(g)) if visited[v]] 17 | return dst 18 | 19 | def getSrc(g, dst): 20 | return getDst(reverse(g), dst) 21 | -------------------------------------------------------------------------------- /macronometrics/graph.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # define basic element of graphs 3 | 4 | # graph is an adjacency list 5 | # i.e. graph g = [node] = [[edge]] 6 | class Edge(): 7 | def __init__(self, src, dst, weight=0): 8 | self.src = src 9 | self.dst = dst 10 | self.weight = weight 11 | def __str__(self): 12 | return str("("+str(self.src)+","+str(self.dst)+")") 13 | 14 | # return adj_matrix 15 | def adj_matrix(g): 16 | gm = [[0]*len(g) for n in range(len(g))] 17 | for edges in g: 18 | for e in edges: 19 | gm[e.src][e.dst] = 1 20 | return gm 21 | 22 | # return adj_list 23 | def adj_list(gm): 24 | g = [[] for n in range(len(gm))] 25 | for u in range(len(gm)): 26 | for v in range(len(gm)): 27 | if gm[u][v]: g[u].append(Edge(u,v)) 28 | return g 29 | 30 | # return reversed graph 31 | def reverse(g): 32 | gr = [[] for n in range(len(g))] 33 | for edges in g: 34 | for e in edges: 35 | gr[e.dst].append(Edge(e.dst, e.src)) 36 | return gr 37 | 38 | # print graph 39 | def debug_print_graph(g): 40 | s = [] 41 | for edges in g: 42 | for e in edges: 43 | s.append('('+str(e.src)+','+str(e.dst)+')') 44 | s.append('\n') 45 | print(''.join(s)) 46 | 47 | -------------------------------------------------------------------------------- /macronometrics/model.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | 6 | from re import sub # Pour les expressions régulières 7 | from copy import deepcopy 8 | 9 | from .equation import Equation # gestion du parseur 10 | from .symbolic import Block # construction des fonctions de résolution 11 | from .analyze import analyze_model, write_yaml_file 12 | 13 | from time import time 14 | 15 | from .graph import Edge 16 | from .getDst import getDst, getSrc 17 | from .DulmageMendelsohnDecomposition import DulmageMendelsohnDecomposition 18 | 19 | 20 | def unique_list(liste): 21 | """ 22 | Return a list without duplicate entries. 23 | """ 24 | return list(set(liste)) 25 | 26 | 27 | class Model(): 28 | """ 29 | Definition of the main class to manage a Model object. 30 | 31 | The Model class : 32 | - performs the lexing stage (model.lexer), 33 | - makes preliminary computations (model.prelim), 34 | - contains a deepcopy tool (model.copy). 
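
    A minimal usage sketch (the file name "model.txt" and the function name "mymodel"
    are illustrative only):

        m = Model()
        m.lexer("model.txt")      # lexing of a Troll-format model file
        m.prelim()                # preliminary computations
        m.build_model("mymodel")  # analysis, block decomposition and code generation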
35 | 36 | """ 37 | 38 | def __repr__(self): 39 | return "Macroeconometric model : {}".format(self.name_mod) 40 | 41 | def __init__(self): 42 | """ 43 | Class builder 44 | """ 45 | # nom du modèle 46 | self.name_mod = "" 47 | 48 | # listes globales des noms des variables, des coefficients et des étiquettes d'équations 49 | self.name_endo_list = [] 50 | self.name_exo_list = [] 51 | self.name_policy_list = [] 52 | self.name_coeff_list = [] 53 | self.name_eq_list = [] 54 | 55 | # pour avoir l'ensemble des variables et créer la fonction du modèle 56 | self.coln_list = [] 57 | 58 | # équations et coefficients 59 | 60 | self.eq_text_dict = dict() # correspondance nom_eq : texte 61 | self.eq_obj_dict = dict() # pour utiliser l'objet equation identifiée par son nom 62 | self.eq_obj_dict_number = dict() 63 | self.coeffs_dict = dict() 64 | 65 | self.dicovar = dict() # dictionnaires globaux (variables, coefficients) 66 | self.dicoeff = dict() 67 | 68 | self.var_eq_dict = dict() # correspondance variable : équation 69 | 70 | # correspondances numéro d'équation : variables (exo / policy / endo / coeff) + endogènes retardées (pour l'analyse du modèle) 71 | self.eq_exo_dict = dict() 72 | self.eq_policy_dict = dict() 73 | self.eq_endo_dict = dict() 74 | self.eq_coeff_dict = dict() 75 | self.eq_endo_lag_dict = dict() 76 | 77 | self.symboles_dict = dict() # statut des symboles (pour les variantes) 78 | self.vall_set = set() # ensemble des variables 79 | 80 | # texte des fonctions 81 | self.fun_text = "" 82 | self.jac_text = "" 83 | 84 | self.n_eq = 0 # nomnbre d'équations 85 | # le modèle est construit lorsqu'on a écrit les fonctions de résolution 86 | self.is_built = False 87 | # le modèle est analysé lorsqu'on a construit l'ensemble des objets nécessaires 88 | self.is_analyzed = False 89 | 90 | def lexer(self, model_code_filename): 91 | """ 92 | Lexer of model code file. 93 | 94 | Argument : 95 | model_code_filename : name of the file (Troll format) 96 | 97 | Warning : the file is supposed to be valid. 98 | """ 99 | 100 | self.nom_fichier_code = model_code_filename 101 | str_file = "" 102 | 103 | # Lecture du fichier (en utf 8) 104 | with open(self.nom_fichier_code, "r", encoding="utf-8") as f: 105 | for line in f.readlines(): 106 | str_file += line.split("//")[0] 107 | 108 | # prétraitement 109 | str_file = str_file.strip() 110 | str_file = str_file.lower() 111 | str_file = str_file.replace("\t", " ") 112 | str_file = str_file.replace("\n", " ") 113 | 114 | # suppression des commentaires et simplification des espaces 115 | # expression régulière magique ! 
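        # The regular expression below strips C-style block comments (/* ... */),
        # including multi-line ones; the following sub(" +", " ", ...) then collapses
        # the runs of spaces left behind.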
116 | str_file2 = sub( 117 | r"/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/", " ", str_file) 118 | str_file2 = sub(" +", " ", str_file2) 119 | 120 | self.list_file = str_file2.strip().split(";") 121 | 122 | compte_eq = 0 # compte le nombre d'équations dans le fichier du modèle 123 | for content in self.list_file: 124 | 125 | indic = content.strip().split() 126 | 127 | if len(indic) > 0: 128 | 129 | if indic[0] == 'filemod': 130 | # recupère le nom du modele en fin de fichier 131 | self.name_mod = indic[1] 132 | 133 | elif indic[0] == 'addsym': 134 | # traitement des variables 135 | variables = content.split(",") 136 | 137 | for item in variables: 138 | var_list_raw = item.split() 139 | if var_list_raw[0] == 'addsym': 140 | self.affecte_var(var_list_raw[1:]) 141 | else: 142 | self.affecte_var(var_list_raw) 143 | 144 | elif indic[0] == "addeq": 145 | # traitement des équations 146 | eqns = content.split(",")[1:] 147 | for eq in eqns: 148 | if len(eq) > 0: 149 | eq_split = eq.strip().split(":") 150 | lhs = eq_split[0].strip() 151 | rhs = ":".join(eq_split[1:]) 152 | if lhs not in self.eq_text_dict.keys(): 153 | self.eq_text_dict[lhs] = [ 154 | compte_eq, rhs.strip()] 155 | self.eq_obj_dict[lhs] = Equation( 156 | name_eq=lhs, text_eq=rhs.strip(), num_eq=compte_eq) 157 | self.eq_exo_dict[compte_eq] = set() 158 | self.eq_policy_dict[compte_eq] = set() 159 | self.eq_endo_dict[compte_eq] = set() 160 | self.eq_endo_lag_dict[compte_eq] = set() 161 | self.eq_coeff_dict[compte_eq] = set() 162 | compte_eq += 1 163 | 164 | else: # cas de doublons dans les noms d'équations 165 | self.eq_text_dict[lhs + 166 | "_bis"] = [compte_eq, rhs.strip()] 167 | self.eq_obj_dict[lhs+"_bis"] = Equation( 168 | name_eq=lhs+"_bis", text_eq=rhs.strip(), num_eq=compte_eq) 169 | self.eq_exo_dict[compte_eq] = set() 170 | self.eq_policy_dict[compte_eq] = set() 171 | self.eq_endo_dict[compte_eq] = set() 172 | self.eq_endo_lag_dict[compte_eq] = set() 173 | self.eq_coeff_dict[compte_eq] = set() 174 | compte_eq += 1 175 | 176 | # on maintient les listes globales des noms des variables 177 | self.name_eq_list = list(self.eq_text_dict.keys()) 178 | self.n_eq = compte_eq 179 | 180 | self.name_endo_list = unique_list(self.name_endo_list) 181 | self.name_exo_list = unique_list(self.name_exo_list) 182 | self.name_coeff_list = unique_list(self.name_coeff_list) 183 | self.name_policy_list = unique_list(self.name_policy_list) 184 | 185 | # ensemble des variables 186 | self.vall_set = set(self.name_endo_list) | set( 187 | self.name_exo_list) | set(self.name_policy_list) 188 | 189 | return 190 | 191 | def affecte_var(self, tab): 192 | """ 193 | Allocation of the names of the variables from the header of the file. 194 | 195 | Argument : 196 | tab : an array of strings from the lexer. 197 | 198 | Warning : some exogenous variables may be definend implicitely or directly in the code. 199 | """ 200 | if tab[0] == "policy": 201 | self.name_policy_list = tab[1:] 202 | elif tab[0] == "exogenous": 203 | self.name_exo_list = tab[1:] 204 | elif tab[0] == "endogenous": 205 | self.name_endo_list = tab[1:] 206 | elif tab[0] == "coefficients": 207 | self.name_coeff_list = tab[1:] 208 | return 209 | 210 | def sort_name_var(self): 211 | """ 212 | Sort the lists of the variables and coefficients names. 
213 | 214 | """ 215 | 216 | self.name_endo_list.sort() 217 | self.name_exo_list.sort() 218 | self.name_coeff_list.sort() 219 | self.name_policy_list.sort() 220 | 221 | return 222 | 223 | def prelim(self): 224 | """ 225 | Preliminary computations and initialization of data structures for solving the model 226 | 227 | Argument : None 228 | 229 | """ 230 | # self.date = date 231 | 232 | # contient les lignes correspondant aux equations nettoyées du fichier source 233 | self.lines = [] 234 | for v in list(self.eq_text_dict.keys()): 235 | self.lines.append(v+':'+self.eq_text_dict[v][1]) 236 | 237 | # contient les équations sans leur étiquette 238 | self.lines_eq = [] 239 | for v in self.eq_text_dict.values(): 240 | self.lines_eq.append(v[1]) 241 | 242 | self.sort_name_var() 243 | 244 | print("The model has ", self.n_eq, " equations.\n") 245 | print(len(self.name_endo_list), " endogenous variables declared.\n") 246 | 247 | return 248 | 249 | ################################################### 250 | # Début des fonctions permettant la décomposition # 251 | # de Dulmage - Mendelsohn (BoJ) # 252 | ################################################### 253 | 254 | def setup(self): 255 | """ 256 | Setup of some useful data structures for the Dulmage-Mendelsohn decomposition 257 | 258 | Argument : None 259 | 260 | """ 261 | 262 | # Pour avoir la clé de passage entre le nom d'une variable endogène et un indice chiffré 263 | 264 | # correspondance nom : indice / identique à dicovar 265 | self.d = {v: i for i, v in enumerate(sorted(self.name_endo_list))} 266 | # correspondance indice : nom 267 | self.dr = {i: v for i, v in enumerate(sorted(self.name_endo_list))} 268 | 269 | return 270 | 271 | def analyze_structure(self): 272 | """ 273 | Analysis of the block structure of a model. 
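
        The Dulmage-Mendelsohn decomposition reorders the equations and the endogenous
        variables into a block-triangular structure, so that the model can be solved
        block by block, each block depending only on variables determined in earlier
        blocks or on exogenous variables.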
274 | 275 | Argument : None 276 | """ 277 | self.setup() 278 | self.construction_graph() 279 | self.dm_decomp() 280 | self.construct_gb() # optional 281 | self.construct_blocked_model() # optional 282 | 283 | def construction_graph(self): 284 | """ 285 | Computation of a graph of dependence between exogenous variables 286 | 287 | """ 288 | 289 | # le graphe est défini par sa matrice d'adjacence 290 | self.g = [[] for n in range(len(self.d))] 291 | 292 | for e in range(len(self.endoss)): 293 | for v in sorted(self.endoss[e]): 294 | self.g[e].append(Edge(e, self.d[v])) 295 | 296 | return 297 | 298 | def dm_decomp(self): 299 | """ 300 | Finest blocks decomposition 301 | """ 302 | 303 | # detect finest block structure (core routine) 304 | self.rss = [] 305 | self.css = [] 306 | DulmageMendelsohnDecomposition(self.g, self.rss, self.css) 307 | 308 | return 309 | 310 | def find_css_block(self, v): 311 | # find css_block in which variable v joins 312 | for k in range(len(self.rss)): 313 | if v in self.css[k]: 314 | return k 315 | 316 | def construct_gb(self): 317 | # construct graph of dm-decomped blocks' dependency 318 | nb = len(self.rss) 319 | self.endossb = [set() for n in range(nb)] 320 | for k in range(nb): 321 | for e in self.rss[k]: 322 | self.endossb[k] |= { 323 | self.find_css_block(v.dst) for v in self.g[e]} 324 | 325 | self.gb = [[] for n in range(nb)] 326 | for k in range(nb): 327 | for v in self.endossb[k]: 328 | self.gb[k].append(Edge(k, v)) 329 | 330 | def construct_blocked_model(self): 331 | # each block's endos set, exogs set, equation list, determined vars set 332 | self.endobss = [{v for r in rs for v in self.endoss[r]} 333 | for rs in self.rss] 334 | self.exogbss = [{v for r in rs for v in self.exogss[r]} 335 | for rs in self.rss] 336 | self.linebss = [[self.lines[r] for r in rs] for rs in self.rss] 337 | self.lineqbss = [[self.lines_eq[r] for r in rs] for rs in self.rss] 338 | self.determined = [{self.dr[c] for c in cs} for cs in self.css] 339 | 340 | def classify_vars(self, var): 341 | # pre = v joins predetermined blocks before var 342 | # sim = v joins simultaneously determined block with var 343 | # pos = v joins postdetermined blocks after var ('burasagari' in Japanese) 344 | # iso = v joins isolated block from var block 345 | v = self.d[var] 346 | simb = self.find_css_block(v) 347 | preb = getDst(self.gb, [simb]) 348 | posb = getSrc(self.gb, [simb]) 349 | preb.remove(simb) 350 | posb.remove(simb) 351 | pre = {self.dr[v] for b in preb for v in self.css[b]} 352 | pos = {self.dr[v] for b in posb for v in self.css[b]} 353 | sim = {self.dr[v] for v in self.css[simb]} 354 | iso = set(self.name_endo_list) - pre - pos - sim 355 | return [pre, sim, pos, iso] 356 | 357 | def build_modeltext_blocked(self): 358 | """ 359 | Compute the structure of blocks and the associated text of equations sets. 
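
        Fills self.fs and self.fsix: for each merged block n, a list
        [size of the block, equations of the block (text in fs, indices in fsix),
        set of endogenous variables determined in the block,
        set of variables treated as exogenous by the block].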
360 | 361 | """ 362 | self.endobss_left_eq = [{l.split(':')[0] 363 | for l in ls} for ls in self.linebss] 364 | self.vendo_left_eq = {v for vs in self.endobss_left_eq for v in vs} 365 | self.vexog_left_eq = self.vall_set - self.vendo_left_eq 366 | 367 | # calc exog2endo and endo2exog in each finest block 368 | nb = len(self.rss) 369 | self.solved = set() 370 | self.determined = [set() for n in range(nb)] 371 | self.exog2endo = [set() for n in range(nb)] 372 | self.endo2exog = [set() for n in range(nb)] 373 | for b in range(nb): 374 | self.determined[-b-1] = self.endobss[-b-1] - self.solved 375 | #print('determined : '+str(self.determined[-b-1])) 376 | # exog for EViews, mathematically endo determined in the block 377 | self.exog2endo[-b-1] = self.determined[-b-1] - \ 378 | self.endobss_left_eq[-b-1] 379 | #print('exog2endo : '+str(self.exog2endo[-b-1])) 380 | # endo for EViews, mathematically not endo determined in the block 381 | self.endo2exog[-b-1] = self.endobss_left_eq[-b-1] - \ 382 | self.determined[-b-1] 383 | #print('endo2exog : '+str(self.endo2exog[-b-1])) 384 | self.solved |= self.endobss[-b-1] 385 | #print('solved : '+str(self.solved)) 386 | 387 | # merge until endo2exog and exog2endo are empty 388 | self.exog2endo_mg = [[] for n in range(nb)] 389 | self.endo2exog_mg = [[] for n in range(nb)] 390 | self.rs_mg = [[] for n in range(nb)] 391 | n_mg, prev = 0, False 392 | for b in range(nb): 393 | # split at non empty exog2endo and V0 and Vinf 394 | if self.exog2endo[-b-1] or prev or b == 1 or b == nb-1: 395 | n_mg += 1 396 | self.exog2endo_mg[n_mg] = self.exog2endo[-b-1] 397 | self.endo2exog_mg[n_mg] = self.endo2exog[-b-1] 398 | self.rs_mg[n_mg] += self.rss[-b-1][::-1] # reverse 399 | prev = True if self.exog2endo[-b-1] else False 400 | self.exog2endo_mg = self.exog2endo_mg[0:n_mg+1] 401 | self.endo2exog_mg = self.endo2exog_mg[0:n_mg+1] 402 | self.rs_mg = self.rs_mg[0:n_mg+1] 403 | 404 | # merged blocked model for writing modeltext file 405 | self.fs = dict() 406 | print("The block decomposition has " + 407 | str(len(self.rs_mg)-2) + ' blocks.\n') 408 | self.fs[0] = [0, [], set(), set()] 409 | exo = set() 410 | for n in range(1, len(self.rs_mg)-1): 411 | self.fs[n] = [len(self.rs_mg[n]), [], set(), set()] 412 | for r in range(len(self.rs_mg[n])): 413 | exo = exo.union(self.fs[n-1][2]) 414 | self.fs[n][1].append(self.lines_eq[self.rs_mg[n][r]]) 415 | self.fs[n][2] = self.fs[n][2].union( 416 | self.endoss[self.rs_mg[n][r]])-exo 417 | self.fs[n][3] = self.fs[n][3].union( 418 | exo).union(self.exogss[self.rs_mg[n][r]]) 419 | exo = exo.union(self.fs[n][3]) 420 | 421 | # Ecriture de la décomposition par blocs à l'aide des indices des équations (et non du texte) 422 | 423 | self.fsix = dict() 424 | self.fsix[0] = [0, [], set(), set()] 425 | exo = set() 426 | for n in range(1, len(self.rs_mg)-1): 427 | # premier élément : taille du bloc 428 | self.fsix[n] = [len(self.rs_mg[n]), [], set(), set()] 429 | for r in range(len(self.rs_mg[n])): 430 | exo = exo.union(self.fsix[n-1][2]) 431 | # numéro des équations du bloc 432 | self.fsix[n][1].append(self.rs_mg[n][r]) 433 | self.fsix[n][2] = self.fsix[n][2].union( 434 | self.endoss[self.rs_mg[n][r]])-exo # endogènes du bloc 435 | self.fsix[n][3] = self.fsix[n][3].union(exo).union( 436 | self.exogss[self.rs_mg[n][r]]) # exogènes du bloc 437 | exo = exo.union(self.fsix[n][3]) 438 | 439 | ################################################ 440 | # Fin des fonctions de décomposition par blocs # 441 | ################################################ 442 | 
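    # Sketch of how a generated model module is typically consumed downstream.
    # Illustrative only: the module name "mymodel" and the objects x0, t, data and coeff
    # are assumptions, not defined in this file.
    #
    #   import mymodel as mod                        # file produced by write_model()
    #   from macronometrics.numsolve import newton
    #   for b in range(mod.n_blocks):
    #       endo_names = mod.mymodel_varendo(b)      # endogenous variables of block b
    #       f   = getattr(mod, "mymodel_%d" % b)     # block function F such that F(x) = 0
    #       jac = getattr(mod, "mymodel_%d_jac" % b) # its jacobian
    #       x, fx, err = newton(x0, f, jac, args=(t, data, coeff))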
443 | def build_model(self, function_name, dir="./modeles_python", prod="python"): 444 | """ 445 | Fill some useful data structures and performs the Dulmage - Mendelsohn block decomposition. 446 | * performs an analysis of the model 447 | * generates Python code 448 | 449 | Argument : aucun 450 | """ 451 | 452 | start_time = time() # tic 453 | 454 | analyze_model(self) # Les équations du modèle sont analysées ... 455 | 456 | self.eq_obj_dict_number = { 457 | self.eq_obj_dict[k].num_eq: self.eq_obj_dict[k] for k in self.eq_obj_dict.keys()} 458 | 459 | # Construction des dictionnaires liant variables et équations qui les contiennent 460 | # pour utiliser la décomposition D-M 461 | 462 | # dictionnaire nom de variable -> ens. d'equations 463 | self.varendo_eq_dict = { 464 | k: v for k, v in self.var_eq_dict.items() if k in self.name_endo_list} 465 | self.varexo_eq_dict = {k: v for k, v in self.var_eq_dict.items() if k in ( 466 | self.name_exo_list+self.name_policy_list)} 467 | 468 | endoss = [set()]*self.n_eq 469 | for k, v in self.varendo_eq_dict.items(): 470 | for i in v: 471 | endoss[i] = endoss[i] | {k} 472 | 473 | self.endoss = endoss 474 | 475 | exogss = [set()]*self.n_eq 476 | for k, v in self.varexo_eq_dict.items(): 477 | for i in v: 478 | exogss[i] = exogss[i] | {k} 479 | 480 | self.exogss = exogss 481 | 482 | self.analyze_structure() # Analyse la structure en blocs du modèle 483 | 484 | self.build_modeltext_blocked() # calcul des blocs 485 | 486 | elapsed_time = time() - start_time # toc 487 | 488 | print(f"The block decomposition took {elapsed_time:.3f} seconds.\n") 489 | 490 | # pour stocker le texte de chaque bloc en vue de produire un fichier python 491 | start_time = time() # tic 492 | 493 | liste_string_func_block = [] 494 | 495 | for i in range(len(self.fsix)): 496 | 497 | if self.fsix[i][0] != 0: 498 | 499 | # liste des équations du bloc 500 | list_eq_block = self.fsix[i][1] 501 | # liste des endogènes du bloc 502 | list_endo_block = list(self.fsix[i][2]) 503 | # ensemble des exogènes du bloc 504 | list_exo_block = list(self.fsix[i][3]) 505 | 506 | block = Block(self, list_eq_block, 507 | list_endo_block, list_exo_block, i) 508 | 509 | block.translate_block_python() 510 | 511 | liste_string_func_block.append( 512 | [block.fun_text, list(self.fsix[i][2]), block.jac_text]) 513 | # on ajoute le jacobien en dernière position pour ne pas modifier les programmes existants / modif BF 514 | 515 | self.model_fun_block = liste_string_func_block 516 | 517 | elapsed_time = time() - start_time # toc 518 | 519 | print(f"Building the function took {elapsed_time:.3f} seconds.\n") 520 | 521 | # Correspondance symbole -> statut (pour le calage et le calcul des variantes) 522 | 523 | for item in self.name_endo_list: 524 | self.symboles_dict[item] = "endogenous" 525 | 526 | for item in self.name_exo_list: 527 | self.symboles_dict[item] = "exogenous" 528 | 529 | for item in self.name_policy_list: 530 | self.symboles_dict[item] = "policy" 531 | 532 | for item in self.name_coeff_list: 533 | self.symboles_dict[item] = "coefficient" 534 | 535 | self.is_built = True # Le modèle est désormais construit. 
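        # Code generation: write_model() emits <function_name>.py (one function and one
        # jacobian per block, plus the variable/coefficient dictionaries), and
        # write_yaml_file() dumps the analyzed model structure to <function_name>.yaml,
        # both under `dir`.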
536 | 537 | # if prod == "python": 538 | # self.write_model(function_name, dir) 539 | # write_yaml_file(self, function_name+".yaml", dir) 540 | # else: 541 | # self.write_model_cython(function_name, dir='./modeles_cython') 542 | # write_yaml_file(self, function_name+".yaml", dir='./modeles_cython') 543 | 544 | self.write_model(function_name, dir) 545 | write_yaml_file(self, function_name+".yaml", dir) 546 | 547 | return 548 | 549 | def copy(self): 550 | """ 551 | Return a copy (without reference / deepcopy) of the model. 552 | """ 553 | 554 | return deepcopy(self) 555 | 556 | def write_model(self, function_name, dir="./modeles_python"): 557 | """ 558 | Creation of a Python file containing the text of the model functions. 559 | 560 | Arguments : 561 | =========== 562 | model : a preliminary analyzed model 563 | function_name : name of the function (string) 564 | dir : name of the directory (string) 565 | 566 | Result : 567 | ======== 568 | * A .py file with the functions (for each block) and their jacobians. 569 | 570 | """ 571 | 572 | if (not self.is_built): 573 | 574 | raise ValueError("The model is not built.") 575 | 576 | n_blocks = len(self.model_fun_block) 577 | 578 | # Structures de données pour le traitement d'un bloc 579 | 580 | list_block_fun = [] 581 | list_block_varendo = [] 582 | list_block_jac = [] 583 | list_block_dicoendo = [] 584 | 585 | # sous la forme [ texte , var_endo , texte_jac ] 586 | for item in self.model_fun_block: 587 | list_block_fun.append(item[0]) 588 | list_block_varendo.append(item[1]) 589 | list_block_jac.append(item[2]) 590 | 591 | # pour chaque bloc, on construit un dictionnaire de correspondance 592 | # endogène -> numero 593 | nom_col_endo = {} 594 | # on considère les endogènes du bloc courant 595 | for endocourant in item[1]: 596 | # correspondance nom / indice 597 | nom_col_endo[endocourant] = self.dicovar[endocourant] 598 | 599 | list_block_dicoendo.append(nom_col_endo) 600 | 601 | # texte de la fonction déterminant les endogènes pour chaque bloc 602 | 603 | text_varendo = 'def ' + function_name.strip() + '_varendo(num_block): \n' 604 | text_varendo += '\t"""\n \tFonction produite automatiquement pour la résolution du modèle \n\n' 605 | text_varendo += '\tDétermine les endogènes associées à chaque bloc \n' 606 | text_varendo += '\t\n\tArguments : \n' 607 | text_varendo += '\t\tnum_block : numéro du bloc (décomposition de Dulmage-Mendelsohn) \n' 608 | text_varendo += '\t\n\t""" \n' 609 | text_varendo += '\tlist_block_varendo = [' 610 | 611 | for item in list_block_varendo: 612 | text_varendo += str(item) + " , \\\n\t\t" 613 | 614 | text_varendo += "] \n" 615 | 616 | text_varendo += '\treturn list_block_varendo[num_block] \n' 617 | 618 | # texte de la fonction donnant la correspondance bloc -> endogènes 619 | 620 | text_dicoendo = 'def ' + function_name.strip() + '_dicoendo(num_block): \n' 621 | text_dicoendo += '\t"""\n \tFonction produite automatiquement pour la résolution du modèle \n\n' 622 | text_dicoendo += '\tDétermine les correspondances des endogènes associées à chaque bloc \n' 623 | text_dicoendo += '\t\n\tArguments : \n' 624 | text_dicoendo += '\t\tnum_block : numéro du bloc (décomposition de Dulmage-Mendelsohn) \n' 625 | text_dicoendo += '\t\n\t""" \n' 626 | text_dicoendo += '\tlist_block_dicoendo = [' 627 | 628 | for item in list_block_dicoendo: 629 | text_dicoendo += str(item) + " , \\\n\t\t" 630 | 631 | text_dicoendo += "] \n" 632 | 633 | text_dicoendo += '\treturn list_block_dicoendo[num_block] \n' 634 | 635 | # préambule de la 
fonction associée à un bloc 636 | 637 | text_fun_pre = 'def ' + function_name.strip() 638 | text_fun = '(x,t,data,coeff): \n' 639 | text_fun += '\t"""\n\tFonction produite automatiquement pour la résolution du modèle \n\n' 640 | text_fun += '\tBloc représenté par la fonction F telle que F(x)=0 \n' 641 | text_fun += '\t\n\tArguments : \n' 642 | text_fun += '\t\tx : vecteur de variables endogènes contemporaines \n' 643 | text_fun += '\t\tt : date courante (dans le tableau de données) \n' 644 | text_fun += '\t\tdata : tableau numpy contenant les données du modèle \n' 645 | text_fun += '\t\n\t""" \n' 646 | 647 | text_jac_pre = 'def ' + function_name.strip() 648 | 649 | text_jac = '_jac(x,t,data,coeff): \n' 650 | text_jac += '\t"""\n\tFonction produite automatiquement pour la résolution du modèle \n\n' 651 | text_jac += '\tJacobienne de la fonction associée au bloc \n' 652 | text_jac += '\t\n\tArguments : \n' 653 | text_jac += '\t\tx : vecteur de variables endogènes contemporaines \n' 654 | text_jac += '\t\tt : date courante (dans le tableau de données) \n' 655 | text_jac += '\t\tdata : tableau numpy contenant les données du modèle \n' 656 | text_jac += '\t\n\t""" \n' 657 | 658 | # Ecriture du fichier 659 | 660 | with open(dir+"/" + function_name+".py", "w+", encoding='utf-8') as out_file: 661 | 662 | out_file.write("import numpy as np\n") 663 | out_file.write("from math import log, exp\n\n") 664 | 665 | out_file.write("import numpy as np\n\n") 666 | 667 | out_file.write("# Nombre de blocs du modèle\n") 668 | out_file.write("n_blocks = " + str(n_blocks) + " \n\n") 669 | out_file.write("# Liste des noms de variables\n") 670 | out_file.write("coln = " + str(self.coln_list) + " \n\n") 671 | out_file.write( 672 | "# Dictionnaire de correspondance des noms de variables\n") 673 | out_file.write("dicovar = " + str(self.dicovar) + " \n\n") 674 | out_file.write("# Liste des noms de coefficients\n") 675 | out_file.write("coeffs = " + str(self.name_coeff_list) + " \n\n") 676 | # out_file.write("# Dictionnaire de correspondance des noms de coefficients\n") 677 | # out_file.write("dicoeff = " + str(self.dicoeff)+ " \n\n") 678 | 679 | out_file.write(text_varendo) 680 | out_file.write("\n") 681 | 682 | out_file.write(text_dicoendo) 683 | out_file.write("\n") 684 | 685 | for i, item in enumerate(list_block_fun): 686 | out_file.write(text_fun_pre+"_"+str(i)+text_fun+item) 687 | out_file.write('\treturn f') 688 | out_file.write("\n\n") 689 | 690 | out_file.write(text_jac_pre+"_"+str(i) + 691 | text_jac+list_block_jac[i]) 692 | out_file.write('\treturn df') 693 | out_file.write("\n\n") 694 | 695 | return 696 | 697 | # def write_model_cython(self, function_name, dir="./modeles_cython"): 698 | # """ 699 | # Creation of a Cython file containing the text of the model functions. 700 | 701 | # Arguments : 702 | # =========== 703 | # model : a preliminary analyzed model 704 | # function_name : name of the function (string) 705 | # dir : name of the directory (string) 706 | 707 | # Result : 708 | # ======== 709 | # * A .pyx file with the functions (for each block) and their jacobians. 
710 | 711 | # """ 712 | 713 | # if (not self.is_built): 714 | 715 | # raise ValueError("The model is not built.") 716 | 717 | # n_blocks = len(self.model_fun_block) 718 | 719 | # # Structures de données pour le traitement d'un bloc 720 | 721 | # list_block_fun = [] 722 | # list_block_varendo = [] 723 | # list_block_jac = [] 724 | # list_block_dicoendo = [] 725 | 726 | # # sous la forme [ texte , var_endo , texte_jac ] 727 | # for item in self.model_fun_block: 728 | # list_block_fun.append(item[0]) 729 | # list_block_varendo.append(item[1]) 730 | # list_block_jac.append(item[2]) 731 | 732 | # # pour chaque bloc, on construit un dictionnaire de correspondance 733 | # # endogène -> numero 734 | # nom_col_endo = {} 735 | # # on considère les endogènes du bloc courant 736 | # for endocourant in item[1]: 737 | # # correspondance nom / indice 738 | # nom_col_endo[endocourant] = self.dicovar[endocourant] 739 | 740 | # list_block_dicoendo.append(nom_col_endo) 741 | 742 | # # texte de la fonction déterminant les endogènes pour chaque bloc 743 | 744 | # text_varendo = 'cpdef list ' + function_name.strip() + '_varendo(int num_block): \n' 745 | # text_pyd = 'cpdef list ' + function_name.strip() + '_varendo(int num_block)\n\n' 746 | # text_varendo += '\t"""\n \tFonction produite automatiquement pour la résolution du modèle \n\n' 747 | # text_varendo += '\tDétermine les endogènes associées à chaque bloc \n' 748 | # text_varendo += '\t\n\tArguments : \n' 749 | # text_varendo += '\t\tnum_block : numéro du bloc (décomposition de Dulmage-Mendelsohn) \n' 750 | # text_varendo += '\t\n\t""" \n' 751 | # text_varendo += '\tcdef list list_block_varendo = [' 752 | 753 | # for item in list_block_varendo: 754 | # text_varendo += str(item) + " , \\\n\t\t" 755 | 756 | # text_varendo += "] \n" 757 | 758 | # text_varendo += '\treturn list_block_varendo[num_block] \n' 759 | 760 | # # texte de la fonction donnant la correspondance bloc -> endogènes 761 | 762 | # text_dicoendo = 'cpdef dict ' + \ 763 | # function_name.strip() + '_dicoendo(int num_block): \n' 764 | # text_pyd += 'cpdef dict ' + \ 765 | # function_name.strip() + '_dicoendo(int num_block)\n\n' 766 | # text_dicoendo += '\t"""\n \tFonction produite automatiquement pour la résolution du modèle \n\n' 767 | # text_dicoendo += '\tDétermine les correspondances des endogènes associées à chaque bloc \n' 768 | # text_dicoendo += '\t\n\tArguments : \n' 769 | # text_dicoendo += '\t\tnum_block : numéro du bloc (décomposition de Dulmage-Mendelsohn) \n' 770 | # text_dicoendo += '\t\n\t""" \n' 771 | # text_dicoendo += '\tcdef list list_block_dicoendo = [' 772 | 773 | # for item in list_block_dicoendo: 774 | # text_dicoendo += str(item) + " , \\\n\t\t" 775 | 776 | # text_dicoendo += "] \n" 777 | 778 | # text_dicoendo += '\treturn list_block_dicoendo[num_block] \n' 779 | 780 | # # préambule de la fonction associée à un bloc 781 | 782 | # text_fun_pre = 'cpdef np.ndarray[DTYPE_t, ndim=1] ' + \ 783 | # function_name.strip() 784 | # text_fun = '(np.ndarray[DTYPE_t, ndim=1] x, int t, np.ndarray[DTYPE_t, ndim=2] data, dict coeff): \n' 785 | # text_fun += '\t"""\n\tFonction produite automatiquement pour la résolution du modèle \n\n' 786 | # text_fun += '\tBloc représenté par la fonction F telle que F(x)=0 \n' 787 | # text_fun += '\t\n\tArguments : \n' 788 | # text_fun += '\t\tx : vecteur de variables endogènes contemporaines \n' 789 | # text_fun += '\t\tt : date courante (dans le tableau de données) \n' 790 | # text_fun += '\t\tdata : tableau numpy contenant les données du modèle \n' 
791 | # text_fun += '\t\n\t""" \n' 792 | 793 | # text_jac_pre = 'cpdef np.ndarray[DTYPE_t, ndim=2] ' + \ 794 | # function_name.strip() 795 | # text_jac = '_jac(np.ndarray[DTYPE_t, ndim=1] x, int t, np.ndarray[DTYPE_t, ndim=2] data, dict coeff): \n' 796 | # text_jac += '\t"""\n\tFonction produite automatiquement pour la résolution du modèle \n\n' 797 | # text_jac += '\tJacobienne de la fonction associée au bloc \n' 798 | # text_jac += '\t\n\tArguments : \n' 799 | # text_jac += '\t\tx : vecteur de variables endogènes contemporaines \n' 800 | # text_jac += '\t\tt : date courante (dans le tableau de données) \n' 801 | # text_jac += '\t\tdata : tableau numpy contenant les données du modèle \n' 802 | # text_jac += '\t\n\t""" \n' 803 | 804 | # # Ecriture du fichier 805 | 806 | # with open(dir+"/" + function_name+".pyx", "w+", encoding='utf-8') as out_file: 807 | 808 | # out_file.write("from libc.math cimport log, exp\n\n") 809 | # out_file.write("import numpy as np\n") 810 | # out_file.write("cimport numpy as np\n\n") 811 | 812 | # out_file.write("# Nombre de blocs du modèle\n") 813 | # out_file.write( 814 | # "cpdef int " + function_name.strip() + "_n_blocks(): \n") 815 | # out_file.write("\treturn " + str(n_blocks) + " \n\n") 816 | 817 | # out_file.write("# Liste des noms de variables\n") 818 | # out_file.write("cpdef list " + 819 | # function_name.strip() + "_coln(): \n") 820 | # out_file.write("\tcdef list coln = " + 821 | # str(self.coln_list) + " \n\n") 822 | # out_file.write("\treturn coln \n\n") 823 | 824 | # out_file.write( 825 | # "# Dictionnaire de correspondance des noms de variables\n") 826 | # out_file.write("cpdef dict " + 827 | # function_name.strip() + "_dicovar():\n") 828 | # out_file.write("\tcdef dict dicovar = " + 829 | # str(self.dicovar) + " \n\n") 830 | # out_file.write("\treturn dicovar \n\n") 831 | 832 | # out_file.write("# Liste des noms de coefficients\n") 833 | # out_file.write("cpdef list " + 834 | # function_name.strip() + "_coeffs():\n") 835 | # out_file.write("\tcdef list coeffs = " + 836 | # str(self.name_coeff_list) + " \n") 837 | # out_file.write("\treturn coeffs \n\n") 838 | 839 | # out_file.write(text_varendo) 840 | # out_file.write("\n") 841 | 842 | # out_file.write(text_dicoendo) 843 | # out_file.write("\n") 844 | 845 | # for i, item in enumerate(list_block_fun): 846 | # out_file.write(text_fun_pre+"_"+str(i)+text_fun + item + "\n\n") 847 | # out_file.write("\tcdef np.ndarray[DTYPE_t, ndim=1] res = f\n\n") 848 | # out_file.write("\treturn res") 849 | # out_file.write("\n\n") 850 | 851 | # out_file.write(text_jac_pre+"_"+str(i) + 852 | # text_jac+list_block_jac[i] + "\n\n") 853 | # out_file.write("\tcdef np.ndarray[DTYPE_t, ndim=2] res = df\n\n") 854 | # out_file.write("\treturn res") 855 | # out_file.write("\n\n") 856 | 857 | # with open(dir+"/" + function_name+".pxd", "w+", encoding='utf-8') as out_file_2: 858 | 859 | # out_file_2.write("import numpy as np\ncimport numpy as np\n\n") 860 | 861 | # out_file_2.write("ctypedef np.double_t DTYPE_t\n\n") 862 | 863 | # out_file_2.write( 864 | # "cpdef int " + function_name.strip() + "_n_blocks() \n\n") 865 | 866 | # out_file_2.write("cpdef list " + 867 | # function_name.strip() + "_coln() \n\n") 868 | 869 | # out_file_2.write("cpdef dict " + 870 | # function_name.strip() + "_dicovar()\n") 871 | 872 | # out_file_2.write("cpdef list " + 873 | # function_name.strip() + "_coeffs()\n\n") 874 | 875 | # out_file_2.write(text_pyd) 876 | 877 | # for i, item in enumerate(list_block_fun): 878 | # out_file_2.write( 879 | # 
text_fun_pre+"_"+str(i)+'(np.ndarray[DTYPE_t, ndim=1] x, int t, np.ndarray[DTYPE_t, ndim=2] data, dict coeff)\n\n') 880 | # out_file_2.write(text_jac_pre+"_"+str(i) + 881 | # '_jac(np.ndarray[DTYPE_t, ndim=1] x, int t, np.ndarray[DTYPE_t, ndim=2] data, dict coeff)\n\n') 882 | 883 | # with open(dir+"/setup_" + function_name+".py", "w+", encoding='utf-8') as out_file_3: 884 | 885 | # out_file_3.write("from setuptools import setup\n\n" + 886 | # "from Cython.Build import cythonize\n\n" + 887 | # "from sys import version_info\n\n" + 888 | # "import numpy\n\n" + 889 | # "setup(\n" + 890 | # "ext_modules=cythonize('"+function_name+".pyx',force=True, compiler_directives={'language_level' : version_info[0]}),\n" + 891 | # 'include_dirs=[numpy.get_include()],\n' + 892 | # ')' 893 | # ) 894 | 895 | # # import os 896 | 897 | # # os.chdir(dir) 898 | 899 | # # os.system("$ python setup_"+function_name+".py build_ext --inplace") 900 | 901 | # return 902 | 903 | -------------------------------------------------------------------------------- /macronometrics/numsolve.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | import numpy as np 6 | import scipy as sp 7 | import scipy.optimize as spo 8 | # from numba import jit 9 | 10 | 11 | def ggn11(xinit,fun,jac,args,ftol=1e-10,itermax=1000,alphamin=0.05) : 12 | """ 13 | Implémentation de la variante de la méthode de Newton 14 | Grau, Grau-Sanchez et Noguera 2011 15 | x0 : valeur initiale 16 | fun : fonction 17 | jac : jacobienne 18 | args : autres arguments de la fonction 19 | ftol : critère de convergence 20 | itermax : limite d'appels à la fonction 21 | alphamin : paramètre de relaxation 22 | """ 23 | 24 | x0 = xinit # point courant 25 | f0 = fun(x0,*args) # évaluation de la fonction au point courant 26 | maxf0 = np.linalg.norm(f0,np.inf) # norme infinie de f(x0) 27 | 28 | decreasing = True # pour un critère de décroissance globale 29 | iterate = 0 30 | 31 | while (maxf0 > ftol) & decreasing & (iterate < itermax) : 32 | iterate = iterate + 1 33 | 34 | J0 = jac(x0,*args) # F'(x0) jacobienne au point courant 35 | IJ0f0 = np.linalg.inv(J0) @ f0 # F'(x0)^{-1} * F(x0) 36 | y1 = x0 - IJ0f0 # Etape de l'algorithme de Newton usuel 37 | 38 | Jy1 = jac(y1,*args) # F'(y1) 39 | IJy1 = np.linalg.inv(Jy1) 40 | IJy1f0 = IJy1 @ f0 # F'(y1)^{-1} * F(x0) 41 | z1 = x0 - 0.5*(IJ0f0 + IJy1f0) # méthode de Newton harmonique 42 | 43 | fz1 = fun(z1,*args) # F(z1) 44 | IJy1fz1 = IJy1 @ fz1 # F'(y1)^{-1} * F(z1) 45 | x1 = z1 - IJy1fz1 # Méthode de Newton modifiée d'ordre 5 46 | 47 | try : 48 | 49 | f0 = fun(x1,*args) 50 | maxf1 = np.linalg.norm(f0,np.inf) 51 | 52 | # mise à jour 53 | if (maxf1 < maxf0) : 54 | maxf0 = maxf1 55 | x0 = x1 56 | 57 | else : 58 | maxf1 = np.linalg.norm(fz1,np.inf) 59 | 60 | if (maxf1 < maxf0) : 61 | maxf0 = maxf1 62 | x0 = z1 63 | f0 = fz1 # on se replie sur la méthode harmonique 64 | 65 | else: 66 | fy1 = fun(y1,*args) 67 | maxf1 = np.linalg.norm(fy1,np.inf) 68 | 69 | if (maxf1 < maxf0) : 70 | maxf0 = maxf1 71 | x0 = y1 72 | f0 = fy1 # on se replie sur la méthode de Newton 73 | 74 | else : 75 | decreasing = False 76 | 77 | except : 78 | 79 | # relaxation / algorithme de Bank - Rose (1981) 80 | alpha = 0.9 81 | 82 | while (alpha > alphamin) : 83 | 84 | try : 85 | 86 | x1 = x0 - alpha * IJ0f0 # méthode de Newton 87 | f1 = fun(x1,*args) 88 | alpha = alphamin 89 | 90 | except : 91 | 92 | alpha = 0.5* alpha 93 | 94 | maxf1 = np.linalg.norm(f1,np.inf) 95 | 96 | 
if (maxf1 < maxf0) : 97 | maxf0 = maxf1 98 | x0 = x1 99 | f0 = f1 100 | 101 | else : 102 | decreasing = False 103 | 104 | 105 | return (x0,f0,maxf0) 106 | 107 | def newton(xinit,fun,jac,args,ftol=1e-10,itermax=1000,alphamin=0.05) : 108 | """ 109 | Implémentation de la méthode de Newton (avec relaxation) 110 | x0 : valeur initiale 111 | fun : fonction 112 | jac : jacobienne 113 | args : autres arguments de la fonction 114 | ftol : critère de convergence 115 | itermax : limite d'appels à la fonction 116 | alphamin : paramètre de relaxation 117 | """ 118 | 119 | x0 = xinit # point courant 120 | f0 = fun(x0,*args) # évaluation de la fonction au point courant 121 | maxf0 = np.linalg.norm(f0,np.inf) # norme infinie de f(x0) 122 | 123 | decreasing = True # pour un critère de décroissance globale 124 | iterate = 0 125 | 126 | while (maxf0 > ftol) & decreasing & (iterate < itermax) : 127 | iterate = iterate + 1 128 | 129 | J0 = jac(x0,*args) # F'(x0) jacobienne au point courant 130 | IJ0f0 = np.linalg.inv(J0) @ f0 # F'(x0)^{-1} * F(x0) 131 | y1 = x0 - IJ0f0 # Etape de l'algorithme de Newton usuel 132 | 133 | 134 | try : 135 | 136 | 137 | fy1 = fun(y1,*args) 138 | maxf1 = np.linalg.norm(fy1,np.inf) 139 | 140 | if (maxf1 < maxf0) : 141 | maxf0 = maxf1 142 | x0 = y1 143 | f0 = fy1 # on se replie sur la méthode de Newton 144 | 145 | else : 146 | decreasing = False 147 | 148 | except : 149 | 150 | # relaxation / algorithme de Bank - Rose (1981) 151 | alpha = 0.9 152 | 153 | while (alpha > alphamin) : 154 | 155 | try : 156 | 157 | x1 = x0 - alpha * IJ0f0 # méthode de Newton 158 | f1 = fun(x1,*args) 159 | alpha = alphamin 160 | 161 | except : 162 | 163 | alpha = 0.5* alpha 164 | 165 | maxf1 = np.linalg.norm(f1,np.inf) 166 | 167 | if (maxf1 < maxf0) : 168 | maxf0 = maxf1 169 | x0 = x1 170 | f0 = f1 171 | 172 | else : 173 | decreasing = False 174 | 175 | return (x0,f0,maxf0) 176 | 177 | def sp_root(xinit,fun,jac,args,ftol=1e-10,itermax=1000) : 178 | """ 179 | Encapsulation de la méthode standard root de scipy 180 | x0 : valeur initiale 181 | fun : fonction 182 | jac : jacobienne 183 | args : autres arguments de la fonction 184 | ftol : critère de convergence 185 | itermax : limite d'appels à la fonction 186 | alphamin : paramètre de relaxation 187 | """ 188 | 189 | res_spo = spo.root(fun,xinit,args,jac=jac,tol=ftol) 190 | return (res_spo.x, res_spo.fun, np.linalg.norm(res_spo.fun,np.inf)) 191 | 192 | def newton_alt(xinit,fun,jac,args,ftol=1e-10,itermax=1000,alphamin=0.05) : 193 | """ 194 | Implémentation de la méthode de Newton (avec relaxation) 195 | Changement dans le calcul des inverses matricielles 196 | x0 : valeur initiale 197 | fun : fonction 198 | jac : jacobienne 199 | args : autres arguments de la fonction 200 | ftol : critère de convergence 201 | itermax : limite d'appels à la fonction 202 | alphamin : paramètre de relaxation 203 | """ 204 | 205 | x0 = xinit # point courant 206 | f0 = fun(x0,*args) # évaluation de la fonction au point courant 207 | maxf0 = np.linalg.norm(f0,np.inf) # norme infinie de f(x0) 208 | 209 | decreasing = True # pour un critère de décroissance globale 210 | iterate = 0 211 | 212 | while (maxf0 > ftol) & decreasing & (iterate < itermax) : 213 | iterate = iterate + 1 214 | 215 | J0 = jac(x0,*args) # F'(x0) jacobienne au point courant 216 | IJ0f0 = np.linalg.solve(J0, f0) # F'(x0)^{-1} * F(x0) 217 | y1 = x0 - IJ0f0 # Etape de l'algorithme de Newton usuel 218 | 219 | 220 | try : 221 | 222 | fy1 = fun(y1,*args) 223 | maxf1 = np.linalg.norm(fy1,np.inf) 224 | 225 | if (maxf1 < 
maxf0) : 226 | maxf0 = maxf1 227 | x0 = y1 228 | f0 = fy1 # on se replie sur la méthode de Newton 229 | 230 | else : 231 | decreasing = False 232 | 233 | except : 234 | 235 | # relaxation / algorithme de Bank - Rose (1981) 236 | alpha = 0.9 237 | 238 | while (alpha > alphamin) : 239 | 240 | try : 241 | 242 | x1 = x0 - alpha * IJ0f0 # méthode de Newton 243 | f1 = fun(x1,*args) 244 | alpha = alphamin 245 | 246 | except : 247 | 248 | alpha = 0.5* alpha 249 | 250 | maxf1 = np.linalg.norm(f1,np.inf) 251 | 252 | if (maxf1 < maxf0) : 253 | maxf0 = maxf1 254 | x0 = x1 255 | f0 = f1 256 | 257 | else : 258 | decreasing = False 259 | 260 | 261 | return (x0,f0,maxf0) 262 | -------------------------------------------------------------------------------- /macronometrics/stronglyConnectedComponents_kosaraju.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from .graph import Edge, reverse 3 | 4 | 5 | def visit1(g, v, visited, order, k): 6 | visited[v] = True 7 | for e in g[v]: 8 | if not visited[e.dst]: 9 | visit1(g, e.dst, visited, order, k) 10 | order[k[0]] = v 11 | k[0] += 1 12 | 13 | 14 | def visit2(g, v, visited, scc, k): 15 | visited[v] = True 16 | for e in g[v]: 17 | if not visited[e.dst]: 18 | visit2(g, e.dst, visited, scc, k) 19 | scc[k].append(v) 20 | 21 | # Kosaraju's algorithm 22 | 23 | 24 | def stronglyConnectedComponents(g, scc): 25 | scc[:] = [] 26 | n = len(g) 27 | 28 | visited = [False]*n 29 | order = [-1]*n 30 | k = [0] 31 | for v in range(n): 32 | if not visited[v]: 33 | visit1(g, v, visited, order, k) 34 | 35 | g = reverse(g) 36 | k = -1 37 | visited = [False]*n 38 | for u in range(n): 39 | if not visited[order[n-1-u]]: 40 | k += 1 41 | scc.append([]) 42 | visit2(g, order[n-1-u], visited, scc, k) 43 | 44 | return scc 45 | -------------------------------------------------------------------------------- /macronometrics/symbolic.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | 6 | 7 | class Block(): 8 | """ 9 | Classe définissant les outils de construction d'un bloc d'un modèle. 10 | 11 | Le modèle doit être analysé auparavant avec les outils de la classe Analyse. 
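
    Sketch of a typical use (mirroring the calls made in Model.build_model):

        block = Block(model, list_eq_block, list_endo_block, list_exo_block, n_block)
        block.translate_block_python()
        # block.fun_text and block.jac_text now hold the generated Python source of the
        # block function and of its jacobian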
12 | 13 | ATTENTION : pour le moment, on considère le texte des équations comme étant 14 | syntaxiquement correct (au sens de Troll) 15 | """ 16 | 17 | def __init__(self, model, list_eq_block, list_endo_block, list_exo_block, n_block): 18 | """ 19 | Constructeur de classe Symbolic 20 | 21 | Permet la traduction d'un bloc du modèle 22 | 23 | Paramètres 24 | ========== 25 | 26 | list_eq_block : ordre des équations dans le block 27 | set_eq : ensemble d'arbres syntaxiques 28 | set_diff : ensemble d'arbres dérivés 29 | dict_eq_var : dictionnaire de correspondance equation -> nom de variable 30 | endo : nom des endogènes 31 | exo : nom des exogènes et des policy issues de la lecture du modèle 32 | dicovar : correspondance variable -> indice globale (structure de données) 33 | coeff : nom des coefficients issus de la lecture du modèle 34 | dicoeff : correspondance coefficient -> indice 35 | 36 | """ 37 | self.list_eq_block = list_eq_block 38 | self.list_endo_block = list_endo_block 39 | self.list_exo_block = list_exo_block 40 | 41 | self.model = model 42 | 43 | self.block_eq_obj_list = [self.model.eq_obj_dict_number[i] 44 | for i in self.list_eq_block] 45 | 46 | self.n_block = n_block 47 | 48 | def translate_block_python(self): 49 | """ 50 | Permet la traduction d'un modèle en python. 51 | Le modèle doit être analysé avec les outils de la bibliothèque Analyse. 52 | 53 | Evalue l'ensemble des équations 54 | -> Construit le corps du texte d'une fonction f des endogènes dont la valeur cible est 55 | solution de f(x) = 0 sous forme de chaine de caractères 56 | -> Construit également la jacobienne 57 | 58 | ATTENTION : pour le moment, on considère le texte des équations comme étant 59 | syntaxiquement correct (au sens de Troll) 60 | 61 | Paramètres 62 | ========== 63 | 64 | model : un modèle de la classe Modele préalablement analysé 65 | 66 | """ 67 | 68 | # unpack 69 | name_coeff_list = self.model.name_coeff_list 70 | dicovar = self.model.dicovar 71 | 72 | def run_instruction(t, courant, vari=None): 73 | # """ 74 | # Règles de production (evaluation) pour une équation. 75 | 76 | # On suppose que le modèle est correctement analysé. 77 | 78 | # Le but est de produire une chaine de caractères (à passer à la fonction eval de Python 79 | # ou à enregistrer dans une fonction). 80 | 81 | # Arguments : 82 | # * t : arbre syntaxique 83 | # * courant : date courante (pour les retards) 84 | # -> on démarre à courant = 0 et on doit disposer des endogènes jusqu'à l'instant précédent. 
85 | # * vari : nom de variable (pour les dérivées partielles) 86 | # """ 87 | 88 | if t.data == 'define_eq': # un seul signe = par équation 89 | leftpart, rightpart = t.children 90 | 91 | t1 = run_instruction(leftpart, courant, vari) 92 | t2 = run_instruction(rightpart, courant, vari) 93 | 94 | if t1 == "0": 95 | if t2 == "0": 96 | return "0" 97 | elif rightpart.data == 'par': 98 | return "-"+t2 99 | else: 100 | return "-("+t2+")" 101 | elif t2 == "0": 102 | if leftpart.data == 'par': 103 | return t1 104 | else: 105 | return '('+t1+')' 106 | elif rightpart.data == 'par': 107 | if leftpart.data == 'par': 108 | return t1+'-'+t2 109 | else: 110 | return '('+t1+')-'+t2 111 | elif leftpart.data == "par": 112 | return t1+'-('+t2+')' 113 | else: 114 | return '('+t1+')-('+t2+')' 115 | 116 | elif t.data == 'add': 117 | leftpart, rightpart = t.children 118 | t1 = run_instruction(leftpart, courant, vari) 119 | t2 = run_instruction(rightpart, courant, vari) 120 | 121 | if t1 == "0": 122 | if t2 == "0": 123 | return "0" 124 | else: 125 | return t2 126 | elif t2 == "0": 127 | return t1 128 | else: 129 | if leftpart.children == 'par': 130 | if rightpart.children == 'par': 131 | return t1+'+'+t2 132 | else: 133 | return t1 + '('+t2+')' 134 | elif rightpart.children == 'par': 135 | return '('+t1+')'+t2 136 | else: 137 | return '('+t1+')+('+t2+')' 138 | 139 | elif t.data == 'sub': 140 | leftpart, rightpart = t.children 141 | t1 = run_instruction(leftpart, courant, vari) 142 | t2 = run_instruction(rightpart, courant, vari) 143 | 144 | if t1 == "0": 145 | if t2 == "0": 146 | return "0" 147 | elif rightpart.data == 'par': 148 | return "-"+t2 149 | else: 150 | return "-("+t2+")" 151 | elif t2 == "0": 152 | if leftpart.data == 'par': 153 | return t1 154 | else: 155 | return '('+t1+')' 156 | elif rightpart.data == 'par': 157 | if leftpart.data == 'par': 158 | return t1+'-'+t2 159 | else: 160 | return '('+t1+')-'+t2 161 | elif leftpart.data == "par": 162 | return t1+'-('+t2+')' 163 | else: 164 | return '('+t1+')-('+t2+')' 165 | 166 | elif t.data == 'mul': 167 | leftpart, rightpart = t.children 168 | t1 = run_instruction(leftpart, courant, vari) 169 | t2 = run_instruction(rightpart, courant, vari) 170 | if t1 == "0" or t2 == "0": 171 | return "0" 172 | elif t1 == "1": 173 | return t2 174 | elif t2 == "1": 175 | return t1 176 | else: 177 | return "(" + t1 + ")*(" + t2 + ")" 178 | 179 | elif t.data == 'div': 180 | leftpart, rightpart = t.children 181 | t1 = run_instruction(leftpart, courant, vari) 182 | if t1 == "0": 183 | return "0" 184 | elif t1 == "1": 185 | return "1/(" + run_instruction(rightpart, courant, vari)+")" 186 | else: 187 | return "("+t1+")" + "/(" + run_instruction(rightpart, courant, vari)+")" 188 | 189 | elif t.data == 'pow': 190 | leftpart, rightpart = t.children 191 | t1 = run_instruction(leftpart, courant, vari) 192 | if t1 == "0": 193 | return "0" 194 | elif t1 == "1": 195 | return "1" 196 | else: 197 | return "(" + run_instruction(leftpart, courant, vari)+")" + "**" + "("+run_instruction(rightpart, courant, vari)+")" 198 | 199 | elif t.data == 'par': 200 | op = t.children[0] 201 | t1 = run_instruction(op, courant, vari) 202 | 203 | if t1 == "0": 204 | return "0" 205 | elif t1 == "1": 206 | return "1" 207 | elif op.data == 'par' or op.data == 'pos' or op.data == 'number' or op.data == "diff": 208 | return t1 209 | else: 210 | return "(" + t1 + ")" 211 | 212 | elif t.data == "delta": 213 | delay, part = t.children 214 | lag = int(delay) 215 | if lag > 0: 216 | return "(" + run_instruction(part, 
courant, vari) + "- (" + run_instruction(part, courant + int(delay), vari) + "))" 217 | else: 218 | raise ValueError("Valeur incohérente du retard !") 219 | 220 | elif t.data == "deltaone": # pour prendre en compte l'ommission en cas de lag unitaire 221 | part = t.children[0] 222 | return "(" + run_instruction(part, courant, vari) + "- (" + run_instruction(part, courant + 1, vari) + "))" 223 | 224 | elif t.data == "lag": 225 | expr, delay = t.children 226 | lag = int(delay) 227 | if lag <= 0: 228 | return run_instruction(expr, courant + abs(lag), vari) 229 | else: 230 | raise ValueError( 231 | "Le modèle contient des variables anticipées !") 232 | 233 | elif t.data == "coeff": 234 | nom = str(t.children[0]) 235 | if nom not in name_coeff_list: 236 | raise ValueError("Valeur inconnue pour le coefficient !") 237 | # on va chercher le coefficient dans un dictionnaire 238 | return "coeff['" + nom + "']" 239 | 240 | elif t.data == "var": 241 | nom = str(t.children[0]) 242 | 243 | # cas d'une variable exogène ou policy 244 | if (nom in self.list_exo_block): 245 | # on cherche dans la base de données du modèle la valeur 246 | return "data[t-"+str(courant)+"," + str(dicovar[nom]) + "]" 247 | 248 | elif nom in self.list_endo_block: # cas d'une variable endogène 249 | 250 | if courant == 0: # variable dont la valeur doit être déterminée 251 | 252 | return "x["+str(self.list_endo_block.index(nom))+"]" 253 | 254 | else: # on cherche dans la base de données du modèle 255 | return "data[t-"+str(courant)+"," + str(dicovar[nom]) + "]" 256 | 257 | elif nom in name_coeff_list: 258 | # on va chercher le coefficient dans un dictionnaire 259 | return "coeff['" + nom + "']" 260 | 261 | else: 262 | raise ValueError("Le modèle doit être analysé !") 263 | 264 | elif t.data == "log": 265 | op = t.children[0] 266 | return "log(" + run_instruction(op, courant, vari) + ")" 267 | 268 | elif t.data == "exp": 269 | op = t.children[0] 270 | return "exp(" + run_instruction(op, courant, vari) + ")" 271 | 272 | elif t.data == 'neg': 273 | op = t.children[0] 274 | 275 | t1 = run_instruction(op, courant, vari) 276 | 277 | if t1 == "0": 278 | return "0" 279 | elif t1 == "1": 280 | return "-1" 281 | elif op.data == 'par' or op.data == 'pos' or op.data == 'number' or op.data == "diff": 282 | return "-"+t1 283 | else: 284 | return "-(" + t1 + ")" 285 | 286 | elif t.data == 'pos': 287 | op = t.children[0] 288 | return run_instruction(op, courant, vari) 289 | 290 | elif (t.data == "number") or (t.data == "signednumber"): 291 | valeur = t.children[0] 292 | if valeur == 0: 293 | return "0" 294 | else: 295 | return str(valeur) 296 | 297 | elif t.data == "diff": 298 | nom = str(t.children[0]) 299 | if nom == vari: 300 | return "1" 301 | else: 302 | return "0" 303 | 304 | else: 305 | raise SyntaxError('Unknown instruction: %s' % t.data) 306 | 307 | dimbloc = str(len(self.list_endo_block)) 308 | 309 | res_block = "\tf = np.zeros("+dimbloc+",dtype=np.float64)\n" 310 | jac_block = "\tdf = np.zeros(("+dimbloc+","+dimbloc+"),dtype=np.float64)\n" 311 | 312 | ell = 0 313 | 314 | for eq in self.block_eq_obj_list: 315 | 316 | # on récupère l'arbre syntaxique de l'équation et de sa dérivée 317 | eq_parse = eq.tree_eq 318 | jac_parse = eq.tree_diff 319 | 320 | texte_eq = run_instruction(eq_parse, 0) 321 | 322 | res_block += "\tf[" + str(ell) + "] = " 323 | res_block += texte_eq + "\n" # on met à jour le texte de la fonction 324 | 325 | # ensemble des endogènes contemporaines de l'équation 326 | endo_name_list = eq.endo_name_list 327 | 328 | for j, 
vari in enumerate(self.list_endo_block): 329 | 330 | 331 | 332 | if vari in endo_name_list: 333 | jac_block += "\tdf[" + str(ell) + "][" + str(j) + "] = " 334 | partialder = run_instruction(jac_parse, 0, vari) 335 | jac_block += partialder + "\n" 336 | 337 | ell += 1 338 | 339 | self.fun_text = res_block 340 | self.jac_text = jac_block 341 | 342 | 343 | return 344 | -------------------------------------------------------------------------------- /macronometrics/tools.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | 5 | Tools for manipulation and simulation of a model 6 | """ 7 | import pandas as pd 8 | import numpy as np 9 | import sys 10 | 11 | from scipy.optimize import root 12 | 13 | from .numsolve import newton, newton_alt, ggn11 14 | 15 | import importlib 16 | from time import time 17 | 18 | from .tools_ts import extrapolate_series 19 | 20 | lag_trim = pd.offsets.MonthBegin(3) 21 | 22 | ##################################### 23 | # Manipulations formelles du modèle # 24 | ##################################### 25 | 26 | 27 | def verif_model(model): 28 | """ 29 | Vérifie le modèle : 30 | - nombre d'endogènes == nombre d'équations 31 | - déclaration des variables 32 | - données présentes 33 | - pas de variables endogènes non contemporaines 34 | """ 35 | 36 | # A compléter 37 | 38 | return 39 | 40 | 41 | def addsym(model, status, name_sym, verbose=True): 42 | """ 43 | Add a new symbol to the model 44 | 45 | model : A macro model 46 | status : "endogenous" / "exogenous" / "policy" / "coefficient" 47 | name_sym : name of the symbol (string) 48 | """ 49 | if name_sym not in model.symboles_dict.keys(): 50 | if status == "endogenous": 51 | model.name_endo_list.append(name_sym) 52 | model.name_endo_list.sort() 53 | 54 | elif status == "exogenous": 55 | model.name_exo_list.append(name_sym) 56 | model.name_exo_list.sort() 57 | 58 | elif status == "policy": 59 | model.name_policy_list.append(name_sym) 60 | model.name_policy_list.sort() 61 | 62 | elif status == "coefficient": 63 | model.name_coeff.append(name_sym) 64 | model.name_policy_list.sort() 65 | 66 | else: 67 | raise SyntaxError('Unknown status') 68 | 69 | model.symboles_dict[name_sym] = status 70 | 71 | model.is_analyzed = False 72 | model.is_built = False # mise à jour du statut du modèle 73 | 74 | if status == "exogenous" or status == "endogenous" or status == "policy" : 75 | model.vall_set = model.vall_set | {name_sym} 76 | 77 | 78 | if verbose: 79 | print(name_sym + " is defined as " + status + 80 | " in the model " + model.name_mod) 81 | 82 | return 83 | 84 | 85 | def delsym(model, name_sym, verbose=True): 86 | """ 87 | Delete a symbol from the model 88 | """ 89 | if name_sym in model.symboles_dict.keys(): 90 | status = model.symboles_dict[name_sym] 91 | 92 | if status == "endogenous": 93 | model.name_endo_list.remove(name_sym) 94 | 95 | elif status == "exogenous": 96 | model.name_exo_list.remove(name_sym) 97 | 98 | elif status == "policy": 99 | model.name_policy_list.remove(name_sym) 100 | 101 | elif status == "coefficient": 102 | model.name_coeff.remove(name_sym) 103 | 104 | else: 105 | raise SyntaxError('Unknown status') 106 | 107 | del model.symboles_dict[name_sym] 108 | 109 | if status == "exogenous" or status == "endogenous" or status == "policy" : 110 | model.vall_set = model.vall_set - {name_sym} 111 | 112 | model.is_analyzed = False 113 | model.is_built = False # mise à jour du statut du modèle 114 | 115 | if verbose: 116 | 
print(name_sym + " is deleted from the model " + model.name_mod) 117 | 118 | return 119 | 120 | 121 | def changesym(model, status, name_sym, verbose=True): 122 | """ 123 | Change the status of a symbol 124 | """ 125 | if name_sym in model.symboles_dict.keys(): 126 | 127 | old_statut = model.symboles_dict[name_sym] 128 | 129 | delsym(model, name_sym, verbose=False) 130 | addsym(model, status, name_sym, verbose=False) 131 | 132 | if verbose: 133 | print(name_sym + " has changed from " + old_statut + " to " + 134 | status + " in the model " + model.name_mod) 135 | 136 | else: 137 | raise SyntaxError('Unknown symbol') 138 | 139 | return 140 | 141 | 142 | # def deleq(model, nom_eq): 143 | # """ 144 | # Supprime une équation 145 | # """ 146 | # if nom_eq in model.equations.keys(): 147 | # del model.equations[nom_eq] 148 | 149 | # model.is_built = False # mise à jour du statut du modèle 150 | 151 | # return 152 | 153 | 154 | # def addeq(model, nom_eq, texte_eq): 155 | # """ 156 | # Ajoute une équation 157 | # """ 158 | # if nom_eq not in model.equations.keys(): 159 | 160 | # model.equations[nom_eq] = texte_eq.strip() 161 | # model.is_built = False # mise à jour du statut du modèle 162 | 163 | # else: 164 | # raise SyntaxError('Duplicate name') 165 | 166 | # return 167 | 168 | 169 | # def changeeq(model, nom_eq, texte_eq): 170 | # """ 171 | # Modifie une équation 172 | # """ 173 | # if nom_eq in model.equations.keys(): 174 | 175 | # deleq(model, nom_eq) 176 | # addeq(model, nom_eq, texte_eq) 177 | 178 | # else: 179 | # raise SyntaxError('Unknown name') 180 | 181 | # return 182 | 183 | 184 | 185 | ######################## 186 | # Simulation du modèle # 187 | ######################## 188 | 189 | 190 | def readcoeffs(filename="coefficients.csv"): 191 | """ 192 | Load the values of the coefficients from a .csv file. 
193 | 194 | Argument : 195 | filename : name of the .csv file (with the extension) 196 | 197 | Result : 198 | dictionary name -> value 199 | """ 200 | data = pd.read_csv(filename, header=None) 201 | # data[0] holds the coefficient names and data[1] the associated values 202 | 203 | # build a dictionary from the data contained in the .csv file 204 | n_lines = data.shape[0] 205 | dico_coeffs = dict() 206 | for i in range(n_lines): 207 | dico_coeffs[data[0][i]] = data[1][i] 208 | 209 | return dico_coeffs 210 | 211 | 212 | def simulate(df_mod, val_coeff, start_date, end_date, function_name, use_jac=True, tol=1e-10, itm=1000, method='sp_root', dir="./modeles_python"): 213 | """ 214 | Simulation of a model from a .py file : 215 | ============================================ 216 | * the dataframe df_mod contains all the endogenous/exogenous/policy variables 217 | * simulation is performed between start_date and end_date (included) 218 | 219 | 220 | Arguments : 221 | =========== 222 | * df_mod : pandas dataframe with the time series of the model 223 | * val_coeff : dictionary name -> value for the coefficients of the model 224 | * start_date : date of the beginning of the simulation (format YYYYQ) 225 | * end_date : date of the end of the simulation (format YYYYQ) 226 | * function_name : name of the python module associated with the model 227 | * use_jac : True if the symbolic Jacobian is used during the simulation 228 | 229 | """ 230 | 231 | start_time = time() # tic 232 | 233 | # import sys 234 | sys.path.insert(1, dir) 235 | 236 | # so that the module can be imported in the current environment 237 | importlib.invalidate_caches() 238 | try: 239 | mdl = importlib.import_module(function_name) 240 | except ModuleNotFoundError: 241 | print('Model is not built yet. 
Build and write the model into a Python file') 242 | sys.exit() 243 | 244 | 245 | # choix du solveur 246 | try: 247 | mdl_solver = importlib.import_module("macronometrics.numsolve") 248 | except ModuleNotFoundError: 249 | print('Problem with the solver module') 250 | sys.exit() 251 | 252 | if method in ['sp_root','ggn11','newton','newton_alt'] : 253 | solver = getattr(mdl_solver, method) 254 | else : 255 | raise ValueError("Unknown solver !") 256 | 257 | 258 | 259 | n_blocks = getattr(mdl, "n_blocks") # nombre de blocs dans le modèle 260 | 261 | coln = getattr(mdl, "coln") # Liste des noms de variables 262 | # Dictionnaire de correspondance des noms de variables 263 | dicovar = getattr(mdl, "dicovar") 264 | 265 | # coeffs = getattr(mdl, "coeffs") # Liste des coefficients du modèle 266 | # dicoeff = getattr(mdl,"dicoeff") # dictionnaire de correspondance des coefficients 267 | 268 | # fonctions associées au modèle 269 | # fonction permettant de récupérer les endogènes de chaque bloc 270 | funmodel_varendo = getattr(mdl, function_name+"_varendo") 271 | # fonction permettant de récupérer les correspondances de chaque bloc 272 | funmodel_dicoendo = getattr(mdl, function_name+"_dicoendo") 273 | 274 | # définition des dates au format pandas 275 | start_date_pd = pd.to_datetime(start_date) 276 | end_date_pd = pd.to_datetime(end_date) 277 | 278 | # on copie les colones utiles dans un nouveau data frame 279 | # les colones sont bien ordonnées dans le data frame 280 | data_sim = df_mod[coln].copy() 281 | 282 | index_date = len(data_sim[str(data_sim.index[0]):start_date_pd])-1 283 | 284 | data_results = data_sim.copy() # pour le stockage des résultats 285 | 286 | ix = pd.date_range( 287 | start=str(data_sim.index[0]), end=end_date_pd, freq="QS") 288 | # pour avoir l'index de dates correspondant au résultat de la simulation 289 | data_results = data_results.reindex(ix) 290 | 291 | iter_dates = pd.date_range(start=start_date_pd, end=end_date_pd, freq="QS") 292 | 293 | # chargement des données du modèle sous forme de tableau 294 | datanp = data_sim.to_numpy() # au format np.array 295 | # pour stocker les résultats partiels dans un tableau 296 | data_result_np = np.copy(datanp) 297 | 298 | n_simul = len(iter_dates) 299 | 300 | elapsed_time = time() - start_time # toc 301 | 302 | print(f"Loading the model took {elapsed_time:.3f} seconds.\n") 303 | 304 | start_time = time() # tic 305 | 306 | for i in range(n_simul): 307 | 308 | for count in range(n_blocks): 309 | 310 | # dictionnaire nom endogène -> colonne dans le tableau 311 | nom_col_endo = funmodel_dicoendo(count) 312 | # liste des noms des endogènes 313 | list_var_endo = funmodel_varendo(count) 314 | 315 | # liste des indices des endogènes du bloc courant (A SIMPLIFIER !!!) 
316 | list_endo = list(nom_col_endo.values()) 317 | 318 | # retrieve the functions of the current block 319 | g = getattr(mdl, function_name+"_"+str(count)) 320 | 321 | if use_jac: # use the symbolic Jacobian 322 | g_jac = getattr(mdl, function_name+"_"+str(count)+"_jac") 323 | else: 324 | g_jac = False # the Jacobian is approximated numerically 325 | 326 | x_start = np.zeros(len(list_var_endo)) 327 | 328 | # the solver is initialised with the last known values of the endogenous variables 329 | 330 | x_start = data_result_np[index_date + i - 1, list_endo] 331 | 332 | if (method == 'ggn11'): 333 | (x_res, _, _) = ggn11(x_start, g, g_jac, ftol=tol, itermax=itm, 334 | alphamin=0.05, args=(index_date+i, data_result_np, val_coeff)) 335 | for j, item in enumerate(list_var_endo): 336 | data_result_np[index_date + i, dicovar[item]] = x_res[j] # update the results 337 | 338 | elif (method == 'sp_root'): # could fsolve be used here instead of root ? 339 | res_spo = root(g, x_start, args=(index_date+i, data_result_np, val_coeff), jac=g_jac, options={'xtol' : tol}) 340 | # update the endogenous variables of the current date for the next block 341 | for j, item in enumerate(list_var_endo): 342 | data_result_np[index_date + i, dicovar[item]] = res_spo.x[j] 343 | 344 | elif (method == 'newton'): 345 | (x_res, _, _) = newton(x_start, g, g_jac, ftol=tol, itermax=itm, 346 | alphamin=0.05, args=(index_date+i, data_result_np, val_coeff)) 347 | for j, item in enumerate(list_var_endo): 348 | data_result_np[index_date + i, dicovar[item]] = x_res[j] # update the results 349 | 350 | 351 | elapsed_time = time() - start_time 352 | 353 | print(f"The simulation of the model took {elapsed_time:.3f} seconds.\n") 354 | 355 | return pd.DataFrame(data=data_result_np, index=data_sim.index, columns=coln) 356 | 357 | # def simulate_cython(df_mod, val_coeff, start_date, end_date, function_name, use_jac=True, tol=1e-10, itm=1000, method='sp_root', dir="./modeles_cython"): 358 | # """ 359 | # Simulation of a model from a cython module : 360 | # ============================================ 361 | # * the dataframe df_mod contains all the endogenous/exogenous/policy variables 362 | # * simulation is performed between start_date and end_date (included) 363 | 364 | 365 | # Arguments : 366 | # =========== 367 | # * df_mod : pandas dataframe with the time series of the model 368 | # * val_coeff : dictionary name -> value for the coefficients of the model 369 | # * start_date : date of the beginning of the simulation (format YYYYQ) 370 | # * end_date : date of the end of the simulation (format YYYYQ) 371 | # * function_name : name of the python module associated with the model 372 | # * use_jac : True if the symbolic Jacobian is used during the simulation 373 | 374 | # """ 375 | 376 | # start_time = time.time() # tic 377 | 378 | # # import sys 379 | # sys.path.insert(1, dir) 380 | 381 | # # so that the Cython module can be imported in the current environment 382 | # importlib.invalidate_caches() 383 | # try: 384 | # mdl = importlib.import_module(function_name) 385 | # except ModuleNotFoundError: 386 | # print('Model is not built yet. 
Build and compile the model into a Cython file') 387 | # sys.exit() 388 | 389 | # funmodel_n_blocks = getattr(mdl, function_name+"_n_blocks") 390 | # n_blocks = funmodel_n_blocks() # nombre de blocs dans le modèle 391 | 392 | # funmodel_coln = getattr(mdl, function_name+"_coln") 393 | # coln = funmodel_coln() # Liste des noms de variables 394 | 395 | # # Dictionnaire de correspondance des noms de variables 396 | # funmodel_dicovar = getattr(mdl, function_name+"_dicovar") 397 | # dicovar = funmodel_dicovar() 398 | 399 | # funmodel_coeffs = getattr(mdl, function_name+"_coeffs") # Liste des coefficients du modèle 400 | # coeffs = funmodel_coeffs() 401 | 402 | # # fonctions associées au modèle 403 | # # fonction permettant de récupérer les endogènes de chaque bloc 404 | # funmodel_varendo = getattr(mdl, function_name+"_varendo") 405 | # # fonction permettant de récupérer les correspondances de chaque bloc 406 | # funmodel_dicoendo = getattr(mdl, function_name+"_dicoendo") 407 | 408 | # # on copie les colonnes utiles dans un nouveau data frame 409 | # # les colonnes sont bien ordonnées dans le data frame 410 | # data_sim = df_mod[coln].copy() 411 | 412 | # # définition des dates au format pandas 413 | # start_date_pd = pd.to_datetime(start_date) 414 | # end_date_pd = pd.to_datetime(end_date) 415 | 416 | # #identification du rang de la première date dans le dataframe 417 | # index_date = len(data_sim[str(data_sim.index[0]):start_date_pd])-1 418 | 419 | # iter_dates = pd.date_range(start=start_date_pd, end=end_date_pd, freq="QS") 420 | 421 | # # chargement des données du modèle sous forme de tableau 422 | # data_result_np = data_sim.to_numpy() # au format np.array 423 | 424 | # n_simul = len(iter_dates) 425 | 426 | # elapsed_time = time.time() - start_time # toc 427 | 428 | # print(f"Loading the model took {elapsed_time:.3f} seconds.\n") 429 | 430 | # start_time = time.time() # tic 431 | 432 | # for i in range(n_simul): 433 | 434 | # for count in range(n_blocks): 435 | 436 | # # dictionnaire nom endogène -> colonne dans le tableau 437 | # nom_col_endo = funmodel_dicoendo(count) 438 | # # liste des noms des endogènes 439 | # list_var_endo = funmodel_varendo(count) 440 | 441 | # # liste des indices des endogènes du bloc courant (A SIMPLIFIER !!!) 
442 | # list_endo = list(nom_col_endo.values()) 443 | 444 | # # Récupération des fonctions du bloc 445 | # g = getattr(mdl, function_name+"_"+str(count)) 446 | 447 | # if use_jac: # utilisation du jacobien symbolique 448 | # g_jac = getattr(mdl, function_name+"_"+str(count)+"_jac") 449 | # else: 450 | # g_jac = False # Jacobien approché numériquement 451 | 452 | # x_start = np.zeros(len(list_var_endo)) 453 | 454 | # # initialisation de la méthode de résolution à la dernière date connue des endogènes 455 | 456 | # x_start = data_result_np[index_date + i - 1, list_endo] 457 | 458 | 459 | # res_spo = spo.root(g, x_start, args=( 460 | # index_date+i, data_result_np, val_coeff), jac=g_jac, options={'xtol' : tol}) 461 | # # mise à jour des endogènes pour le bloc suivant à la date courante 462 | # for j, item in enumerate(list_var_endo): 463 | # data_result_np[index_date + i, 464 | # dicovar[item]] = res_spo.x[j] 465 | 466 | # elapsed_time = time.time() - start_time 467 | 468 | # print(f"The simulation of the model took {elapsed_time:.3f} secondes.\n") 469 | 470 | # return pd.DataFrame(data=data_result_np, index=data_sim.index, columns=coln) 471 | 472 | 473 | 474 | def simul_shock(function_name, val_coeff, data_sim, start_date_shock, end_date_shock, start_date_sim, end_date_sim, info_shock, dir="./modeles_python"): 475 | """ 476 | Simulation of a shock on the dataset data_sim (containing the time series between dates start_date_shock and end_date_shock). 477 | 478 | The solution is computed with the function simulate. 479 | 480 | info_shock is a list of dictionaries, where each item has keys : 481 | * variable : list of names of the shocked variables 482 | * type : pctge / level / chron (the type of shock) 483 | * value : the size of the shock (or the name of the variables in the case of a chronicle) 484 | 485 | The model is simulated between start_date_sim and end_date_sim. 486 | """ 487 | 488 | import sys 489 | sys.path.insert(1, dir) 490 | 491 | importlib.invalidate_caches() 492 | 493 | try: 494 | importlib.import_module(function_name) 495 | except ModuleNotFoundError: 496 | print('Model is not built yet. 
Build and write the model into a Python file') 497 | sys.exit() 498 | 499 | data_sim_mod = data_sim.copy() 500 | 501 | for shock in info_shock: 502 | 503 | if shock["type"] == 'level': 504 | liste_mod = [] 505 | for item in shock["variable"]: 506 | liste_mod.append( 507 | [item, 'affine', [start_date_shock, end_date_shock], 1, shock["value"]]) 508 | data_sim_mod = extrapolate_series( 509 | data_init=data_sim_mod, liste_series=liste_mod) 510 | 511 | elif shock["type"] == 'pctge': 512 | liste_mod = [] 513 | for item in shock["variable"]: 514 | liste_mod.append( 515 | [item, 'affine', [start_date_shock, end_date_shock], 1+0.01*shock["value"], 0]) 516 | data_sim_mod = extrapolate_series( 517 | data_init=data_sim_mod, liste_series=liste_mod) 518 | 519 | else: 520 | for item in shock["variable"]: 521 | data_sim_mod[item] = shock["value"][item] 522 | 523 | return simulate(data_sim_mod, val_coeff, start_date_sim, end_date_sim, function_name) 524 | -------------------------------------------------------------------------------- /macronometrics/tools_ts.py: -------------------------------------------------------------------------------- 1 | from pandas import offsets, date_range, to_datetime, DataFrame 2 | import numpy as np 3 | from math import floor 4 | 5 | lag_trim = offsets.MonthBegin(3) 6 | 7 | def extrapol_affine(data_init, nom_serie, multi, addi, date_start, date_end): 8 | """ 9 | Prolonge la base data_init par extrapolation des séries contenues dans nom_serie 10 | en les multipliant par multi et en additionnant addi 11 | entre les dates date_start et date_end (définies comme chaînes de caractères au format YYYYQ) 12 | """ 13 | data_extrapol = data_init.copy() 14 | iter_dates = date_range(start=to_datetime( 15 | date_start), end=to_datetime(date_end), freq="QS") 16 | 17 | for item in iter_dates: 18 | data_extrapol.loc[item, nom_serie] = data_extrapol.loc[item - 19 | lag_trim, nom_serie]*multi + addi 20 | 21 | return data_extrapol 22 | 23 | 24 | def extrapol_prolonge_taux_moyen(data_init, nom_serie, date_t1, date_t2, date_start, date_end): 25 | """ 26 | Prolonge la base data_init par extrapolation en taux de croissance 27 | des séries contenues dans nom_serie entre date_start jusqu'à l'horizon voulu date_end, au taux moyen observé 28 | entre t1+1 et t2 29 | Dates au format YYYYQ 30 | """ 31 | data_extrapol = data_init.copy() 32 | # Attention au décalage 33 | serie_tx = (data_extrapol[nom_serie].shift(-1) / 34 | data_extrapol[nom_serie] - 1).copy() 35 | 36 | # Calcul du taux moyen sur la période considérée 37 | tx_moyen = serie_tx[(to_datetime(date_t1) + lag_trim):to_datetime(date_t2)].mean() 38 | 39 | # Prolongement de la série 40 | iter_dates = date_range(start=to_datetime( 41 | date_start), end=to_datetime(date_end), freq="QS") 42 | temp = data_extrapol.loc[to_datetime( 43 | date_start)-lag_trim, nom_serie].copy() 44 | 45 | for item in iter_dates: 46 | temp *= (1+tx_moyen) 47 | data_extrapol.loc[item, nom_serie] = temp 48 | 49 | return data_extrapol 50 | 51 | 52 | def extrapol_prolonge_constant(data_init, nom_serie, date_start, date_end, value='last'): 53 | """ 54 | Prolonge la série de date_start jusqu'à l'horizon voulu date_end à une valeur constante 55 | égale par défaut à la dernière valeur observée ou à la valeur "value" si spécifié autrement 56 | Dates au format YYYYQ 57 | """ 58 | data_extrapol = data_init.copy() 59 | 60 | iter_dates = date_range(start=to_datetime( 61 | date_start), end=to_datetime(date_end), freq="QS") 62 | last_value = data_extrapol.loc[to_datetime( 63 | 
date_start)-lag_trim, nom_serie] 64 | 65 | for item in iter_dates: 66 | if value == 'last': 67 | data_extrapol.loc[item, nom_serie] = last_value 68 | else: 69 | data_extrapol.loc[item, nom_serie] = value 70 | 71 | return data_extrapol 72 | 73 | 74 | def extrapol_duplique(data_init, nom_serie, date_start, date_end, vect): 75 | """ 76 | Prolonge la série de date_start jusqu'à l'horizon voulu date_end en dupliquant le vecteur vect 77 | Dates au format YYYYQ 78 | """ 79 | data_extrapol = data_init.copy() 80 | 81 | rpt = len(data_extrapol[to_datetime(date_start):to_datetime(date_end)+lag_trim]) 82 | data_extrapol.loc[to_datetime(date_start):to_datetime( 83 | date_end), nom_serie] = np.tile(vect, floor(rpt/4)) 84 | 85 | return data_extrapol 86 | 87 | 88 | def extrapolate_series(data_init, liste_series): 89 | """ 90 | Prolonge toutes les séries définies dans liste_series selon le mode d'extrapolation spécifié 91 | Un élément de liste_series doit être écrit sous la forme : 92 | - [nom_serie,'constant',[date_start,date_end],value] -> prolongation constante à la dernière valeur observée ou à la valeur value 93 | - [nom_serie, 'taux de croissance',[date_start,date_end],taux] -> prolongation selon un taux de croissance constant indiqué par taux (en %) 94 | - [nom_serie, 'taux de croissance moyen',[date_start,date_end],[date_t1,date_t2]]->prolongation selon un taux de croissance constant moyen calculé entre date_t1 et date_t2 95 | - [nom_serie, 'affine',[date_start,date_end],multi,addi]->prolongation en multipliant par multi et en additionnat addi 96 | - [nom_serie, 'dummy_trim',[date_start,date_end],vect]->prolongation en dupliquant le vecteur vect 97 | """ 98 | data_extrapol = data_init.copy() 99 | 100 | for item in liste_series: 101 | if item[1] == 'constant': 102 | data_extrapol = extrapol_prolonge_constant( 103 | data_init=data_extrapol, nom_serie=item[0], date_start=item[2][0], date_end=item[2][1], value=item[3]) 104 | elif item[1] == 'taux de croissance': 105 | data_extrapol = extrapol_affine( 106 | data_init=data_extrapol, nom_serie=item[0], multi=1+0.01*item[3], addi=0, date_start=item[2][0], date_end=item[2][1]) 107 | elif item[1] == 'taux de croissance moyen': 108 | data_extrapol = extrapol_prolonge_taux_moyen( 109 | data_init=data_extrapol, nom_serie=item[0], date_t1=item[3][0], date_t2=item[3][1], date_start=item[2][0], date_end=item[2][1]) 110 | elif item[1] == 'affine': 111 | data_extrapol = extrapol_affine( 112 | data_init=data_extrapol, nom_serie=item[0], multi=item[3], addi=item[4], date_start=item[2][0], date_end=item[2][1]) 113 | else: 114 | data_extrapol = extrapol_duplique( 115 | data_init=data_extrapol, nom_serie=item[0], date_start=item[2][0], date_end=item[2][1], vect=item[3]) 116 | 117 | return data_extrapol 118 | 119 | 120 | def compare_series(type_comparaison, db_init, db_fin, liste_var, date_init, date_fin): 121 | """ 122 | Permet de comparer les valeurs des séries dont les noms sont contenues dans liste_var 123 | entre les dataframes db_init et db_fin soit : 124 | - en écart absolu si type_comparaison='niveau' 125 | - en écart relatif (db_fin/db_init-1)*100 si type_comparaison='relatif' 126 | Suppose que les dataframe db_init et db_fin aient les mêmes dimensions et la même indexation temporelle 127 | """ 128 | results_trim = DataFrame() 129 | 130 | for serie in liste_var: 131 | if type_comparaison == 'absolu': 132 | results_trim.loc[to_datetime(date_init):to_datetime(date_fin), serie] = db_fin.loc[to_datetime( 133 | date_init):to_datetime(date_fin), 
serie]-db_init.loc[to_datetime(date_init):to_datetime(date_fin), serie] 134 | else: 135 | results_trim.loc[to_datetime(date_init):to_datetime(date_fin), serie] = 100*(db_fin.loc[to_datetime( 136 | date_init):to_datetime(date_fin), serie]/db_init.loc[to_datetime(date_init):to_datetime(date_fin), serie]-1) 137 | 138 | results_trim = results_trim.dropna() 139 | 140 | results_ann = results_trim.groupby(results_trim.index.year).mean() 141 | 142 | return (results_trim, results_ann) 143 | -------------------------------------------------------------------------------- /macronometrics/trollparser.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | @author: QME8JI 4 | """ 5 | 6 | from lark import Lark # Parseur générique 7 | 8 | #definition de la grammaire 9 | equation_grammar = ''' 10 | ?start: sum 11 | | sum "=" sum -> define_eq 12 | 13 | ?sum: product 14 | | sum "+" product -> add 15 | | sum "-" product -> sub 16 | 17 | ?product: power 18 | | product "*" power -> mul 19 | | product "/" power -> div 20 | 21 | ?power: unary 22 | | power "^" unary -> pow 23 | 24 | ?unary: item 25 | | "log(" sum ")" -> log 26 | | "exp(" sum ")" -> exp 27 | | "-" unary -> neg 28 | | "+" unary -> pos 29 | | "(" sum ")" -> par 30 | | "del(" NUMBER ":" sum ")" -> delta 31 | | "del(" sum ")" -> deltaone 32 | | unary "(" SIGNED_NUMBER ")" -> lag 33 | 34 | ?item: NUMBER -> number 35 | | SIGNED_NUMBER -> signednumber 36 | | CNAME"'c" -> coeff 37 | | CNAME -> var 38 | 39 | %import common.CNAME 40 | %import common.NUMBER 41 | %import common.SIGNED_NUMBER 42 | %import common.WS 43 | %ignore WS 44 | ''' 45 | 46 | 47 | # Définition du parseur Troll 48 | parser = Lark(equation_grammar, start="start") 49 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | scipy>=1.3.1 2 | numpy>=1.17.2 3 | pandas>=0.25.1 4 | tqdm>=4.32.2 5 | lark-parser>=0.7.1 6 | PyYAML>=3.11 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | import os 3 | 4 | def strip_comments(l): 5 | return l.split('#', 1)[0].strip() 6 | 7 | def reqs(*f): 8 | return list(filter(None, [strip_comments(l) for l in open( 9 | os.path.join(os.getcwd(), *f)).readlines()])) 10 | 11 | install_requires = reqs('requirements.txt') 12 | 13 | setuptools.setup( 14 | name = "macronometrics", 15 | packages = ["macronometrics"], 16 | version = "0.0.1", 17 | description = "Toolbox for macroeconometric modeling", 18 | author = "Benjamin Favetto Adrien Lagouge Olivier Simon", 19 | author_email = "dg75-g220@insee.fr", 20 | url = "http://www.insee.fr/", 21 | download_url = "", 22 | keywords = ["macroeconomics", "economic modeling", "time series"], 23 | classifiers = [ 24 | "Programming Language :: Python", 25 | "Programming Language :: Python :: 3", 26 | "License :: OSI Approved :: CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)", 27 | "Development Status :: 3 - Alpha", 28 | "Operating System :: OS Independent", 29 | "Intended Audience :: Science/Research", 30 | "Topic :: Software Development :: Libraries :: Python Modules", 31 | ], 32 | install_requires=install_requires, 33 | long_description = """\ 34 | A toolbox for macroeconometric modeling 35 | --------------------------------------- 36 | 37 | * High-level language for model 
description (parser based on Lark) 38 | * backward-looking modeling with AR / ECM processes 39 | * Dulmage-Mendelsohn block decomposition of the model 40 | * Symbolic computation of the Jacobian 41 | * Several choices of numerical solvers (based on SciPy, or high-order Newton methods) 42 | * Time-series management based on Pandas 43 | * Cython / Numba compilation of the solving functions 44 | * Estimation of the coefficients of the model (OLS) 45 | 46 | This version requires Python 3.6 or later. 47 | """, 48 | python_requires='>=3.6' 49 | ) --------------------------------------------------------------------------------
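
Quick-start sketch. The snippet below illustrates how the pieces above are typically combined: coefficients are loaded with readcoeffs, exogenous series are extended with extrapolate_series, and the model is solved block by block with simulate. It assumes a model that has already been built into a Python module under ./modeles_python; the module name "opale", the series name "world_demand", the file names and the dates are illustrative placeholders, not part of the library.

```
import pandas as pd

from macronometrics.tools import readcoeffs, simulate
from macronometrics.tools_ts import extrapolate_series

# Quarterly dataframe indexed by quarter-start dates, one column per model
# variable ("data_model.csv" is a placeholder file name).
df = pd.read_csv("data_model.csv", index_col=0, parse_dates=True)

# Coefficient values as a dictionary name -> value (see readcoeffs above).
coeffs = readcoeffs("coefficients.csv")

# Extend a (hypothetical) exogenous series at a constant 0.5 % quarterly growth
# rate over the simulation horizon, using the list format documented in
# extrapolate_series.
df = extrapolate_series(df, [["world_demand", "taux de croissance", ["2020Q1", "2023Q4"], 0.5]])

# Simulate the previously built model module "opale" between 2020Q1 and 2023Q4,
# with the symbolic Jacobian and scipy.optimize.root as the solver.
results = simulate(df, coeffs, "2020Q1", "2023Q4", "opale", use_jac=True, method="sp_root")
```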