├── LICENSE.md ├── README.md ├── data ├── EA │ ├── GS_energies_10x10.txt │ ├── GS_energies_20x20.txt │ ├── GS_energies_40x40.txt │ └── README.md └── SK │ ├── README.md │ ├── gs_seed1.txt │ ├── gs_seed13.txt │ ├── gs_seed16.txt │ ├── gs_seed18.txt │ ├── gs_seed19.txt │ ├── gs_seed2.txt │ ├── gs_seed21.txt │ ├── gs_seed22.txt │ ├── gs_seed23.txt │ ├── gs_seed25.txt │ ├── gs_seed27.txt │ ├── gs_seed3.txt │ ├── gs_seed30.txt │ ├── gs_seed31.txt │ ├── gs_seed32.txt │ ├── gs_seed34.txt │ ├── gs_seed35.txt │ ├── gs_seed38.txt │ ├── gs_seed39.txt │ ├── gs_seed40.txt │ ├── gs_seed5.txt │ ├── gs_seed6.txt │ ├── gs_seed7.txt │ ├── gs_seed8.txt │ └── gs_seed9.txt ├── src ├── VNA_1DTRNN │ ├── Helper_functions.py │ ├── RNNWavefunction.py │ ├── Tensordot2.py │ ├── TensorizedRNNCell.py │ └── run_VNA_randomisingchain.py ├── VNA_2DTRNN │ ├── Helper_functions.py │ ├── MDRNNWavefunction.py │ ├── MDTensorizedRNNCell.py │ ├── Tensordot2.py │ └── run_VNA_EdwardsAnderson.py └── VNA_DilatedRNN │ ├── DilatedRNNWavefunction.py │ ├── Helper_functions.py │ └── run_VNA_SherringtonKirkpatrick.py ├── tools ├── Generate_EA_instances.py ├── Generate_SK_instances.py └── Generate_WPE_instances.ipynb └── tutorials ├── Readme.md ├── VNA_1DTRNNs.ipynb ├── VNA_2DTRNNs.ipynb └── VNA_DilatedRNNs.ipynb /LICENSE.md: -------------------------------------------------------------------------------- 1 | # Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International 2 | 3 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 4 | 5 | ### Using Creative Commons Public Licenses 6 | 7 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 8 | 9 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). 10 | 11 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. 
If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). 12 | 13 | ## Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License 14 | 15 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 16 | 17 | ### Section 1 – Definitions. 18 | 19 | a. __Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 20 | 21 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 22 | 23 | c. __BY-NC-SA Compatible License__ means a license listed at [creativecommons.org/compatiblelicenses](http://creativecommons.org/compatiblelicenses), approved by Creative Commons as essentially the equivalent of this Public License. 24 | 25 | d. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 26 | 27 | e. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 28 | 29 | f. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 30 | 31 | g. __License Elements__ means the license attributes listed in the name of a Creative Commons Public License. 
The License Elements of this Public License are Attribution, NonCommercial, and ShareAlike. 32 | 33 | h. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 34 | 35 | i. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 36 | 37 | j. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. 38 | 39 | k. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 40 | 41 | l. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 42 | 43 | m. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 44 | 45 | n. __You__ means the individual or entity exercising the Licensed Rights under this Public License. __Your__ has a corresponding meaning. 46 | 47 | ### Section 2 – Scope. 48 | 49 | a. ___License grant.___ 50 | 51 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 52 | 53 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 54 | 55 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 56 | 57 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 58 | 59 | 3. __Term.__ The term of this Public License is specified in Section 6(a). 60 | 61 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 62 | 63 | 5. __Downstream recipients.__ 64 | 65 | A.
__Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 66 | 67 | B. __Additional offer from the Licensor – Adapted Material.__ Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply. 68 | 69 | C. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 70 | 71 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 72 | 73 | b. ___Other rights.___ 74 | 75 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 76 | 77 | 2. Patent and trademark rights are not licensed under this Public License. 78 | 79 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 80 | 81 | ### Section 3 – License Conditions. 82 | 83 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 84 | 85 | a. ___Attribution.___ 86 | 87 | 1. If You Share the Licensed Material (including in modified form), You must: 88 | 89 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 90 | 91 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 92 | 93 | ii. a copyright notice; 94 | 95 | iii. a notice that refers to this Public License; 96 | 97 | iv. a notice that refers to the disclaimer of warranties; 98 | 99 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 100 | 101 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 102 | 103 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 104 | 105 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 106 | 107 | 3. 
If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 108 | 109 | b. ___ShareAlike.___ 110 | 111 | In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 112 | 113 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-NC-SA Compatible License. 114 | 115 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 116 | 117 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. 118 | 119 | ### Section 4 – Sui Generis Database Rights. 120 | 121 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 122 | 123 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 124 | 125 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and 126 | 127 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 128 | 129 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 130 | 131 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability. 132 | 133 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ 134 | 135 | b. __To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ 136 | 137 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 
138 | 139 | ### Section 6 – Term and Termination. 140 | 141 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 142 | 143 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 144 | 145 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 146 | 147 | 2. upon express reinstatement by the Licensor. 148 | 149 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 150 | 151 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 152 | 153 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 154 | 155 | ### Section 7 – Other Terms and Conditions. 156 | 157 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 158 | 159 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 160 | 161 | ### Section 8 – Interpretation. 162 | 163 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 164 | 165 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 166 | 167 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 168 | 169 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 170 | 171 | > Creative Commons is not a party to its public licenses. Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 
172 | > 173 | > Creative Commons may be contacted at creativecommons.org 174 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Variational Neural Annealing 2 | Variational neural annealing (VNA) is a framework that uses neural networks to variationally simulate classical and quantum annealing for the purpose of solving optimization problems. In our paper https://www.nature.com/articles/s42256-021-00401-3 (arXiv version: https://arxiv.org/abs/2101.10154), we show that we can implement a variational version of classical annealing (VCA) and its quantum counterpart (VQA) using recurrent neural networks. We find that our implementation significantly outperforms traditional simulated annealing in the asymptotic limit on prototypical spin models, suggesting the promising potential of this route to optimization. 3 | 4 | This repository aims to facilitate the reproducibility of the results of [our paper](https://www.nature.com/articles/s42256-021-00401-3). 5 | 6 | Our implementation is based on the [RNN Wave Functions' code](https://github.com/mhibatallah/RNNWavefunctions). 7 | 8 | ## Content 9 | 10 | This repository contains the source code of our implementation, along with tutorials in the form of Jupyter notebooks for demonstration purposes. 11 | 12 | ### `src` 13 | This section contains our source code with the following implementations: 14 | 15 | 1. `src/VNA_1DTRNN`: an implementation of VNA using 1D Tensorized RNNs to find the ground state of random Ising chains with open boundary conditions. All you need to do is run the file `src/VNA_1DTRNN/run_VNA_randomisingchain.py`. 16 | 17 | 2. `src/VNA_2DTRNN`: an implementation of VNA using 2D Tensorized RNNs to find the ground state of the 2D Edwards-Anderson model with open boundary conditions. To execute this module, you can run the file `src/VNA_2DTRNN/run_VNA_EdwardsAnderson.py`. 18 | 19 | 3. `src/VNA_DilatedRNN`: an implementation of VNA using Dilated RNNs to find the ground state of the Sherrington-Kirkpatrick model. To execute this implementation, you can run the Python file `src/VNA_DilatedRNN/run_VNA_SherringtonKirkpatrick.py`. 20 | 21 | To run `VCA` in any of these modules, set Bx0 (the initial transverse magnetic field) to zero in the hyperparameters section of the corresponding execution Python file. Similarly, to run `VQA`, set T0 (the initial temperature) to zero. To run `RVQA`, set both Bx0 and T0 to non-zero values. Finally, to run classical-quantum optimization (`CQO`), set both Bx0 and T0 to zero. These settings are summarized in a quick-reference snippet just before the `tutorials` section below. More details about the acronyms `VCA`, `VQA`, `RVQA` and `CQO` are provided in [our paper](https://arxiv.org/abs/2101.10154). 22 | 23 | We note that in this code we use the `tensordot2` operation from the [TensorNetwork package](https://github.com/google/TensorNetwork) to speed up tensorized operations. 24 | 25 | ### `tools` 26 | 27 | This section contains the tools we used to generate the random instances of the models we considered in our paper. 28 | 29 | ### `data` 30 | 31 | This section provides the ground states of the Edwards-Anderson (EA) and the Sherrington-Kirkpatrick (SK) models, obtained from the [spin-glass server](http://spinglass.uni-bonn.de/) for 25 different seeds. The instances were generated using the code provided in `tools`.
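For quick reference, the four run modes described in the `src` section above map onto the two hyperparameters as follows. This is a schematic sketch: `Bx0` and `T0` are the names used in the run scripts' hyperparameters sections, and the non-zero values shown here are placeholders only.

```python
# Annealing-mode selection via the initial transverse field Bx0 and the
# initial temperature T0 (non-zero values are placeholders):
Bx0, T0 = 0.0, 1.0   # VCA:  variational classical annealing (no transverse field)
Bx0, T0 = 1.0, 0.0   # VQA:  variational quantum annealing (zero temperature)
Bx0, T0 = 1.0, 1.0   # RVQA: both annealing schedules active
Bx0, T0 = 0.0, 0.0   # CQO:  classical-quantum optimization (no annealing)
```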
32 | 33 | ### `tutorials` 34 | In this section of the repository, we demonstrate how our source code works in simple cases through Jupyter notebooks that you can run on [Google Colaboratory](https://colab.research.google.com) to take advantage of GPU speed-ups. These tutorials will help you become more familiar with the content of the source code. The `tutorials` module contains the following: 35 | 36 | 1. `tutorials/VNA_1DTRNNs.ipynb`: a demonstration of VNA using 1D Tensorized RNNs applied to random Ising chains with open boundary conditions. 37 | 2. `tutorials/VNA_2DTRNNs.ipynb`: a demonstration of VNA using 2D Tensorized RNNs on the 2D Edwards-Anderson model with open boundary conditions. 38 | 3. `tutorials/VNA_DilatedRNNs.ipynb`: a demonstration of VNA using Dilated RNNs applied to the Sherrington-Kirkpatrick model. 39 | 40 | For more details, you can check our manuscript on arXiv: https://arxiv.org/abs/2101.10154 or on Nature Machine Intelligence: https://www.nature.com/articles/s42256-021-00401-3 (free access at https://rdcu.be/cAIyS). 41 | 42 | For questions or inquiries, you can reach out by email at mohamed.hibat.allah@uwaterloo.ca. 43 | 44 | ## Dependencies 45 | This code works on Python 3.6.10 with the TensorFlow 1.13.1 and NumPy 1.16.3 modules. We also note that this code runs much faster on a GPU than on a CPU. No installation is required provided that the dependencies are available. 46 | 47 | ## Disclaimer 48 | This code can be freely used for academic purposes that are socially and scientifically beneficial; however, it is subject to Vector Institute's Intellectual Property (IP) policy for profit-related activities. 49 | 50 | ## License 51 | This code is under the ['Attribution-NonCommercial-ShareAlike 4.0 International'](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. 52 | 53 | ## Citing 54 | ```bibtex 55 | @Article{VNA2021, 56 | author={Hibat-Allah, Mohamed and Inack, Estelle M. and Wiersema, Roeland and Melko, Roger G.
and Carrasquilla, Juan}, 57 | title={Variational neural annealing}, 58 | journal={Nature Machine Intelligence}, 59 | year={2021}, 60 | month={Nov}, 61 | day={01}, 62 | volume={3}, 63 | number={11}, 64 | pages={952-961}, 65 | issn={2522-5839}, 66 | doi={10.1038/s42256-021-00401-3}, 67 | url={https://doi.org/10.1038/s42256-021-00401-3} 68 | } 69 | ``` 70 | 71 | -------------------------------------------------------------------------------- /data/EA/GS_energies_10x10.txt: -------------------------------------------------------------------------------- 1 | Ground state energies per spin for 10x10 with seeds = 1 to 25 2 | 3 | -0.7865039810, -0.6717205831, -0.7077779170, -0.7313929391, -0.7462129868, -0.7671913428, -0.7332729641, -0.7516186715, -0.7085055049, -0.7008707678, -0.7128638101, -0.7208464020, -0.8223925270, -0.7033253011, -0.7408836118, -0.7040265818, -0.7004240703, -0.7385140832, -0.8000257990, -0.7146896086, -0.7533329466, -0.7553434678, -0.7342476745, -0.7261501813, -0.7099739065 4 | -------------------------------------------------------------------------------- /data/EA/GS_energies_20x20.txt: -------------------------------------------------------------------------------- 1 | Ground state energies per spin for 20x20 with seeds = 1 to 25 2 | 3 | -0.7839132534, -0.7613715554, -0.7335066596, -0.7846755488, -0.7545004489, -0.7659300143, -0.7667873026, -0.7612872953, -0.7225713527, -0.7674285786, -0.7739134367, -0.7560489097, -0.7837760932, -0.7677854331, -0.8022273193, -0.7626793774, -0.7666852046, -0.7498243130, -0.7687270599, -0.7679292952, -0.7827799409, -0.7732395401, -0.7561401440, -0.7680018793, -0.7720055462 4 | -------------------------------------------------------------------------------- /data/EA/GS_energies_40x40.txt: -------------------------------------------------------------------------------- 1 | Ground state energies per spin for 40x40 with seeds = 1 to 25 2 | 3 | -0.7718673163, -0.7887947060, -0.7835883900, -0.7883411827, -0.7784774157, -0.7905200508, -0.7781229376, -0.7679759449, -0.7674755723, -0.7997022917, -0.7687799840, -0.7735501281, -0.7812162116, -0.7642683098, -0.7676343805, -0.7678120424, -0.7759546808, -0.7662827678, -0.7587420952, -0.7837036253, -0.7774089232, -0.7819302999, -0.7791316478, -0.7803132727, -0.7822404177 4 | -------------------------------------------------------------------------------- /data/EA/README.md: -------------------------------------------------------------------------------- 1 | This folder contains the ground state energies of 25 different instances generated using the code provided in `tools` for the EA model with three different sizes: 10x10, 20x20 and 40x40. 2 | -------------------------------------------------------------------------------- /data/SK/README.md: -------------------------------------------------------------------------------- 1 | This folder contains the ground state configurations of 25 different instances generated using the code provided in `tools` for the SK model with 100 spins.
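Below is a minimal sketch of how such a configuration might be read back into a spin vector. It assumes (this is an assumption, not documented in the data files themselves) that each `gs_seedN.txt` lists the 1-based indices of the spins taking the value +1 in one of the two degenerate ground states, with all remaining spins equal to -1:

```python
import numpy as np

def load_sk_ground_state(path, n_spins=100):
    # Assumption: the file holds whitespace-separated 1-based site indices
    # of the +1 spins; every other spin is set to -1.
    up_indices = np.loadtxt(path, dtype=int)
    spins = -np.ones(n_spins, dtype=int)
    spins[up_indices - 1] = 1
    return spins
```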
2 | -------------------------------------------------------------------------------- /data/SK/gs_seed1.txt: -------------------------------------------------------------------------------- 1 | 5 7 8 10 12 14 16 18 23 25 29 32 33 35 36 38 40 41 42 44 50 52 53 54 59 60 63 65 68 70 72 76 79 84 85 86 87 89 90 94 95 97 98 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed13.txt: -------------------------------------------------------------------------------- 1 | 3 5 6 7 9 10 12 13 16 18 19 23 24 25 26 29 30 33 34 35 36 37 38 39 40 41 51 53 54 56 57 58 60 62 63 67 70 71 73 74 77 79 81 86 87 89 90 -------------------------------------------------------------------------------- /data/SK/gs_seed16.txt: -------------------------------------------------------------------------------- 1 | 5 7 10 13 14 15 16 17 19 22 23 27 33 34 35 39 40 43 44 48 49 50 51 53 54 56 58 61 64 65 67 68 69 70 71 72 76 79 80 82 83 84 90 91 94 98 100 -------------------------------------------------------------------------------- /data/SK/gs_seed18.txt: -------------------------------------------------------------------------------- 1 | 1 2 3 7 8 9 10 12 13 15 16 18 19 20 26 29 30 31 32 33 35 36 38 40 45 47 51 52 57 58 59 61 67 68 69 71 72 74 76 84 86 88 92 93 94 95 96 98 -------------------------------------------------------------------------------- /data/SK/gs_seed19.txt: -------------------------------------------------------------------------------- 1 | 3 5 7 9 10 11 13 18 19 22 23 26 33 35 40 43 44 45 46 47 49 51 52 53 54 57 61 62 63 64 68 71 72 74 75 76 77 78 81 84 85 86 87 88 90 94 98 99 -------------------------------------------------------------------------------- /data/SK/gs_seed2.txt: -------------------------------------------------------------------------------- 1 | 1 3 5 8 11 12 13 15 16 17 18 20 21 23 24 30 32 33 35 36 38 40 41 42 44 49 52 57 58 59 60 63 69 70 78 79 81 86 87 93 96 98 99 -------------------------------------------------------------------------------- /data/SK/gs_seed21.txt: -------------------------------------------------------------------------------- 1 | 2 4 6 7 10 13 15 17 19 20 21 22 29 30 32 33 34 39 43 45 46 50 52 57 60 66 67 70 71 79 81 83 85 87 89 90 91 92 94 99 -------------------------------------------------------------------------------- /data/SK/gs_seed22.txt: -------------------------------------------------------------------------------- 1 | 2 3 4 9 12 14 16 19 20 26 27 29 30 32 37 40 42 43 44 45 46 47 48 50 54 55 58 59 60 64 65 66 67 76 79 81 83 84 86 88 90 91 93 94 95 97 98 100 -------------------------------------------------------------------------------- /data/SK/gs_seed23.txt: -------------------------------------------------------------------------------- 1 | 1 2 4 7 10 14 17 18 21 22 23 24 25 26 27 28 30 31 32 35 39 41 42 43 45 52 55 56 57 60 62 64 66 67 68 72 74 75 77 78 80 81 84 85 90 94 95 97 -------------------------------------------------------------------------------- /data/SK/gs_seed25.txt: -------------------------------------------------------------------------------- 1 | 1 2 5 6 8 9 10 13 16 17 19 23 24 25 26 27 30 31 36 39 40 41 45 46 48 50 51 52 55 58 59 61 65 71 72 73 74 75 80 82 85 86 90 94 97 98 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed27.txt: -------------------------------------------------------------------------------- 1 | 2 5 6 8 9 12 13 16 17 22 23 25 26 29 30 33 34 37 38 43 44 45 46 56 57 59 61 62 63 64 68 69 70 72 73 75 76 79 80 85 
90 91 93 96 97 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed3.txt: -------------------------------------------------------------------------------- 1 | 2 3 4 6 8 10 12 13 17 21 22 23 24 28 38 39 43 44 46 49 52 54 57 59 60 63 64 65 66 71 73 75 76 77 79 81 82 83 89 90 91 92 93 94 95 97 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed30.txt: -------------------------------------------------------------------------------- 1 | 1 2 3 4 5 8 9 10 11 12 13 16 23 26 27 32 37 39 40 44 48 51 54 56 58 62 63 68 70 71 72 73 74 76 77 84 86 88 89 92 95 97 100 -------------------------------------------------------------------------------- /data/SK/gs_seed31.txt: -------------------------------------------------------------------------------- 1 | 1 2 5 6 7 14 15 16 17 19 20 23 26 31 34 36 41 42 44 47 55 56 58 59 62 63 64 67 68 69 71 74 75 76 77 79 80 81 84 90 94 -------------------------------------------------------------------------------- /data/SK/gs_seed32.txt: -------------------------------------------------------------------------------- 1 | 1 3 4 5 6 8 9 12 16 17 18 19 20 21 22 30 31 33 35 37 38 39 40 41 42 44 45 50 52 53 58 59 67 69 72 73 75 84 86 88 89 90 93 94 97 98 -------------------------------------------------------------------------------- /data/SK/gs_seed34.txt: -------------------------------------------------------------------------------- 1 | 1 6 8 10 11 14 15 16 17 18 20 21 28 32 36 37 38 42 47 48 51 56 57 58 59 60 64 66 67 69 74 76 77 78 80 82 83 84 87 88 91 98 -------------------------------------------------------------------------------- /data/SK/gs_seed35.txt: -------------------------------------------------------------------------------- 1 | 1 3 4 8 9 13 14 15 16 21 22 24 35 36 37 38 39 40 41 44 45 46 50 52 57 60 61 62 64 66 69 71 72 73 74 75 77 78 80 81 84 85 87 89 91 92 93 94 98 99 -------------------------------------------------------------------------------- /data/SK/gs_seed38.txt: -------------------------------------------------------------------------------- 1 | 1 3 4 6 7 8 10 13 15 21 25 27 31 33 35 37 39 41 43 47 50 51 52 53 55 57 58 59 60 61 63 65 66 68 72 73 74 75 80 82 89 90 93 94 95 96 97 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed39.txt: -------------------------------------------------------------------------------- 1 | 1 2 4 5 6 9 11 14 15 16 18 19 20 27 29 30 34 35 36 38 39 41 42 45 49 51 53 55 57 60 62 66 70 71 74 76 77 80 81 82 84 85 87 88 90 92 93 98 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed40.txt: -------------------------------------------------------------------------------- 1 | 1 2 3 4 5 7 9 11 12 14 15 16 20 22 25 27 31 32 35 37 38 39 41 42 44 45 48 58 59 60 63 64 65 69 71 74 75 81 82 84 85 86 87 88 91 92 93 96 98 99 -------------------------------------------------------------------------------- /data/SK/gs_seed5.txt: -------------------------------------------------------------------------------- 1 | 2 4 5 6 7 10 11 13 15 16 17 24 32 34 36 39 41 42 43 44 45 46 48 50 52 55 56 62 63 68 69 71 73 75 76 78 79 81 84 85 88 89 92 94 100 -------------------------------------------------------------------------------- /data/SK/gs_seed6.txt: -------------------------------------------------------------------------------- 1 | 1 2 7 8 9 11 12 14 20 22 23 25 26 27 28 30 32 33 35 40 42 45 46 47 48 51 52 54 55 56 58 59 60 63 65 73 
79 81 85 86 87 89 90 91 92 95 99 -------------------------------------------------------------------------------- /data/SK/gs_seed7.txt: -------------------------------------------------------------------------------- 1 | 1 3 6 10 11 15 16 19 20 21 22 24 26 31 32 36 38 39 44 47 48 50 52 54 57 58 59 60 61 64 65 69 70 71 72 75 76 77 78 79 80 85 86 89 90 91 94 97 99 -------------------------------------------------------------------------------- /data/SK/gs_seed8.txt: -------------------------------------------------------------------------------- 1 | 3 4 5 11 12 17 19 22 23 27 29 32 35 37 38 40 41 46 49 52 54 59 60 65 67 68 74 75 76 77 78 79 80 81 83 84 88 91 92 93 94 95 97 99 100 -------------------------------------------------------------------------------- /data/SK/gs_seed9.txt: -------------------------------------------------------------------------------- 1 | 5 6 7 8 14 15 17 18 19 20 23 26 27 30 31 44 45 46 52 56 58 59 62 63 68 69 71 72 77 80 81 82 83 87 88 89 90 95 96 100 -------------------------------------------------------------------------------- /src/VNA_1DTRNN/Helper_functions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | """ 5 | This implementation is based on RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions 6 | Edited by Mohamed Hibat-Allah 7 | Description: we define helper functions to obtain the local energies of a 1D model, 8 | both with and without the presence of a transverse magnetic field 9 | """ 10 | 11 | def Ising_diagonal_matrixelements(Jz, samples): 12 | """ To get the diagonal matrix elements of the 1D Ising chain given a set of samples in parallel! 13 | Returns: The diagonal energies that correspond to the "samples" 14 | Inputs: 15 | - samples: (numsamples, N) 16 | - Jz: (N-1) np array 17 | """ 18 | numsamples = samples.shape[0] 19 | N = samples.shape[1] 20 | energies = np.zeros((numsamples), dtype = np.float64) 21 | 22 | for i in range(N-1): #diagonal elements 23 | values = samples[:,i]+samples[:,i+1] 24 | valuesT = np.copy(values) 25 | valuesT[values==2] = +1 26 | valuesT[values==0] = +1 27 | valuesT[values==1] = -1 28 | 29 | energies += valuesT*(-Jz[i]) 30 | 31 | return energies 32 | 33 | def Ising_local_energies(Jz, Bx, samples, queue_samples, log_probs_tensor, samples_placeholder, log_probs, sess): 34 | """ To get the local energies of a 1D spin chain given a set of samples in parallel!
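Schematically, for each sample sigma, the quantity computed below is
E_loc(sigma) = - sum_i Jz[i]*sigma_i*sigma_{i+1} - Bx * sum_j sqrt( p(sigma^(j)) / p(sigma) ),
where sigma^(j) denotes sigma with spin j flipped and p is the probability defined by the RNN;
the square-root ratio is evaluated below as exp( 0.5*log p(sigma^(j)) - 0.5*log p(sigma) ).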
35 | Returns: The local energies that correspond to the "samples" 36 | Inputs: 37 | - samples: (numsamples, N) 38 | - Jz: (N-1) np array 39 | - Bx: float 40 | - queue_samples: (N+1, numsamples, N) an empty pre-allocated np array used to store "samples" together with all the spin-flipped configurations generated by applying the Hamiltonian H to "samples" 41 | - log_probs_tensor: A TF tensor with size (None) 42 | - samples_placeholder: A TF placeholder used to feed in a set of configurations 43 | - log_probs: ((N+1)*numsamples,) a pre-allocated np array used to store the log-probabilities of the diagonal and the spin-flipped (non-diagonal) configurations 44 | - sess: The current TF session 45 | """ 46 | numsamples = samples.shape[0] 47 | N = samples.shape[1] 48 | 49 | local_energies = np.zeros((numsamples), dtype = np.float64) 50 | 51 | for i in range(N-1): #diagonal elements 52 | values = samples[:,i]+samples[:,i+1] 53 | valuesT = np.copy(values) 54 | valuesT[values==2] = +1 #If both spins are up 55 | valuesT[values==0] = +1 #If both spins are down 56 | valuesT[values==1] = -1 #If they are opposite 57 | 58 | local_energies += valuesT*(-Jz[i]) 59 | 60 | queue_samples[0] = samples #storing the diagonal samples 61 | 62 | if Bx != 0: 63 | for i in range(N): #Non-diagonal elements 64 | valuesT = np.copy(samples) 65 | valuesT[:,i][samples[:,i]==1] = 0 #Flip spin i 66 | valuesT[:,i][samples[:,i]==0] = 1 #Flip spin i 67 | 68 | queue_samples[i+1] = valuesT 69 | 70 | len_sigmas = (N+1)*numsamples 71 | steps = len_sigmas//50000+1 #cap the batch size at 50000 to avoid allocating too much memory 72 | 73 | queue_samples_reshaped = np.reshape(queue_samples, [(N+1)*numsamples, N]) 74 | for i in range(steps): 75 | if i < steps-1: 76 | cut = slice((i*len_sigmas)//steps,((i+1)*len_sigmas)//steps) 77 | else: 78 | cut = slice((i*len_sigmas)//steps,len_sigmas) 79 | 80 | # Compute the log-probabilities with Tensorflow 81 | log_probs[cut] = sess.run(log_probs_tensor, feed_dict={samples_placeholder:queue_samples_reshaped[cut]}) 82 | 83 | log_probs_reshaped = np.reshape(log_probs, [N+1,numsamples]) 84 | 85 | 86 | for j in range(numsamples): 87 | local_energies[j] += -Bx*np.sum( np.exp(0.5*log_probs_reshaped[1:,j] - 0.5*log_probs_reshaped[0,j]) ) 88 | 89 | return local_energies 90 | -------------------------------------------------------------------------------- /src/VNA_1DTRNN/RNNWavefunction.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import random 4 | 5 | """ 6 | This implementation is based on RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions 7 | Edited by Mohamed Hibat-Allah 8 | Description: Here, we define the 1D RNNWavefunction class, which contains the `sample` method, 9 | used to sample configurations autoregressively from the RNN, and 10 | the `log_probability` method, used to estimate the log-probability of a set of configurations. 11 | More details are in https://arxiv.org/abs/2101.10154.
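The RNN parametrizes a normalized autoregressive distribution
p(sigma) = prod_{n=1}^{N} p(sigma_n | sigma_{n-1}, ..., sigma_1),
so configurations are sampled exactly (no Markov chain is required) and their
log-probabilities are evaluated from the same conditionals.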
12 | """ 13 | 14 | class RNNWavefunction(object): 15 | def __init__(self,systemsize,cell=None,activation=tf.nn.relu,units=[10],scope='RNNWavefunction', seed = 111): 16 | """ 17 | systemsize: int 18 | number of sites 19 | cell: a tensorflow RNN cell 20 | units: list of int 21 | number of units per RNN layer 22 | scope: str 23 | the name of the name-space scope 24 | activation: activation of the RNN cell 25 | seed: pseudo-random number generator 26 | """ 27 | self.graph=tf.Graph() 28 | self.scope=scope #Label of the RNN wavefunction 29 | self.N=systemsize #Number of sites of the 1D chain 30 | 31 | random.seed(seed) # `python` built-in pseudo-random generator 32 | np.random.seed(seed) # numpy pseudo-random generator 33 | 34 | dim_inputs = [2]+units[:-1] #dim of inputs for each layer in the RNN 35 | 36 | 37 | #Defining the neural network 38 | with self.graph.as_default(): 39 | with tf.compat.v1.variable_scope(self.scope,reuse=tf.compat.v1.AUTO_REUSE): 40 | tf.compat.v1.set_random_seed(seed) # tensorflow pseudo-random generator 41 | 42 | #Defining RNN cells with site-dependent parameters 43 | self.rnn=[tf.compat.v1.nn.rnn_cell.MultiRNNCell([cell(units[i],num_in = dim_inputs[i], activation = activation,name='RNN_{0}{1}'.format(i,n), dtype = tf.float64) for i in range(len(units))]) for n in range(self.N)] 44 | self.dense = [tf.compat.v1.layers.Dense(2,activation=tf.nn.softmax,name='RNNWF_dense_{0}'.format(n), dtype = tf.float64) for n in range(self.N)] 45 | 46 | def sample(self,numsamples,inputdim): 47 | """ 48 | generate samples from a probability distribution parametrized by a recurrent network 49 | ------------------------------------------------------------------------ 50 | Parameters: 51 | 52 | numsamples: int 53 | number of samples to be produced 54 | inputdim: int 55 | hilbert space dimension 56 | 57 | ------------------------------------------------------------------------ 58 | Returns: a tuple (samples,log-probs) 59 | 60 | samples: tf.Tensor of shape (numsamples,systemsize) 61 | the samples in integer encoding 62 | log-probs tf.Tensor of shape (numsamples,) 63 | the log-probability of each sample 64 | """ 65 | 66 | with self.graph.as_default(): #Call the default graph, used if willing to create multiple graphs. 67 | with tf.compat.v1.variable_scope(self.scope,reuse=tf.compat.v1.AUTO_REUSE): 68 | samples = [] 69 | probs=[] 70 | 71 | inputs=tf.zeros((numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 
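# (The all-zero input vector plays the role of a start token: it lets the
# first RNN cell produce the first conditional before any spin has been sampled.)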
72 | #Initial input to feed to the rnn 73 | 74 | self.inputdim=inputs.shape[1] 75 | self.outputdim=self.inputdim 76 | self.numsamples=inputs.shape[0] 77 | 78 | rnn_state=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 79 | 80 | for n in range(self.N): 81 | rnn_output, rnn_state = self.rnn[n](inputs, rnn_state) 82 | output=self.dense[n](rnn_output) 83 | sample_temp=tf.reshape(tf.compat.v1.multinomial(tf.math.log(output),num_samples=1),[-1,]) 84 | probs.append(output) 85 | samples.append(sample_temp) 86 | inputs=tf.one_hot(sample_temp,depth=self.outputdim, dtype = tf.float64) 87 | 88 | self.samples=tf.stack(values=samples,axis=1) # stack along axis=1 to go from (self.N, num_samples) to (num_samples, self.N): generates self.numsamples vectors of self.N spins containing 0 or 1 89 | 90 | probs=tf.transpose(tf.stack(values=probs,axis=2),perm=[0,2,1]) 91 | one_hot_samples=tf.one_hot(self.samples,depth=self.inputdim, dtype = tf.float64) 92 | self.log_probs=tf.reduce_sum(tf.math.log(tf.reduce_sum(tf.multiply(probs,one_hot_samples),axis=2)),axis=1) 93 | 94 | return self.samples, self.log_probs 95 | 96 | def log_probability(self,samples,inputdim): 97 | """ 98 | calculate the log-probabilities of `samples` 99 | ------------------------------------------------------------------------ 100 | Parameters: 101 | 102 | samples: tf.Tensor 103 | a tf.placeholder of shape (number of samples,system-size) 104 | containing the input samples in integer encoding 105 | inputdim: int 106 | dimension of the input space 107 | 108 | ------------------------------------------------------------------------ 109 | Returns: 110 | log-probs tf.Tensor of shape (number of samples,) 111 | the log-probability of each sample 112 | """ 113 | with self.graph.as_default(): 114 | 115 | self.inputdim=inputdim 116 | self.outputdim=self.inputdim 117 | 118 | self.numsamples=tf.shape(samples)[0] 119 | 120 | inputs=tf.zeros((self.numsamples, self.inputdim), dtype=tf.float64) 121 | 122 | with tf.compat.v1.variable_scope(self.scope,reuse=tf.compat.v1.AUTO_REUSE): 123 | probs=[] 124 | 125 | rnn_state=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 126 | 127 | for n in range(self.N): 128 | rnn_output, rnn_state = self.rnn[n](inputs, rnn_state) 129 | output=self.dense[n](rnn_output) 130 | probs.append(output) 131 | inputs=tf.reshape(tf.one_hot(tf.reshape(tf.slice(samples,begin=[np.int32(0),np.int32(n)],size=[np.int32(-1),np.int32(1)]),shape=[self.numsamples]),depth=self.outputdim,dtype = tf.float64),shape=[self.numsamples,self.inputdim]) 132 | 133 | probs=tf.transpose(tf.stack(values=probs,axis=2),perm=[0,2,1]) 134 | one_hot_samples=tf.one_hot(samples,depth=self.inputdim, dtype = tf.float64) 135 | 136 | self.log_probs=tf.reduce_sum(tf.math.log(tf.reduce_sum(tf.multiply(probs,one_hot_samples),axis=2)),axis=1) 137 | 138 | return self.log_probs 139 | -------------------------------------------------------------------------------- /src/VNA_1DTRNN/Tensordot2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The TensorNetwork Authors 2 | # https://github.com/google/TensorNetwork 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """A modified version of TensorFlow's tensordot operation.""" 15 | 16 | from typing import Any, Optional, Union, Text, Sequence, Tuple, List 17 | import numpy as np 18 | 19 | Tensor = Any 20 | 21 | 22 | def tensordot(tf, 23 | a, 24 | b, 25 | axes, 26 | name: Optional[Text] = None) -> Tensor: 27 | r"""Tensor contraction of a and b along specified axes. 28 | Tensordot (also known as tensor contraction) sums the product of elements 29 | from `a` and `b` over the indices specified by `a_axes` and `b_axes`. 30 | The lists `a_axes` and `b_axes` specify those pairs of axes along which to 31 | contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension 32 | as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists 33 | `a_axes` and `b_axes` must have identical length and consist of unique 34 | integers that specify valid axes for each of the tensors. 35 | This operation corresponds to `numpy.tensordot(a, b, axes)`. 36 | Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1` 37 | is equivalent to matrix multiplication. 38 | Example 2: When `a` and `b` are matrices (order 2), the case 39 | `axes = [[1], [0]]` is equivalent to matrix multiplication. 40 | Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two 41 | tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor 42 | \\(c_{jklm}\\) whose entry 43 | corresponding to the indices \\((j,k,l,m)\\) is given by: 44 | \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\). 45 | In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`. 46 | Args: 47 | tf: The TensorFlow module. This must be passed in instead of imported 48 | since we don't assume users have TensorFlow installed. 49 | a: `Tensor` of type `float32` or `float64`. 50 | b: `Tensor` with the same type as `a`. 51 | axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k]. 52 | If axes is a scalar, sum over the last N axes of a and the first N axes of 53 | b in order. If axes is a list or `Tensor` the first and second row contain 54 | the set of unique integers specifying axes along which the contraction is 55 | computed, for `a` and `b`, respectively. The number of axes for `a` and 56 | `b` must be equal. 57 | name: A name for the operation (optional). 58 | Returns: 59 | A `Tensor` with the same type as `a`. 60 | Raises: 61 | ValueError: If the shapes of `a`, `b`, and `axes` are incompatible. 62 | IndexError: If the values in axes exceed the rank of the corresponding 63 | tensor. 64 | """ 65 | 66 | def _tensordot_should_flip(contraction_axes: List[int], 67 | free_axes: List[int]) -> bool: 68 | """Helper method to determine axis ordering. 69 | We minimize the average distance the indices would have to move under the 70 | transposition. 71 | Args: 72 | contraction_axes: The axes to be contracted. 73 | free_axes: The free axes. 74 | Returns: 75 | should_flip: `True` if `contraction_axes` should be moved to the left, 76 | `False` if they should be moved to the right. 77 | """ 78 | # NOTE: This will fail if the arguments contain any Tensors. 
79 | if contraction_axes and free_axes: 80 | return bool(np.mean(contraction_axes) < np.mean(free_axes)) 81 | return False 82 | 83 | def _tranpose_if_necessary(tensor: Tensor, perm: List[int]) -> Tensor: 84 | """Like transpose(), but avoids creating a new tensor if possible. 85 | Although the graph optimizer should kill trivial transposes, it is best not 86 | to add them in the first place! 87 | """ 88 | if perm == list(range(len(perm))): 89 | return tensor 90 | return tf.transpose(tensor, perm) 91 | 92 | def _reshape_if_necessary(tensor: Tensor, 93 | new_shape: List[int]) -> Tensor: 94 | """Like reshape(), but avoids creating a new tensor if possible. 95 | Assumes shapes are both fully specified.""" 96 | cur_shape = tensor.get_shape().as_list() 97 | if (len(new_shape) == len(cur_shape) and 98 | all(d0 == d1 for d0, d1 in zip(cur_shape, new_shape))): 99 | return tensor 100 | return tf.reshape(tensor, new_shape) 101 | 102 | def _tensordot_reshape( 103 | a: Tensor, axes: Union[Sequence[int], Tensor], is_right_term=False 104 | ) -> Tuple[Tensor, Union[List[int], Tensor], Optional[List[int]], bool]: 105 | """Helper method to perform transpose and reshape for contraction op. 106 | This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` 107 | using `array_ops.transpose` and `array_ops.reshape`. The method takes a 108 | tensor and performs the correct transpose and reshape operation for a given 109 | set of indices. It returns the reshaped tensor as well as a list of indices 110 | necessary to reshape the tensor again after matrix multiplication. 111 | Args: 112 | a: `Tensor`. 113 | axes: List or `int32` `Tensor` of unique indices specifying valid axes of 114 | `a`. 115 | is_right_term: Whether `a` is the right (second) argument to `matmul`. 116 | Returns: 117 | A tuple `(reshaped_a, free_dims, free_dims_static, transpose_needed)` 118 | where `reshaped_a` is the tensor `a` reshaped to allow contraction via 119 | `matmul`, `free_dims` is either a list of integers or an `int32` 120 | `Tensor`, depending on whether the shape of a is fully specified, and 121 | free_dims_static is either a list of integers and None values, or None, 122 | representing the inferred static shape of the free dimensions. 123 | `transpose_needed` indicates whether `reshaped_a` must be transposed, 124 | or not, when calling `matmul`. 
125 | """ 126 | if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)): 127 | shape_a = a.get_shape().as_list() 128 | # NOTE: This will fail if axes contains any tensors 129 | axes = [i if i >= 0 else i + len(shape_a) for i in axes] 130 | free = [i for i in range(len(shape_a)) if i not in axes] 131 | flipped = _tensordot_should_flip(axes, free) 132 | 133 | free_dims = [shape_a[i] for i in free] 134 | prod_free = int(np.prod([shape_a[i] for i in free])) 135 | prod_axes = int(np.prod([shape_a[i] for i in axes])) 136 | perm = axes + free if flipped else free + axes 137 | new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes] 138 | transposed_a = _tranpose_if_necessary(a, perm) 139 | reshaped_a = _reshape_if_necessary(transposed_a, new_shape) 140 | transpose_needed = (not flipped) if is_right_term else flipped 141 | return reshaped_a, free_dims, free_dims, transpose_needed 142 | if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)): 143 | shape_a = a.get_shape().as_list() 144 | axes = [i if i >= 0 else i + len(shape_a) for i in axes] 145 | free = [i for i in range(len(shape_a)) if i not in axes] 146 | flipped = _tensordot_should_flip(axes, free) 147 | perm = axes + free if flipped else free + axes 148 | 149 | axes_dims = [shape_a[i] for i in axes] 150 | free_dims = [shape_a[i] for i in free] 151 | free_dims_static = free_dims 152 | axes = tf.convert_to_tensor(axes, dtype=tf.dtypes.int32, name="axes") 153 | free = tf.convert_to_tensor(free, dtype=tf.dtypes.int32, name="free") 154 | shape_a = tf.shape(a) 155 | transposed_a = _tranpose_if_necessary(a, perm) 156 | else: 157 | free_dims_static = None 158 | shape_a = tf.shape(a) 159 | rank_a = tf.rank(a) 160 | axes = tf.convert_to_tensor(axes, dtype=tf.dtypes.int32, name="axes") 161 | axes = tf.where(axes >= 0, axes, axes + rank_a) 162 | free, _ = tf.compat.v1.setdiff1d(tf.range(rank_a), axes) 163 | # Matmul does not accept tensors for its transpose arguments, so fall 164 | # back to the previous, fixed behavior. 165 | # NOTE(amilsted): With a suitable wrapper for `matmul` using e.g. `case` 166 | # to match transpose arguments to tensor values, we could also avoid 167 | # unneeded tranposes in this case at the expense of a somewhat more 168 | # complicated graph. Unclear whether this would be beneficial overall. 169 | flipped = is_right_term 170 | perm = ( 171 | tf.concat([axes, free], 0) if flipped else tf.concat([free, axes], 0)) 172 | transposed_a = tf.transpose(a, perm) 173 | 174 | free_dims = tf.gather(shape_a, free) 175 | axes_dims = tf.gather(shape_a, axes) 176 | prod_free_dims = tf.reduce_prod(free_dims) 177 | prod_axes_dims = tf.reduce_prod(axes_dims) 178 | 179 | if flipped: 180 | new_shape = tf.stack([prod_axes_dims, prod_free_dims]) 181 | else: 182 | new_shape = tf.stack([prod_free_dims, prod_axes_dims]) 183 | reshaped_a = tf.reshape(transposed_a, new_shape) 184 | transpose_needed = (not flipped) if is_right_term else flipped 185 | return reshaped_a, free_dims, free_dims_static, transpose_needed 186 | 187 | def _tensordot_axes(a: Tensor, axes 188 | ) -> Tuple[Any, Any]: 189 | """Generates two sets of contraction axes for the two tensor arguments.""" 190 | a_shape = a.get_shape() 191 | if isinstance(axes, tf.compat.integral_types): 192 | if axes < 0: 193 | raise ValueError("'axes' must be at least 0.") 194 | if a_shape.ndims is not None: 195 | if axes > a_shape.ndims: 196 | raise ValueError("'axes' must not be larger than the number of " 197 | "dimensions of tensor %s." 
% a) 198 | return (list(range(a_shape.ndims - axes, 199 | a_shape.ndims)), list(range(axes))) 200 | rank = tf.rank(a) 201 | return (tf.range(rank - axes, rank, 202 | dtype=tf.int32), tf.range(axes, dtype=tf.int32)) 203 | if isinstance(axes, (list, tuple)): 204 | if len(axes) != 2: 205 | raise ValueError("'axes' must be an integer or have length 2.") 206 | a_axes = axes[0] 207 | b_axes = axes[1] 208 | if isinstance(a_axes, tf.compat.integral_types) and \ 209 | isinstance(b_axes, tf.compat.integral_types): 210 | a_axes = [a_axes] 211 | b_axes = [b_axes] 212 | # NOTE: This fails if either a_axes and b_axes are Tensors. 213 | if len(a_axes) != len(b_axes): 214 | raise ValueError( 215 | "Different number of contraction axes 'a' and 'b', %s != %s." % 216 | (len(a_axes), len(b_axes))) 217 | 218 | # The contraction indices do not need to be permuted. 219 | # Sort axes to avoid unnecessary permutations of a. 220 | # NOTE: This fails if either a_axes and b_axes contain Tensors. 221 | # pylint: disable=len-as-condition 222 | if len(a_axes) > 0: 223 | a_axes, b_axes = list(zip(*sorted(zip(a_axes, b_axes)))) 224 | 225 | return a_axes, b_axes 226 | axes = tf.convert_to_tensor(axes, name="axes", dtype=tf.int32) 227 | return axes[0], axes[1] 228 | 229 | with tf.compat.v1.name_scope(name, "Tensordot", [a, b, axes]) as _name: 230 | a = tf.convert_to_tensor(a, name="a") 231 | b = tf.convert_to_tensor(b, name="b") 232 | a_axes, b_axes = _tensordot_axes(a, axes) 233 | a_reshape, a_free_dims, a_free_dims_static, a_transp = _tensordot_reshape( 234 | a, a_axes) 235 | b_reshape, b_free_dims, b_free_dims_static, b_transp = _tensordot_reshape( 236 | b, b_axes, is_right_term=True) 237 | 238 | ab_matmul = tf.matmul( 239 | a_reshape, b_reshape, transpose_a=a_transp, transpose_b=b_transp) 240 | 241 | if isinstance(a_free_dims, list) and isinstance(b_free_dims, list): 242 | return tf.reshape(ab_matmul, a_free_dims + b_free_dims, name=_name) 243 | a_free_dims = tf.convert_to_tensor(a_free_dims, dtype=tf.dtypes.int32) 244 | b_free_dims = tf.convert_to_tensor(b_free_dims, dtype=tf.dtypes.int32) 245 | product = tf.reshape( 246 | ab_matmul, tf.concat([a_free_dims, b_free_dims], 0), name=_name) 247 | if a_free_dims_static is not None and b_free_dims_static is not None: 248 | product.set_shape(a_free_dims_static + b_free_dims_static) 249 | return product 250 | -------------------------------------------------------------------------------- /src/VNA_1DTRNN/TensorizedRNNCell.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from Tensordot2 import tensordot 4 | 5 | """ 6 | Code by Mohamed Hibat-Allah 7 | Title : An implementation of the one dimensional Tensorized RNN cell 8 | """ 9 | 10 | class TensorizedRNNCell(tf.compat.v1.nn.rnn_cell.RNNCell): 11 | """The 1D Tensorized RNN cell. 
12 | """ 13 | 14 | def __init__(self, num_units = None, num_in = 2, activation = None, name=None, dtype = None, reuse=None): 15 | super(TensorizedRNNCell, self).__init__(_reuse=reuse, name=name) 16 | # save class variables 17 | self._num_in = num_in 18 | self._num_units = num_units 19 | self._state_size = num_units 20 | self._output_size = num_units 21 | self.activation = activation 22 | 23 | # set up input -> hidden connection 24 | self.W = tf.compat.v1.get_variable("W_"+name, shape=[num_units, num_units, self._num_in], 25 | initializer=tf.contrib.layers.xavier_initializer(), dtype = dtype) 26 | 27 | self.bh = tf.compat.v1.get_variable("bh_"+name, shape=[num_units], 28 | initializer=tf.contrib.layers.xavier_initializer(), dtype = dtype) 29 | # needed properties 30 | 31 | @property 32 | def input_size(self): 33 | return self._num_in # real 34 | 35 | @property 36 | def state_size(self): 37 | return self._state_size # real 38 | 39 | @property 40 | def output_size(self): 41 | return self._output_size # real 42 | 43 | def call(self, inputs, state): 44 | 45 | inputstate_mul = tf.einsum('ij,ik->ijk', state,inputs) 46 | 47 | # prepare input linear combination 48 | state_mul = tensordot(tf, inputstate_mul, self.W, axes=[[1,2],[1,2]]) # [batch_sz, num_units] 49 | 50 | preact = state_mul + self.bh 51 | 52 | output = self.activation(preact) # [batch_sz, num_units] C 53 | 54 | new_state = output 55 | 56 | return output, new_state 57 | -------------------------------------------------------------------------------- /src/VNA_1DTRNN/run_VNA_randomisingchain.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | #---------------------------------------------------------------------------------------------------------- 5 | """ 6 | This implementation is based on RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions 7 | Edited by Mohamed Hibat-Allah 8 | Title : Implementation of Variational Neural Annealing for the random Ising chain with open boundary conditions 9 | Description : This code uses 1D Tensorized RNN cells to run variational neural annealing both classical 10 | and quantum on the random ising chain. 11 | """ 12 | #----------------------------------------------------------------------------------------------------------- 13 | 14 | import tensorflow as tf 15 | import numpy as np 16 | import os 17 | import time 18 | import random 19 | 20 | from RNNWavefunction import RNNWavefunction 21 | from TensorizedRNNCell import TensorizedRNNCell 22 | from Helper_functions import * 23 | 24 | #Seeding for reproducibility purposes 25 | seed = 111 26 | tf.compat.v1.reset_default_graph() 27 | random.seed(seed) # `python` built-in pseudo-random generator 28 | np.random.seed(seed) # numpy pseudo-random generator 29 | tf.compat.v1.set_random_seed(seed) # tensorflow pseudo-random generator 30 | 31 | #### Hyperparams 32 | # Note: 33 | # If Bx0=0, then this code will run Variational Classical Annealing (VCA). 34 | # If T0=0, then this code will run Variational Quantum Annealing (VQA). 35 | # If both are zero, then this algorithm will correspond to classical quantum optimization (CQO). 36 | # For more details, please check Ref. https://arxiv.org/abs/2101.10154. 
37 | N = 20 #number of spins in the chain
38 | num_units = 20 #number of memory units
39 | numlayers = 1 #number of layers
40 | numsamples = 50 #Batch size
41 | lr = 1e-3 #learning rate
42 | T0 = 2 #Initial temperature
43 | Bx0 = 2 #Initial magnetic field
44 | num_warmup_steps = 1000 #number of warmup steps
45 | num_annealing_steps = 500 #number of annealing steps
46 | num_equilibrium_steps = 5 #number of training steps after each annealing step
47 | activation_function = tf.nn.elu #activation of the RNN cell
48 | 
49 | Jz = (2*np.random.randint(0,high=2,size=N-1)-1) #Random couplings of the random Ising chain taken from {-1,1} - dependent on the chosen seed
50 | 
51 | units=[num_units]*numlayers #list containing the number of hidden units for each layer of the RNN
52 | 
53 | print('\n')
54 | print("Number of spins =", N)
55 | print("Initial temperature =", T0)
56 | print('Seed = ', seed)
57 | 
58 | num_steps = num_annealing_steps*num_equilibrium_steps + num_warmup_steps #Total number of gradient steps
59 | 
60 | print("\nNumber of annealing steps = {0}".format(num_annealing_steps))
61 | print("Number of training steps = {0}".format(num_steps))
62 | print("Number of layers = {0}\n".format(numlayers))
63 | 
64 | 
65 | # Initializing the RNN-----------
66 | RNNWF = RNNWavefunction(N,units=units,cell=TensorizedRNNCell, activation = activation_function, seed = seed) #contains the graph with the RNNs
67 | 
68 | #Building the graph -------------------
69 | with tf.compat.v1.variable_scope(RNNWF.scope,reuse=tf.compat.v1.AUTO_REUSE):
70 |     with RNNWF.graph.as_default():
71 | 
72 |         global_step = tf.Variable(0, trainable=False)
73 |         learningrate_placeholder = tf.compat.v1.placeholder(dtype=tf.float64,shape=[])
74 |         learningrate = tf.compat.v1.train.exponential_decay(learningrate_placeholder, global_step, 100, 1.0, staircase=True)
75 | 
76 |         #Defining the optimizer
77 |         optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=learningrate)
78 | 
79 |         #Defining Tensorflow placeholders
80 |         Eloc=tf.compat.v1.placeholder(dtype=tf.float64,shape=[numsamples])
81 |         sampleplaceholder_forgrad=tf.compat.v1.placeholder(dtype=tf.int32,shape=[numsamples,N])
82 |         log_probs_forgrad = RNNWF.log_probability(sampleplaceholder_forgrad,inputdim=2)
83 | 
84 |         samples_placeholder=tf.compat.v1.placeholder(dtype=tf.int32,shape=(None,N))
85 |         log_probs_tensor=RNNWF.log_probability(samples_placeholder,inputdim=2)
86 |         samplesandprobs = RNNWF.sample(numsamples=numsamples,inputdim=2)
87 | 
88 |         T_placeholder = tf.compat.v1.placeholder(dtype=tf.float64,shape=())
89 | 
90 |         #Here we define a surrogate cost function that allows us to get the gradients of the free energy via the tf.stop_gradient trick
91 |         Floc = Eloc + T_placeholder*log_probs_forgrad
92 |         cost = tf.reduce_mean(tf.multiply(log_probs_forgrad,tf.stop_gradient(Floc))) - tf.reduce_mean(log_probs_forgrad)*tf.reduce_mean(tf.stop_gradient(Floc))
93 | 
94 |         #Calculate Gradients---------------
95 |         gradients, variables = zip(*optimizer.compute_gradients(cost))
96 | 
97 |         #Define the optimization step
98 |         optstep=optimizer.apply_gradients(zip(gradients,variables), global_step = global_step)
99 | 
100 |         #Tensorflow saver to checkpoint
101 |         saver=tf.compat.v1.train.Saver()
102 | 
103 |         #For initialization
104 |         init=tf.compat.v1.global_variables_initializer()
105 |         initialize_parameters = tf.compat.v1.global_variables_initializer()
106 | #----------------------------------------------------------------
107 | 
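Thanks to `tf.stop_gradient`, differentiating the surrogate `cost` above yields the REINFORCE estimator of the free-energy gradient: the mean over samples of d(log p(sigma))/d(theta) * (F_loc(sigma) - <F_loc>), where the subtracted mean acts as a variance-reducing baseline. A NumPy sketch of that estimator (hypothetical names, for illustration only):

    import numpy as np

    def free_energy_gradient(grad_log_probs, Floc):
        # grad_log_probs: (numsamples, numparams) array of d(log p)/d(theta)
        # Floc: (numsamples,) array of local free energies, Eloc + T*log p
        centered = Floc - Floc.mean()          # baseline subtraction
        return (grad_log_probs * centered[:, None]).mean(axis=0)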
108 | #Starting Session------------
109 | #GPU management
110 | config = tf.compat.v1.ConfigProto()
111 | config.gpu_options.allow_growth = True
112 | 
113 | #Start session
114 | sess=tf.compat.v1.Session(graph=RNNWF.graph, config=config)
115 | sess.run(init)
116 | 
117 | #Loading previous training checkpoints----------
118 | ### To be implemented
119 | #------------------------------------
120 | 
121 | ## Run Variational Annealing
122 | with tf.compat.v1.variable_scope(RNNWF.scope,reuse=tf.compat.v1.AUTO_REUSE):
123 |     with RNNWF.graph.as_default():
124 | 
125 |         #To store data
126 |         meanEnergy=[]
127 |         varEnergy=[]
128 |         varFreeEnergy = []
129 |         meanFreeEnergy = []
130 |         samples = np.ones((numsamples, N), dtype=np.int32)
131 |         queue_samples = np.zeros((N+1, numsamples, N), dtype = np.int32) #Array to store all the diagonal and non diagonal matrix elements (We create it here for memory efficiency as we do not want to allocate it at each training step)
132 |         log_probs = np.zeros((N+1)*numsamples, dtype=np.float64) #Array to store the log_probs of all the diagonal and non diagonal matrix elements (We create it here for memory efficiency as we do not want to allocate it at each training step)
133 | 
134 |         T = T0 #initializing temperature
135 |         Bx = Bx0 #initializing magnetic field
136 | 
137 |         sess.run(initialize_parameters) #Reinitialize the parameters
138 | 
139 |         start = time.time()
140 | 
141 |         for it in range(len(meanEnergy),num_steps+1):
142 |             #Annealing
143 |             if it>=num_warmup_steps and it <= num_annealing_steps*num_equilibrium_steps + num_warmup_steps and it % num_equilibrium_steps == 0:
144 |                 annealing_step = (it-num_warmup_steps)/num_equilibrium_steps
145 |                 T = T0*(1-annealing_step/num_annealing_steps)
146 |                 Bx = Bx0*(1-annealing_step/num_annealing_steps)
147 | 
148 |             #Showing the current status once annealing has started
149 |             if it%num_equilibrium_steps==0:
150 |                 if it <= num_annealing_steps*num_equilibrium_steps + num_warmup_steps and it>=num_warmup_steps:
151 |                     annealing_step = (it-num_warmup_steps)/num_equilibrium_steps
152 |                     print("\nAnnealing step: {0}/{1}".format(annealing_step,num_annealing_steps))
153 | 
154 |             #Getting samples and log_probs from the RNN
155 |             samples, log_probabilities = sess.run(samplesandprobs)
156 | 
157 |             # Estimating the local energies
158 |             local_energies = Ising_local_energies(Jz, Bx, samples, queue_samples, log_probs_tensor, samples_placeholder, log_probs, sess)
159 | 
160 |             #computing mean(E) and var(E)
161 |             meanE = np.mean(local_energies)
162 |             varE = np.var(local_energies)
163 | 
164 |             #adding elements to be saved
165 |             meanEnergy.append(meanE)
166 |             varEnergy.append(varE)
167 | 
168 |             #computing mean(F) and var(F)
169 |             meanF = np.mean(local_energies+T*log_probabilities)
170 |             varF = np.var(local_energies+T*log_probabilities)
171 | 
172 |             #adding elements to be saved
173 |             meanFreeEnergy.append(meanF)
174 |             varFreeEnergy.append(varF)
175 | 
176 |             #Printing progress
177 |             if it%num_equilibrium_steps==0:
178 |                 print('mean(E): {0}, mean(F): {1}, var(E): {2}, var(F): {3}, #samples {4}, #Training step {5}'.format(meanE,meanF,varE,varF,numsamples, it))
179 |                 print("Temperature: ", T)
180 |                 print("Magnetic field: ", Bx)
181 | 
182 |             #Here we produce samples at the end of annealing
183 |             if it == num_annealing_steps*num_equilibrium_steps + num_warmup_steps:
184 | 
185 |                 numsamples_estimation = 10**5 #Num samples to be obtained at the end
186 |                 Nsteps = 20 #The number of steps taken to get "numsamples_estimation" samples (to avoid memory allocation issues)
187 |                 numsamples_perstep = numsamples_estimation//Nsteps
188 | 
189 |                 samplesandprobs_final = RNNWF.sample(numsamples=numsamples_perstep,inputdim=2)
190 |                 energies = np.zeros((numsamples_estimation))
191 |                 solutions = np.zeros((numsamples_estimation, N))
192 |                 print("\nSaving energy and variance at the end of annealing")
193 | 
194 |                 for i in range(Nsteps):
195 |                     # print("\nsampling started")
196 |                     samples_final, _ = sess.run(samplesandprobs_final)
197 |                     # print("\nsampling finished")
198 |                     energies[i*numsamples_perstep:(i+1)*numsamples_perstep] = Ising_diagonal_matrixelements(Jz,samples_final)
199 |                     solutions[i*numsamples_perstep:(i+1)*numsamples_perstep] = samples_final
200 |                     print("Sampling step:" , i+1, "/", Nsteps)
201 | 
202 |                 print("meanE = ", np.mean(energies))
203 |                 print("varE = ", np.var(energies))
204 |                 print("minE = ",np.min(energies))
205 | 
206 |             #Run gradient descent step
207 |             sess.run(optstep,feed_dict={Eloc:local_energies, sampleplaceholder_forgrad: samples, learningrate_placeholder: lr, T_placeholder:T})
208 | 
209 |             if it%5 == 0:
210 |                 print("Elapsed time is =", time.time()-start, " seconds")
211 |                 print('\n\n')
212 | 
213 | #----------------------------
--------------------------------------------------------------------------------
/src/VNA_2DTRNN/Helper_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | 
4 | """
5 | This implementation is based on RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions
6 | Edited by Mohamed Hibat-Allah
7 | Description: we define helper functions to obtain the local energies of a 2D model
8 | both with and without the presence of a transverse magnetic field
9 | """
10 | 
11 | # Loading Functions --------------------------
12 | def Ising2D_diagonal_matrixelements(Jz, samples):
13 |     """ To get the diagonal local energies of a 2D spin lattice given a set of samples, computed in parallel.
14 |     Returns: The local energies that correspond to the "samples"
15 |     Inputs:
16 |     - samples: (numsamples, Nx,Ny)
17 |     - Jz: (Nx,Ny,2) np array of J_ij couplings
18 |     """
19 | 
20 |     numsamples = samples.shape[0]
21 |     Nx = samples.shape[1]
22 |     Ny = samples.shape[2]
23 | 
24 |     N = Nx*Ny #Total number of spins
25 | 
26 |     local_energies = np.zeros((numsamples), dtype = np.float64)
27 | 
28 |     for i in range(Nx-1): #diagonal elements (right neighbours)
29 |         values = samples[:,i]+samples[:,i+1]
30 |         valuesT = np.copy(values)
31 |         valuesT[values==2] = +1 #If both spins are up
32 |         valuesT[values==0] = +1 #If both spins are down
33 |         valuesT[values==1] = -1 #If they are opposite
34 | 
35 |         local_energies += np.sum(valuesT*(-Jz[i,:,0]), axis = 1)
36 | 
37 |     for i in range(Ny-1): #diagonal elements (upward neighbours (or downward, depending on how you view the lattice))
38 |         values = samples[:,:,i]+samples[:,:,i+1]
39 |         valuesT = np.copy(values)
40 |         valuesT[values==2] = +1 #If both spins are up
41 |         valuesT[values==0] = +1 #If both spins are down
42 |         valuesT[values==1] = -1 #If they are opposite
43 | 
44 |         local_energies += np.sum(valuesT*(-Jz[:,i,1]), axis = 1)
45 | 
46 |     return local_energies
47 | #--------------------------
48 | 
49 | def Ising2D_local_energies(Jz, Bx, samples, queue_samples, log_probs_tensor, samples_placeholder, log_probs, sess):
50 |     """ To get the local energies of a 2D spin lattice given a set of samples, computed in parallel.
51 | Returns: The local energies that correspond to the "samples" 52 | Inputs: 53 | - samples: (numsamples, Nx,Ny) 54 | - Jz: (Nx,Ny,2) np array of J_ij couplings 55 | - Bx: float 56 | - queue_samples: ((Nx*Ny+1)*numsamples, Nx,Ny) an allocated np array to store all the sample applying the Hamiltonian H on samples 57 | - log_probs_tensor: A TF tensor with size (None) 58 | - samples_placeholder: A TF placeholder to feed in a set of configurations 59 | - log_probs: ((Nx*Ny+1)*numsamples): an allocated np array to store the log_probs non diagonal elements 60 | - sess: The current TF session 61 | """ 62 | 63 | numsamples = samples.shape[0] 64 | Nx = samples.shape[1] 65 | Ny = samples.shape[2] 66 | 67 | N = Nx*Ny #Total number of spins 68 | 69 | local_energies = np.zeros((numsamples), dtype = np.float64) 70 | 71 | for i in range(Nx-1): #diagonal elements (right neighbours) 72 | values = samples[:,i]+samples[:,i+1] 73 | valuesT = np.copy(values) 74 | valuesT[values==2] = +1 #If both spins are up 75 | valuesT[values==0] = +1 #If both spins are down 76 | valuesT[values==1] = -1 #If they are opposite 77 | 78 | local_energies += np.sum(valuesT*(-Jz[i,:,0]), axis = 1) 79 | 80 | for i in range(Ny-1): #diagonal elements (upward neighbours (or downward, it depends on the way you see the lattice :))) 81 | values = samples[:,:,i]+samples[:,:,i+1] 82 | valuesT = np.copy(values) 83 | valuesT[values==2] = +1 #If both spins are up 84 | valuesT[values==0] = +1 #If both spins are down 85 | valuesT[values==1] = -1 #If they are opposite 86 | 87 | local_energies += np.sum(valuesT*(-Jz[:,i,1]), axis = 1) 88 | 89 | 90 | queue_samples[0] = samples #storing the diagonal samples 91 | 92 | if Bx != 0: 93 | for i in range(Nx): #Non-diagonal elements 94 | for j in range(Ny): 95 | valuesT = np.copy(samples) 96 | valuesT[:,i,j][samples[:,i,j]==1] = 0 #Flip 97 | valuesT[:,i,j][samples[:,i,j]==0] = 1 #Flip 98 | 99 | queue_samples[i*Ny+j+1] = valuesT 100 | 101 | len_sigmas = (N+1)*numsamples 102 | steps = len_sigmas//50000+1 #I want a maximum in batch size just to not allocate too much memory 103 | # print("Total num of steps =", steps) 104 | queue_samples_reshaped = np.reshape(queue_samples, [(N+1)*numsamples, Nx,Ny]) 105 | for i in range(steps): 106 | if i < steps-1: 107 | cut = slice((i*len_sigmas)//steps,((i+1)*len_sigmas)//steps) 108 | else: 109 | cut = slice((i*len_sigmas)//steps,len_sigmas) 110 | log_probs[cut] = sess.run(log_probs_tensor, feed_dict={samples_placeholder:queue_samples_reshaped[cut]}) 111 | # print(i) 112 | 113 | log_probs_reshaped = np.reshape(log_probs, [N+1,numsamples]) 114 | for j in range(numsamples): 115 | local_energies[j] += -Bx*np.sum(np.exp(0.5*log_probs_reshaped[1:,j]-0.5*log_probs_reshaped[0,j])) 116 | 117 | return local_energies 118 | -------------------------------------------------------------------------------- /src/VNA_2DTRNN/MDRNNWavefunction.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import random 4 | 5 | """ 6 | This implementation is based on RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions 7 | Edited by Mohamed Hibat-Allah 8 | Here, we define the 2D RNNwavefunction class, which contains the sample method 9 | that allows to sample configurations autoregressively from the RNN and 10 | the log_probability method which allows to estimate the log-probability of a set of configurations. 11 | More details are in https://arxiv.org/abs/2101.10154. 
12 | """ 13 | 14 | class MDRNNWavefunction(object): 15 | def __init__(self,systemsize_x = None, systemsize_y = None,cell=None,activation=None,num_units = None,scope='RNNWavefunction',seed = 111): 16 | """ 17 | systemsize_x, systemsize_y: int 18 | number of sites in x, y directions 19 | cell: a tensorflow RNN cell 20 | num_units: int 21 | number of memory units 22 | scope: str 23 | the name of the name-space scope 24 | activation: activation of the RNN cell 25 | seed: pseudo-random number generator 26 | """ 27 | self.graph=tf.Graph() 28 | self.scope=scope #Label of the RNN wavefunction 29 | self.Nx=systemsize_x 30 | self.Ny=systemsize_y 31 | 32 | random.seed(seed) # `python` built-in pseudo-random generator 33 | np.random.seed(seed) # numpy pseudo-random generator 34 | 35 | #Defining the neural network 36 | with self.graph.as_default(): 37 | with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE): 38 | 39 | tf.set_random_seed(seed) # tensorflow pseudo-random generator 40 | 41 | #Defining the 2D Tensorized RNN cell with non-weight sharing 42 | self.rnn=[cell(num_units = num_units, activation = activation, name="rnn_"+str(0)+str(i),dtype=tf.float64) for i in range(self.Nx*self.Ny)] 43 | self.dense = [tf.layers.Dense(2,activation=tf.nn.softmax,name='wf_dense'+str(i), dtype = tf.float64) for i in range(self.Nx*self.Ny)] 44 | 45 | def sample(self,numsamples,inputdim): 46 | """ 47 | generate samples from a probability distribution parametrized by a recurrent network 48 | ------------------------------------------------------------------------ 49 | Parameters: 50 | 51 | numsamples: int 52 | number of samples to be produced 53 | inputdim: int 54 | hilbert space dimension 55 | 56 | ------------------------------------------------------------------------ 57 | Returns: a tuple (samples,log-probs) 58 | 59 | samples: tf.Tensor of shape (numsamples,systemsize_x, systemsize_y) 60 | the samples in integer encoding 61 | log-probs tf.Tensor of shape (numsamples,) 62 | the log-probability of each sample 63 | """ 64 | 65 | with self.graph.as_default(): #Call the default graph, used if willing to create multiple graphs. 66 | with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE): 67 | 68 | #Initial input to feed to the lstm 69 | 70 | self.inputdim=inputdim 71 | self.outputdim=self.inputdim 72 | self.numsamples=numsamples 73 | 74 | 75 | samples=[[[] for nx in range(self.Nx)] for ny in range(self.Ny)] 76 | probs = [[[] for nx in range(self.Nx)] for ny in range(self.Ny)] 77 | rnn_states = {} 78 | inputs = {} 79 | 80 | for ny in range(self.Ny): #Loop over the boundaries for initialization 81 | if ny%2==0: 82 | nx = -1 83 | # print(nx,ny) 84 | rnn_states[str(nx)+str(ny)]=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 85 | inputs[str(nx)+str(ny)] = tf.zeros((self.numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 86 | 87 | if ny%2==1: 88 | nx = self.Nx 89 | # print(nx,ny) 90 | rnn_states[str(nx)+str(ny)]=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 91 | inputs[str(nx)+str(ny)] = tf.zeros((self.numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 92 | 93 | 94 | for nx in range(self.Nx): #Loop over the boundaries for initialization 95 | ny = -1 96 | rnn_states[str(nx)+str(ny)]=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 97 | inputs[str(nx)+str(ny)] = tf.zeros((self.numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 
98 | 99 | #Making a loop over the sites with the 2DRNN 100 | for ny in range(self.Ny): 101 | 102 | if ny%2 == 0: 103 | 104 | for nx in range(self.Nx): #left to right 105 | 106 | rnn_output, rnn_states[str(nx)+str(ny)] = self.rnn[ny*self.Nx+nx]((inputs[str(nx-1)+str(ny)],inputs[str(nx)+str(ny-1)]), (rnn_states[str(nx-1)+str(ny)],rnn_states[str(nx)+str(ny-1)])) 107 | 108 | output=self.dense[ny*self.Nx+nx](rnn_output) 109 | sample_temp=tf.reshape(tf.multinomial(tf.log(output),num_samples=1),[-1,]) 110 | samples[nx][ny] = sample_temp 111 | probs[nx][ny] = output 112 | inputs[str(nx)+str(ny)]=tf.one_hot(sample_temp,depth=self.outputdim, dtype = tf.float64) 113 | 114 | 115 | if ny%2 == 1: 116 | 117 | for nx in range(self.Nx-1,-1,-1): #right to left 118 | 119 | rnn_output, rnn_states[str(nx)+str(ny)] = self.rnn[ny*self.Nx+nx]((inputs[str(nx+1)+str(ny)],inputs[str(nx)+str(ny-1)]), (rnn_states[str(nx+1)+str(ny)],rnn_states[str(nx)+str(ny-1)])) 120 | 121 | output=self.dense[ny*self.Nx+nx](rnn_output) 122 | sample_temp=tf.reshape(tf.multinomial(tf.log(output),num_samples=1),[-1,]) 123 | samples[nx][ny] = sample_temp 124 | probs[nx][ny] = output 125 | inputs[str(nx)+str(ny)]=tf.one_hot(sample_temp,depth=self.outputdim, dtype = tf.float64) 126 | 127 | 128 | self.samples=tf.transpose(tf.stack(values=samples,axis=0), perm = [2,0,1]) 129 | 130 | probs=tf.transpose(tf.stack(values=probs,axis=0),perm=[2,0,1,3]) 131 | one_hot_samples=tf.one_hot(self.samples,depth=self.inputdim, dtype = tf.float64) 132 | self.log_probs=tf.reduce_sum(tf.reduce_sum(tf.log(tf.reduce_sum(tf.multiply(probs,one_hot_samples),axis=3)),axis=2),axis=1) 133 | 134 | 135 | return self.samples,self.log_probs 136 | 137 | 138 | def log_probability(self,samples,inputdim): 139 | """ 140 | calculate the log-probabilities of ```samples`` 141 | ------------------------------------------------------------------------ 142 | Parameters: 143 | 144 | samples: tf.Tensor 145 | a tf.placeholder of shape (number of samples,systemsize_x, systemsize_y) 146 | containing the input samples in integer encoding 147 | inputdim: int 148 | dimension of the input space 149 | 150 | ------------------------------------------------------------------------ 151 | Returns: 152 | log-probs tf.Tensor of shape (number of samples,) 153 | the log-probability of each sample 154 | """ 155 | with self.graph.as_default(): 156 | 157 | self.inputdim=inputdim 158 | self.outputdim=self.inputdim 159 | 160 | self.numsamples=tf.shape(samples)[0] 161 | 162 | #Initial input to feed to the lstm 163 | self.outputdim=self.inputdim 164 | 165 | 166 | samples_=tf.transpose(samples, perm = [1,2,0]) 167 | rnn_states = {} 168 | inputs = {} 169 | 170 | for ny in range(self.Ny): #Loop over the boundaries for initialization 171 | if ny%2==0: 172 | nx = -1 173 | rnn_states[str(nx)+str(ny)]=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 174 | inputs[str(nx)+str(ny)] = tf.zeros((self.numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 175 | 176 | if ny%2==1: 177 | nx = self.Nx 178 | rnn_states[str(nx)+str(ny)]=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 179 | inputs[str(nx)+str(ny)] = tf.zeros((self.numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 180 | 181 | 182 | for nx in range(self.Nx): #Loop over the boundaries for initialization 183 | ny = -1 184 | rnn_states[str(nx)+str(ny)]=self.rnn[0].zero_state(self.numsamples,dtype=tf.float64) 185 | inputs[str(nx)+str(ny)] = tf.zeros((self.numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 
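The rest of this method re-runs the RNN along the same zigzag path and, at the end, gathers each site's predicted probability for the observed spin and sums the logs over the lattice. A NumPy sketch of that final reduction (illustration only; the shapes are assumptions):

    import numpy as np

    numsamples, Nx, Ny = 5, 3, 3
    probs = np.random.dirichlet([1, 1], size=(numsamples, Nx, Ny))  # (..., 2) per-site distributions
    samples = np.random.randint(0, 2, size=(numsamples, Nx, Ny))    # integer encoding
    picked = np.take_along_axis(probs, samples[..., None], axis=3)[..., 0]
    log_probs = np.log(picked).sum(axis=(1, 2))                     # shape (numsamples,)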
186 | 187 | 188 | with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE): 189 | probs = [[[] for nx in range(self.Nx)] for ny in range(self.Ny)] 190 | 191 | #Making a loop over the sites with the 2DRNN 192 | for ny in range(self.Ny): 193 | 194 | if ny%2 == 0: 195 | 196 | for nx in range(self.Nx): #left to right 197 | 198 | rnn_output, rnn_states[str(nx)+str(ny)] = self.rnn[ny*self.Nx+nx]((inputs[str(nx-1)+str(ny)],inputs[str(nx)+str(ny-1)]), (rnn_states[str(nx-1)+str(ny)],rnn_states[str(nx)+str(ny-1)])) 199 | 200 | output=self.dense[ny*self.Nx+nx](rnn_output) 201 | sample_temp=tf.reshape(tf.multinomial(tf.log(output),num_samples=1),[-1,]) 202 | probs[nx][ny] = output 203 | inputs[str(nx)+str(ny)]=tf.one_hot(samples_[nx,ny],depth=self.outputdim,dtype = tf.float64) 204 | 205 | if ny%2 == 1: 206 | 207 | for nx in range(self.Nx-1,-1,-1): #right to left 208 | 209 | rnn_output, rnn_states[str(nx)+str(ny)] = self.rnn[ny*self.Nx+nx]((inputs[str(nx+1)+str(ny)],inputs[str(nx)+str(ny-1)]), (rnn_states[str(nx+1)+str(ny)],rnn_states[str(nx)+str(ny-1)])) 210 | 211 | output=self.dense[ny*self.Nx+nx](rnn_output) 212 | sample_temp=tf.reshape(tf.multinomial(tf.log(output),num_samples=1),[-1,]) 213 | probs[nx][ny] = output 214 | inputs[str(nx)+str(ny)]=tf.one_hot(samples_[nx,ny],depth=self.outputdim,dtype = tf.float64) 215 | 216 | probs=tf.transpose(tf.stack(values=probs,axis=0),perm=[2,0,1,3]) 217 | one_hot_samples=tf.one_hot(samples,depth=self.inputdim, dtype = tf.float64) 218 | 219 | self.log_probs=tf.reduce_sum(tf.reduce_sum(tf.log(tf.reduce_sum(tf.multiply(probs,one_hot_samples),axis=3)),axis=2),axis=1) 220 | 221 | return self.log_probs 222 | -------------------------------------------------------------------------------- /src/VNA_2DTRNN/MDTensorizedRNNCell.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from Tensordot2 import tensordot 4 | 5 | ####################################################################################################### 6 | """ 7 | Code by Mohamed Hibat-Allah 8 | Title : An implementation of the two dimensional Tensorized RNN cell 9 | """ 10 | ####################################################################################################### 11 | 12 | class MDTensorizedRNNCell(tf.contrib.rnn.RNNCell): 13 | """The 2D Tensorized RNN cell. 
14 | """ 15 | def __init__(self, num_units = None, activation = None, name=None, dtype = None, reuse=None): 16 | super(MDTensorizedRNNCell, self).__init__(_reuse=reuse, name=name) 17 | # save class variables 18 | self._num_in = 2 19 | self._num_units = num_units 20 | self._state_size = num_units 21 | self._output_size = num_units 22 | self.activation = activation 23 | 24 | # set up input -> hidden connection 25 | self.W = tf.get_variable("W_"+name, shape=[num_units, 2*num_units, 2*self._num_in], 26 | initializer=tf.contrib.layers.xavier_initializer(), dtype = dtype) 27 | 28 | self.b = tf.get_variable("b_"+name, shape=[num_units], 29 | initializer=tf.contrib.layers.xavier_initializer(), dtype = dtype) 30 | 31 | # needed properties 32 | 33 | @property 34 | def input_size(self): 35 | return self._num_in # real 36 | 37 | @property 38 | def state_size(self): 39 | return self._state_size # real 40 | 41 | @property 42 | def output_size(self): 43 | return self._output_size # real 44 | 45 | def call(self, inputs, states): 46 | 47 | inputstate_mul = tf.einsum('ij,ik->ijk', tf.concat(states, 1),tf.concat(inputs,1)) 48 | # prepare input linear combination 49 | state_mul = tensordot(tf, inputstate_mul, self.W, axes=[[1,2],[1,2]]) # [batch_sz, num_units] 50 | 51 | preact = state_mul + self.b 52 | 53 | output = self.activation(preact) # [batch_sz, num_units] C 54 | 55 | new_state = output 56 | 57 | return output, new_state 58 | -------------------------------------------------------------------------------- /src/VNA_2DTRNN/Tensordot2.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 The TensorNetwork Authors 2 | # https://github.com/google/TensorNetwork 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | """A modified version of TensorFlow's tensordot operation.""" 15 | 16 | from typing import Any, Optional, Union, Text, Sequence, Tuple, List 17 | import numpy as np 18 | 19 | Tensor = Any 20 | 21 | 22 | def tensordot(tf, 23 | a, 24 | b, 25 | axes, 26 | name: Optional[Text] = None) -> Tensor: 27 | r"""Tensor contraction of a and b along specified axes. 28 | Tensordot (also known as tensor contraction) sums the product of elements 29 | from `a` and `b` over the indices specified by `a_axes` and `b_axes`. 30 | The lists `a_axes` and `b_axes` specify those pairs of axes along which to 31 | contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension 32 | as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists 33 | `a_axes` and `b_axes` must have identical length and consist of unique 34 | integers that specify valid axes for each of the tensors. 35 | This operation corresponds to `numpy.tensordot(a, b, axes)`. 36 | Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1` 37 | is equivalent to matrix multiplication. 38 | Example 2: When `a` and `b` are matrices (order 2), the case 39 | `axes = [[1], [0]]` is equivalent to matrix multiplication. 
40 | Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two 41 | tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor 42 | \\(c_{jklm}\\) whose entry 43 | corresponding to the indices \\((j,k,l,m)\\) is given by: 44 | \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\). 45 | In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`. 46 | Args: 47 | tf: The TensorFlow module. This must be passed in instead of imported 48 | since we don't assume users have TensorFlow installed. 49 | a: `Tensor` of type `float32` or `float64`. 50 | b: `Tensor` with the same type as `a`. 51 | axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k]. 52 | If axes is a scalar, sum over the last N axes of a and the first N axes of 53 | b in order. If axes is a list or `Tensor` the first and second row contain 54 | the set of unique integers specifying axes along which the contraction is 55 | computed, for `a` and `b`, respectively. The number of axes for `a` and 56 | `b` must be equal. 57 | name: A name for the operation (optional). 58 | Returns: 59 | A `Tensor` with the same type as `a`. 60 | Raises: 61 | ValueError: If the shapes of `a`, `b`, and `axes` are incompatible. 62 | IndexError: If the values in axes exceed the rank of the corresponding 63 | tensor. 64 | """ 65 | 66 | def _tensordot_should_flip(contraction_axes: List[int], 67 | free_axes: List[int]) -> bool: 68 | """Helper method to determine axis ordering. 69 | We minimize the average distance the indices would have to move under the 70 | transposition. 71 | Args: 72 | contraction_axes: The axes to be contracted. 73 | free_axes: The free axes. 74 | Returns: 75 | should_flip: `True` if `contraction_axes` should be moved to the left, 76 | `False` if they should be moved to the right. 77 | """ 78 | # NOTE: This will fail if the arguments contain any Tensors. 79 | if contraction_axes and free_axes: 80 | return bool(np.mean(contraction_axes) < np.mean(free_axes)) 81 | return False 82 | 83 | def _tranpose_if_necessary(tensor: Tensor, perm: List[int]) -> Tensor: 84 | """Like transpose(), but avoids creating a new tensor if possible. 85 | Although the graph optimizer should kill trivial transposes, it is best not 86 | to add them in the first place! 87 | """ 88 | if perm == list(range(len(perm))): 89 | return tensor 90 | return tf.transpose(tensor, perm) 91 | 92 | def _reshape_if_necessary(tensor: Tensor, 93 | new_shape: List[int]) -> Tensor: 94 | """Like reshape(), but avoids creating a new tensor if possible. 95 | Assumes shapes are both fully specified.""" 96 | cur_shape = tensor.get_shape().as_list() 97 | if (len(new_shape) == len(cur_shape) and 98 | all(d0 == d1 for d0, d1 in zip(cur_shape, new_shape))): 99 | return tensor 100 | return tf.reshape(tensor, new_shape) 101 | 102 | def _tensordot_reshape( 103 | a: Tensor, axes: Union[Sequence[int], Tensor], is_right_term=False 104 | ) -> Tuple[Tensor, Union[List[int], Tensor], Optional[List[int]], bool]: 105 | """Helper method to perform transpose and reshape for contraction op. 106 | This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul` 107 | using `array_ops.transpose` and `array_ops.reshape`. The method takes a 108 | tensor and performs the correct transpose and reshape operation for a given 109 | set of indices. It returns the reshaped tensor as well as a list of indices 110 | necessary to reshape the tensor again after matrix multiplication. 111 | Args: 112 | a: `Tensor`. 
113 | axes: List or `int32` `Tensor` of unique indices specifying valid axes of 114 | `a`. 115 | is_right_term: Whether `a` is the right (second) argument to `matmul`. 116 | Returns: 117 | A tuple `(reshaped_a, free_dims, free_dims_static, transpose_needed)` 118 | where `reshaped_a` is the tensor `a` reshaped to allow contraction via 119 | `matmul`, `free_dims` is either a list of integers or an `int32` 120 | `Tensor`, depending on whether the shape of a is fully specified, and 121 | free_dims_static is either a list of integers and None values, or None, 122 | representing the inferred static shape of the free dimensions. 123 | `transpose_needed` indicates whether `reshaped_a` must be transposed, 124 | or not, when calling `matmul`. 125 | """ 126 | if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)): 127 | shape_a = a.get_shape().as_list() 128 | # NOTE: This will fail if axes contains any tensors 129 | axes = [i if i >= 0 else i + len(shape_a) for i in axes] 130 | free = [i for i in range(len(shape_a)) if i not in axes] 131 | flipped = _tensordot_should_flip(axes, free) 132 | 133 | free_dims = [shape_a[i] for i in free] 134 | prod_free = int(np.prod([shape_a[i] for i in free])) 135 | prod_axes = int(np.prod([shape_a[i] for i in axes])) 136 | perm = axes + free if flipped else free + axes 137 | new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes] 138 | transposed_a = _tranpose_if_necessary(a, perm) 139 | reshaped_a = _reshape_if_necessary(transposed_a, new_shape) 140 | transpose_needed = (not flipped) if is_right_term else flipped 141 | return reshaped_a, free_dims, free_dims, transpose_needed 142 | if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)): 143 | shape_a = a.get_shape().as_list() 144 | axes = [i if i >= 0 else i + len(shape_a) for i in axes] 145 | free = [i for i in range(len(shape_a)) if i not in axes] 146 | flipped = _tensordot_should_flip(axes, free) 147 | perm = axes + free if flipped else free + axes 148 | 149 | axes_dims = [shape_a[i] for i in axes] 150 | free_dims = [shape_a[i] for i in free] 151 | free_dims_static = free_dims 152 | axes = tf.convert_to_tensor(axes, dtype=tf.dtypes.int32, name="axes") 153 | free = tf.convert_to_tensor(free, dtype=tf.dtypes.int32, name="free") 154 | shape_a = tf.shape(a) 155 | transposed_a = _tranpose_if_necessary(a, perm) 156 | else: 157 | free_dims_static = None 158 | shape_a = tf.shape(a) 159 | rank_a = tf.rank(a) 160 | axes = tf.convert_to_tensor(axes, dtype=tf.dtypes.int32, name="axes") 161 | axes = tf.where(axes >= 0, axes, axes + rank_a) 162 | free, _ = tf.compat.v1.setdiff1d(tf.range(rank_a), axes) 163 | # Matmul does not accept tensors for its transpose arguments, so fall 164 | # back to the previous, fixed behavior. 165 | # NOTE(amilsted): With a suitable wrapper for `matmul` using e.g. `case` 166 | # to match transpose arguments to tensor values, we could also avoid 167 | # unneeded tranposes in this case at the expense of a somewhat more 168 | # complicated graph. Unclear whether this would be beneficial overall. 
169 | flipped = is_right_term 170 | perm = ( 171 | tf.concat([axes, free], 0) if flipped else tf.concat([free, axes], 0)) 172 | transposed_a = tf.transpose(a, perm) 173 | 174 | free_dims = tf.gather(shape_a, free) 175 | axes_dims = tf.gather(shape_a, axes) 176 | prod_free_dims = tf.reduce_prod(free_dims) 177 | prod_axes_dims = tf.reduce_prod(axes_dims) 178 | 179 | if flipped: 180 | new_shape = tf.stack([prod_axes_dims, prod_free_dims]) 181 | else: 182 | new_shape = tf.stack([prod_free_dims, prod_axes_dims]) 183 | reshaped_a = tf.reshape(transposed_a, new_shape) 184 | transpose_needed = (not flipped) if is_right_term else flipped 185 | return reshaped_a, free_dims, free_dims_static, transpose_needed 186 | 187 | def _tensordot_axes(a: Tensor, axes 188 | ) -> Tuple[Any, Any]: 189 | """Generates two sets of contraction axes for the two tensor arguments.""" 190 | a_shape = a.get_shape() 191 | if isinstance(axes, tf.compat.integral_types): 192 | if axes < 0: 193 | raise ValueError("'axes' must be at least 0.") 194 | if a_shape.ndims is not None: 195 | if axes > a_shape.ndims: 196 | raise ValueError("'axes' must not be larger than the number of " 197 | "dimensions of tensor %s." % a) 198 | return (list(range(a_shape.ndims - axes, 199 | a_shape.ndims)), list(range(axes))) 200 | rank = tf.rank(a) 201 | return (tf.range(rank - axes, rank, 202 | dtype=tf.int32), tf.range(axes, dtype=tf.int32)) 203 | if isinstance(axes, (list, tuple)): 204 | if len(axes) != 2: 205 | raise ValueError("'axes' must be an integer or have length 2.") 206 | a_axes = axes[0] 207 | b_axes = axes[1] 208 | if isinstance(a_axes, tf.compat.integral_types) and \ 209 | isinstance(b_axes, tf.compat.integral_types): 210 | a_axes = [a_axes] 211 | b_axes = [b_axes] 212 | # NOTE: This fails if either a_axes and b_axes are Tensors. 213 | if len(a_axes) != len(b_axes): 214 | raise ValueError( 215 | "Different number of contraction axes 'a' and 'b', %s != %s." % 216 | (len(a_axes), len(b_axes))) 217 | 218 | # The contraction indices do not need to be permuted. 219 | # Sort axes to avoid unnecessary permutations of a. 220 | # NOTE: This fails if either a_axes and b_axes contain Tensors. 
221 | # pylint: disable=len-as-condition 222 | if len(a_axes) > 0: 223 | a_axes, b_axes = list(zip(*sorted(zip(a_axes, b_axes)))) 224 | 225 | return a_axes, b_axes 226 | axes = tf.convert_to_tensor(axes, name="axes", dtype=tf.int32) 227 | return axes[0], axes[1] 228 | 229 | with tf.compat.v1.name_scope(name, "Tensordot", [a, b, axes]) as _name: 230 | a = tf.convert_to_tensor(a, name="a") 231 | b = tf.convert_to_tensor(b, name="b") 232 | a_axes, b_axes = _tensordot_axes(a, axes) 233 | a_reshape, a_free_dims, a_free_dims_static, a_transp = _tensordot_reshape( 234 | a, a_axes) 235 | b_reshape, b_free_dims, b_free_dims_static, b_transp = _tensordot_reshape( 236 | b, b_axes, is_right_term=True) 237 | 238 | ab_matmul = tf.matmul( 239 | a_reshape, b_reshape, transpose_a=a_transp, transpose_b=b_transp) 240 | 241 | if isinstance(a_free_dims, list) and isinstance(b_free_dims, list): 242 | return tf.reshape(ab_matmul, a_free_dims + b_free_dims, name=_name) 243 | a_free_dims = tf.convert_to_tensor(a_free_dims, dtype=tf.dtypes.int32) 244 | b_free_dims = tf.convert_to_tensor(b_free_dims, dtype=tf.dtypes.int32) 245 | product = tf.reshape( 246 | ab_matmul, tf.concat([a_free_dims, b_free_dims], 0), name=_name) 247 | if a_free_dims_static is not None and b_free_dims_static is not None: 248 | product.set_shape(a_free_dims_static + b_free_dims_static) 249 | return product 250 | -------------------------------------------------------------------------------- /src/VNA_2DTRNN/run_VNA_EdwardsAnderson.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | #---------------------------------------------------------------------------------------------------------- 5 | """ 6 | This implementation is based on RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions 7 | Edited by Mohamed Hibat-Allah 8 | Title : Implementation of Variational Neural Annealing for the Edwards Anderson model with open boundary conditions 9 | Description : This code uses 2D Tensorized RNN cells to run variational neural annealing both classical 10 | and quantum on the Edwards-Anderson model. 11 | """ 12 | #----------------------------------------------------------------------------------------------------------- 13 | 14 | import tensorflow as tf 15 | import numpy as np 16 | import os 17 | import time 18 | import random 19 | 20 | from MDRNNWavefunction import MDRNNWavefunction 21 | from MDTensorizedRNNCell import MDTensorizedRNNCell 22 | from Helper_functions import * 23 | 24 | #Seeding for reproducibility purposes 25 | seed = 111 26 | tf.compat.v1.reset_default_graph() 27 | random.seed(seed) # `python` built-in pseudo-random generator 28 | np.random.seed(seed) # numpy pseudo-random generator 29 | tf.compat.v1.set_random_seed(seed) # tensorflow pseudo-random generator 30 | 31 | #### Hyperparams 32 | # Note: 33 | # If Bx0=0, then this code will run Variational Classical Annealing (VCA). 34 | # If T0=0, then this code will run Variational Quantum Annealing (VQA). 35 | # If both are zero, then this algorithm will correspond to classical quantum optimization (CQO). 36 | # For more details, please check Ref. https://arxiv.org/abs/2101.10154. 
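The couplings Jz defined below follow the convention of `Ising2D_diagonal_matrixelements`: `Jz[i,j,0]` couples site (i,j) to its right neighbour (i+1,j), and `Jz[i,j,1]` couples it to (i,j+1). A NumPy sketch of the diagonal energy computed from them (single configuration, for illustration only):

    import numpy as np

    def ea_energy(spins, Jz):
        # spins: (Nx, Ny) array in {0, 1}; Jz: (Nx, Ny, 2) couplings
        s = 2 * spins - 1                                    # map {0, 1} -> {-1, +1}
        e = -np.sum(Jz[:-1, :, 0] * s[:-1, :] * s[1:, :])    # bonds along x
        e -= np.sum(Jz[:, :-1, 1] * s[:, :-1] * s[:, 1:])    # bonds along y
        return e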
37 | Nx = 5 #x-size
38 | Ny = 5 #y-size
39 | N = Nx*Ny #total number of sites
40 | num_units = 20 #number of memory units
41 | numsamples = 50 #number of samples used for training
42 | lr = 1e-4 #learning rate
43 | T0 = 2 #Initial temperature
44 | Bx0 = 2 #Initial magnetic field
45 | num_warmup_steps = 1000 #number of warmup steps
46 | num_annealing_steps = 500 #number of annealing steps
47 | num_equilibrium_steps = 5 #number of training steps after each annealing step
48 | activation_function = tf.nn.elu #non-linear activation function for the 2D Tensorized RNN cell
49 | 
50 | Jz = np.random.uniform(0,2, size = (Nx,Ny,2))-1 #Couplings of the Edwards-Anderson model, drawn uniformly from [-1,1)
51 | 
52 | print('\n')
53 | print("Number of spins =", N)
54 | print("Initial temperature =", T0)
55 | print('Seed = ', seed)
56 | 
57 | num_steps = num_annealing_steps*num_equilibrium_steps + num_warmup_steps
58 | 
59 | print("\nNumber of annealing steps = {0}".format(num_annealing_steps))
60 | print("Number of training steps = {0}".format(num_steps))
61 | 
62 | # Initializing the RNN (with only one layer)-----------
63 | MDRNNWF = MDRNNWavefunction(systemsize_x = Nx, systemsize_y = Ny ,num_units = num_units,cell=MDTensorizedRNNCell, activation = activation_function, seed = seed) #contains the graph with the RNNs
64 | 
65 | #Building the graph -------------------
66 | with tf.compat.v1.variable_scope(MDRNNWF.scope,reuse=tf.compat.v1.AUTO_REUSE):
67 |     with MDRNNWF.graph.as_default():
68 | 
69 |         global_step = tf.Variable(0, trainable=False)
70 |         learningrate_placeholder = tf.compat.v1.placeholder(dtype=tf.float64,shape=[])
71 |         learningrate = tf.compat.v1.train.exponential_decay(learningrate_placeholder, global_step, 100, 1.0, staircase=True)
72 | 
73 |         #Defining the optimizer
74 |         optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=learningrate)
75 | 
76 |         #Defining Tensorflow placeholders
77 |         Eloc=tf.compat.v1.placeholder(dtype=tf.float64,shape=[numsamples])
78 |         sampleplaceholder_forgrad=tf.compat.v1.placeholder(dtype=tf.int32,shape=[numsamples,Nx,Ny])
79 |         log_probs_forgrad = MDRNNWF.log_probability(sampleplaceholder_forgrad,inputdim=2)
80 | 
81 |         samples_placeholder=tf.compat.v1.placeholder(dtype=tf.int32,shape=(None,Nx, Ny))
82 |         log_probs_tensor=MDRNNWF.log_probability(samples_placeholder,inputdim=2)
83 |         samplesandprobs = MDRNNWF.sample(numsamples=numsamples,inputdim=2)
84 | 
85 |         T_placeholder = tf.compat.v1.placeholder(dtype=tf.float64,shape=())
86 | 
87 |         #Here we define a surrogate cost function that allows us to get the gradients of the free energy via the tf.stop_gradient trick
88 |         Floc = Eloc + T_placeholder*log_probs_forgrad
89 |         cost = tf.reduce_mean(tf.multiply(log_probs_forgrad,tf.stop_gradient(Floc))) - tf.reduce_mean(log_probs_forgrad)*tf.reduce_mean(tf.stop_gradient(Floc))
90 | 
91 |         #Calculate Gradients---------------
92 |         gradients, variables = zip(*optimizer.compute_gradients(cost))
93 | 
94 | 
95 |         #Define the optimization step
96 |         optstep=optimizer.apply_gradients(zip(gradients,variables), global_step = global_step)
97 | 
98 |         #Tensorflow saver to checkpoint
99 |         saver=tf.compat.v1.train.Saver()
100 | 
101 |         #For initialization
102 |         init=tf.compat.v1.global_variables_initializer()
103 |         initialize_parameters = tf.compat.v1.global_variables_initializer()
104 | #----------------------------------------------------------------
105 | 
106 | #Starting Session------------
107 | #GPU management
108 | config = tf.compat.v1.ConfigProto()
109 | config.gpu_options.allow_growth = True
110 | 
111 | sess=tf.compat.v1.Session(graph=MDRNNWF.graph, 
config=config) 112 | sess.run(init) 113 | 114 | #Loading previous trainings---------- 115 | ### To be implemented 116 | #------------------------------------ 117 | 118 | ## Run Variational Annealing 119 | with tf.compat.v1.variable_scope(MDRNNWF.scope,reuse=tf.compat.v1.AUTO_REUSE): 120 | with MDRNNWF.graph.as_default(): 121 | 122 | #To store data 123 | meanEnergy=[] 124 | varEnergy=[] 125 | varFreeEnergy = [] 126 | meanFreeEnergy = [] 127 | samples = np.ones((numsamples, Nx, Ny), dtype=np.int32) 128 | queue_samples = np.zeros((N+1, numsamples, Nx, Ny), dtype = np.int32) #Array to store all the diagonal and non diagonal matrix elements (We create it here for memory efficiency as we do not want to allocate it at each training step) 129 | log_probs = np.zeros((N+1)*numsamples, dtype=np.float64) #Array to store the log_probs of all the diagonal and non diagonal matrix elements (We create it here for memory efficiency as we do not want to allocate it at each training step) 130 | 131 | T = T0 #initializing temperature 132 | Bx = Bx0 #initializing magnetic field 133 | 134 | sess.run(initialize_parameters) #Reinitialize the parameters 135 | 136 | start = time.time() 137 | for it in range(len(meanEnergy),num_steps+1): 138 | #Annealing 139 | if it>=num_warmup_steps and it <= num_annealing_steps*num_equilibrium_steps + num_warmup_steps and it % num_equilibrium_steps == 0: 140 | annealing_step = (it-num_warmup_steps)/num_equilibrium_steps 141 | T = T0*(1-annealing_step/num_annealing_steps) 142 | Bx = Bx0*(1-annealing_step/num_annealing_steps) 143 | 144 | #Showing current status after that the annealing starts 145 | if it%num_equilibrium_steps==0: 146 | if it <= num_annealing_steps*num_equilibrium_steps + num_warmup_steps and it>=num_warmup_steps: 147 | annealing_step = (it-num_warmup_steps)/num_equilibrium_steps 148 | print("\nAnnealing step: {0}/{1}".format(annealing_step,num_annealing_steps)) 149 | 150 | samples, log_probabilities = sess.run(samplesandprobs) 151 | 152 | # Estimating the local energies 153 | local_energies = Ising2D_local_energies(Jz, Bx, samples, queue_samples, log_probs_tensor, samples_placeholder, log_probs, sess) 154 | 155 | meanE = np.mean(local_energies) 156 | varE = np.var(local_energies) 157 | 158 | #adding elements to be saved 159 | meanEnergy.append(meanE) 160 | varEnergy.append(varE) 161 | 162 | meanF = np.mean(local_energies+T*log_probabilities) 163 | varF = np.var(local_energies+T*log_probabilities) 164 | 165 | meanFreeEnergy.append(meanF) 166 | varFreeEnergy.append(varF) 167 | 168 | if it%num_equilibrium_steps==0: 169 | print('mean(E): {0}, mean(F): {1}, var(E): {2}, var(F): {3}, #samples {4}, #Training step {5}'.format(meanE,meanF,varE,varF,numsamples, it)) 170 | print("Temperature: ", T) 171 | print("Magnetic field: ", Bx) 172 | 173 | #Here we produce samples at the end of annealing 174 | if it == num_annealing_steps*num_equilibrium_steps + num_warmup_steps: 175 | 176 | Nsteps = 20 177 | numsamples_estimation = 10**5 #Num samples to be obtained at the end 178 | numsamples_perstep = numsamples_estimation//Nsteps #The number of steps taken to get "numsamples_estimation" samples (to avoid memory allocation issues) 179 | 180 | samplesandprobs_final = MDRNNWF.sample(numsamples=numsamples_perstep,inputdim=2) 181 | energies = np.zeros((numsamples_estimation)) 182 | solutions = np.zeros((numsamples_estimation, Nx, Ny)) 183 | print("\nSaving energy and variance before the end of annealing") 184 | 185 | for i in range(Nsteps): 186 | # print("\nsampling started") 187 | 
samples_final, _ = sess.run(samplesandprobs_final) 188 | # print("\nsampling finished") 189 | energies[i*numsamples_perstep:(i+1)*numsamples_perstep] = Ising2D_diagonal_matrixelements(Jz,samples_final) 190 | solutions[i*numsamples_perstep:(i+1)*numsamples_perstep] = samples_final 191 | print("Sampling step:" , i+1, "/", Nsteps) 192 | print("meanE = ", np.mean(energies)) 193 | print("varE = ", np.var(energies)) 194 | print("minE = ",np.min(energies)) 195 | 196 | #Run gradient descent step 197 | sess.run(optstep,feed_dict={Eloc:local_energies, sampleplaceholder_forgrad: samples, learningrate_placeholder: lr, T_placeholder:T}) 198 | 199 | if it%5 == 0: 200 | print("Elapsed time is =", time.time()-start, " seconds") 201 | print('\n\n') 202 | #---------------------------- 203 | -------------------------------------------------------------------------------- /src/VNA_DilatedRNN/DilatedRNNWavefunction.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import random 4 | 5 | """ 6 | This implementation is an adaptation of RNN Wave Functions' code https://github.com/mhibatallah/RNNWavefunctions 7 | Edited by Mohamed Hibat-Allah 8 | Here, we define the Dilated RNNwavefunction class, which contains the sample method 9 | that allows to sample configurations autoregressively from the RNN and 10 | the log_probability method which allows to estimate the log-probability of a set of configurations. 11 | We also note that the dilated connections between RNN cells allow to take care of the long-distance 12 | dependencies between spins more efficiently as explained in https://arxiv.org/abs/2101.10154. 13 | """ 14 | 15 | class DilatedRNNWavefunction(object): 16 | def __init__(self,systemsize,cell=tf.nn.rnn_cell.BasicRNNCell,activation=tf.nn.relu,units=[2],scope='DilatedRNNwavefunction', seed = 111): 17 | """ 18 | systemsize: int 19 | number of sites 20 | cell: a tensorflow RNN cell 21 | units: list of int 22 | number of units per RNN layer 23 | scope: str 24 | the name of the name-space scope 25 | activation: activation of the RNN cell 26 | seed: pseudo-random number generator 27 | """ 28 | 29 | self.graph=tf.Graph() 30 | self.scope=scope #Label of the RNN wavefunction 31 | self.N=systemsize #Number of sites of the 1D chain 32 | self.numlayers = len(units) 33 | random.seed(seed) # `python` built-in pseudo-random generator 34 | np.random.seed(seed) # numpy pseudo-random generator 35 | 36 | #Defining the neural network 37 | with self.graph.as_default(): 38 | with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE): 39 | tf.set_random_seed(seed) # tensorflow pseudo-random generator 40 | 41 | #Define the RNN cell where units[n] corresponds to the number of memory units in each layer n 42 | self.rnn=[[cell(num_units = units[i], activation = activation,name="rnn_"+str(n)+str(i),dtype=tf.float64) for n in range(self.N)] for i in range(self.numlayers)] 43 | self.dense = [tf.layers.Dense(2,activation=tf.nn.softmax,name='wf_dense'+str(n)) for n in range(self.N)] #Define the Fully-Connected layer followed by a Softmax 44 | 45 | def sample(self,numsamples,inputdim): 46 | """ 47 | generate samples from a probability distribution parametrized by a recurrent network 48 | ------------------------------------------------------------------------ 49 | Parameters: 50 | numsamples: int 51 | number of samples to be produced 52 | inputdim: int 53 | hilbert space dimension 54 | ------------------------------------------------------------------------ 
55 | Returns: 56 | samples: tf.Tensor of shape (numsamples,systemsize) 57 | the samples in integer encoding 58 | """ 59 | with self.graph.as_default(): #Call the default graph, used if willing to create multiple graphs. 60 | samples = [] 61 | probs = [] 62 | with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE): 63 | 64 | inputs=tf.zeros((numsamples,inputdim), dtype = tf.float64) #Feed the table b in tf. 65 | #Initial input to feed to the rnn 66 | 67 | self.inputdim=inputs.shape[1] 68 | self.outputdim=self.inputdim 69 | self.numsamples=inputs.shape[0] 70 | 71 | rnn_states = [] 72 | 73 | for i in range(self.numlayers): 74 | for n in range(self.N): 75 | # rnn_states.append(1.0-self.rnn[i].zero_state(self.numsamples,dtype=tf.float64)) #Initialize the RNN hidden state 76 | rnn_states.append(self.rnn[i][n].zero_state(self.numsamples,dtype=tf.float64)) #Initialize the RNN hidden state 77 | #zero state returns a zero filled tensor withs shape = (self.numsamples, num_units) 78 | 79 | for n in range(self.N): 80 | 81 | rnn_output = inputs 82 | 83 | for i in range(self.numlayers): 84 | if (n-2**i)>=0: 85 | rnn_output, rnn_states[i + n*self.numlayers] = self.rnn[i][n](rnn_output, rnn_states[i+((n-2**i)*self.numlayers)]) #Compute the next hidden states 86 | else: 87 | rnn_output, rnn_states[i + n*self.numlayers] = self.rnn[i][n](rnn_output, rnn_states[i]) 88 | 89 | output=self.dense[n](rnn_output) 90 | probs.append(output) 91 | sample_temp=tf.reshape(tf.multinomial(tf.log(output),num_samples=1),[-1,]) #Sample from the probability 92 | samples.append(sample_temp) 93 | inputs=tf.one_hot(sample_temp,depth=self.outputdim,dtype = tf.float64) 94 | 95 | probs=tf.transpose(tf.stack(values=probs,axis=2),perm=[0,2,1]) 96 | self.samples=tf.stack(values=samples,axis=1) # (self.N, num_samples) to (num_samples, self.N): Generate self.numsamples vectors of size self.N spin containing 0 or 1 97 | one_hot_samples=tf.one_hot(self.samples,depth=self.inputdim, dtype = tf.float64) 98 | self.log_probs=tf.reduce_sum(tf.log(tf.reduce_sum(tf.multiply(probs,one_hot_samples),axis=2)),axis=1) 99 | 100 | return self.samples,self.log_probs 101 | 102 | def log_probability(self,samples,inputdim): 103 | """ 104 | calculate the log-probabilities of ```samples`` 105 | ------------------------------------------------------------------------ 106 | Parameters: 107 | samples: tf.Tensor 108 | a tf.placeholder of shape (number of samples,systemsize) 109 | containing the input samples in integer encoding 110 | inputdim: int 111 | dimension of the input space 112 | ------------------------------------------------------------------------ 113 | Returns: 114 | log-probs tf.Tensor of shape (number of samples,) 115 | the log-probability of each sample 116 | """ 117 | with self.graph.as_default(): 118 | 119 | self.inputdim=inputdim 120 | self.outputdim=self.inputdim 121 | 122 | self.numsamples=tf.shape(samples)[0] 123 | 124 | inputs=tf.zeros((self.numsamples, self.inputdim), dtype=tf.float64) 125 | 126 | with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE): 127 | probs=[] 128 | 129 | rnn_states = [] 130 | 131 | for i in range(self.numlayers): 132 | for n in range(self.N): 133 | rnn_states.append(self.rnn[i][n].zero_state(self.numsamples,dtype=tf.float64)) #Initialize the RNN hidden state 134 | #zero state returns a zero filled tensor withs shape = (self.numsamples, num_units) 135 | 136 | for n in range(self.N): 137 | 138 | rnn_output = inputs 139 | 140 | for i in range(self.numlayers): 141 | if (n-2**i)>=0: 142 | rnn_output, rnn_states[i + 
102 |     def log_probability(self,samples,inputdim):
103 |         """
104 |         Calculate the log-probabilities of ``samples``.
105 |         ------------------------------------------------------------------------
106 |         Parameters:
107 |         samples: tf.Tensor
108 |             a tf.placeholder of shape (number of samples,systemsize)
109 |             containing the input samples in integer encoding
110 |         inputdim: int
111 |             dimension of the input space
112 |         ------------------------------------------------------------------------
113 |         Returns:
114 |         log-probs: tf.Tensor of shape (number of samples,)
115 |             the log-probability of each sample
116 |         """
117 |         with self.graph.as_default():
118 | 
119 |             self.inputdim=inputdim
120 |             self.outputdim=self.inputdim
121 | 
122 |             self.numsamples=tf.shape(samples)[0]
123 | 
124 |             inputs=tf.zeros((self.numsamples, self.inputdim), dtype=tf.float64)
125 | 
126 |             with tf.variable_scope(self.scope,reuse=tf.AUTO_REUSE):
127 |                 probs=[]
128 | 
129 |                 rnn_states = []
130 | 
131 |                 for i in range(self.numlayers):
132 |                     for n in range(self.N):
133 |                         rnn_states.append(self.rnn[i][n].zero_state(self.numsamples,dtype=tf.float64))  #Initialize the RNN hidden state
134 |                         #zero_state returns a zero-filled tensor with shape = (self.numsamples, num_units)
135 | 
136 |                 for n in range(self.N):
137 | 
138 |                     rnn_output = inputs
139 | 
140 |                     for i in range(self.numlayers):
141 |                         if (n-2**i)>=0:
142 |                             rnn_output, rnn_states[i + n*self.numlayers] = self.rnn[i][n](rnn_output, rnn_states[i+((n-2**i)*self.numlayers)])  #Compute the next hidden states
143 |                         else:
144 |                             rnn_output, rnn_states[i + n*self.numlayers] = self.rnn[i][n](rnn_output, rnn_states[i])
145 | 
146 |                     output=self.dense[n](rnn_output)
147 |                     probs.append(output)
148 |                     inputs=tf.reshape(tf.one_hot(tf.reshape(tf.slice(samples,begin=[np.int32(0),np.int32(n)],size=[np.int32(-1),np.int32(1)]),shape=[self.numsamples]),depth=self.outputdim,dtype = tf.float64),shape=[self.numsamples,self.inputdim])  #Feed the n-th spin of each sample as a one-hot input for the next step
149 | 
150 |             probs=tf.cast(tf.transpose(tf.stack(values=probs,axis=2),perm=[0,2,1]),tf.float64)
151 |             one_hot_samples=tf.one_hot(samples,depth=self.inputdim, dtype = tf.float64)
152 | 
153 |             self.log_probs=tf.reduce_sum(tf.log(tf.reduce_sum(tf.multiply(probs,one_hot_samples),axis=2)),axis=1)
154 | 
155 |             return self.log_probs
156 | 
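A minimal usage sketch (an added illustration, not a file from the repository): it mirrors how run_VNA_SherringtonKirkpatrick.py drives this class under a TensorFlow 1.x runtime, with placeholder sizes.

```python
import tensorflow as tf
from DilatedRNNWavefunction import DilatedRNNWavefunction

N = 20
wf = DilatedRNNWavefunction(N, units=[20]*5,
                            cell=tf.nn.rnn_cell.BasicRNNCell,
                            activation=tf.nn.elu, seed=111)
samples_op, log_probs_op = wf.sample(numsamples=50, inputdim=2)

with wf.graph.as_default():
    samples_ph = tf.compat.v1.placeholder(dtype=tf.int32, shape=(None, N))
    log_probs_given = wf.log_probability(samples_ph, inputdim=2)
    init = tf.compat.v1.global_variables_initializer()

sess = tf.compat.v1.Session(graph=wf.graph)
sess.run(init)
samples, log_probs = sess.run((samples_op, log_probs_op))
# Re-evaluating the drawn samples should reproduce `log_probs`.
log_probs_check = sess.run(log_probs_given, feed_dict={samples_ph: samples})
```

As in the run scripts, `sample` and `log_probability` build their ops in the wavefunction's own graph, so placeholders and the initializer must be created inside that graph.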
--------------------------------------------------------------------------------
/src/VNA_DilatedRNN/Helper_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | 
4 | """
5 | Code by Mohamed Hibat-Allah
6 | Description: helper functions that compute the local energies of a fully-connected spin model,
7 | both with and without a transverse magnetic field
8 | """
9 | 
10 | # Loading Functions --------------------------
11 | def Fullyconnected_diagonal_matrixelements(Jz, samples):
12 |     """ Computes the diagonal local energies of a fully-connected spin model for a set of samples (in parallel).
13 |     Returns: the local energies that correspond to "samples"
14 |     Inputs:
15 |     - samples: (numsamples, N)
16 |     - Jz: (N,N) np array of J_ij couplings
17 |     """
18 | 
19 |     numsamples = samples.shape[0]
20 |     N = samples.shape[1]
21 |     energies = np.zeros((numsamples), dtype = np.float64)
22 | 
23 |     for i in range(N-1):
24 |         values = np.expand_dims(samples[:,i], axis = -1)+samples[:,i+1:]
25 |         valuesT = np.copy(values)
26 |         valuesT[values==2] = +1  #If both spins are up
27 |         valuesT[values==0] = +1  #If both spins are down
28 |         valuesT[values==1] = -1  #If they are opposite
29 | 
30 |         energies += np.sum(valuesT*(-Jz[i,i+1:]), axis = 1)
31 | 
32 |     return energies
33 | 
34 | def Fullyconnected_localenergies(Jz, Bx, samples, queue_samples, log_probs_tensor, samples_placeholder, log_probs, sess):
35 |     """ Computes the local energies of a fully-connected spin model for a set of samples (in parallel).
36 |     Returns: the local energies that correspond to "samples"
37 |     Inputs:
38 |     - samples: (numsamples, N)
39 |     - Jz: (N,N) np array of J_ij couplings
40 |     - Bx: float, the transverse-field strength
41 |     - queue_samples: (N+1, numsamples, N) preallocated np array used to store the samples generated by applying the Hamiltonian H to "samples"
42 |     - log_probs_tensor: a TF tensor of shape (None,)
43 |     - samples_placeholder: a TF placeholder used to feed in a set of configurations
44 |     - log_probs: ((N+1)*numsamples,) preallocated np array used to store the log-probs of the off-diagonal elements
45 |     - sess: the current TF session
46 |     """
47 | 
48 |     numsamples = samples.shape[0]
49 |     N = samples.shape[1]
50 | 
51 |     local_energies = np.zeros((numsamples), dtype = np.float64)
52 | 
53 |     for i in range(N-1):
54 |         # for j in range(i+1,N):
55 |         values = np.expand_dims(samples[:,i], axis = -1)+samples[:,i+1:]
56 |         valuesT = np.copy(values)
57 |         valuesT[values==2] = +1  #If both spins are up
58 |         valuesT[values==0] = +1  #If both spins are down
59 |         valuesT[values==1] = -1  #If they are opposite
60 | 
61 |         local_energies += np.sum(valuesT*(-Jz[i,i+1:]), axis = 1)
62 |         # local_energies += -N*np.mean((2*samples-1), axis = 1)**3
63 | 
64 |     queue_samples[0] = samples  #storing the diagonal samples
65 | 
66 |     if Bx != 0:
67 |         count = 0
68 |         for i in range(N):  #Non-diagonal elements: flip each of the N spins so that queue_samples[1:N+1] are all filled
69 |             valuesT = np.copy(samples)
70 |             valuesT[:,i][samples[:,i]==1] = 0  #Flip spin i
71 |             valuesT[:,i][samples[:,i]==0] = 1  #Flip spin i
72 | 
73 | 
74 |             count += 1
75 |             queue_samples[count] = valuesT
76 | 
77 |         len_sigmas = (N+1)*numsamples
78 |         steps = len_sigmas//50000+1  #Cap the batch size at 50000 configurations to avoid allocating too much memory
79 | 
80 |         queue_samples_reshaped = np.reshape(queue_samples, [(N+1)*numsamples, N])
81 |         for i in range(steps):
82 |             if i < steps-1:
83 |                 cut = slice((i*len_sigmas)//steps,((i+1)*len_sigmas)//steps)
84 |             else:
85 |                 cut = slice((i*len_sigmas)//steps,len_sigmas)
86 |             log_probs[cut] = sess.run(log_probs_tensor, feed_dict={samples_placeholder:queue_samples_reshaped[cut]})
87 | 
88 | 
89 |         log_probs_reshaped = np.reshape(log_probs, [N+1,numsamples])
90 |         for j in range(numsamples):
91 |             local_energies[j] += -Bx*np.sum(0.5*(np.exp(log_probs_reshaped[1:,j]-log_probs_reshaped[0,j])))
92 | 
93 |     return local_energies
94 | 
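As a quick sanity check (an added illustration, not repository code), the diagonal estimator above can be compared with the direct Ising form E(s) = -sum_{i<j} J_ij * sigma_i * sigma_j, where sigma = 2s - 1 maps the {0,1} encoding to {-1,+1} spins:

```python
import numpy as np
from Helper_functions import Fullyconnected_diagonal_matrixelements

N = 4
rng = np.random.RandomState(0)
Jz = rng.normal(0, 1/np.sqrt(N), size=(N, N))
samples = rng.randint(2, size=(3, N))   # three {0,1} configurations

E_helper = Fullyconnected_diagonal_matrixelements(Jz, samples)

sigma = 2*samples - 1                   # map {0,1} -> {-1,+1}
E_direct = np.array([-np.sum(np.triu(Jz, 1)*np.outer(s, s)) for s in sigma])
assert np.allclose(E_helper, E_direct)
```

Note that only the upper triangle (i < j) of Jz contributes, which matches the format the instance generators in tools/ write out.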
--------------------------------------------------------------------------------
/src/VNA_DilatedRNN/run_VNA_SherringtonKirkpatrick.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 | # -*- coding: utf-8 -*-
3 | 
4 | #----------------------------------------------------------------------------------------------------------
5 | """
6 | This implementation is an adaptation of the RNN Wave Functions code https://github.com/mhibatallah/RNNWavefunctions
7 | Edited by Mohamed Hibat-Allah
8 | Title : Implementation of Variational Neural Annealing for the Sherrington-Kirkpatrick model
9 | Description : This code uses a Dilated RNN cell to implement variational neural annealing, both classical
10 | and quantum, on the Sherrington-Kirkpatrick model.
11 | """
12 | #-----------------------------------------------------------------------------------------------------------
13 | 
14 | import tensorflow as tf
15 | import numpy as np
16 | import os
17 | import time
18 | import random
19 | from math import ceil
20 | 
21 | from DilatedRNNWavefunction import DilatedRNNWavefunction
22 | from Helper_functions import *
23 | 
24 | #Seeding for reproducibility purposes
25 | seed = 111
26 | tf.compat.v1.reset_default_graph()
27 | random.seed(seed)  # `python` built-in pseudo-random generator
28 | np.random.seed(seed)  # numpy pseudo-random generator
29 | tf.compat.v1.set_random_seed(seed)  # tensorflow pseudo-random generator
30 | 
31 | #### Hyperparams
32 | # Note:
33 | # If Bx0=0, then this code will run Variational Classical Annealing (VCA).
34 | # If T0=0, then this code will run Variational Quantum Annealing (VQA).
35 | # If both are zero, then this algorithm corresponds to classical-quantum optimization (CQO).
36 | # For more details, please check Ref. https://arxiv.org/abs/2101.10154.
37 | N = 20  #total number of spins
38 | num_units = 20  #number of memory units for each RNN cell
39 | numlayers = ceil(np.log2(N))  #number of layers
40 | numsamples = 50  #number of samples used for training
41 | lr = 1e-3  #learning rate
42 | T0 = 2  #initial temperature
43 | Bx0 = 2  #initial transverse magnetic field
44 | num_warmup_steps = 1000  #number of warmup steps
45 | num_annealing_steps = 500  #number of annealing steps
46 | num_equilibrium_steps = 5  #number of training steps after each annealing step
47 | activation_function = tf.nn.elu  #activation function used for the Dilated RNN cell
48 | 
49 | Jz = np.random.normal(0,1/np.sqrt(N),size = (N,N))  #SK model couplings
50 | 
51 | #Defining the other parameters
52 | units=[num_units]*numlayers  #list containing the number of hidden units for each layer of the RNN
53 | 
54 | print('\n')
55 | print("Number of spins =", N)
56 | print("Initial temperature =", T0)
57 | print('Seed = ', seed)
58 | 
59 | num_steps = num_annealing_steps*num_equilibrium_steps + num_warmup_steps
60 | 
61 | print("\nNumber of annealing steps = {0}".format(num_annealing_steps))
62 | print("Number of training steps = {0}".format(num_steps))
63 | print("Number of layers = {0}\n".format(numlayers))
64 | 
65 | # Initializing the RNN -----------
66 | DRNNWF = DilatedRNNWavefunction(N,units=units,cell=tf.nn.rnn_cell.BasicRNNCell, activation = activation_function, seed = seed)  #contains the graph with the RNNs
67 | 
68 | #Building the graph -------------------
69 | with tf.compat.v1.variable_scope(DRNNWF.scope,reuse=tf.compat.v1.AUTO_REUSE):
70 |     with DRNNWF.graph.as_default():
71 | 
72 |         global_step = tf.Variable(0, trainable=False)
73 |         learningrate_placeholder = tf.compat.v1.placeholder(dtype=tf.float64,shape=[])
74 |         learningrate = tf.compat.v1.train.exponential_decay(learningrate_placeholder, global_step, 100, 1.0, staircase=True)  #a decay rate of 1.0 keeps the learning rate constant
75 | 
76 |         #Defining the optimizer
77 |         optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=learningrate)
78 | 
79 |         #Defining Tensorflow placeholders
80 |         Eloc=tf.compat.v1.placeholder(dtype=tf.float64,shape=[numsamples])
81 |         sampleplaceholder_forgrad=tf.compat.v1.placeholder(dtype=tf.int32,shape=[numsamples,N])
82 |         log_probs_forgrad = DRNNWF.log_probability(sampleplaceholder_forgrad,inputdim=2)
83 |         samples_placeholder=tf.compat.v1.placeholder(dtype=tf.int32,shape=(None,N))
84 |         log_probs_tensor=DRNNWF.log_probability(samples_placeholder,inputdim=2)
85 |         samplesandprobs = DRNNWF.sample(numsamples=numsamples,inputdim=2)
86 | 
87 |         T_placeholder = tf.compat.v1.placeholder(dtype=tf.float64,shape=())
88 | 
89 |         #Here we define a surrogate cost function that allows us to get the gradients of the variational free energy using the tf.stop_gradient trick
90 |         Floc = Eloc + T_placeholder*log_probs_forgrad
91 |         cost = tf.reduce_mean(tf.multiply(log_probs_forgrad,tf.stop_gradient(Floc))) - tf.reduce_mean(log_probs_forgrad)*tf.reduce_mean(tf.stop_gradient(Floc))  #its gradient is the covariance of grad(log p) with F_loc
92 | 
93 |         gradients, variables = zip(*optimizer.compute_gradients(cost))
94 |         #Calculate Gradients---------------
95 | 
96 |         #Define the optimization step
97 |         optstep=optimizer.apply_gradients(zip(gradients,variables), global_step = global_step)
98 | 
99 |         #Tensorflow saver to checkpoint
100 |         saver=tf.compat.v1.train.Saver()
101 | 
102 |         #For initialization
103 |         init=tf.compat.v1.global_variables_initializer()
104 |         initialize_parameters = tf.compat.v1.global_variables_initializer()  #used to reinitialize the parameters (tf.initialize_all_variables is deprecated)
105 | #----------------------------------------------------------------
106 | 
107 | #Starting Session------------
108 | #GPU management
109 | config = tf.compat.v1.ConfigProto()
110 | config.gpu_options.allow_growth = True
111 | 
112 | sess=tf.compat.v1.Session(graph=DRNNWF.graph, config=config)
113 | sess.run(init)
114 | 
115 | #Loading previous trainings----------
116 | ### To be implemented
117 | #------------------------------------
118 | 
119 | 
120 | ## Run Variational Annealing
121 | with tf.compat.v1.variable_scope(DRNNWF.scope,reuse=tf.compat.v1.AUTO_REUSE):
122 |     with DRNNWF.graph.as_default():
123 | 
124 |         #To store data
125 |         meanEnergy=[]
126 |         varEnergy=[]
127 |         varFreeEnergy = []
128 |         meanFreeEnergy = []
129 |         samples = np.ones((numsamples, N), dtype=np.int32)
130 |         queue_samples = np.zeros((N+1, numsamples, N), dtype = np.int32)  #Array to store all the diagonal and off-diagonal configurations (allocated once here, rather than at each training step, for memory efficiency)
131 |         log_probs = np.zeros((N+1)*numsamples, dtype=np.float64)  #Array to store the log-probs of all the diagonal and off-diagonal configurations (allocated once here, rather than at each training step, for memory efficiency)
132 | 
133 |         T = T0  #initializing the temperature
134 |         Bx = Bx0  #initializing the magnetic field
135 | 
136 |         sess.run(initialize_parameters)  #Reinitialize the parameters
137 | 
138 |         start = time.time()
139 | 
140 |         for it in range(len(meanEnergy),num_steps+1):
141 | 
142 |             #Annealing
143 |             if it>=num_warmup_steps and it <= num_annealing_steps*num_equilibrium_steps + num_warmup_steps and it % num_equilibrium_steps == 0:
144 |                 annealing_step = (it-num_warmup_steps)/num_equilibrium_steps
145 |                 T = T0*(1-annealing_step/num_annealing_steps)  #linear annealing schedule for the temperature
146 |                 Bx = Bx0*(1-annealing_step/num_annealing_steps)  #linear annealing schedule for the transverse field
147 | 
148 |             #Show the current status once the annealing starts
149 |             if it%num_equilibrium_steps==0:
150 |                 if it <= num_annealing_steps*num_equilibrium_steps + num_warmup_steps and it>=num_warmup_steps:
151 |                     annealing_step = (it-num_warmup_steps)/num_equilibrium_steps
152 |                     print("\nAnnealing step: {0}/{1}".format(annealing_step,num_annealing_steps))
153 | 
154 |             samples, log_probabilities = sess.run(samplesandprobs)
155 | 
156 |             # Estimating the local energies
157 |             local_energies = Fullyconnected_localenergies(Jz, Bx, samples, queue_samples, log_probs_tensor, samples_placeholder, log_probs, sess)
158 | 
159 |             meanE = np.mean(local_energies)
160 |             varE = np.var(local_energies)
161 | 
162 |             #adding elements to be saved
163 |             meanEnergy.append(meanE)
164 |             varEnergy.append(varE)
165 | 
166 |             meanF = np.mean(local_energies+T*log_probabilities)  #mean of the local free energies F_loc = E_loc + T*log p
167 |             varF = np.var(local_energies+T*log_probabilities)
168 | 
169 |             meanFreeEnergy.append(meanF)
170 |             varFreeEnergy.append(varF)
171 | 
172 |             if it%num_equilibrium_steps==0:
173 |                 print('mean(E): {0}, mean(F): {1}, var(E): {2}, var(F): {3}, #samples {4}, #Training step {5}'.format(meanE,meanF,varE,varF,numsamples, it))
174 |                 print("Temperature: ", T)
175 |                 print("Magnetic field: ", Bx)
176 | 
177 | 
178 |             #Here we produce samples at the end of the annealing
179 |             if it == num_annealing_steps*num_equilibrium_steps + num_warmup_steps:
180 | 
181 |                 Nsteps = 20
182 |                 numsamples_estimation = 10**5  #total number of samples to be obtained at the end
183 |                 numsamples_perstep = numsamples_estimation//Nsteps  #number of samples per sampling step (sampling is split into Nsteps batches to avoid memory allocation issues)
184 | 
185 |                 samplesandprobs_final = DRNNWF.sample(numsamples=numsamples_perstep,inputdim=2)
186 |                 energies = np.zeros((numsamples_estimation))
187 |                 solutions = np.zeros((numsamples_estimation, N))
188 |                 print("\nSaving energy and variance at the end of annealing")
189 | 
190 |                 for i in range(Nsteps):
191 |                     # print("\nsampling started")
192 |                     samples_final, _ = sess.run(samplesandprobs_final)
193 |                     # print("\nsampling finished")
194 |                     energies[i*numsamples_perstep:(i+1)*numsamples_perstep] = Fullyconnected_diagonal_matrixelements(Jz,samples_final)
195 |                     solutions[i*numsamples_perstep:(i+1)*numsamples_perstep] = samples_final
196 |                     print("Sampling step: ", i+1, "/", Nsteps)
197 |                     print("meanE = ", np.mean(energies))
198 |                     print("varE = ", np.var(energies))
199 |                     print("minE = ", np.min(energies))
200 | 
201 |             #Run a gradient descent step
202 |             sess.run(optstep,feed_dict={Eloc:local_energies, sampleplaceholder_forgrad: samples, learningrate_placeholder: lr, T_placeholder:T})
203 | 
204 |             if it%5 == 0:
205 |                 print("Elapsed time is =", time.time()-start, " seconds")
206 |                 print('\n\n')
207 | 
208 | #----------------------------
209 | 
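A note on the surrogate cost above (an added aside, with hypothetical names): its gradient is the score-function (REINFORCE) estimate of the free-energy gradient, i.e. the sample mean of (F_loc - mean(F_loc)) * grad(log p), where the batch-mean baseline reduces variance without biasing the estimator (the extra T*E[grad(log p)] term vanishes in expectation). In plain NumPy, the same estimator reads:

```python
import numpy as np

def free_energy_gradient(dlogp, local_energies, log_probs, T):
    """Score-function estimate of dF/dtheta from one batch of samples.

    dlogp: (numsamples, numparams) array of d log p(s)/d theta per sample
    local_energies, log_probs: (numsamples,) arrays
    T: current temperature
    """
    floc = local_energies + T * log_probs   # local free energies
    baseline = floc.mean()                  # control variate
    return ((floc - baseline)[:, None] * dlogp).mean(axis=0)
```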
--------------------------------------------------------------------------------
/tools/Generate_EA_instances.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 | import os
4 | 
5 | """
6 | This code is used to generate the random instances of the 2D Edwards-Anderson model from a given set of seeds,
7 | in a format that is suitable for submission to the spin-glass server https://software.cs.uni-koeln.de/spinglass/
8 | Code by Mohamed Hibat-Allah
9 | """
10 | 
11 | list_seeds = [i for i in range(1,25+1)]
12 | 
13 | Nx = 40
14 | Ny = 40
15 | 
16 | for seed in list_seeds:
17 |     random.seed(seed)  # `python` built-in pseudo-random generator
18 |     np.random.seed(seed)  # numpy pseudo-random generator
19 | 
20 |     Jz = np.random.uniform(0,2, size = (Nx,Ny,2))-1  #couplings drawn uniformly from [-1,1)
21 | 
22 |     if not os.path.exists('./configs/'):
23 |         os.mkdir('./configs')
24 |     file = open("configs/"+str(Nx)+"x"+str(Ny)+"_uniform_seed"+str(seed)+".txt", "w")
25 | 
26 |     #Print the couplings to submit to https://software.cs.uni-koeln.de/spinglass/
27 |     for ny in range(Ny):
28 |         for nx in range(Nx):
29 |             if nx != Nx-1:
30 |                 file.write(str(nx+ny*Nx+1) + " "+str(nx+1+ny*Nx+1)+" "+str(Jz[nx,ny,0])+"\n")  #horizontal bond
31 |             if ny != Ny-1:
32 |                 file.write(str(nx+ny*Nx+1) + " " + str(nx+(ny+1)*Nx+1)+" "+str(Jz[nx,ny,1])+"\n")  #vertical bond
33 |     file.close()
34 |     print("seed ", seed, " finished.")
35 | 
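For completeness, a hedged sketch (not part of the repository) of how such an instance file can be read back into a coupling matrix; it assumes the `i j J_ij` one-bond-per-line format written above, which Generate_SK_instances.py below uses as well:

```python
import numpy as np

def read_couplings(path, num_sites):
    """Read 'i j J_ij' lines (1-indexed sites) into a symmetric matrix."""
    J = np.zeros((num_sites, num_sites))
    with open(path) as f:
        for line in f:
            i, j, val = line.split()
            J[int(i)-1, int(j)-1] = float(val)
            J[int(j)-1, int(i)-1] = float(val)
    return J

# e.g. for the 40x40 Edwards-Anderson instance generated with seed 1:
# J = read_couplings("configs/40x40_uniform_seed1.txt", 40*40)
```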
--------------------------------------------------------------------------------
/tools/Generate_SK_instances.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import random
3 | import os
4 | 
5 | """
6 | This code is used to generate the random instances of the Sherrington-Kirkpatrick model from a given set of seeds,
7 | in a format that is suitable for submission to the spin-glass server https://software.cs.uni-koeln.de/spinglass/
8 | Code by Mohamed Hibat-Allah
9 | """
10 | 
11 | list_seeds = [i for i in range(1,40+1)]
12 | ## Only the 25 seeds "1,2,3,5,6,7,8,9,13,16,18,19,21,22,23,25,27,30,31,32,34,35,38,39,40" have been successfully solved by the spin-glass server https://software.cs.uni-koeln.de/spinglass/
13 | ## Those 25 seeds are the ones we run with our variational classical annealing implementation in our paper.
14 | 
15 | N = 100
16 | 
17 | for seed in list_seeds:
18 |     random.seed(seed)  # `python` built-in pseudo-random generator
19 |     np.random.seed(seed)  # numpy pseudo-random generator
20 | 
21 |     Jz = np.random.normal(0,1/np.sqrt(N),size = (N,N))  #SK couplings with variance 1/N
22 | 
23 |     if not os.path.exists('./configs/'):
24 |         os.mkdir('./configs')
25 |     file = open("./configs/"+str(N)+"_SK_seed"+str(seed)+".txt", "w")
26 | 
27 |     #Print the couplings to submit to https://software.cs.uni-koeln.de/spinglass/
28 |     for i in range(N):
29 |         for j in range(i+1,N):
30 |             file.write(str(i+1) + " "+str(j+1)+" "+str(Jz[i,j])+"\n")  #only the upper triangle i<j is written
31 |     file.close()
32 |     print("seed ", seed, " finished.")
33 | 
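Before the Julia notebook that follows, a rough NumPy transcription of its Wishart-planted-ensemble construction may help Python readers. This is a sketch only: `generate_wpe` mirrors the Julia function of the same name below, and the RNG stream is not bit-compatible with Julia's seeded MersenneTwister objects.

```python
import numpy as np

def generate_wpe(N, alpha, rng):
    """Couplings of one Wishart planted instance (NumPy port of the Julia cell below)."""
    M = int(alpha * N)
    Z = rng.choice([-1, 1], size=(N, M))  # random +/-1 matrix defining the planted problem
    A = np.full((N, N), -1.0)
    np.fill_diagonal(A, N - 1.0)          # A = N*I - ones((N, N))
    W = (A @ Z) / np.sqrt(N * (N - 1))
    J = -(W @ W.T) / N
    np.fill_diagonal(J, 0.0)              # remove self-couplings
    J[np.abs(J) < 1e-12] = 0.0            # same threshold as the notebook
    return np.round(J, 10)

# J = generate_wpe(32, 3/32, np.random.default_rng(10100))
```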
--------------------------------------------------------------------------------
/tools/Generate_WPE_instances.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "kernelspec": {
6 | "display_name": "Julia 1.4.1",
7 | "language": "julia",
8 | "name": "julia-1.4"
9 | },
10 | "language_info": {
11 | "file_extension": ".jl",
12 | "mimetype": "application/julia",
13 | "name": "julia",
14 | "version": "1.4.1"
15 | },
16 | "colab": {
17 | "name": "Generate_WPE_instances.ipynb",
18 | "provenance": [],
19 | "collapsed_sections": []
20 | }
21 | },
22 | "cells": [
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {
26 | "id": "pgzDVVfYbdvQ"
27 | },
28 | "source": [
29 | "**This notebook is used to generate the random instances of the Wishart Planted Ensemble in the Julia language.**\r\n",
30 | "\r\n",
31 | "Code by Estelle Inack."
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "metadata": {
37 | "id": "INQUrob1bbjQ",
38 | "outputId": "bc1e5cd8-0061-494e-b47e-c2492d248ace"
39 | },
40 | "source": [
41 | "using LinearAlgebra\n",
42 | "using Random\n",
43 | "using DelimitedFiles\n",
44 | "\n",
45 | "N_list = [32] # number of spins \n",
46 | "# α_list = [0.25,0.5, 0.75, 1.0, 1.25, 1.5]\n",
47 | "# α_list = [0.125]\n",
48 | "α_list = [3/32]\n",
49 | "\n",
50 | "num_realizations = 25\n",
51 | "\n",
52 | "rng_seeds = [MersenneTwister(i*100 + 10000) for i in 1:num_realizations]"
53 | ],
54 | "execution_count": null,
55 | "outputs": [
56 | {
57 | "output_type": "execute_result",
58 | "data": {
59 | "text/plain": [
60 | "25-element Array{MersenneTwister,1}:\n",
61 | " MersenneTwister(UInt32[0x00002774], ...)  ⋮ (the verbose internal state of the 25 seeded generators is omitted here)"
86 | ]
87 | },
88 | "metadata": {
89 | "tags": []
90 | },
91 | "execution_count": 18
92 | }
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "metadata": {
98 | "id": "YTeUyO5Vbbji",
99 | "outputId": "a3e2b944-afc7-473f-9ee6-9020d227da38"
100 | },
101 | "source": [
102 | "\"\"\"\n",
103 | "A function that generates a single Wishart planted realization\n",
104 | "\"\"\"\n",
105 | "function generate_wpe(N, α, rng, identity_matrix, A) \n",
106 | " \n",
107 | "    thres = 1e-12\n",
108 | "    M = Int(α * N)\n",
109 | " \n",
110 | "    Z = rand(rng,-1:2:1, N, M)\n",
111 | "    W = (A * Z)/sqrt(N*(N-1))\n",
112 | " \n",
113 | "    Jriggle = - (W * W')/N \n",
114 | "    J = Jriggle - diag(Jriggle)' .* identity_matrix\n",
115 | " \n",
116 | "    for i in 1:N\n",
117 | "        for m in 1:N\n",
118 | "            if(abs(J[i,m]) < thres) \n",
119 | "                J[i,m] = 0.0\n",
120 | "            end\n",
121 | "        end\n",
122 | "    end\n",
123 | " \n",
124 | "    return round.(J, digits=10)\n",
125 | " \n",
126 | "end\n",
127 | "\n",
128 | "\"\"\"\n",
129 | "A function that generates many Wishart planted realizations\n",
130 | "\"\"\"\n",
131 | "function wpe_examples(n, α, identity_matrix, A)\n",
132 | "    couplers = Array{Float64}(undef, n*num_realizations, n)\n",
133 | "    for num in 1:num_realizations\n",
134 | " \n",
135 | "        println(\"Example: \", num, \" Size: \", n, \" α: \", α)\n",
136 | "        coupler = generate_wpe(n, α, rng_seeds[num], identity_matrix, A)\n",
137 | " \n",
138 | "        couplers[1+n*(num-1):n*num, :] = coupler\n",
139 | "    end\n",
140 | " \n",
141 | "    return couplers\n",
142 | "end\n",
143 | "\n",
144 | "\n",
145 | "\"\"\"\n",
146 | "A function that generates realizations for different N and α \n",
147 | "\"\"\"\n",
148 | "function generate_examples()\n",
149 | " \n",
150 | "    dict_couplers = Dict()\n",
151 | "    for n in N_list\n",
152 | " \n",
153 | "        identity_matrix = Matrix{Int}(I, n, n)\n",
154 | " \n",
155 | "        A = fill(-1, n, n)\n",
156 | "        [A[i,i] = n-1 for i in 1:n]\n",
157 | " \n",
158 | "        for α in α_list\n",
159 | "            couplers = wpe_examples(n, α, identity_matrix, A)\n",
160 | "            dict_couplers[\"size_$(n)_alpha_$(α)\"] = couplers\n",
161 | "        end\n",
162 | "    end\n",
163 | " \n",
164 | "    return dict_couplers\n",
165 | "end"
166 | ],
167 | "execution_count": null,
168 | "outputs": [
169 | {
170 | "output_type": "execute_result",
171 | "data": {
172 | "text/plain": [
173 | "generate_examples"
174 | ]
175 | },
176 | "metadata": {
177 | "tags": []
178 | },
179 | "execution_count": 19
180 | }
181 | ]
182 | },
183 | {
"cell_type": "code", 185 | "metadata": { 186 | "id": "ibsyL6izbbjj", 187 | "outputId": "a60ee87d-7b20-4589-f9c6-44e3ac0bbc8d" 188 | }, 189 | "source": [ 190 | "couplings = generate_examples()\n", 191 | "\n", 192 | "for n in N_list \n", 193 | " \n", 194 | " for α in α_list\n", 195 | " \n", 196 | " for num in 1:num_realizations\n", 197 | " filename = open(\"data/wpe_size$(n)_alpha$(α)_realization$(num).txt\", \"w\")\n", 198 | " writedlm(filename, couplings[\"size_$(n)_alpha_$(α)\"][1+n*(num-1):n*num, :], '\\t')\n", 199 | " close(filename)\n", 200 | " end\n", 201 | " \n", 202 | " end\n", 203 | "end" 204 | ], 205 | "execution_count": null, 206 | "outputs": [ 207 | { 208 | "output_type": "stream", 209 | "text": [ 210 | "Example: 1 Size: 32 α: 0.09375\n", 211 | "Example: 2 Size: 32 α: 0.09375\n", 212 | "Example: 3 Size: 32 α: 0.09375\n", 213 | "Example: 4 Size: 32 α: 0.09375\n", 214 | "Example: 5 Size: 32 α: 0.09375\n", 215 | "Example: 6 Size: 32 α: 0.09375\n", 216 | "Example: 7 Size: 32 α: 0.09375\n", 217 | "Example: 8 Size: 32 α: 0.09375\n", 218 | "Example: 9 Size: 32 α: 0.09375\n", 219 | "Example: 10 Size: 32 α: 0.09375\n", 220 | "Example: 11 Size: 32 α: 0.09375\n", 221 | "Example: 12 Size: 32 α: 0.09375\n", 222 | "Example: 13 Size: 32 α: 0.09375\n", 223 | "Example: 14 Size: 32 α: 0.09375\n", 224 | "Example: 15 Size: 32 α: 0.09375\n", 225 | "Example: 16 Size: 32 α: 0.09375\n", 226 | "Example: 17 Size: 32 α: 0.09375\n", 227 | "Example: 18 Size: 32 α: 0.09375\n", 228 | "Example: 19 Size: 32 α: 0.09375\n", 229 | "Example: 20 Size: 32 α: 0.09375\n", 230 | "Example: 21 Size: 32 α: 0.09375\n", 231 | "Example: 22 Size: 32 α: 0.09375\n", 232 | "Example: 23 Size: 32 α: 0.09375\n", 233 | "Example: 24 Size: 32 α: 0.09375\n", 234 | "Example: 25 Size: 32 α: 0.09375\n" 235 | ], 236 | "name": "stdout" 237 | } 238 | ] 239 | } 240 | ] 241 | } -------------------------------------------------------------------------------- /tutorials/Readme.md: -------------------------------------------------------------------------------- 1 | These jupyter notebooks can be run on a GPU using Google Colab (https://colab.research.google.com/). 2 | --------------------------------------------------------------------------------