├── .gitignore ├── LICENSE ├── Readme.md ├── go.mod ├── go.sum ├── json-parse ├── parse-agg.py └── parse-benchmark.py ├── main-bench.go ├── main-snarks.go ├── main.go ├── main_bench_test.go ├── plots └── gen-plots.py ├── scripts ├── hyper-bench.sh ├── hyper-go.sh └── hyper-test.sh └── vcs ├── keygen-parallel.go ├── test_utils.go ├── vcs-agg.go ├── vcs-fake.go ├── vcs-helper.go ├── vcs-merkle_bench_test.go ├── vcs-micro_bench_test.go ├── vcs-pruned.go ├── vcs-pruned_test.go ├── vcs-save-load.go ├── vcs-utils.go ├── vcs.go └── vcs_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | temp/* 2 | hyperproofs-go 3 | keygen/ 4 | pkvk*/ 5 | logs*.json* 6 | *result*.csv* 7 | plots/*.csv* 8 | plots/*.png* 9 | plots/*.pdf* 10 | plots/*.json* 11 | json-parse/*.json* 12 | .history/ 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2021 Shravan Srinivasan 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # Hyperproofs 2 | 3 | Hyperproofs, the first vector commitment (VC) scheme that is efficiently maintainable and aggregatable. 4 | This repo contains the implementation of Hyperproofs in go. 5 | 6 | This repo depends on: 7 | - [go-mcl](https://github.com/alinush/go-mcl/) for elliptic curve operations. 8 | - [kzg-go](https://github.com/hyperproofs/kzg-go) for KZG commitments. 9 | - [gipa-go](https://github.com/hyperproofs/gipa-go) for proof aggregation. 
10 | 11 | [hyperproofs]: https://ia.cr/2021/599 12 | ## Instructions 13 | 14 | ### Software requirements 15 | - Install golang, python 16 | ```bash 17 | $ sudo apt-get install git python curl python3-pip libgmp-dev libflint-dev 18 | $ sudo add-apt-repository ppa:longsleep/golang-backports 19 | $ sudo apt-get install golang golang-go golang-doc golang-golang-x-tools 20 | $ pip3 install -U pip pandas matplotlib 21 | ``` 22 | - Install ```mcl``` 23 | ```bash 24 | $ git clone https://github.com/herumi/mcl 25 | $ cd mcl/ 26 | $ git checkout caf27db2 #herumi/mcl v1.86.0 27 | $ cmake -S . -B build -DCMAKE_BUILD_TYPE=Release 28 | $ cmake --build build 29 | $ sudo cmake --build build --target install 30 | $ sudo ldconfig 31 | ``` 32 | 33 | ### Hyperproofs 34 | 0. See [v1.0.0](https://github.com/hyperproofs/hyperproofs-go/tree/51cc725b150c839987c26a3edf89fc2808fe4231) for the USENIX 2022 version 35 | 1. Run ```time bash scripts/hyper-go.sh``` to setup PRK, VRK, UPK, etc. 36 | 2. Run ```time bash scripts/hyper-bench.sh``` to replicate the benchmarks reported in the [paper][hyperproofs]. 37 | - Does not benchmark OpenAll and Commit by default. Uncomment the [corresponding lines](https://github.com/hyperproofs/hyperproofs-go/blob/main/scripts/hyper-bench.sh#L23) in the shell script to run the benchmarks. 38 | 3. Copy ```pedersen-30-single.csv``` and ```poseidon-30-single.csv``` from [bellman-bignat](https://github.com/hyperproofs/bellman-bignat) to [hyperproofs-go/plots](https://github.com/hyperproofs/hyperproofs-go/tree/main/plots). Then, run ```cd plots; time python3 gen-plots.py``` to generate the plots. 
39 | ## Reference 40 | 41 | [_Hyperproofs: Aggregating and Maintaining Proofs in Vector Commitments_][hyperproofs]\ 42 | [Shravan Srinivasan](https://github.com/sshravan), Alexander Chepurnoy, Charalampos Papamanthou, [Alin Tomescu](https://github.com/alinush), and Yupeng Zhang\ 43 | ePrint, 2021 44 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hyperproofs/hyperproofs-go 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/QED-it/go-jubjub v0.0.0-20191022103023-02870f199e5e 7 | github.com/alinush/go-mcl v0.0.0-20210224202455-eb6000c9b115 8 | github.com/dustin/go-humanize v1.0.1 9 | github.com/hyperproofs/gipa-go v0.0.0-20220302191226-79b6b6b9d404 10 | github.com/hyperproofs/kzg-go v0.0.0-20220302191111-5aea771ec346 11 | github.com/iden3/go-iden3-crypto v0.0.15 12 | github.com/pkg/errors v0.9.1 // indirect 13 | github.com/sirupsen/logrus v1.9.3 // indirect 14 | golang.org/x/crypto v0.31.0 15 | golang.org/x/text v0.21.0 16 | ) 17 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/QED-it/go-jubjub v0.0.0-20191022103023-02870f199e5e h1:W5YgXLTLD4nvO9dgQs6MTCBqGRla2Uh0brEVP9+0V2g= 2 | github.com/QED-it/go-jubjub v0.0.0-20191022103023-02870f199e5e/go.mod h1:v2i1Bl8LFYzn5JpYJ23mRv3/pT9/dlT6vYPLYLqxs8Y= 3 | github.com/alinush/go-mcl v0.0.0-20210224202455-eb6000c9b115 h1:dmRRyrH6MpUFPLjJ8yEMuOudcBDzeJ4Hnxf2sgoUjpQ= 4 | github.com/alinush/go-mcl v0.0.0-20210224202455-eb6000c9b115/go.mod h1:Mc7ekS3Dylak0ZRbsbvcDb2T4ilKb2/E3/BVDE5MspE= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 7 | github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/dchest/blake512 v1.0.0/go.mod h1:FV1x7xPPLWukZlpDpWQ88rF/SFwZ5qbskrzhLMB92JI= 9 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 10 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 11 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 12 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 13 | github.com/hyperproofs/gipa-go v0.0.0-20220302191226-79b6b6b9d404 h1:SWJ3J1ppl2FZZAPMm8I72jMnZf0shQ1MQ8icm44CJGE= 14 | github.com/hyperproofs/gipa-go v0.0.0-20220302191226-79b6b6b9d404/go.mod h1:u+w+rur8Ty0mkosVioTo6OcAiOhbzwid5pRDgXt1KNo= 15 | github.com/hyperproofs/kzg-go v0.0.0-20210614002436-76fcd25deccb/go.mod h1:prvLGmOjGylbat4d0ZKk77AoYxksWW/h5N3Sxslpe98= 16 | github.com/hyperproofs/kzg-go v0.0.0-20220302191111-5aea771ec346 h1:wbP23ncu/9LCvHY9vCYG/m3bSgv83Q7pVMB2iL/a4TQ= 17 | github.com/hyperproofs/kzg-go v0.0.0-20220302191111-5aea771ec346/go.mod h1:prvLGmOjGylbat4d0ZKk77AoYxksWW/h5N3Sxslpe98= 18 | github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= 19 | github.com/iden3/go-iden3-crypto v0.0.15/go.mod h1:dLpM4vEPJ3nDHzhWFXDjzkn1qHoBeOT/3UEhXsEsP3E= 20 | github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE= 21 | github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro= 22 | github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= 23 | github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= 24 | github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= 25 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 26 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 27 | github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 28 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 29 | github.com/sirupsen/logrus v1.7.1/go.mod h1:4GuYW9TZmE769R5STWrRakJc4UqQ3+QQ95fyz7ENv1A= 30 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 31 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 32 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 33 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 34 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= 35 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 36 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 37 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 38 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 39 | github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= 40 | github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= 41 | github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 42 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 43 | golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= 44 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 45 | golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= 46 | golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= 47 | golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= 48 | golang.org/x/crypto v0.23.0/go.mod 
h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= 49 | golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= 50 | golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= 51 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 52 | golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 53 | golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 54 | golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 55 | golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= 56 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 57 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 58 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= 59 | golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 60 | golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= 61 | golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= 62 | golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= 63 | golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= 64 | golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= 65 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 66 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 67 | golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 68 | golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= 69 | golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
70 | golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 71 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 72 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 73 | golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 74 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 75 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 76 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 77 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 78 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 79 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 80 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 81 | golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 82 | golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 83 | golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 84 | golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 85 | golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= 86 | golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 87 | golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= 88 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 89 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= 90 | golang.org/x/term 
v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 91 | golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= 92 | golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= 93 | golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= 94 | golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= 95 | golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= 96 | golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= 97 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 98 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 99 | golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 100 | golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 101 | golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 102 | golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= 103 | golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= 104 | golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 105 | golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 106 | golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= 107 | golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= 108 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 109 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 110 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 111 | golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= 112 | golang.org/x/tools v0.13.0/go.mod 
h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= 113 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= 114 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 115 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 116 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 117 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 118 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 119 | -------------------------------------------------------------------------------- /json-parse/parse-agg.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import sys 3 | import json 4 | 5 | """ 6 | Parse the output to a json and return that as a dataframe. 7 | Will return with all golang's default columns: Time, Action, Package, Output, Elapsed 8 | """ 9 | 10 | 11 | def get_clean_json_as_df(filename): 12 | raw = [] 13 | with open(filename, 'r') as f: 14 | entry = {} 15 | old_output = "" 16 | for line in f: 17 | if line.startswith("{"): 18 | entry = json.loads(line) 19 | if "Output" in entry: 20 | if line.endswith('\\t"}\n'): 21 | old_output += entry["Output"] 22 | else: 23 | entry["Output"] = old_output + entry["Output"] 24 | raw.append(entry) 25 | old_output = "" 26 | df = pd.DataFrame(raw) 27 | return df 28 | 29 | 30 | """ 31 | Works only time is measured in "ns/op" 32 | """ 33 | 34 | 35 | def get_only_measurements_rows(df): 36 | df = df[["Output"]] 37 | df = df.dropna() 38 | # Extract rows with this string 39 | df = df[df["Output"].str.contains("Benchmark")] 40 | # Extract only measurement rows 41 | df = df[df["Output"].str.contains("ns/op")] 42 | df = df["Output"].str.split(expand=True) 43 | df.columns = ["Testname", "Benchtime", 
"Time", "Time_Units", 44 | "Memusage", "Memusage_Units", "Mallocs", "Mallocs_Units"] 45 | # df[["Testname", "cores"]] = df["Testname"].str.rsplit( 46 | # "-", expand=True) # Split from the right of the str 47 | return df 48 | 49 | 50 | def parse_hyper_agg_benchmarks(df): 51 | 52 | df = get_only_measurements_rows(df) 53 | df[["Testname", "Txn"]] = df["Testname"].str.rsplit( 54 | ";", expand=True) # Split from the right of the str 55 | df[["Testname", "Ell", "Operation"]] = df["Testname"].str.rsplit( 56 | "/", expand=True) # Split from the right of the str 57 | df["Operation"] = df["Operation"].str.replace("Aggregate", "") 58 | df = df.reset_index(drop=True) 59 | df = df[["Operation", "Txn", "Time"]] 60 | 61 | return df 62 | 63 | 64 | def hyper_agg_benchmarks_driver(in_filename, out_filename): 65 | df = get_clean_json_as_df(in_filename) 66 | df = parse_hyper_agg_benchmarks(df) 67 | df.to_csv(out_filename, header=None, index=False) 68 | return df 69 | 70 | 71 | if __name__ == '__main__': 72 | 73 | in_filename = sys.argv[1] 74 | out_filename = sys.argv[2] 75 | print("Reading", in_filename) 76 | 77 | try: 78 | hyper_agg_benchmarks_driver( 79 | in_filename, out_filename) 80 | print("Writing to", out_filename) 81 | except: 82 | raise 83 | -------------------------------------------------------------------------------- /json-parse/parse-benchmark.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import math 5 | import sys 6 | import json 7 | 8 | """ 9 | Parse the output to a json and return that as a dataframe. 
10 | Will return with all golang's default columns: Time, Action, Package, Output, Elapsed 11 | """ 12 | 13 | 14 | def get_clean_json_as_df(filename): 15 | raw = [] 16 | with open(filename, 'r') as f: 17 | entry = {} 18 | old_output = "" 19 | for line in f: 20 | if line.startswith("{"): 21 | entry = json.loads(line) 22 | if "Output" in entry: 23 | if line.endswith('\\t"}\n'): 24 | old_output += entry["Output"] 25 | else: 26 | entry["Output"] = old_output + entry["Output"] 27 | raw.append(entry) 28 | old_output = "" 29 | df = pd.DataFrame(raw) 30 | return df 31 | 32 | 33 | """ 34 | Works only time is measured in "ns/op" 35 | """ 36 | 37 | 38 | def get_only_measurements_rows(df): 39 | df = df[["Output"]] 40 | df = df.dropna() 41 | # Extract rows with this string 42 | df = df[df["Output"].str.contains("Benchmark")] 43 | # Extract only measurement rows 44 | df = df[df["Output"].str.contains("ns/op")] 45 | df = df["Output"].str.split(expand=True) 46 | df.columns = ["Testname", "Benchtime", "Time", "Time_Units", 47 | "Memusage", "Memusage_Units", "Mallocs", "Mallocs_Units"] 48 | # df[["Testname", "cores"]] = df["Testname"].str.rsplit( 49 | # "-", expand=True) # Split from the right of the str 50 | return df 51 | 52 | 53 | def parse_hashing_benchmarks(df): 54 | 55 | df = get_only_measurements_rows(df) 56 | df[["Testname", "N"]] = df["Testname"].str.rsplit( 57 | ";", expand=True) # Split from the right of the str 58 | df[["Testname", "Operation"]] = df["Testname"].str.rsplit( 59 | "/", expand=True) # Split from the right of the str 60 | df = df.replace(r'', np.NaN) # r indicates that it is regular exp 61 | 62 | df = df.reset_index(drop=True) 63 | df = df[["Operation", "Benchtime", "Time", "N"]] 64 | return df 65 | 66 | 67 | def hashing_benchmarks_driver(): 68 | df = get_clean_json_as_df("hashing-benchmark.json",) 69 | df = parse_hashing_benchmarks(df) 70 | return df 71 | 72 | 73 | def parse_micro_macro_benchmarks(df): 74 | 75 | df = get_only_measurements_rows(df) 76 | 
def micro_macro_benchmarks_driver(filename):
    """Load a `go test -json` log and return the parsed micro/macro benchmark table."""
    raw = get_clean_json_as_df(filename)
    return parse_micro_macro_benchmarks(raw)
fmt.Println(out) 25 | fmt.Println(vc.SEP) 26 | return out 27 | } 28 | 29 | func BenchmarkVCSOpenAll(L uint8, txnLimit uint64) string { 30 | N := uint64(1) << L 31 | K := txnLimit 32 | vcs := vc.VCS{} 33 | vcs.KeyGenLoad(16, L, FOLDER, K) 34 | 35 | aFr := vc.GenerateVector(N) 36 | dt := time.Now() 37 | vcs.OpenAll(aFr) 38 | duration := time.Since(dt) 39 | out := fmt.Sprintf("BenchmarkVCS/%d/OpenAll;%d%40d ns/op", L, txnLimit, duration.Nanoseconds()) 40 | fmt.Println(vc.SEP) 41 | fmt.Println(out) 42 | fmt.Println(vc.SEP) 43 | return out 44 | } 45 | 46 | func Benchmark() { 47 | var ell []uint8 48 | var txns []uint64 49 | var logs []string 50 | 51 | txns = []uint64{1024} 52 | 53 | for itxn := range txns { 54 | txnLimit := txns[itxn] 55 | ell = []uint8{10, 20, 22, 24, 26} // Change the tree height here 56 | for i := range ell { 57 | l := BenchmarkVCSCommit(ell[i], txnLimit) 58 | logs = append(logs, l) 59 | } 60 | 61 | ell = []uint8{10, 20, 22, 24} // Change the tree height here 62 | for i := range ell { 63 | l := BenchmarkVCSOpenAll(ell[i], txnLimit) 64 | logs = append(logs, l) 65 | } 66 | } 67 | fmt.Println(vc.SEP) 68 | for iLog := range logs { 69 | fmt.Println(logs[iLog]) 70 | } 71 | fmt.Println(vc.SEP) 72 | } 73 | -------------------------------------------------------------------------------- /main-snarks.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "math" 8 | "math/rand" 9 | "testing" 10 | 11 | "github.com/alinush/go-mcl" 12 | "github.com/dustin/go-humanize" 13 | "golang.org/x/text/language" 14 | "golang.org/x/text/message" 15 | ) 16 | 17 | func snarks_verifier() { 18 | var db map[string]float64 19 | db = make(map[string]float64) 20 | BenchmarkSnarkVerifierBinaryFieldElements(&db) 21 | BenchmarkSnarkVerifierRandomFieldElements(&db) 22 | // keys, _ := getKeyValues(db) 23 | 24 | json, err := json.Marshal(db) 25 | if err != nil { 26 | panic(err) 
27 | } 28 | err = ioutil.WriteFile("./plots/benchmarking-snarks-verifier.json", json, 0666) 29 | if err != nil { 30 | panic(err) 31 | } 32 | fmt.Println("Data saved to: ./plots/benchmarking-snarks-verifier.json") 33 | macro_merkle_snark_driver() // TODO merge all the micro and macro results to a single JSON file. 34 | } 35 | 36 | func Summary(size uint64, op string, aux string, r *testing.BenchmarkResult) { 37 | 38 | // a := time.Duration(r.NsPerOp() / int64(size)) 39 | // out := fmt.Sprintf("Time per %s (%d iters%s):", op, r.N, aux) 40 | // fmt.Printf("%-60s %20v\n", out, a) 41 | 42 | p := message.NewPrinter(language.English) 43 | a := float64(r.NsPerOp()) / float64(size) / float64(1000) // Convert ns to us 44 | out := fmt.Sprintf("Time per %s (%s%d iters):", op, aux, r.N) 45 | p.Printf("%-60s %20.3f us\n", out, a) 46 | } 47 | 48 | // Merkle proof aggregation of N leaves: 49 | // During verification the public inputs are: 50 | // 1. N leaf values 51 | // 2. Each vector index requires log N bits. Thus, N log N inputs are required for checking indices. Not that exponents are binary. 52 | // 3. Old index requires 1 input 53 | // 4. New index requires 1 input 54 | func BenchmarkSnarkVerifierBinaryFieldElements(db *map[string]float64) { 55 | 56 | var size []uint64 57 | 58 | size = []uint64{8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384} // Number of Merkle proofs 59 | 60 | for i := 0; i < len(size); i++ { 61 | N := size[i] 62 | logN := uint64(math.Log2(float64(N))) 63 | 64 | M := N + N*logN + 1 65 | baseG1_elements := generateG1(N + 1) 66 | expoFr_elements := generateFr(N + 1) 67 | 68 | baseG1_indices := generateG1(N * logN) 69 | expoFr_indices := generateBinaryFr(N * logN) 70 | // baseG1_indices := generateG1(N) 71 | // expoFr_indices := generateFr(N) 72 | 73 | baseG1 := append(baseG1_elements, baseG1_indices...) 74 | expoFr := append(expoFr_elements, expoFr_indices...) 
75 | 76 | P, Q := generate_pairing_data(4) // 1 out 4 pairing can be precomputed, I think, for Groth16. 77 | 78 | fmt.Println("Done generating the data. N =", N, "M =", M) 79 | 80 | var results testing.BenchmarkResult 81 | 82 | // ============================================= 83 | results = testing.Benchmark(func(t *testing.B) { 84 | var result mcl.G1 85 | var out mcl.GT 86 | t.ResetTimer() 87 | for i := 0; i < t.N; i++ { 88 | mcl.G1MulVec(&result, baseG1, expoFr) 89 | mcl.G1Neg(&P[0], &P[0]) 90 | mcl.MillerLoopVec(&out, P, Q) 91 | mcl.FinalExp(&out, &out) 92 | } 93 | }) 94 | 95 | Summary(1, "G1MulVecBinary", fmt.Sprintf("size %s; ", humanize.Comma(int64(M))), &results) 96 | Summary(M, "G1MulVecBinary", fmt.Sprintf("per exp; "), &results) 97 | (*db)[fmt.Sprintf("%d;%d;G1MulVecBinary", N, M)] = float64(results.NsPerOp()) 98 | (*db)[fmt.Sprintf("%d;%d;G1MulVecBinaryAvg", N, M)] = float64(results.NsPerOp()) / float64(M) 99 | fmt.Println(sep_string("")) 100 | // ============================================= 101 | } 102 | } 103 | 104 | func BenchmarkSnarkVerifierRandomFieldElements(db *map[string]float64) { 105 | 106 | var size []uint64 107 | 108 | size = []uint64{8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384} // Number of Merkle proofs 109 | 110 | for i := 0; i < len(size); i++ { 111 | N := size[i] 112 | // logN := uint64(math.Log2(float64(N))) 113 | 114 | M := N + N + 1 115 | baseG1_elements := generateG1(N + 1) 116 | expoFr_elements := generateFr(N + 1) 117 | 118 | baseG1_indices := generateG1(N) 119 | expoFr_indices := generateFr(N) 120 | 121 | baseG1 := append(baseG1_elements, baseG1_indices...) 122 | expoFr := append(expoFr_elements, expoFr_indices...) 123 | 124 | P, Q := generate_pairing_data(4) // 1 out 4 pairing can be precomputed, I think, for Groth16. 125 | 126 | fmt.Println("Done generating the data. 
// sep_string returns its argument followed by a fixed "=" separator rule,
// used to visually delimit benchmark sections in stdout.
// Idiom fix: plain string concatenation replaces the needless fmt.Sprintf
// round-trip (same output, no fmt dependency, no format parsing).
func sep_string(in string) string {
	const rule = "============================================="
	return in + rule
}
make(map[string]float64) 198 | BenchmarkStatelessSnarkVerifierBinaryFieldElements(&db) 199 | 200 | json, err := json.Marshal(db) 201 | if err != nil { 202 | panic(err) 203 | } 204 | err = ioutil.WriteFile("./plots/benchmarking-snarks-verifier-macro.json", json, 0666) 205 | if err != nil { 206 | panic(err) 207 | } 208 | fmt.Println("Data saved to: ./plots/benchmarking-snarks-verifier-macro.json") 209 | } 210 | 211 | // Stateless setting(macro benchmarking): (different from the above microbenchmarking settings) Merkle proof aggregation of N leaves: 212 | // During verification the public inputs are: 213 | // 1. old digest 214 | // 2. New digest 215 | // 3. (sender's index, receiver's index, delta) Note that indices will be in binary form 216 | func BenchmarkStatelessSnarkVerifierBinaryFieldElements(db *map[string]float64) { 217 | 218 | var size []uint64 219 | 220 | size = []uint64{8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384} // Number of Merkle proofs 221 | 222 | for i := 0; i < len(size); i++ { 223 | N := size[i] 224 | logN := uint64(math.Log2(float64(N))) 225 | 226 | M := N + 2*N*logN + 2 227 | baseG1_elements := generateG1(N + 2) // N deltas and 2 digest 228 | expoFr_elements := generateFr(N + 2) // N deltas and 2 digest 229 | 230 | baseG1_indices := generateG1(2 * N * logN) // alice's indices in binary and bob's indices in binary 231 | expoFr_indices := generateBinaryFr(2 * N * logN) // alice's indices in binary and bob's indices in binary 232 | // baseG1_indices := generateG1(N) 233 | // expoFr_indices := generateFr(N) 234 | 235 | baseG1 := append(baseG1_elements, baseG1_indices...) 236 | expoFr := append(expoFr_elements, expoFr_indices...) 237 | 238 | P, Q := generate_pairing_data(4) // 1 out 4 pairing can be precomputed, I think, for Groth16. 239 | 240 | fmt.Println("Done generating the data. 
N =", N, "M =", M) 241 | 242 | var results testing.BenchmarkResult 243 | 244 | // ============================================= 245 | results = testing.Benchmark(func(t *testing.B) { 246 | var result mcl.G1 247 | var out mcl.GT 248 | t.ResetTimer() 249 | for i := 0; i < t.N; i++ { 250 | mcl.G1MulVec(&result, baseG1, expoFr) 251 | mcl.G1Neg(&P[0], &P[0]) 252 | mcl.MillerLoopVec(&out, P, Q) 253 | mcl.FinalExp(&out, &out) 254 | } 255 | }) 256 | 257 | Summary(1, "G1MulVecBinaryMacro", fmt.Sprintf("size %s; ", humanize.Comma(int64(M))), &results) 258 | Summary(M, "G1MulVecBinaryMacro", fmt.Sprintf("per exp; "), &results) 259 | (*db)[fmt.Sprintf("%d;%d;G1MulVecBinaryMacro", N, M)] = float64(results.NsPerOp()) 260 | (*db)[fmt.Sprintf("%d;%d;G1MulVecBinaryMacroAvg", N, M)] = float64(results.NsPerOp()) / float64(M) 261 | fmt.Println(sep_string("")) 262 | // ============================================= 263 | } 264 | } 265 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | "github.com/alinush/go-mcl" 11 | "github.com/hyperproofs/hyperproofs-go/vcs" 12 | ) 13 | 14 | func main() { 15 | testing.Init() 16 | flag.Parse() 17 | 18 | fmt.Println("Hello, World!") 19 | mcl.InitFromString("bls12-381") 20 | 21 | dt := time.Now() 22 | fmt.Println("Specific date and time is: ", dt.Format(time.UnixDate)) 23 | 24 | fmt.Println(vcs.SEP) 25 | 26 | args := os.Args 27 | 28 | if len(args) == 1 { 29 | var L uint8 30 | L = uint8(26) 31 | _ = hyperGenerateKeys(L, false) 32 | 33 | L = uint8(30) 34 | _ = hyperGenerateKeys(L, true) 35 | } else { 36 | if args[1] == "1" { 37 | snarks_verifier() 38 | } else { 39 | Benchmark() // Uncomment this benchmark Commit and OpenAll. 
40 | } 41 | } 42 | } 43 | 44 | func hyperGenerateKeys(L uint8, fake bool) *vcs.VCS { 45 | 46 | N := uint64(1) << L 47 | vcs := vcs.VCS{} 48 | 49 | fmt.Println("L:", L, "N:", N) 50 | folderPath := fmt.Sprintf("pkvk-%02d", L) 51 | if fake { 52 | vcs.KeyGenFake(16, L, folderPath, 1<<12) 53 | } else { 54 | vcs.KeyGen(16, L, folderPath, 1<<12) 55 | } 56 | 57 | fmt.Println("KeyGen ... Done") 58 | return &vcs 59 | } 60 | 61 | func hyperLoadKeys(L uint8) *vcs.VCS { 62 | 63 | folderPath := fmt.Sprintf("pkvk-%02d", L) 64 | vcs := vcs.VCS{} 65 | 66 | vcs.KeyGenLoad(16, L, folderPath, 1<<12) 67 | 68 | fmt.Println("KeyGenLoad ... Done") 69 | return &vcs 70 | } 71 | -------------------------------------------------------------------------------- /main_bench_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/rand" 5 | "fmt" 6 | "math/big" 7 | "testing" 8 | 9 | "github.com/QED-it/go-jubjub/pkg/pedersenhash" 10 | "github.com/iden3/go-iden3-crypto/poseidon" 11 | "golang.org/x/crypto/blake2b" 12 | ) 13 | 14 | func benchmarkHashing(b *testing.B, kind string, logarraysize int) { 15 | 16 | datasize := (1 << (logarraysize)) * 20 17 | b.Run(fmt.Sprintf("%s;%d", kind, logarraysize), func(b *testing.B) { 18 | bytes := make([]byte, datasize) 19 | b.ResetTimer() 20 | for bn := 0; bn < b.N; bn++ { 21 | b.StopTimer() 22 | _, err := rand.Read(bytes) 23 | if err != nil { 24 | panic(fmt.Sprintf("Error randomly generating data: %v\n", err)) 25 | } 26 | b.StartTimer() 27 | if kind == "Blake2b256" { 28 | _ = blake2b.Sum256(bytes) 29 | } else if kind == "Poseidon" { 30 | poseidon.HashBytes(bytes) 31 | } 32 | } 33 | }) 34 | } 35 | 36 | func benchmarkPoseidon(b *testing.B) { 37 | 38 | datasize := 32 39 | b.Run(fmt.Sprintf("Poseidon;"), func(b *testing.B) { 40 | bytes := make([]byte, datasize) 41 | b.ResetTimer() 42 | for bn := 0; bn < b.N; bn++ { 43 | b.StopTimer() 44 | _, _ = rand.Read(bytes) 45 | A, _ := 
poseidon.HashBytes(bytes) 46 | _, _ = rand.Read(bytes) 47 | B, _ := poseidon.HashBytes(bytes) 48 | array := []*big.Int{A, B} 49 | b.StartTimer() 50 | 51 | poseidon.Hash(array) 52 | } 53 | }) 54 | } 55 | 56 | func benchmarkBlake2b256(b *testing.B) { 57 | 58 | datasize := 32 59 | b.Run(fmt.Sprintf("Blake2b256;"), func(b *testing.B) { 60 | bytes := make([]byte, datasize) 61 | b.ResetTimer() 62 | for bn := 0; bn < b.N; bn++ { 63 | b.StopTimer() 64 | _, _ = rand.Read(bytes) 65 | A := blake2b.Sum256(bytes) 66 | _, _ = rand.Read(bytes) 67 | B := blake2b.Sum256(bytes) 68 | array := make([]byte, 0, 2*len(A)) 69 | array = append(array, A[:]...) 70 | array = append(array, B[:]...) 71 | b.StartTimer() 72 | 73 | _ = blake2b.Sum256(bytes) 74 | } 75 | }) 76 | } 77 | 78 | func convert_bytes_to_bool(byteArray []byte) []bool { 79 | X := make([]bool, len(byteArray)*8) 80 | for _, in := range byteArray { 81 | for i := 7; i > 0; i-- { 82 | if in&(1< 0 { 83 | X[i] = true 84 | } 85 | } 86 | } 87 | return X 88 | } 89 | 90 | func benchmarkPedersenHash(b *testing.B) { 91 | 92 | datasize := 32 93 | bytesX := make([]byte, datasize/2) 94 | bytesY := make([]byte, datasize/2) 95 | _, _ = rand.Read(bytesX) 96 | _, _ = rand.Read(bytesY) 97 | X := convert_bytes_to_bool(bytesX) 98 | Y := convert_bytes_to_bool(bytesY) 99 | 100 | hasher, _ := pedersenhash.NewPedersenHasher() 101 | p, _ := hasher.PedersenHashForBits(X, Y) 102 | bytesX = p.X().Bytes() 103 | bytesY = p.Y().Bytes() 104 | 105 | b.Run(fmt.Sprintf("Pedersen128;"), func(b *testing.B) { 106 | b.ResetTimer() 107 | for bn := 0; bn < b.N; bn++ { 108 | b.StopTimer() 109 | bytesX = p.X().Bytes() 110 | bytesY = p.Y().Bytes() 111 | X = convert_bytes_to_bool(bytesX) 112 | Y = convert_bytes_to_bool(bytesY) 113 | b.StartTimer() 114 | p, _ = hasher.PedersenHashForBits(X, Y) 115 | } 116 | }) 117 | } 118 | 119 | func BenchmarkHashing(b *testing.B) { 120 | logarraysize := []int{6, 7} 121 | for i := range logarraysize { 122 | benchmarkHashing(b, "Poseidon", 
def rmMathFmt(x, y):
    """Tick-formatter callback: render the tick value plainly (no math text).

    The second argument (tick position) is required by matplotlib's
    FuncFormatter signature but unused.
    """
    return str(x)
def hyperproofs_aggregation(file, columns_list):
    """Load a Hyperproofs aggregation CSV and return one timing column.

    file is a (path, source_column, new_label) tuple; columns_list names
    the headerless CSV's columns (must include "time", "numleaves", and
    "operation"). Returns a frame indexed by numleaves with the single
    renamed column, times converted to seconds.
    """
    path, src_col, tag = file
    table = pd.read_csv(path, header=None)
    table.columns = columns_list
    # Go benchmark measurements are in nanoseconds. Convert to seconds.
    table['time'] = table['time'] / 10**9
    wide = table.pivot(index="numleaves", columns="operation", values="time")
    wide = wide.rename(columns={src_col: tag})
    return wide[[tag]]
73 | df_d = df_d.dropna(how='all') # Drop the rest of the rows 74 | df = df.loc[df.dropna().index[-1]:] 75 | df = df.add(df_d, fill_value=0) 76 | 77 | df.index = np.log2(df.index).astype(int) 78 | df.columns = ["{:} (extrapolated)".format(x) for x in df.columns] 79 | return df 80 | 81 | 82 | def agg_prove_plot(df): 83 | 84 | df_c = df[:] 85 | df_c.index = np.log2(df_c.index).astype(int) 86 | df_b = extrapolate_owwb20(df[:]) 87 | 88 | plt.rcParams.update(rcParams) 89 | f, ax = plt.subplots(figsize=(12, 9)) 90 | df_c.plot(ax=ax, marker=".", linewidth=6, markersize=20) 91 | colors = [x.get_color() for x in ax.get_lines()[-len(df_b.columns):]] 92 | df_b.plot(ax=ax, linestyle="dotted", marker=".", legend=False, 93 | color=colors, linewidth=6, markersize=20) 94 | ax.set_yscale('log') 95 | ax.set_ylabel("Proving time (s)", fontsize=rcParams['font.size'] + 5) 96 | ax.set_xlabel( 97 | "Aggregation size ($\mathbf{\log_2}$ scale)", fontsize=rcParams['font.size'] + 5) 98 | plt.xticks(fontsize=rcParams['font.size'] + 5) 99 | plt.yticks(fontsize=rcParams['font.size'] + 5) 100 | ax.xaxis.set_ticks_position('bottom') 101 | ax.yaxis.set_ticks_position('left') 102 | ax.tick_params(length=10, which="major", direction='out') 103 | ax.tick_params(length=5, which="minor", direction='out') 104 | # Has to be interger. Else, FuncFormatter will truncate the decimal values. 
def merkle_aggregations_verifier(filename):
    """Parse the SNARK-verifier timing JSON into a one-column frame.

    The JSON maps "N;M;operation" keys to nanosecond timings. Returns a
    frame indexed by N whose single "Merkle" column holds the
    G1MulVecBinary verification time in seconds.
    """
    timings = pd.read_json(filename, typ="series").to_frame()
    timings.columns = ["time"]
    timings["key"] = timings.index
    timings = timings[["key", "time"]]

    # "N;M;operation" -> three typed columns.
    parts = timings["key"].str.split(";", expand=True)
    parts.columns = ["N", "M", "operation"]
    timings = pd.concat([timings, parts], axis=1)
    del timings["key"]
    timings["N"] = timings["N"].astype(int)
    timings["M"] = timings["M"].astype(int)
    timings = timings.sort_values(by=["N", "operation", "M"]).reset_index(drop=True)
    del timings["M"]

    # One row per N, one column per operation.
    wide = timings.pivot(index="N", columns="operation", values="time")
    del wide["G1MulVecBinaryAvg"]
    del wide["G1MulVecRandomAvg"]
    wide.index.name = None
    wide.columns.name = None

    # Keep the binary-exponent variant and convert ns -> s.
    merkle = wide[["G1MulVecBinary"]].rename(columns={"G1MulVecBinary": "Merkle"})
    return merkle / 10**9
ax.xaxis.set_ticks_position('bottom') 161 | ax.yaxis.set_ticks_position('left') 162 | ax.tick_params(length=10, which="major", direction='out') 163 | ax.tick_params(length=5, which="minor", direction='out') 164 | # Has to be interger. Else, FuncFormatter will truncate the decimal values. 165 | ax.xaxis.set_ticks(np.arange(2, 15, 2)) 166 | def fmter(x, y): return '$\mathbf{10^{' + str(int(math.log10(x))) + '}}$' 167 | ax.yaxis.set_major_formatter(plt.FuncFormatter(fmter)) 168 | ax.xaxis.set_major_formatter( 169 | plt.FuncFormatter(lambda x, y: '{:.0f}'.format(x))) 170 | plt.grid(True, which="both") 171 | plt.tight_layout(pad=0.08) 172 | plt.savefig("aggregation-verifier-log.pdf") 173 | 174 | 175 | def agg_end_to_end_plot(pDf, vDf): 176 | 177 | df_v = vDf 178 | df_v.index = np.log2(df_v.index).astype(int) 179 | 180 | df_c = pDf[:] 181 | df_c.index = np.log2(df_c.index).astype(int) 182 | df_b = extrapolate_owwb20(pDf[:]) 183 | 184 | # df_v.to_csv("verification.csv", index_label="numtxn") 185 | # df_c.to_csv("prover-asis.csv", index_label="numtxn") 186 | # df_b.to_csv("prover-extra.csv", index_label="numtxn") 187 | 188 | df_c["Hyperproofs"] = df_c["Hyperproofs"] + df_v["Hyperproofs"] 189 | df_c["Merkle (Poseidon)"] = df_c["Merkle (Poseidon)"] + df_v["Merkle"] 190 | df_c["Merkle (Pedersen)"] = df_c["Merkle (Pedersen)"] + df_v["Merkle"] 191 | 192 | df_b["Merkle (Poseidon) (extrapolated)"] += df_v["Merkle"] 193 | df_b["Merkle (Pedersen) (extrapolated)"] += df_v["Merkle"] 194 | 195 | plt.rcParams.update(rcParams) 196 | f, ax = plt.subplots(figsize=(12, 9)) 197 | df_c.plot(ax=ax, marker=".", linewidth=6, markersize=20) 198 | colors = [x.get_color() for x in ax.get_lines()[-len(df_b.columns):]] 199 | df_b.plot(ax=ax, linestyle="dotted", marker=".", legend=False, 200 | color=colors, linewidth=6, markersize=20) 201 | # plt.legend(prop={'size': 30}) 202 | ax.set_yscale('log') 203 | ax.set_ylabel("Proving + Verification time (s)", 204 | fontsize=rcParams['font.size'] + 5) 205 | 
ax.set_xlabel( 206 | "Aggregation size ($\mathbf{\log_2}$ scale)", fontsize=rcParams['font.size'] + 5) 207 | plt.xticks(fontsize=rcParams['font.size'] + 5) 208 | plt.yticks(fontsize=rcParams['font.size'] + 5) 209 | ax.xaxis.set_ticks_position('bottom') 210 | ax.yaxis.set_ticks_position('left') 211 | ax.tick_params(length=10, which="major", direction='out') 212 | ax.tick_params(length=5, which="minor", direction='out') 213 | # Has to be interger. Else, FuncFormatter will truncate the decimal values. 214 | ax.xaxis.set_ticks(np.arange(2, 15, 2)) 215 | def fmter(x, y): return '$\mathbf{10^{' + str(int(math.log10(x))) + '}}$' 216 | ax.yaxis.set_major_formatter(plt.FuncFormatter(fmter)) 217 | ax.xaxis.set_major_formatter( 218 | plt.FuncFormatter(lambda x, y: '{:.0f}'.format(x))) 219 | plt.grid(True, which="both") 220 | plt.tight_layout(pad=0.08) 221 | plt.savefig("aggregation-e2e-log.pdf") 222 | 223 | 224 | if __name__ == '__main__': 225 | print("Hello, World!") 226 | columns_list = ["type", "swaps", "height", "init", 227 | "paramgen", "synth", "prover", "verifier"] 228 | folder = "./" 229 | files = [ 230 | ("poseidon-30-single.csv", "prover", "Merkle (Poseidon)"), 231 | ("pedersen-30-single.csv", "prover", "Merkle (Pedersen)") 232 | ] 233 | files = [("{}{}".format(folder, x[0]), x[1], x[2]) for x in files] 234 | df1 = merkle_aggregations_prover(files, columns_list) 235 | 236 | hp_columns_list = ["operation", "numleaves", "time"] 237 | df2 = hyperproofs_aggregation(("{}{}".format( 238 | folder, "hyperproofs-agg.csv"), "Prove", "Hyperproofs"), hp_columns_list) 239 | 240 | proveDf = pd.concat([df2, df1], axis=1) 241 | agg_prove_plot(proveDf) 242 | 243 | df3 = hyperproofs_aggregation(("{}{}".format( 244 | folder, "hyperproofs-agg.csv"), "Verify", "Hyperproofs"), hp_columns_list) 245 | 246 | df4 = merkle_aggregations_verifier("{}{}".format( 247 | folder, "benchmarking-snarks-verifier.json")) 248 | 249 | verifyDf = pd.concat([df3, df4], axis=1) 250 | 
agg_verify_plot(verifyDf) 251 | 252 | agg_end_to_end_plot(proveDf, verifyDf) 253 | -------------------------------------------------------------------------------- /scripts/hyper-bench.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | shopt -s expand_aliases 4 | alias time='date; time' 5 | 6 | scriptdir=$(cd $(dirname $0); pwd -P) 7 | sourcedir=$(cd $scriptdir/..; pwd -P) 8 | 9 | # Micro benchmarks without OpenAll and Commit 10 | filepath="$sourcedir/json-parse/micro-macro-1024txn.json" 11 | time go test -v ./vcs -bench=BenchmarkPrunedVCS -run=BenchmarkPrunedVCS -benchtime 4x -benchmem -timeout 10800m -json | tee $filepath 12 | 13 | # Benchmarks of Hyperproofs aggregation 14 | filepath="$sourcedir/json-parse/hyper-agg.json" 15 | time go test -v ./vcs -bench=BenchmarkVCSAgg -run=BenchmarkVCSAgg -benchtime 2x -benchmem -timeout 360m -json | tee $filepath 16 | outpath="$sourcedir/plots/hyperproofs-agg.csv" 17 | time python3 "$sourcedir/json-parse/parse-agg.py" $filepath $outpath 18 | 19 | # This computes the estimate verification time of SNARK based Merkle aggregation. 20 | go build && time ./hyperproofs-go 1 21 | 22 | # # WARNING: Benchmarking OpenAll and Commit takes around 6.5 hours. 
23 | # go build && time ./hyperproofs-go 2 # This benchmarks OpenAll and Commit 24 | -------------------------------------------------------------------------------- /scripts/hyper-go.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | shopt -s expand_aliases 4 | alias time='date; time' 5 | 6 | scriptdir=$(cd $(dirname $0); pwd -P) 7 | sourcedir=$(cd $scriptdir/..; pwd -P) 8 | 9 | time go build && time ./hyperproofs-go 10 | -------------------------------------------------------------------------------- /scripts/hyper-test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | shopt -s expand_aliases 4 | alias time='date; time' 5 | 6 | scriptdir=$(cd $(dirname $0); pwd -P) 7 | sourcedir=$(cd $scriptdir/..; pwd -P) 8 | 9 | time go test -v ./vcs -run=TestVCSPruned 10 | time go test -v ./vcs -run=TestVCS 11 | -------------------------------------------------------------------------------- /vcs/keygen-parallel.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "os" 7 | "sync" 8 | 9 | "github.com/alinush/go-mcl" 10 | ) 11 | 12 | // [start, stop) 13 | func (vcs *VCS) PrkGen(index uint8, start uint64, stop uint64, wg *sync.WaitGroup) { 14 | 15 | os.MkdirAll(vcs.folderPath, os.ModePerm) 16 | fileName := vcs.folderPath + fmt.Sprintf(PRKNAME, index) 17 | f, err := os.Create(fileName) 18 | check(err) 19 | 20 | var result mcl.G1 21 | for i := start; i < stop; i++ { 22 | exponent := vcs.SelectPRK(i) 23 | mcl.G1Mul(&result, &vcs.G, &exponent) 24 | _, err = f.Write(result.Serialize()) 25 | check(err) 26 | if !vcs.PARAM_TOO_LARGE { 27 | vcs.PRK[i] = result 28 | } 29 | // fmt.Println(i) 30 | } 31 | fmt.Println("Dumped ", fileName, BoundsPrint(start, stop)) 32 | defer f.Close() 33 | defer wg.Done() 34 | } 35 | 36 | func (vcs *VCS) PrkGenDriver() { 37 | 
fmt.Println(SEP, "Generating the PRK", SEP) 38 | if !vcs.PARAM_TOO_LARGE { 39 | // Actually we can avoid during Save 40 | vcs.PRK = make([]mcl.G1, vcs.N) // Allocate space for PRK 41 | } 42 | var wg sync.WaitGroup 43 | step := uint64(math.Ceil(float64(vcs.N) / float64(NFILES))) // Maximum size of each file. 44 | 45 | start := uint64(0) 46 | stop := step 47 | for i := uint8(0); i < NFILES; i++ { 48 | wg.Add(1) 49 | go vcs.PrkGen(i, start, stop, &wg) 50 | 51 | start += step 52 | stop += step 53 | if (i+1)%NCORES == 0 { 54 | wg.Wait() 55 | } 56 | } 57 | wg.Wait() 58 | } 59 | 60 | func (vcs *VCS) UpkGen(index uint8, start uint64, stop uint64, wg *sync.WaitGroup) { 61 | 62 | os.MkdirAll(vcs.folderPath, os.ModePerm) 63 | fileName := vcs.folderPath + fmt.Sprintf(UPKNAME, index) 64 | f, err := os.Create(fileName) 65 | check(err) 66 | // fmt.Println(fileName) 67 | var result mcl.G1 68 | for j := start; j < stop; j++ { 69 | i, k := IndexInTheLevel(j) 70 | exponent := vcs.SelectUPK(i, k) 71 | mcl.G1Mul(&result, &vcs.G, &exponent) 72 | _, err = f.Write(result.Serialize()) 73 | check(err) 74 | if !vcs.PARAM_TOO_LARGE { 75 | vcs.UPK[i][k] = result 76 | } 77 | 78 | // fmt.Println(i, k, exponent.IsZero(), result.IsZero(), vcs.PRK[i][k].IsZero()) 79 | } 80 | 81 | fmt.Println("Dumped ", fileName, BoundsPrint(start, stop)) 82 | defer f.Close() 83 | defer wg.Done() 84 | } 85 | 86 | func (vcs *VCS) UpkGenDriver() { 87 | 88 | var wg sync.WaitGroup 89 | if !vcs.PARAM_TOO_LARGE { 90 | // Allocate space for UPK 91 | vcs.MallocUpk() 92 | } 93 | fmt.Println(SEP, "Generating the UPK", SEP) 94 | 95 | numUPK := (uint64(1) << (vcs.L + 1)) - 1 // Number of nodes in the UPK tree 96 | step := uint64(math.Ceil(float64(numUPK) / float64(NFILES))) 97 | start := uint64(0) 98 | stop := step 99 | 100 | for i := uint8(0); i < NFILES; i++ { 101 | // fmt.Println(i, start, stop) 102 | wg.Add(1) 103 | go vcs.UpkGen(i, start, stop, &wg) 104 | 105 | start += step 106 | stop += step 107 | stop = minUint64(stop, 
numUPK) 108 | 109 | if (i+1)%NCORES == 0 { 110 | wg.Wait() 111 | } 112 | } 113 | wg.Wait() 114 | } 115 | 116 | func (vcs *VCS) PrkUpkGen() { 117 | 118 | vcs.UpkGenDriver() 119 | if !vcs.DISCARD_PRK && !vcs.PARAM_TOO_LARGE { 120 | fmt.Println(SEP) 121 | vcs.PrkGenDriver() // This also allocates memory for PRK 122 | } 123 | fmt.Println(SEP) 124 | } 125 | -------------------------------------------------------------------------------- /vcs/test_utils.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "os" 7 | "sync" 8 | 9 | "github.com/alinush/go-mcl" 10 | ) 11 | 12 | func getKeyValuesFr(db map[uint64]mcl.Fr) ([]uint64, []mcl.Fr) { 13 | 14 | keys := make([]uint64, 0, len(db)) 15 | values := make([]mcl.Fr, 0, len(db)) 16 | for k, v := range db { 17 | keys = append(keys, k) 18 | values = append(values, v) 19 | } 20 | return keys, values 21 | } 22 | 23 | func getKeyValuesG1(db map[uint64]mcl.G1) ([]uint64, []mcl.G1) { 24 | 25 | keys := make([]uint64, 0, len(db)) 26 | values := make([]mcl.G1, 0, len(db)) 27 | for k, v := range db { 28 | keys = append(keys, k) 29 | values = append(values, v) 30 | } 31 | return keys, values 32 | } 33 | 34 | func fillRange(aFr *[]mcl.Fr, start uint64, stop uint64, wg *sync.WaitGroup) { 35 | for i := start; i < stop; i++ { 36 | (*aFr)[i].Random() 37 | } 38 | wg.Done() 39 | } 40 | 41 | func GenerateVector(N uint64) []mcl.Fr { 42 | var aFr []mcl.Fr 43 | aFr = make([]mcl.Fr, N) 44 | 45 | step := N / 16 46 | if step < 1 { 47 | step = 1 48 | } 49 | 50 | start := uint64(0) 51 | stop := start + step 52 | stop = minUint64(stop, N) 53 | var wg sync.WaitGroup 54 | for start < N { 55 | wg.Add(1) 56 | fillRange(&aFr, start, stop, &wg) 57 | start += step 58 | stop += step 59 | stop = minUint64(stop, N) 60 | } 61 | wg.Wait() 62 | 63 | return aFr 64 | } 65 | 66 | func SaveVector(N uint64, aFr []mcl.Fr) { 67 | folderPath := "pkvk/" 68 | 
os.MkdirAll(folderPath, os.ModePerm) 69 | fileName := folderPath + "/Vec.data" 70 | 71 | f, err := os.Create(fileName) 72 | check(err) 73 | fmt.Println(fileName) 74 | 75 | intBytes := make([]byte, 8) 76 | binary.LittleEndian.PutUint64(intBytes, N) 77 | _, err = f.Write(intBytes) 78 | check(err) 79 | 80 | for i := uint64(0); i < N; i++ { 81 | _, err = f.Write(aFr[i].Serialize()) 82 | check(err) 83 | } 84 | fmt.Println("Dumped ", fileName) 85 | defer f.Close() 86 | } 87 | 88 | func LoadVector(N uint64, folderPath string) []mcl.Fr { 89 | 90 | fileName := folderPath + "/Vec.data" 91 | 92 | f, err := os.Open(fileName) 93 | check(err) 94 | 95 | var n uint64 96 | data := make([]byte, 8) 97 | 98 | _, err = f.Read(data) 99 | 100 | n = binary.LittleEndian.Uint64(data) 101 | 102 | if N > n { 103 | panic("Vec Load Error: There is not enough to read") 104 | } 105 | 106 | dataFr := make([]byte, GetFrByteSize()) 107 | aFr := make([]mcl.Fr, N) 108 | 109 | for i := uint64(0); i < N; i++ { 110 | _, err = f.Read(dataFr) 111 | check(err) 112 | aFr[i].Deserialize(dataFr) 113 | } 114 | 115 | defer f.Close() 116 | return aFr 117 | } 118 | 119 | // Export the proofs in the 2D format for the VCS API 120 | func GetProofVecFromDb(proofs_db map[uint64][]mcl.G1, indexVec []uint64) [][]mcl.G1 { 121 | proofVec := make([][]mcl.G1, len(indexVec)) 122 | 123 | for i := range indexVec { 124 | proofVec[i] = make([]mcl.G1, len(proofs_db[indexVec[i]])) 125 | copy(proofVec[i], proofs_db[indexVec[i]]) 126 | } 127 | return proofVec 128 | } 129 | -------------------------------------------------------------------------------- /vcs/vcs-agg.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | 7 | "github.com/alinush/go-mcl" 8 | "github.com/hyperproofs/gipa-go/batch" 9 | "github.com/hyperproofs/gipa-go/cm" 10 | "github.com/hyperproofs/gipa-go/utils" 11 | ) 12 | 13 | func (vcs *VCS) GenAggGipa() { 14 | 15 | { 16 | mn := 
uint64(MAX_AGG_SIZE) // short circuiting things 17 | ck, kzg1, kzg2 := cm.IPPSetupKZG(mn, vcs.alpha, vcs.beta, vcs.G, vcs.H) 18 | cm.IPPSaveCmKzg(ck, kzg1, kzg2, vcs.folderPath) 19 | } 20 | vcs.LoadAggGipa() 21 | } 22 | 23 | func (self *VCS) LoadAggGipa() { 24 | 25 | L := uint64(self.L) 26 | limit := L * self.TxnLimit 27 | 28 | self.MN = utils.NextPowOf2(limit) 29 | 30 | self.ck, self.kzg1, self.kzg2 = cm.IPPCMLoadCmKzg(self.MN, self.folderPath) 31 | self.aggProver = batch.Prover{} 32 | self.aggVerifier = batch.Verifier{} 33 | 34 | self.nDiff = int64(uint64(math.Ceil(float64(self.MN)/float64(L))) - self.TxnLimit) // This is the size of padding for P and Q vector (gipa) 35 | self.mnDiff = int64(self.MN - (L * self.TxnLimit)) // This is the size of padding for A and B vector (gipa) 36 | 37 | fmt.Println("Size:", len(self.ck.V), len(self.ck.W), len(self.kzg1.PK), len(self.kzg1.VK), len(self.kzg2.PK), len(self.kzg2.VK)) 38 | fmt.Println("padding:", self.nDiff, self.mnDiff) 39 | } 40 | 41 | // This resets the variable MN and txnLimit. 42 | // Be sure to load the data from disk 43 | func (self *VCS) ResizeAgg(txnLimit uint64) { 44 | L := uint64(self.L) 45 | self.TxnLimit = txnLimit 46 | limit := L * self.TxnLimit 47 | self.MN = utils.NextPowOf2(limit) 48 | } 49 | 50 | func (vcs *VCS) AggProve(indexVec []uint64, proofVec [][]mcl.G1) batch.Proof { 51 | 52 | var A []mcl.G1 53 | var B []mcl.G2 54 | txnLimit := int(vcs.TxnLimit) 55 | L := int(vcs.L) 56 | 57 | if len(indexVec) != txnLimit || len(proofVec) != txnLimit { 58 | panic("AggProof: Vectors are not of the expected size") 59 | } 60 | 61 | for t := range proofVec { 62 | if len(proofVec[t]) != L { 63 | panic(fmt.Sprintf("Bad proof: %d", t)) 64 | } 65 | A = append(A, proofVec[t]...) 
66 | } 67 | 68 | var binary []bool 69 | b := make([]mcl.G2, vcs.L) 70 | for t := range indexVec { 71 | binary = ToBinary(indexVec[t], vcs.L) 72 | for i := 0; i < L; i++ { 73 | if binary[i] == true { 74 | // mcl.G2Sub(&b[i], &vcs.VRK[i], &vcs.H) 75 | b[i] = vcs.VRKSubOneRev[i] 76 | } else { 77 | b[i] = vcs.VRK[i] 78 | } 79 | } 80 | B = append(B, b...) 81 | } 82 | 83 | aPad := make([]mcl.G1, vcs.mnDiff) 84 | bPad := make([]mcl.G2, vcs.mnDiff) 85 | A = append(A, aPad...) 86 | B = append(B, bPad...) 87 | 88 | vcs.aggProver.Init(uint32(vcs.L), uint32(vcs.TxnLimit+uint64(vcs.nDiff)), vcs.MN, &vcs.ck, &vcs.kzg1, &vcs.kzg2, A, B) 89 | 90 | proof := vcs.aggProver.Prove() 91 | return proof 92 | } 93 | 94 | func (vcs *VCS) AggVerify(proof batch.Proof, digest mcl.G1, indexVec []uint64, a_i []mcl.Fr) bool { 95 | 96 | txnLimit := int(vcs.TxnLimit) 97 | L := int(vcs.L) 98 | 99 | if len(indexVec) != txnLimit || len(a_i) != txnLimit { 100 | panic("AggProof: Vectors are not of the expected size") 101 | } 102 | 103 | P := make([]mcl.G1, txnLimit) 104 | Q := make([]mcl.G2, txnLimit) 105 | var p mcl.G1 // temp variables 106 | 107 | for t := range a_i { 108 | mcl.G1Mul(&p, &vcs.G, &a_i[t]) 109 | mcl.G1Sub(&p, &digest, &p) 110 | P[t] = p 111 | Q[t] = vcs.H 112 | } 113 | 114 | pPad := make([]mcl.G1, vcs.nDiff) 115 | qPad := make([]mcl.G2, vcs.nDiff) 116 | P = append(P, pPad...) 117 | Q = append(Q, qPad...) 118 | 119 | var B []mcl.G2 120 | var binary []bool 121 | b := make([]mcl.G2, vcs.L) 122 | for t := range indexVec { 123 | binary = ToBinary(indexVec[t], vcs.L) 124 | for i := 0; i < L; i++ { 125 | if binary[i] == true { 126 | // mcl.G2Sub(&b[i], &vcs.VRK[i], &vcs.H) 127 | b[i] = vcs.VRKSubOneRev[i] 128 | } else { 129 | b[i] = vcs.VRK[i] 130 | } 131 | } 132 | B = append(B, b...) 133 | } 134 | bPad := make([]mcl.G2, vcs.mnDiff) 135 | B = append(B, bPad...) 
136 | 137 | // fmt.Println("Agg Verifier", L, vcs.N, len(P)) 138 | vcs.aggVerifier.Init(uint32(L), uint32(vcs.TxnLimit+uint64(vcs.nDiff)), vcs.MN, vcs.ck.W, &vcs.kzg1, &vcs.kzg2, P, Q, B) 139 | status := vcs.aggVerifier.VerifyEdrax(proof) 140 | // status := vcs.aggVerifier.Verify(proof, P, Q, B) 141 | return status 142 | } 143 | -------------------------------------------------------------------------------- /vcs/vcs-fake.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "math/rand" 5 | 6 | "github.com/alinush/go-mcl" 7 | ) 8 | 9 | // Generate the trapdoors 10 | // Generate the keys for aggregation 11 | // PRK and UPK is generated only during runtime using GenUpkFake 12 | func (vcs *VCS) KeyGenFake(ncores uint8, L uint8, folder string, txnLimit uint64) { 13 | 14 | NCORES = ncores // Maximum number threads created. Set this to number of available cores. 15 | vcs.Init(L, folder, txnLimit) 16 | vcs.TrapdoorsGen() 17 | vcs.GenAggGipa() 18 | } 19 | 20 | func (vcs *VCS) KeyGenLoadFake(ncores uint8, L uint8, folder string, txnLimit uint64) { 21 | NCORES = ncores 22 | vcs.Init(L, folder, txnLimit) 23 | vcs.LoadTrapdoor(L) 24 | // vcs.LoadAggGipa() // No need to load this. We'll explicitly load this during every run. 25 | } 26 | 27 | // Given an index in the vector, get its upk. This goes from top of the tree to leaf. 
28 | func (vcs *VCS) GenUpkFake(index uint64) []mcl.G1 { 29 | L := vcs.L 30 | upk := make([]mcl.G1, L) 31 | for i := uint8(0); i < L; i++ { 32 | exp := vcs.SelectUPK(L-i, index) 33 | mcl.G1Mul(&upk[L-i-1], &vcs.G, &exp) 34 | } 35 | return upk 36 | } 37 | 38 | // Goal is to generate a proof tree using trapdoors 39 | func (vcs *VCS) GenProofsTreeFake(count uint64) (mcl.G1, []uint64, []mcl.Fr, map[uint64][]mcl.G1, [][]mcl.G1, []map[uint64]mcl.G1) { 40 | N := int64(vcs.N) 41 | indexVec := make([]uint64, count) 42 | fakeQTree := make([]map[uint64]mcl.Fr, vcs.L) // Each slice index is level and each position contains DB of sub-indices. 43 | proofTree := make([]map[uint64]mcl.G1, vcs.L) // Each slice index is level and each position contains DB of sub-indices. 44 | proofs_db := make(map[uint64][]mcl.G1) // Misnomer. It is a DB. Each vector index is a key and values are the proof (from 0th quotient). 45 | a_i := make([]mcl.Fr, count) 46 | var f_a mcl.Fr 47 | var digest mcl.G1 48 | 49 | // Genrate random indices 50 | for k := uint64(0); k < count; k++ { 51 | id := uint64(rand.Int63n(N)) // Pick an index from the saved list of vector positions. Could contain duplicates as well. 52 | indexVec[k] = id 53 | } 54 | 55 | for l := uint8(0); l < vcs.L; l++ { 56 | fakeQTree[l] = make(map[uint64]mcl.Fr) 57 | proofTree[l] = make(map[uint64]mcl.G1) 58 | } 59 | 60 | f_a.Random() 61 | mcl.G1Mul(&digest, &vcs.G, &f_a) 62 | 63 | // Set f_a. Populate random quotients and compute the vector element. 
64 | for k := uint64(0); k < count; k++ { 65 | var id uint64 66 | var rhs mcl.Fr 67 | id = indexVec[k] 68 | binary := ToBinary(id, vcs.L) 69 | proofs := make([]mcl.G1, vcs.L) 70 | rhs.SetInt64(0) 71 | 72 | for l := uint8(0); l < vcs.L; l++ { 73 | id = id >> 1 74 | q, ok := fakeQTree[vcs.L-l-1][id] 75 | pi, _ := proofTree[vcs.L-l-1][id] 76 | if !ok { 77 | q.Random() 78 | fakeQTree[vcs.L-l-1][id] = q 79 | 80 | mcl.G1Mul(&pi, &vcs.G, &q) 81 | proofTree[vcs.L-l-1][id] = pi 82 | } 83 | 84 | proofs[l] = pi 85 | 86 | var tmp mcl.Fr 87 | if binary[l] { 88 | mcl.FrMul(&tmp, &q, &vcs.trapdoorsSubOneRev[l]) 89 | } else { 90 | mcl.FrMul(&tmp, &q, &vcs.trapdoors[l]) 91 | } 92 | mcl.FrAdd(&rhs, &rhs, &tmp) 93 | 94 | } 95 | mcl.FrSub(&a_i[k], &f_a, &rhs) 96 | 97 | _, ok := proofs_db[indexVec[k]] 98 | if !ok { 99 | proofs_db[indexVec[k]] = proofs 100 | } 101 | } 102 | 103 | // Build the upk db for the subvector. Key: Position in the vector. Value is the UPK vector for that index. 104 | upk_db := make(map[uint64][]mcl.G1) 105 | for k := uint64(0); k < count; k++ { 106 | _, ok := upk_db[indexVec[k]] 107 | if !ok { 108 | upk_i := vcs.GenUpkFake(indexVec[k]) 109 | upk_db[indexVec[k]] = upk_i 110 | } 111 | } 112 | 113 | proofVec := GetProofVecFromDb(proofs_db, indexVec) 114 | return digest, indexVec, a_i, upk_db, proofVec, proofTree 115 | } 116 | -------------------------------------------------------------------------------- /vcs/vcs-helper.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "github.com/alinush/go-mcl" 5 | ) 6 | 7 | var PRKNAME string 8 | var VRKNAME string 9 | var UPKNAME string 10 | var TRAPDOORNAME string 11 | var NFILES uint8 12 | var NCORES uint8 13 | 14 | const MAX_AGG_SIZE = 1 << 19 15 | const SEP = "\n========================================================================================" 16 | 17 | // Allocate space for UPK. 
18 | // This is not done with Init, as UPK and PRK may not fit in memory together. 19 | func (vcs *VCS) MallocUpk() { 20 | vcs.UPK = make([][]mcl.G1, vcs.L+1) 21 | for i := uint64(0); i < uint64(vcs.L+1); i++ { 22 | vcs.UPK[i] = make([]mcl.G1, 1< (s_3 * s_1) 28 | // It is a worker to compute g, g^{s_1}, g^{s_2}, g^{s_1 s_2}, g^{s_3}, , g^{s_3 s_1} ... 29 | func (vcs *VCS) SelectPRK(index uint64) mcl.Fr { 30 | var prod mcl.Fr 31 | var out mcl.Fr 32 | prod.SetInt64(1) 33 | // fmt.Print(index, " ") 34 | for i := uint8(0); i < vcs.L; i++ { 35 | // fmt.Print(index&vcs.pow2[i], " ") 36 | if index&vcs.pow2[i] != 0 { 37 | out = vcs.trapdoors[i] 38 | mcl.FrMul(&prod, &prod, &out) 39 | } 40 | } 41 | // fmt.Println() 42 | // fmt.Println(prod) 43 | return prod 44 | } 45 | 46 | // Compute the UPK located in a specific coordinate of the tree 47 | // Say 3, 3 => g^{} 48 | func (vcs *VCS) SelectUPK(L uint8, index uint64) mcl.Fr { 49 | if L > vcs.L { 50 | panic("Select UPK error") 51 | } 52 | 53 | var prod mcl.Fr 54 | var out mcl.Fr 55 | prod.SetInt64(1) 56 | 57 | for i := uint8(0); i < L; i++ { 58 | if index&vcs.pow2[i] == 0 { 59 | out = vcs.trapdoorsSubOne[i] 60 | } else { 61 | out = vcs.trapdoors[i] 62 | } 63 | mcl.FrMul(&prod, &prod, &out) 64 | } 65 | return prod 66 | } 67 | 68 | func (vcs *VCS) VerifyUPK(index uint64, upkProof []mcl.G1) bool { 69 | 70 | binary := ToBinary(index, vcs.L) 71 | var temp1, temp2, result mcl.G1 72 | var r mcl.Fr 73 | r.SetInt64(1) 74 | if binary[0] == false { 75 | temp1 = vcs.UPK[1][0] 76 | } else { 77 | temp1 = vcs.UPK[1][1] 78 | } 79 | 80 | // Linear Combination is an option 81 | lhsP := make([]mcl.G1, vcs.L-1) 82 | lhsQ := make([]mcl.G2, vcs.L-1) 83 | // rhsP := make([]mcl.G1, vcs.L-1) 84 | // rhsQ := make([]mcl.G2, vcs.L-1) 85 | 86 | for i := uint8(1); i < vcs.L; i++ { 87 | r.Random() 88 | if i == 1 { 89 | mcl.G1Mul(&lhsP[i-1], &temp1, &r) 90 | } else { 91 | mcl.G1Mul(&lhsP[i-1], &upkProof[i-1], &r) 92 | } 93 | 94 | if binary[i] == false { 95 | 
lhsQ[i-1] = vcs.VRKSubOne[i] 96 | } else { 97 | lhsQ[i-1] = vcs.VRK[i] 98 | } 99 | 100 | mcl.G1Mul(&temp2, &upkProof[i], &r) 101 | mcl.G1Add(&result, &result, &temp2) 102 | } 103 | 104 | var lhs, rhs mcl.GT 105 | mcl.Pairing(&rhs, &result, &vcs.H) 106 | 107 | mcl.MillerLoopVec(&lhs, lhsP, lhsQ) 108 | mcl.FinalExp(&lhs, &lhs) 109 | return lhs.IsEqual(&rhs) 110 | } 111 | 112 | func IsEqual(a, b *VCS) bool { 113 | 114 | if a.N != b.N { 115 | return false 116 | } 117 | if a.L != b.L { 118 | return false 119 | } 120 | 121 | if !a.G.IsEqual(&b.G) { 122 | return false 123 | } 124 | 125 | if !a.H.IsEqual(&b.H) { 126 | return false 127 | } 128 | 129 | if len(a.trapdoors) != len(b.trapdoors) { 130 | return false 131 | } 132 | 133 | if len(a.trapdoorsSubOne) != len(b.trapdoorsSubOne) { 134 | return false 135 | } 136 | 137 | if len(a.trapdoorsSubOneRev) != len(b.trapdoorsSubOneRev) { 138 | return false 139 | } 140 | 141 | if len(a.VRK) != len(b.VRK) { 142 | return false 143 | } 144 | 145 | if len(a.VRKSubOne) != len(b.VRKSubOne) { 146 | return false 147 | } 148 | 149 | if len(a.VRKSubOneRev) != len(b.VRKSubOneRev) { 150 | return false 151 | } 152 | 153 | status := true 154 | for i := 0; i < int(a.L); i++ { 155 | status = status && a.trapdoors[i].IsEqual(&b.trapdoors[i]) 156 | status = status && a.trapdoorsSubOne[i].IsEqual(&b.trapdoorsSubOne[i]) 157 | status = status && a.trapdoorsSubOneRev[i].IsEqual(&b.trapdoorsSubOneRev[i]) 158 | } 159 | 160 | for i := 0; i < int(a.L); i++ { 161 | status = status && a.VRK[i].IsEqual(&b.VRK[i]) 162 | status = status && a.VRKSubOne[i].IsEqual(&b.VRKSubOne[i]) 163 | status = status && a.VRKSubOneRev[i].IsEqual(&b.VRKSubOneRev[i]) 164 | if !status { 165 | return status 166 | } 167 | } 168 | if !status { 169 | return status 170 | } 171 | 172 | if !a.alpha.IsEqual(&b.alpha) { 173 | return false 174 | } 175 | 176 | if !a.beta.IsEqual(&b.beta) { 177 | return false 178 | } 179 | 180 | if !SliceIsEqual(a.PRK, b.PRK) { 181 | return false 182 | } 183 | 
184 | if len(a.UPK) != len(b.UPK) { 185 | return false 186 | } 187 | 188 | for i := 0; i < len(a.UPK); i++ { 189 | if !SliceIsEqual(a.UPK[i], b.UPK[i]) { 190 | return false 191 | } 192 | } 193 | return true 194 | } 195 | -------------------------------------------------------------------------------- /vcs/vcs-merkle_bench_test.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/alinush/go-mcl" 8 | "github.com/hyperproofs/gipa-go/batch" 9 | ) 10 | 11 | // Benchmark the aggregation for ell = 30. 12 | // These results are used for baseline comparison with Merkle aggregation using SNARKs. 13 | func BenchmarkVCSAgg(b *testing.B) { 14 | 15 | mcl.InitFromString("bls12-381") 16 | fmt.Println("Curve order", mcl.GetCurveOrder()) 17 | var L uint8 18 | 19 | ell := []uint8{30} // Change the tree height here 20 | txnExpo := []uint8{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14} 21 | txns := make([]uint64, len(txnExpo)) 22 | for i := range txnExpo { 23 | txns[i] = uint64(1) << txnExpo[i] 24 | } 25 | 26 | for loop := range ell { 27 | L = ell[loop] 28 | // N := uint64(1) << L 29 | 30 | vcs := VCS{} 31 | vcs.KeyGenLoadFake(16, L, "../pkvk-30", txns[len(txns)-1]) 32 | 33 | // digest, indexVec, valueVec, _, proofs_db := vcs.GenProofsFake(txns[0]) 34 | 35 | var status bool 36 | fmt.Println("Num txns: ", txns[len(txns)-1]) 37 | 38 | for iTxn := range txns { 39 | txn := txns[iTxn] 40 | digest, indexVec, valueVec, _, proofVec, _ := vcs.GenProofsTreeFake(txn) 41 | vcs.ResizeAgg(txn) 42 | vcs.LoadAggGipa() 43 | 44 | var aggProof batch.Proof 45 | var aggProofs []batch.Proof 46 | 47 | b.Run(fmt.Sprintf("%d/AggregateProve;%d", L, txn), func(b *testing.B) { 48 | for bn := 0; bn < b.N; bn++ { 49 | aggProof = vcs.AggProve(indexVec[:txn], proofVec[:txn]) 50 | b.StopTimer() 51 | aggProofs = append(aggProofs, aggProof) 52 | b.StartTimer() 53 | } 54 | }) 55 | 56 | status = true 57 | 
b.Run(fmt.Sprintf("%d/AggregateVerify;%d", L, txn), func(b *testing.B) { 58 | 59 | for bn := 0; bn < b.N; bn++ { 60 | aggProof, aggProofs = aggProofs[0], aggProofs[1:] 61 | status = status && vcs.AggVerify(aggProof, digest, indexVec[:txn], valueVec[:txn]) 62 | } 63 | if status == false { 64 | b.Errorf("Aggregation failed") 65 | } 66 | }) 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /vcs/vcs-micro_bench_test.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/alinush/go-mcl" 8 | "github.com/hyperproofs/gipa-go/batch" 9 | ) 10 | 11 | var ell = []uint8{10, 30, 28, 26, 24, 22} // Change the tree height here 12 | 13 | // Microbenchmarks for UpdateAllProofs, Ver ,agg, veragg 14 | // Block size = 1024 transcations. 15 | func BenchmarkPrunedVCSMicro(b *testing.B) { 16 | 17 | mcl.InitFromString("bls12-381") 18 | fmt.Println("Curve order", mcl.GetCurveOrder()) 19 | 20 | txns := []uint64{1024} 21 | 22 | for loop := range ell { 23 | for iTxn := range txns { 24 | benchmarkVCS(ell[loop], txns[iTxn], true, true, b) 25 | } 26 | } 27 | } 28 | 29 | func BenchmarkPrunedVCSMacro(b *testing.B) { 30 | 31 | mcl.InitFromString("bls12-381") 32 | fmt.Println("Curve order", mcl.GetCurveOrder()) 33 | 34 | txns := []uint64{1024} 35 | 36 | for loop := range ell { 37 | for iTxn := range txns { 38 | benchmarkVCS(ell[loop], 2*txns[iTxn], false, false, b) 39 | } 40 | } 41 | } 42 | 43 | func benchmarkVCS(L uint8, txn uint64, DoAgg bool, Micro bool, b *testing.B) { 44 | 45 | var status bool 46 | var basecost int 47 | vcs := VCS{} 48 | 49 | vcs.KeyGenLoadFake(16, L, "../pkvk-30", txn) 50 | digest, indexVec, valueVec, upk_db, proofVec, proofTree := vcs.GenProofsTreeFake(txn) 51 | 52 | deltaVec := make([]mcl.Fr, len(indexVec)) 53 | for i := range indexVec { 54 | deltaVec[i].Random() 55 | } 56 | 57 | 
b.Run(fmt.Sprintf("%d/UpdateComVec;%d", L, txn), func(b *testing.B) { 58 | b.ResetTimer() 59 | for bn := 0; bn < b.N; bn++ { 60 | digest = vcs.UpdateComVecDB(upk_db, digest, indexVec, deltaVec) 61 | b.StopTimer() 62 | // Since we are updating the digest, we need to keep track of the changes made as well. 63 | 64 | valueVec = SecondaryStateUpdate(indexVec, deltaVec, valueVec) 65 | b.StartTimer() 66 | } 67 | }) 68 | 69 | // Save a copy of the ProofTree so that it can be used for benchmarking UpdateProofTreeBulk 70 | proofTree_b := make([]map[uint64]mcl.G1, vcs.L) 71 | for i := range proofTree_b { 72 | proofTree_b[i] = make(map[uint64]mcl.G1) 73 | for k, v := range proofTree[i] { 74 | proofTree_b[i][k] = v 75 | } 76 | } 77 | 78 | b.Run(fmt.Sprintf("%d/UpdateProofTreeBulk;%d", L, txn), func(b *testing.B) { 79 | b.ResetTimer() 80 | for bn := 0; bn < b.N; bn++ { 81 | proofTree_b, basecost = vcs.UpdateProofTreeBulkDB(proofTree_b, upk_db, indexVec, deltaVec) 82 | } 83 | }) 84 | fmt.Println("UpdateProofTreeBulk (vcs-pruned.go): Unique Nodes:", basecost) 85 | 86 | if Micro { 87 | for i := range indexVec { 88 | proofVec[i] = vcs.GetProofPathDB(proofTree_b, indexVec[i]) 89 | } 90 | 91 | b.Run(fmt.Sprintf("%d/VerifyNaive;%d", L, txn), func(b *testing.B) { 92 | status = true 93 | for bn := 0; bn < b.N; bn++ { 94 | for i := range indexVec { 95 | status = status && vcs.Verify(digest, indexVec[i], valueVec[i], proofVec[i]) 96 | if !status { 97 | b.Errorf("Naive UpdateProofTree: Naive Verification Failed") 98 | } 99 | } 100 | } 101 | }) 102 | 103 | b.Run(fmt.Sprintf("%d/UpdateProofTreeNaive;%d", L, txn), func(b *testing.B) { 104 | b.ResetTimer() 105 | for bn := 0; bn < b.N; bn++ { 106 | for i := range indexVec { 107 | proofTree, _ = vcs.UpdateProofTreeBulkDB(proofTree, upk_db, indexVec[i:i+1], deltaVec[i:i+1]) 108 | } 109 | } 110 | }) 111 | 112 | for i := range indexVec { 113 | proofVec[i] = vcs.GetProofPathDB(proofTree, indexVec[i]) 114 | } 115 | 116 | 
b.Run(fmt.Sprintf("%d/VerifyMemoized;%d", L, txn), func(b *testing.B) { 117 | for bn := 0; bn < b.N; bn++ { 118 | status, basecost = vcs.VerifyMemoized(digest, indexVec, valueVec, proofVec) 119 | if !status { 120 | b.Errorf("Bulk UpdateProofTree: Fast Verification Failed") 121 | } 122 | } 123 | }) 124 | fmt.Println("VerifyMemoized (vcs.go): Unique Nodes:", basecost) 125 | } 126 | 127 | if DoAgg { 128 | vcs.ResizeAgg(txn) 129 | vcs.LoadAggGipa() 130 | 131 | var aggProof batch.Proof 132 | var aggProofs []batch.Proof 133 | 134 | b.Run(fmt.Sprintf("%d/AggregateProve;%d", L, txn), func(b *testing.B) { 135 | b.ResetTimer() 136 | for bn := 0; bn < b.N; bn++ { 137 | aggProof = vcs.AggProve(indexVec[:txn], proofVec[:txn]) 138 | b.StopTimer() 139 | aggProofs = append(aggProofs, aggProof) 140 | b.StartTimer() 141 | } 142 | }) 143 | 144 | b.Run(fmt.Sprintf("%d/AggregateVerify;%d", L, txn), func(b *testing.B) { 145 | b.ResetTimer() 146 | for bn := 0; bn < b.N; bn++ { 147 | b.StopTimer() 148 | aggProof, aggProofs = aggProofs[0], aggProofs[1:] 149 | b.StartTimer() 150 | status = status && vcs.AggVerify(aggProof, digest, indexVec[:txn], valueVec[:txn]) 151 | if status == false { 152 | b.Errorf("Aggregation failed") 153 | } 154 | } 155 | }) 156 | } 157 | 158 | } 159 | -------------------------------------------------------------------------------- /vcs/vcs-pruned.go: -------------------------------------------------------------------------------- 1 | // All the functionalities when only pruned proof and pruned UPK tree is available. 2 | // UPKs are technically not sent around as a tree, as UPK is fixed prefetching all UPKs for all indices of interest saves the time to fetch UPK from its tree. 3 | // Same trick does not work for proof trees as we need to update them often. 
4 | package vcs 5 | 6 | import ( 7 | "github.com/alinush/go-mcl" 8 | ) 9 | 10 | // Proof 1...l 11 | // Index 0 has one 0-variables, index L - 1 has L-1 variable 12 | func (vcs *VCS) GetProofPathDB(proofTree []map[uint64]mcl.G1, index uint64) []mcl.G1 { 13 | 14 | proof := make([]mcl.G1, vcs.L) 15 | id := index 16 | for j := uint8(0); j < vcs.L; j++ { 17 | id = id >> 1 // Hacky. Need to write docs for this. 18 | proof[j] = proofTree[vcs.L-j-1][id] 19 | } 20 | return proof 21 | } 22 | 23 | func (vcs *VCS) UpdateComVecDB(upk_db map[uint64][]mcl.G1, digest mcl.G1, updateindex []uint64, delta []mcl.Fr) mcl.G1 { 24 | N := len(updateindex) 25 | // if N != len(delta) { 26 | // fmt.Print("UpdateComVec: Error") 27 | // } 28 | var result mcl.G1 29 | var temp mcl.G1 30 | upks := make([]mcl.G1, N) 31 | for i := 0; i < N; i++ { 32 | upks[i] = upk_db[updateindex[i]][vcs.L-1] 33 | } 34 | 35 | mcl.G1MulVec(&temp, upks, delta) 36 | mcl.G1Add(&result, &digest, &temp) 37 | return result 38 | } 39 | 40 | // This is analogous to the UpdateProofTree. UpdateProofTree uses the struct variable ProofTree to import all updates. 41 | // Where as this code updates a pruned Proof tree. 42 | // For ell = 30, it is not possible to store the entire proof tree in memory. 43 | // Thus a pruned proof tree is only stored in-memory. 
44 | func (vcs *VCS) UpdateProofTreeBulkDB(proofTree []map[uint64]mcl.G1, upk_db map[uint64][]mcl.G1, updateindexVec []uint64, deltaVec []mcl.Fr) ([]map[uint64]mcl.G1, int) { 45 | 46 | var q_i mcl.G1 47 | 48 | // Temporary variables 49 | var x, upk_i uint8 // These will serve as GPS in the tree 50 | var y uint64 // These will serve as GPS in the tree 51 | 52 | g1Db := make(map[TreeGPS][]mcl.G1) 53 | frDb := make(map[TreeGPS][]mcl.Fr) 54 | 55 | for t := range updateindexVec { 56 | updateindex := updateindexVec[t] 57 | delta := deltaVec[t] 58 | 59 | updateindexBinary := ToBinary(updateindex, vcs.L) // LSB first 60 | updateindexBinary = ReverseSliceBool(updateindexBinary) // MSB first 61 | 62 | upk := upk_db[updateindex] // Pop upk_{u,l} as it containts ell variables. 63 | upk = append([]mcl.G1{vcs.G}, upk[:len(upk)-1]...) // Since the root of the proof tree contains only ell - 1 variables, we need to pop the upk. 64 | 65 | L := vcs.L 66 | Y := FindTreeGPS(updateindex, int(L)) 67 | // Start from the top of the prooftree (which implies start from the bottom of the UPK tree) 68 | for i := uint8(0); i < L; i++ { 69 | x = i 70 | y = Y[i] 71 | upk_i = L - i - 1 72 | loc := TreeGPS{x, y} 73 | 74 | q_i = upk[upk_i] 75 | 76 | if !updateindexBinary[x] { 77 | mcl.G1Neg(&q_i, &q_i) 78 | } 79 | g1Db[loc] = append(g1Db[loc], q_i) 80 | frDb[loc] = append(frDb[loc], delta) 81 | } 82 | } 83 | 84 | q_i.Clear() // Re-init the variable 85 | for key := range g1Db { 86 | A := g1Db[key] 87 | B := frDb[key] 88 | 89 | proof_i := proofTree[key.level][key.index] 90 | 91 | mcl.G1MulVec(&q_i, A, B) 92 | var tmp mcl.G1 93 | mcl.G1Add(&tmp, &proof_i, &q_i) 94 | proofTree[key.level][key.index] = tmp 95 | } 96 | basecost := len(g1Db) 97 | return proofTree, basecost 98 | } 99 | -------------------------------------------------------------------------------- /vcs/vcs-pruned_test.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | 
"fmt" 5 | "testing" 6 | 7 | "github.com/alinush/go-mcl" 8 | "github.com/hyperproofs/gipa-go/batch" 9 | ) 10 | 11 | // Generate proofs using trapdoor for ell = 30 12 | // Check if the proofs verify. 13 | // Also check if the aggregation works. 14 | func TestVCSPruned(t *testing.T) { 15 | 16 | folderPath := fmt.Sprintf("../pkvk-30") 17 | L := uint8(30) 18 | txnscount := uint64(512) 19 | var status bool 20 | 21 | vcs := VCS{} 22 | // Besure to have the trapdoors and key generated. 23 | vcs.KeyGenLoadFake(16, L, folderPath, 1<<12) 24 | digest, indexVec, valueVec, upk_db, proofVec, proofTree := vcs.GenProofsTreeFake(txnscount) 25 | 26 | for i := range valueVec { 27 | if valueVec[i].IsZero() { 28 | fmt.Println("Index:", i, "is zero.") 29 | } 30 | } 31 | 32 | // Check indeed if the fake proofs verify. 33 | status = true 34 | t.Run(fmt.Sprintf("%d/NaiveVerify;%d", L, txnscount), func(t *testing.T) { 35 | status, _ = vcs.VerifyMemoized(digest, indexVec, valueVec, proofVec) 36 | if status == false { 37 | t.Errorf("Fast Verification Failed") 38 | } 39 | }) 40 | fmt.Println("Done checking the proofs.", len(upk_db)) 41 | 42 | t.Run(fmt.Sprintf("%d/GetProofPathDB;%d", L, txnscount), func(t *testing.T) { 43 | for i := range indexVec { 44 | proofVecChecker := vcs.GetProofPathDB(proofTree, indexVec[i]) 45 | if len(proofVecChecker) != len(proofVec[i]) { 46 | t.Errorf("Length of proof extracted from DB is not same as the baseline.") 47 | } 48 | for j := range proofVecChecker { 49 | if !proofVecChecker[j].IsEqual(&proofVec[i][j]) { 50 | out := fmt.Sprintf("Proofs extracted from the DB is not same as the baseline: i: %d indexVec[i]: %d proof[j]: %d", i, indexVec[i], j) 51 | t.Errorf(out) 52 | } 53 | } 54 | } 55 | }) 56 | 57 | deltaVec := make([]mcl.Fr, len(indexVec)) 58 | for i := range indexVec { 59 | deltaVec[i].Random() 60 | } 61 | 62 | digest = vcs.UpdateComVecDB(upk_db, digest, indexVec, deltaVec) 63 | proofTree, _ = vcs.UpdateProofTreeBulkDB(proofTree, upk_db, indexVec, deltaVec) 
64 | for i := range indexVec { 65 | proofVec[i] = vcs.GetProofPathDB(proofTree, indexVec[i]) 66 | } 67 | 68 | for i := range valueVec { 69 | mcl.FrAdd(&valueVec[i], &valueVec[i], &deltaVec[i]) 70 | } 71 | 72 | t.Run(fmt.Sprintf("%d/UpdateComAndTreeBulk;%d", L, txnscount), func(t *testing.T) { 73 | status, _ = vcs.VerifyMemoized(digest, indexVec, valueVec, proofVec) 74 | if status == false { 75 | t.Errorf("UpdateComAndTreeBulk: Fast Verification Failed") 76 | } 77 | }) 78 | 79 | // Just to be doubly sure, we are making sure that aggregation code is also fine with fake proofs. 80 | vcs.ResizeAgg(txnscount) 81 | vcs.LoadAggGipa() 82 | 83 | var aggProof batch.Proof 84 | var aggProofs []batch.Proof 85 | 86 | aggProof = vcs.AggProve(indexVec[:txnscount], proofVec[:txnscount]) 87 | aggProofs = append(aggProofs, aggProof) 88 | 89 | t.Run(fmt.Sprintf("%d/AggregateVerify;%d", L, txnscount), func(t *testing.T) { 90 | 91 | aggProof, aggProofs = aggProofs[0], aggProofs[1:] 92 | status = status && vcs.AggVerify(aggProof, digest, indexVec[:txnscount], valueVec[:txnscount]) 93 | 94 | if status == false { 95 | t.Errorf("Aggregation failed") 96 | } 97 | }) 98 | } 99 | -------------------------------------------------------------------------------- /vcs/vcs-save-load.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "math" 7 | "os" 8 | "path/filepath" 9 | "sync" 10 | 11 | "github.com/alinush/go-mcl" 12 | ) 13 | 14 | func (vcs *VCS) SaveTrapdoor() { 15 | 16 | fmt.Println(SEP, "Saving data to:", vcs.folderPath, SEP) 17 | 18 | os.MkdirAll(vcs.folderPath, os.ModePerm) 19 | f, err := os.Create(vcs.folderPath + TRAPDOORNAME) 20 | check(err) 21 | 22 | // Report the size. 
23 | LBytes := make([]byte, 8) // Enough space for 64 bits of interger 24 | binary.LittleEndian.PutUint64(LBytes, uint64(vcs.L)) 25 | _, err = f.Write(LBytes) 26 | check(err) 27 | 28 | // Write KZG stuff first, as it not related to VCS or size of the VCS. 29 | _, err = f.Write(vcs.alpha.Serialize()) 30 | check(err) 31 | _, err = f.Write(vcs.beta.Serialize()) 32 | check(err) 33 | 34 | // Write the Generator to the file 35 | _, err = f.Write(vcs.G.Serialize()) 36 | check(err) 37 | _, err = f.Write(vcs.H.Serialize()) 38 | check(err) 39 | 40 | // Write the trapdoors to the file. 41 | for i := range vcs.trapdoors { 42 | _, err = f.Write(vcs.trapdoors[i].Serialize()) 43 | check(err) 44 | _, err = f.Write(vcs.trapdoorsSubOne[i].Serialize()) 45 | check(err) 46 | _, err = f.Write(vcs.trapdoorsSubOneRev[i].Serialize()) 47 | check(err) 48 | } 49 | 50 | f.Close() 51 | fmt.Println(SEP, "Saved trapdoors", SEP) 52 | 53 | // Create a new file for VRK and write it. 54 | f, err = os.Create(vcs.folderPath + VRKNAME) 55 | check(err) 56 | 57 | for i := range vcs.VRK { 58 | _, err = f.Write(vcs.VRK[i].Serialize()) 59 | check(err) 60 | _, err = f.Write(vcs.VRKSubOne[i].Serialize()) 61 | check(err) 62 | _, err = f.Write(vcs.VRKSubOneRev[i].Serialize()) 63 | check(err) 64 | } 65 | f.Close() 66 | fmt.Println(SEP, "Saved VRK", SEP) 67 | 68 | } 69 | 70 | func (vcs *VCS) LoadTrapdoor(L uint8) { 71 | 72 | f, err := os.Open(vcs.folderPath + TRAPDOORNAME) 73 | check(err) 74 | 75 | // fileinfo, err := f.Stat() 76 | // check(err) 77 | // filesize := fileinfo.Size() // In Bytes 78 | // estimatedEll := (filesize - int64(GetG1ByteSize()+GetG2ByteSize())) / int64(GetFrByteSize()) / 2 79 | 80 | var data []byte 81 | 82 | data = make([]byte, 8) 83 | _, err = f.Read(data) 84 | check(err) 85 | reportedEll := uint8(binary.LittleEndian.Uint64(data)) 86 | 87 | if reportedEll < L { 88 | // Assumes SaveTrapdoor is honest 89 | panic(fmt.Sprintf("There is not enough to read! 
Found: %d, Wants: %d", reportedEll, L)) 90 | } 91 | 92 | // Load KZG related stuff 93 | data = make([]byte, GetFrByteSize()) 94 | _, err = f.Read(data) 95 | check(err) 96 | vcs.alpha.Deserialize(data) 97 | 98 | data = make([]byte, GetFrByteSize()) 99 | _, err = f.Read(data) 100 | check(err) 101 | vcs.beta.Deserialize(data) 102 | 103 | // Load the VCS related stuff 104 | data = make([]byte, GetG1ByteSize()) 105 | _, err = f.Read(data) 106 | check(err) 107 | vcs.G.Deserialize(data) 108 | 109 | data = make([]byte, GetG2ByteSize()) 110 | _, err = f.Read(data) 111 | check(err) 112 | vcs.H.Deserialize(data) 113 | 114 | vcs.L = uint8(L) 115 | 116 | fmt.Println("Loading trapdoors:", L) 117 | for i := uint8(0); i < L; i++ { 118 | 119 | data = make([]byte, GetFrByteSize()) 120 | _, err = f.Read(data) 121 | check(err) 122 | vcs.trapdoors[i].Deserialize(data) 123 | 124 | data = make([]byte, GetFrByteSize()) 125 | _, err = f.Read(data) 126 | check(err) 127 | vcs.trapdoorsSubOne[i].Deserialize(data) 128 | 129 | data = make([]byte, GetFrByteSize()) 130 | _, err = f.Read(data) 131 | check(err) 132 | vcs.trapdoorsSubOneRev[i].Deserialize(data) 133 | } 134 | 135 | f.Close() 136 | 137 | // Load VRKs 138 | f, err = os.Open(vcs.folderPath + VRKNAME) 139 | check(err) 140 | 141 | data = make([]byte, GetG2ByteSize()) 142 | for i := uint8(0); i < L; i++ { 143 | _, err = f.Read(data) 144 | check(err) 145 | vcs.VRK[i].Deserialize(data) 146 | 147 | _, err = f.Read(data) 148 | check(err) 149 | vcs.VRKSubOne[i].Deserialize(data) 150 | 151 | _, err = f.Read(data) 152 | check(err) 153 | vcs.VRKSubOneRev[i].Deserialize(data) 154 | } 155 | f.Close() 156 | } 157 | 158 | func (vcs *VCS) UpkLoad(fileName string, index uint8, start uint64, stop uint64, wg *sync.WaitGroup) { 159 | f, err := os.Open(fileName) 160 | check(err) 161 | 162 | data := make([]byte, GetG1ByteSize()) 163 | 164 | var result mcl.G1 165 | for j := start; j < stop; j++ { 166 | i, k := IndexInTheLevel(j) 167 | _, err = f.Read(data) 
168 | check(err) 169 | result.Deserialize(data) 170 | 171 | vcs.UPK[i][k] = result 172 | 173 | } 174 | fmt.Println("Read ", fileName, BoundsPrint(start, stop)) 175 | defer f.Close() 176 | defer wg.Done() 177 | } 178 | 179 | func (vcs *VCS) UpkLoadDriver() { 180 | 181 | // Allocate space for UPK 182 | vcs.MallocUpk() 183 | 184 | var wg sync.WaitGroup 185 | var step, start, stop uint64 186 | var total, totalBytes int64 187 | var i uint8 188 | 189 | var files []string 190 | var err error 191 | 192 | files, err = filepath.Glob(vcs.folderPath + "/upk*") 193 | check(err) 194 | totalBytes = int64(0) 195 | for i := range files { 196 | totalBytes += fileSize(files[i]) 197 | } 198 | total = totalBytes / int64(GetG1ByteSize()) 199 | step = uint64(math.Ceil(float64(total) / float64(NFILES))) 200 | 201 | numUPK := (uint64(1) << (vcs.L + 1)) - 1 202 | start = uint64(0) 203 | stop = step 204 | stop = minUint64(stop, numUPK) 205 | 206 | i = uint8(0) 207 | for start < numUPK { 208 | wg.Add(1) 209 | fileName := vcs.folderPath + fmt.Sprintf(UPKNAME, i) 210 | go vcs.UpkLoad(fileName, i, start, stop, &wg) 211 | // fmt.Println(i) 212 | // fmt.Println("Reading chuck range:", i, BoundsPrint(start, stop)) 213 | start += step 214 | stop += step 215 | stop = minUint64(stop, numUPK) 216 | i++ 217 | } 218 | wg.Wait() 219 | } 220 | 221 | func (vcs *VCS) PrkLoad(fileName string, index uint8, start uint64, stop uint64, wg *sync.WaitGroup) { 222 | 223 | f, err := os.Open(fileName) 224 | check(err) 225 | 226 | var data []byte 227 | data = make([]byte, GetG1ByteSize()) 228 | 229 | var result mcl.G1 230 | for i := start; i < stop; i++ { 231 | _, err = f.Read(data) 232 | check(err) 233 | result.Deserialize(data) 234 | vcs.PRK[i] = result 235 | } 236 | 237 | fmt.Println("Read ", fileName, BoundsPrint(start, stop)) 238 | defer f.Close() 239 | defer wg.Done() 240 | } 241 | 242 | func (vcs *VCS) PrkLoadDriver() { 243 | // Allocate space for PRK 244 | vcs.PRK = make([]mcl.G1, vcs.N) 245 | var wg 
sync.WaitGroup 246 | var step, start, stop uint64 247 | var total, totalBytes int64 248 | var i uint8 249 | 250 | var files []string 251 | var err error 252 | 253 | files, err = filepath.Glob(vcs.folderPath + "/prk*") 254 | check(err) 255 | totalBytes = int64(0) 256 | for i := range files { 257 | totalBytes += fileSize(files[i]) 258 | } 259 | total = totalBytes / int64(GetG1ByteSize()) 260 | step = uint64(math.Ceil(float64(total) / float64(NFILES))) 261 | 262 | upperBound := (uint64(1) << vcs.L) 263 | 264 | i = uint8(0) 265 | start = uint64(0) 266 | stop = minUint64(step, upperBound) 267 | for start < upperBound { 268 | wg.Add(1) 269 | fileName := vcs.folderPath + fmt.Sprintf(PRKNAME, i) 270 | go vcs.PrkLoad(fileName, i, start, stop, &wg) 271 | 272 | // fmt.Println(i, fmt.Sprintf("%05d %05d", start, stop)) 273 | start += step 274 | stop += step 275 | stop = minUint64(stop, upperBound) 276 | i++ 277 | } 278 | wg.Wait() 279 | } 280 | 281 | func (vcs *VCS) PrkUpkLoad() { 282 | fmt.Println(SEP) 283 | vcs.UpkLoadDriver() 284 | fmt.Println(SEP) 285 | if !vcs.DISCARD_PRK { 286 | vcs.PrkLoadDriver() 287 | fmt.Println(SEP) 288 | } 289 | } 290 | -------------------------------------------------------------------------------- /vcs/vcs-utils.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | 8 | "github.com/alinush/go-mcl" 9 | ) 10 | 11 | // Some global variables. Used in vcs.go 12 | // LSB first 13 | // 6 -> 0 1 1 14 | func ToBinary(index uint64, L uint8) []bool { 15 | binary := make([]bool, L) 16 | for i := uint8(0); i < L; i++ { 17 | if index%2 == 0 { 18 | binary[i] = false 19 | } else { 20 | binary[i] = true 21 | } 22 | index = index / 2 23 | } 24 | return binary 25 | } 26 | 27 | // Converts 1D to 2D 28 | // n = 7 the it corresponds to a value in [lg][start] in the tree. 
// Say, when indexed from 0, [10] element in a 1D array is same as [3][3]

// IndexInTheLevel converts the 0-based index n of a heap-style 1-D array
// into 2-D tree coordinates (level, offset-within-level), root at level 0.
// Example: n = 10 maps to level 3, offset 3.
func IndexInTheLevel(n uint64) (uint8, uint64) {
	// NOTE: n is uint64, so a lower-bound guard such as `n < 0` can never
	// fire (go vet reports it as an impossible comparison); the dead check
	// was removed.
	nPrime := n
	n = n + 1 // switch to 1-based heap numbering: level = floor(log2(n+1))
	start := uint64(1)
	lg := uint8(0)
	for n > 1 {
		n = n >> 1
		start = start << 1
		lg++
	}
	start = start - 1 // first 0-based index of level lg is 2^lg - 1
	return lg, nPrime - start
}

// check panics on any non-nil error; used for fatal I/O failures.
func check(e error) {
	if e != nil {
		panic(e)
	}
}

// GetFrByteSize returns the serialized size (in bytes) of an mcl.Fr element.
func GetFrByteSize() int {
	return 32
	// return mcl.GetFrByteSize()
}

// GetG1ByteSize returns the serialized size (in bytes) of an mcl.G1 element.
func GetG1ByteSize() int {
	return 48
	// return mcl.GetG1ByteSize()
}

// GetG2ByteSize returns the serialized size (in bytes) of an mcl.G2 element.
func GetG2ByteSize() int {
	return 96
	// return mcl.GetG2ByteSize()
}

// GetGTByteSize returns the serialized size (in bytes) of an mcl.GT element.
func GetGTByteSize() int {
	return 576
}

// min returns the smaller of a (widened to int64) and b.
func min(a uint8, b int64) int64 {
	A := int64(a)
	if A < b {
		return A
	}
	return b
}

// minUint64 returns the smaller of two uint64 values.
func minUint64(a uint64, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// fileSize returns the size of the file at path in bytes, terminating the
// process if the file cannot be stat'ed.
func fileSize(path string) int64 {
	fi, err := os.Stat(path)
	if err != nil {
		log.Fatal(err)
	}
	return fi.Size()
}

// BoundsPrint formats a [start, stop) range as two right-aligned columns.
func BoundsPrint(start, stop uint64) string {
	return fmt.Sprintf("%10d %10d", start, stop)
}

// Index at each level of the proof tree.
100 | // Ex: 33 will be [0:0, 1:0, 2:0, 3:1, 4:2, 5:4, 6:8, 7:16] 101 | func FindTreeGPS(k uint64, L int) []uint64 { 102 | 103 | yCoordinate := make([]uint64, L) 104 | for i := 0; i < L; i++ { 105 | k = k >> 1 106 | yCoordinate[L-i-1] = k 107 | } 108 | return yCoordinate 109 | } 110 | 111 | func ReverseSliceBool(a []bool) []bool { 112 | for left, right := 0, len(a)-1; left < right; left, right = left+1, right-1 { 113 | a[left], a[right] = a[right], a[left] 114 | } 115 | return a 116 | } 117 | 118 | func ReverseSliceUint64(a []uint64) []uint64 { 119 | for left, right := 0, len(a)-1; left < right; left, right = left+1, right-1 { 120 | a[left], a[right] = a[right], a[left] 121 | } 122 | return a 123 | } 124 | 125 | func SliceIsEqual(a, b []mcl.G1) bool { 126 | var status bool 127 | if len(a) != len(b) { 128 | return false 129 | } 130 | status = true 131 | for i := 0; i < len(a); i++ { 132 | status = status && a[i].IsEqual(&b[i]) 133 | if !status { 134 | fmt.Printf("Failed at %d index out of %d\n", i, len(a)-1) 135 | return status 136 | } 137 | } 138 | return true 139 | } 140 | -------------------------------------------------------------------------------- /vcs/vcs.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "github.com/alinush/go-mcl" 5 | "github.com/hyperproofs/gipa-go/batch" 6 | "github.com/hyperproofs/gipa-go/cm" 7 | "github.com/hyperproofs/kzg-go/kzg" 8 | ) 9 | 10 | type VCS struct { 11 | PRK []mcl.G1 // PRK is technically not needed for our experiments 12 | UPK [][]mcl.G1 // UPK forms a tree. The UPK API in code is different from the paper. 
13 | VRK []mcl.G2 14 | VRKSubOne []mcl.G2 // h^(1-s_1) 15 | VRKSubOneRev []mcl.G2 // h^(s_1 - 1) 16 | 17 | N uint64 18 | L uint8 19 | G mcl.G1 //Generator 20 | H mcl.G2 //Generator 21 | 22 | trapdoors []mcl.Fr 23 | trapdoorsSubOne []mcl.Fr // (1-s_1) 24 | trapdoorsSubOneRev []mcl.Fr // (s_1-1) 25 | pow2 []uint64 26 | alpha mcl.Fr // KZG + GIPA 27 | beta mcl.Fr // KZG + GIPA 28 | 29 | ProofTree [][]mcl.G1 // Figure 2 from the paper is illustrative of how the ProofTree is saved. 30 | // Note that lowest level f(w) is not saved in this tree. 31 | // Proof serving node saves this proof tree all the time. Unable to fit beyond 2^26 in memory. 32 | 33 | // GIPA Stuff 34 | MN uint64 // Power of 2 which is nearest to TxnLimit * L. In GIPA notation let M = L, n = TxnLimit = 1024 35 | ck cm.Ck // KZG + GIPA 36 | TxnLimit uint64 // Number of txns in a block that needs to be aggregated 37 | nDiff int64 // These many variables are used to pad the GIPA instance if L * TxnLimit is not a power of 2. 38 | mnDiff int64 // These many variables are used to pad the GIPA instance if L * TxnLimit is not a power of 2. 39 | 40 | // KZG Stuff 41 | kzg1 kzg.KZG1Settings // KZG + GIPA 42 | kzg2 kzg.KZG2Settings // KZG + GIPA 43 | 44 | folderPath string 45 | 46 | aggProver batch.Prover 47 | aggVerifier batch.Verifier 48 | 49 | DISCARD_PRK bool // We do not use: g, g^{s_1}, g^{s_2}, g^{s_1}{s_2}, g^{s_3}..... 50 | // Thus, PRK is discarded by default 51 | // UPK tree is enough for the prover 52 | PARAM_TOO_LARGE bool 53 | } 54 | 55 | // Instantiate a new vector commitment instance 56 | // Space for UPK and PRK will be created when keys are created and saved. 57 | // This reduces the memory footprint. 58 | func (vcs *VCS) Init(L uint8, folder string, txnLimit uint64) { 59 | 60 | NFILES = 16 61 | PRKNAME = "/prk-%02d.data" 62 | VRKNAME = "/vrk.data" 63 | TRAPDOORNAME = "/trapdoors.data" 64 | UPKNAME = "/upk-%02d.data" 65 | if L == 0 || L >= 32 { 66 | panic("KeyGen: Error. 
Either ell is 0 or >= 32") 67 | } 68 | 69 | vcs.folderPath = folder 70 | 71 | vcs.L = L 72 | vcs.N = uint64(1) << L 73 | 74 | vcs.pow2 = make([]uint64, L) 75 | for i := uint8(0); i < L; i++ { 76 | vcs.pow2[i] = 1 << i 77 | } 78 | 79 | // Allocate enough space for trapdoors 80 | vcs.trapdoors = make([]mcl.Fr, vcs.L) 81 | vcs.trapdoorsSubOne = make([]mcl.Fr, vcs.L) 82 | vcs.trapdoorsSubOneRev = make([]mcl.Fr, vcs.L) 83 | 84 | // Allocate enough space for VRK 85 | vcs.VRK = make([]mcl.G2, vcs.L) 86 | vcs.VRKSubOne = make([]mcl.G2, vcs.L) 87 | vcs.VRKSubOneRev = make([]mcl.G2, vcs.L) 88 | 89 | // Space for UPK is allocated during load 90 | 91 | vcs.TxnLimit = txnLimit 92 | 93 | if txnLimit*uint64(L) > MAX_AGG_SIZE { 94 | panic("Try with smaller block size") 95 | } 96 | 97 | vcs.DISCARD_PRK = true // It is assumed true by default. 98 | if L > 24 { 99 | vcs.PARAM_TOO_LARGE = true // When UPK and PRK is large, keys are just flushed to files without keeping it in memory. 100 | } 101 | } 102 | 103 | // Generate trapdoors for the VCS. Be sure to run this after ```Init```. 104 | func (vcs *VCS) TrapdoorsGen() { 105 | 106 | // Need to find a source of randomness and generate trapdoors 107 | // Need to seed the randomness 108 | 109 | // Sample generators 110 | vcs.G.Random() 111 | vcs.H.Random() 112 | 113 | // Generate trapdoors 114 | var frOne mcl.Fr 115 | frOne.SetInt64(1) 116 | for i := range vcs.trapdoors { 117 | vcs.trapdoors[i].Random() 118 | mcl.FrSub(&vcs.trapdoorsSubOne[i], &frOne, &vcs.trapdoors[i]) 119 | mcl.FrSub(&vcs.trapdoorsSubOneRev[i], &vcs.trapdoors[i], &frOne) 120 | } 121 | 122 | // Generate VRK: h^(s_1), h^(s_2), .... 123 | for i := range vcs.trapdoors { 124 | mcl.G2Mul(&vcs.VRK[i], &vcs.H, &vcs.trapdoors[i]) 125 | } 126 | 127 | // Generate VRKSubOne: h^(1-s_1), h^(1-s_2), .... 128 | for i := range vcs.trapdoorsSubOne { 129 | mcl.G2Mul(&vcs.VRKSubOne[i], &vcs.H, &vcs.trapdoorsSubOne[i]) 130 | } 131 | 132 | // Generate VRKSubOneRev: h^(s_1-1), h^(s_2-1), .... 
133 | for i := range vcs.trapdoorsSubOneRev { 134 | mcl.G2Mul(&vcs.VRKSubOneRev[i], &vcs.H, &vcs.trapdoorsSubOneRev[i]) 135 | } 136 | 137 | // Generate alpha and beta for KZG 138 | vcs.alpha.Random() 139 | vcs.beta.Random() 140 | 141 | vcs.SaveTrapdoor() 142 | } 143 | 144 | // Generates PRK VRK UPK etc 145 | // Use this only once to generate the parameters. 146 | func (vcs *VCS) KeyGen(ncores uint8, L uint8, folder string, txnLimit uint64) { 147 | 148 | NCORES = ncores // Maximum number threads created. Set this to number of available cores. 149 | vcs.Init(L, folder, txnLimit) // 150 | vcs.TrapdoorsGen() 151 | vcs.PrkUpkGen() 152 | vcs.GenAggGipa() 153 | } 154 | 155 | // Defacto entry to VCS. 156 | // Use this to load the files always 157 | func (vcs *VCS) KeyGenLoad(ncores uint8, L uint8, folder string, txnLimit uint64) { 158 | NCORES = ncores 159 | vcs.Init(L, folder, txnLimit) 160 | vcs.LoadTrapdoor(L) 161 | vcs.PrkUpkLoad() 162 | vcs.LoadAggGipa() 163 | } 164 | 165 | // Do not remove L from the parameters. 
I am using it OpenAll 166 | func (vcs *VCS) Commit(a []mcl.Fr, L uint64) mcl.G1 { 167 | var digest mcl.G1 168 | mcl.G1MulVec(&digest, vcs.UPK[L], a) // Not L - 1 as L = 0 has just vcs.G 169 | return digest 170 | } 171 | 172 | func (vcs *VCS) OpenAllRec(a []mcl.Fr, start uint64, end uint64, L uint8) { 173 | 174 | if end-start <= 1 { 175 | return 176 | } 177 | 178 | mid := (start + end) / 2 179 | bin := end - start 180 | index := start / bin 181 | 182 | aDiff := make([]mcl.Fr, mid-start) 183 | for i := uint64(0); i < mid-start; i++ { 184 | mcl.FrSub(&aDiff[i], &a[i+mid], &a[i+start]) 185 | } 186 | 187 | result := vcs.Commit(aDiff, uint64(L-1)) 188 | vcs.ProofTree[vcs.L-L][index] = result 189 | 190 | vcs.OpenAllRec(a, start, mid, L-1) 191 | vcs.OpenAllRec(a, mid, end, L-1) 192 | } 193 | 194 | func (vcs *VCS) OpenAll(a []mcl.Fr) { 195 | 196 | vcs.ProofTree = make([][]mcl.G1, vcs.L) 197 | for i := uint8(0); i < vcs.L; i++ { 198 | vcs.ProofTree[i] = make([]mcl.G1, 1<> 1 // Hacky. Need to write docs for this. 211 | proof[j] = vcs.ProofTree[vcs.L-j-1][id] 212 | // fmt.Println(index, vcs.L-j-1, k) 213 | } 214 | return proof 215 | } 216 | 217 | type TreeGPS struct { 218 | level uint8 // Root is level 0 219 | index uint64 220 | } 221 | 222 | func (vcs *VCS) Verify(digest mcl.G1, index uint64, a_i mcl.Fr, proof []mcl.G1) bool { 223 | 224 | if len(proof) != int(vcs.L) { 225 | panic("Verify: Bad proof!") 226 | } 227 | 228 | // temp variables 229 | var rhs mcl.GT 230 | var p mcl.G1 231 | var ps []mcl.G1 232 | var qs []mcl.G2 233 | 234 | ps = make([]mcl.G1, vcs.L+1) 235 | qs = make([]mcl.G2, vcs.L+1) 236 | binary := ToBinary(index, vcs.L) 237 | for i := uint8(0); i < vcs.L; i++ { 238 | if binary[i] { 239 | qs[i] = vcs.VRKSubOneRev[i] 240 | } else { 241 | qs[i] = vcs.VRK[i] 242 | } 243 | ps[i] = proof[i] 244 | } 245 | 246 | // Move e(digest/g^{a_i}, h) to other side. 
Thus it will be e(g^{a_i}/digest, h) 247 | mcl.G1Mul(&p, &vcs.G, &a_i) 248 | mcl.G1Sub(&p, &p, &digest) 249 | ps[vcs.L] = p 250 | qs[vcs.L] = vcs.H 251 | 252 | mcl.MillerLoopVec(&rhs, ps, qs) 253 | mcl.FinalExp(&rhs, &rhs) 254 | return rhs.IsOne() 255 | } 256 | 257 | func (vcs *VCS) VerifyMemoized(digest mcl.G1, indexVec []uint64, a_i []mcl.Fr, proofVec [][]mcl.G1) (bool, int) { 258 | 259 | // fmt.Println(vcs.VRKSubOneRev[i].IsEqual(&qs[i]), vcs.VRKSubOneRev[i].IsZero()) 260 | 261 | if len(proofVec) != len(indexVec) { 262 | panic("Verify: Bad proof!") 263 | } 264 | 265 | var p mcl.G1 // temp variables 266 | var lhs mcl.GT // temp variables 267 | var tempG2 mcl.G2 268 | var prod mcl.GT 269 | var result mcl.GT 270 | prod.SetInt64(1) 271 | 272 | db := make(map[TreeGPS]mcl.GT) 273 | status := true 274 | for t := range proofVec { 275 | proof := proofVec[t] 276 | index := indexVec[t] 277 | 278 | binary := ToBinary(index, vcs.L) 279 | prod.SetInt64(1) 280 | 281 | for i := vcs.L; i > 0; i-- { 282 | loc := TreeGPS{i, index} 283 | 284 | result1, keyStatus := db[loc] 285 | if keyStatus == false { 286 | if binary[vcs.L-i] == true { 287 | tempG2 = vcs.VRKSubOneRev[vcs.L-i] 288 | } else { 289 | tempG2 = vcs.VRK[vcs.L-i] 290 | } 291 | // mcl.Pairing(&result, &proof[vcs.L-i], &tempG2) 292 | mcl.MillerLoop(&result, &proof[vcs.L-i], &tempG2) 293 | db[loc] = result 294 | mcl.GTMul(&prod, &prod, &result) 295 | } else { 296 | mcl.GTMul(&prod, &prod, &result1) 297 | } 298 | index = index >> 1 299 | } 300 | 301 | mcl.G1Mul(&p, &vcs.G, &a_i[t]) 302 | mcl.G1Sub(&p, &p, &digest) 303 | mcl.MillerLoop(&lhs, &p, &vcs.H) 304 | mcl.GTMul(&prod, &prod, &lhs) 305 | 306 | mcl.FinalExp(&prod, &prod) 307 | 308 | status = status && prod.IsOne() 309 | } 310 | return status, len(db) 311 | } 312 | 313 | func (vcs *VCS) UpdateCom(digest mcl.G1, updateindex uint64, delta mcl.Fr) mcl.G1 { 314 | var temp mcl.G1 315 | var result mcl.G1 316 | mcl.G1Mul(&temp, &vcs.UPK[vcs.L][updateindex], &delta) 317 | 
mcl.G1Add(&result, &digest, &temp) 318 | return result 319 | } 320 | 321 | func (vcs *VCS) UpdateComVec(digest mcl.G1, updateindex []uint64, delta []mcl.Fr) mcl.G1 { 322 | N := len(updateindex) 323 | // if N != len(delta) { 324 | // fmt.Print("UpdateComVec: Error") 325 | // } 326 | var result mcl.G1 327 | var temp mcl.G1 328 | upks := make([]mcl.G1, N) 329 | for i := 0; i < N; i++ { 330 | upks[i] = vcs.UPK[vcs.L][updateindex[i]] 331 | } 332 | 333 | mcl.G1MulVec(&temp, upks, delta) 334 | mcl.G1Add(&result, &digest, &temp) 335 | return result 336 | } 337 | 338 | func (vcs *VCS) UpdateProof(proof []mcl.G1, localindex uint64, updateindex uint64, delta mcl.Fr) []mcl.G1 { 339 | 340 | newProof := make([]mcl.G1, len(proof)) 341 | copy(newProof, proof) 342 | var temp mcl.G1 343 | updateindexBinary := ToBinary(updateindex, vcs.L) // LSB first 344 | localindexBinary := ToBinary(localindex, vcs.L) // LSB first 345 | // upk := vcs.UPK[updateindex] 346 | upk := vcs.GetUpk(updateindex) 347 | L := int(vcs.L) 348 | for i := L; i > 0; i-- { 349 | if i-1 > 0 { 350 | mcl.G1Mul(&temp, &upk[i-2], &delta) 351 | } else { 352 | mcl.G1Mul(&temp, &vcs.G, &delta) 353 | } 354 | if updateindexBinary[i-1] == false && localindexBinary[i-1] == true { 355 | mcl.G1Sub(&newProof[i-1], &proof[i-1], &temp) 356 | break 357 | } else if updateindexBinary[i-1] == true && localindexBinary[i-1] == false { 358 | mcl.G1Add(&newProof[i-1], &proof[i-1], &temp) 359 | break 360 | } else if updateindexBinary[i-1] == false && localindexBinary[i-1] == false { 361 | mcl.G1Sub(&newProof[i-1], &proof[i-1], &temp) 362 | } else { 363 | mcl.G1Add(&newProof[i-1], &proof[i-1], &temp) 364 | } 365 | } 366 | return newProof 367 | } 368 | 369 | func (vcs *VCS) UpdateProofTree(updateindex uint64, delta mcl.Fr) { 370 | 371 | var q_i mcl.G1 372 | updateindexBinary := ToBinary(updateindex, vcs.L) // LSB first 373 | updateindexBinary = ReverseSliceBool(updateindexBinary) // MSB first 374 | 375 | upk := vcs.GetUpk(updateindex) // Pop 
upk_{u,l} as it containts ell variables. 376 | upk = append([]mcl.G1{vcs.G}, upk[:len(upk)-1]...) // Since the root of the proof tree contains only ell - 1 variables, we need to pop the upk. 377 | 378 | L := int(vcs.L) 379 | Y := FindTreeGPS(updateindex, L) 380 | 381 | var x, upk_i int // These will serve as GPS in the tree 382 | var y uint64 // These will serve as GPS in the tree 383 | 384 | // Start from the top of the prooftree (which implies start from the bottom of the UPK tree) 385 | for i := 0; i < L; i++ { 386 | x = i 387 | y = Y[i] 388 | upk_i = L - i - 1 389 | mcl.G1Mul(&q_i, &upk[upk_i], &delta) 390 | 391 | if updateindexBinary[x] { 392 | mcl.G1Add(&vcs.ProofTree[x][y], &vcs.ProofTree[x][y], &q_i) 393 | } else { 394 | mcl.G1Sub(&vcs.ProofTree[x][y], &vcs.ProofTree[x][y], &q_i) 395 | } 396 | } 397 | } 398 | 399 | func (vcs *VCS) UpdateProofTreeBulk(updateindexVec []uint64, deltaVec []mcl.Fr) int { 400 | 401 | // ProofTree[][] 402 | var q_i mcl.G1 403 | 404 | var x, upk_i uint8 // These will serve as GPS in the tree 405 | var y uint64 // These will serve as GPS in the tree 406 | 407 | g1Db := make(map[TreeGPS][]mcl.G1) 408 | frDb := make(map[TreeGPS][]mcl.Fr) 409 | 410 | for t := range updateindexVec { 411 | updateindex := updateindexVec[t] 412 | delta := deltaVec[t] 413 | 414 | updateindexBinary := ToBinary(updateindex, vcs.L) // LSB first 415 | updateindexBinary = ReverseSliceBool(updateindexBinary) // MSB first 416 | 417 | upk := vcs.GetUpk(updateindex) // Pop upk_{u,l} as it containts ell variables. 418 | upk = append([]mcl.G1{vcs.G}, upk[:len(upk)-1]...) // Since the root of the proof tree contains only ell - 1 variables, we need to pop the upk. 
419 | 420 | L := vcs.L 421 | Y := FindTreeGPS(updateindex, int(L)) 422 | // Start from the top of the prooftree (which implies start from the bottom of the UPK tree) 423 | for i := uint8(0); i < L; i++ { 424 | x = i 425 | y = Y[i] 426 | upk_i = L - i - 1 427 | loc := TreeGPS{x, y} 428 | 429 | q_i = upk[upk_i] 430 | 431 | if !updateindexBinary[x] { 432 | mcl.G1Neg(&q_i, &q_i) 433 | } 434 | g1Db[loc] = append(g1Db[loc], q_i) 435 | frDb[loc] = append(frDb[loc], delta) 436 | } 437 | } 438 | 439 | for key := range g1Db { 440 | A := g1Db[key] 441 | B := frDb[key] 442 | mcl.G1MulVec(&q_i, A, B) 443 | mcl.G1Add(&vcs.ProofTree[key.level][key.index], &vcs.ProofTree[key.level][key.index], &q_i) 444 | } 445 | return len(g1Db) 446 | } 447 | 448 | // g^{(1-s_1)}, g^{(1-s_2)(1-s_1)}, g^{(1-s_3)(1-s_2)(1-s_1)} 449 | func (vcs *VCS) GetUpk(i uint64) []mcl.G1 { 450 | 451 | k := i 452 | upk := make([]mcl.G1, vcs.L) 453 | for j := uint8(vcs.L); j > 0; j-- { 454 | k = k & (^(1 << j)) // Clears the jth bit of k. Technically everything before jth and before has to be cleared. 455 | upk[j-1] = vcs.UPK[j][k] 456 | } 457 | return upk 458 | } 459 | -------------------------------------------------------------------------------- /vcs/vcs_test.go: -------------------------------------------------------------------------------- 1 | package vcs 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "testing" 7 | 8 | "github.com/alinush/go-mcl" 9 | "github.com/hyperproofs/gipa-go/batch" 10 | ) 11 | 12 | // Basic unit test cases for testing VCS functionality. 13 | func TestVCS(t *testing.T) { 14 | 15 | // This method assumes that VRK, UPK are already computed and saved on disk. 16 | // Be sure to run the run vcs.KeyGen if PRK, VRK, UPK keys are not computed. 
17 | 18 | mcl.InitFromString("bls12-381") 19 | fmt.Println("Curve order", mcl.GetCurveOrder()) 20 | // Get K random positions in the tree 21 | var L uint8 22 | K := 21 // Number of transactions 23 | txnLimit := uint64(K) 24 | ell := []uint8{16} 25 | 26 | for loop := range ell { 27 | L = ell[loop] 28 | N := uint64(1) << L 29 | 30 | vcs := VCS{} 31 | vcs.KeyGenLoad(16, L, "../pkvk-17", txnLimit) 32 | 33 | indexVec := make([]uint64, K) // List of indices that chanaged (there can be duplicates.) 34 | proofVec := make([][]mcl.G1, K) // Proofs of the changed indices. 35 | deltaVec := make([]mcl.Fr, K) // Magnitude of the changes. 36 | valueVec := make([]mcl.Fr, K) // Current value in that position. 37 | 38 | var digest mcl.G1 39 | var status bool 40 | 41 | { 42 | aFr := GenerateVector(N) 43 | digest = vcs.Commit(aFr, uint64(L)) 44 | vcs.OpenAll(aFr) 45 | 46 | for k := 0; k < K; k++ { 47 | indexVec[k] = uint64(rand.Intn(int(N))) // Can contain duplicates 48 | proofVec[k] = vcs.GetProofPath(indexVec[k]) 49 | deltaVec[k].Random() 50 | valueVec[k] = aFr[indexVec[k]] 51 | } 52 | } 53 | 54 | t.Run(fmt.Sprintf("%d/VerifyNaive;", L), func(t *testing.T) { 55 | status = true 56 | var loc uint64 57 | 58 | for k := 0; k < K; k++ { 59 | loc = indexVec[k] 60 | // status = status && vcs.Verify(digest, loc, valueMap[loc], proofVec[k]) 61 | status = status && vcs.Verify(digest, loc, valueVec[k], proofVec[k]) 62 | if status == false { 63 | t.Errorf("Verification Failed") 64 | } 65 | } 66 | }) 67 | 68 | t.Run(fmt.Sprintf("%d/VerifyMemoized;", L), func(t *testing.T) { 69 | status = true 70 | status, _ = vcs.VerifyMemoized(digest, indexVec, valueVec, proofVec) 71 | if status == false { 72 | t.Errorf("Fast Verification Failed") 73 | } 74 | }) 75 | 76 | // Make some changes to the vector positions. 
77 | for k := 0; k < K; k++ { 78 | loc := indexVec[k] 79 | delta := deltaVec[k] 80 | vcs.UpdateProofTree(loc, delta) 81 | } 82 | 83 | // Update the value vector 84 | valueVec = SecondaryStateUpdate(indexVec, deltaVec, valueVec) 85 | 86 | // Get latest proofs 87 | for k := 0; k < K; k++ { 88 | proofVec[k] = vcs.GetProofPath(indexVec[k]) 89 | } 90 | 91 | digest = vcs.UpdateComVec(digest, indexVec, deltaVec) 92 | 93 | t.Run(fmt.Sprintf("%d/UpdateProofTree;", L), func(t *testing.T) { 94 | status = true 95 | status, _ = vcs.VerifyMemoized(digest, indexVec, valueVec, proofVec) 96 | if status == false { 97 | t.Errorf("UpdateProofTree Failed") 98 | } 99 | }) 100 | 101 | vcs.UpdateProofTreeBulk(indexVec, deltaVec) 102 | 103 | // Update the value vector 104 | valueVec = SecondaryStateUpdate(indexVec, deltaVec, valueVec) 105 | 106 | // Get latest proofs 107 | for k := 0; k < K; k++ { 108 | proofVec[k] = vcs.GetProofPath(indexVec[k]) 109 | } 110 | digest = vcs.UpdateComVec(digest, indexVec, deltaVec) 111 | 112 | t.Run(fmt.Sprintf("%d/UpdateProofTreeBulk;", L), func(t *testing.T) { 113 | status = true 114 | status, _ = vcs.VerifyMemoized(digest, indexVec, valueVec, proofVec) 115 | if status == false { 116 | t.Errorf("UpdateProofTreeBulk Failed") 117 | } 118 | }) 119 | 120 | var aggProof batch.Proof 121 | aggProof = vcs.AggProve(indexVec, proofVec) 122 | 123 | t.Run(fmt.Sprintf("%d/AggregateVerify;%d", L, txnLimit), func(t *testing.T) { 124 | 125 | status = status && vcs.AggVerify(aggProof, digest, indexVec, valueVec) 126 | if status == false { 127 | t.Errorf("Aggregation failed") 128 | } 129 | }) 130 | 131 | // Simple do another round of updates to check if aggregated succeeded 132 | vcs.UpdateProofTreeBulk(indexVec, deltaVec) 133 | valueVec = SecondaryStateUpdate(indexVec, deltaVec, valueVec) 134 | for k := 0; k < K; k++ { 135 | proofVec[k] = vcs.GetProofPath(indexVec[k]) 136 | } 137 | digest = vcs.UpdateComVec(digest, indexVec, deltaVec) 138 | 139 | var aggIndex []uint64 140 
| var aggProofIndv [][]mcl.G1 141 | var aggValue []mcl.Fr 142 | 143 | aggIndex = make([]uint64, txnLimit) 144 | aggProofIndv = make([][]mcl.G1, txnLimit) 145 | aggValue = make([]mcl.Fr, txnLimit) 146 | 147 | for j := uint64(0); j < txnLimit; j++ { 148 | id := uint64(rand.Intn(int(K))) // Pick an index from the saved list of vector positions 149 | aggIndex[j] = indexVec[id] 150 | aggProofIndv[j] = proofVec[id] 151 | aggValue[j] = valueVec[id] 152 | } 153 | 154 | aggProof = vcs.AggProve(aggIndex, aggProofIndv) 155 | t.Run(fmt.Sprintf("%d/AggregateVerify2;%d", L, txnLimit), func(t *testing.T) { 156 | 157 | status = status && vcs.AggVerify(aggProof, digest, aggIndex, aggValue) 158 | if status == false { 159 | t.Errorf("Aggregation#2 failed") 160 | } 161 | }) 162 | 163 | } 164 | } 165 | 166 | func SecondaryStateUpdate(indexVec []uint64, deltaVec []mcl.Fr, valueVec []mcl.Fr) []mcl.Fr { 167 | 168 | K := len(indexVec) 169 | valueMap := make(map[uint64]mcl.Fr) // loc: Current value in that position. 170 | updateMap := make(map[uint64]mcl.Fr) // loc: Magnitude of the changes. 171 | 172 | for k := 0; k < K; k++ { 173 | valueMap[indexVec[k]] = valueVec[k] 174 | } 175 | 176 | // Make some changes to the vector positions. 177 | for k := 0; k < K; k++ { 178 | loc := indexVec[k] 179 | delta := deltaVec[k] 180 | temp := updateMap[loc] 181 | mcl.FrAdd(&temp, &temp, &delta) 182 | updateMap[loc] = temp 183 | } 184 | 185 | // Import the bunch of changes made to local slice of aFr 186 | for key, value := range updateMap { 187 | temp := valueMap[key] 188 | mcl.FrAdd(&temp, &temp, &value) 189 | valueMap[key] = temp 190 | } 191 | 192 | // Update the value vector 193 | for k := 0; k < K; k++ { 194 | valueVec[k] = valueMap[indexVec[k]] 195 | } 196 | 197 | return valueVec 198 | } 199 | --------------------------------------------------------------------------------