├── .arcconfig ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── gen ├── ecmult │ ├── Cargo.toml │ └── src │ │ └── main.rs └── genmult │ ├── Cargo.toml │ └── src │ └── main.rs ├── shell.nix ├── src ├── ecdh.rs ├── ecdsa.rs ├── ecmult │ ├── const.rs │ ├── const_gen.rs │ └── mod.rs ├── error.rs ├── field.rs ├── group.rs ├── lib.rs └── scalar.rs └── tests └── verify.rs /.arcconfig: -------------------------------------------------------------------------------- 1 | { 2 | "phabricator.uri" : "https://source.that.world/" 3 | } -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | **/*.rs.bk 3 | Cargo.lock 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | matrix: 7 | allow_failures: 8 | - rust: nightly 9 | - rust: beta 10 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libsecp256k1" 3 | description = "Pure Rust secp256k1 implementation." 
4 | license = "Apache-2.0" 5 | version = "0.1.13" 6 | authors = ["Wei Tang "] 7 | repository = "https://source.that.world/source/libsecp256k1-rs/" 8 | keywords = [ "crypto", "ECDSA", "secp256k1", "bitcoin", "no_std" ] 9 | 10 | [lib] 11 | name = "secp256k1" 12 | 13 | [dependencies] 14 | rand = { version = "0.4", default-features = false } 15 | hmac-drbg = "0.1" 16 | sha2 = "0.6" 17 | digest = "0.6" 18 | typenum = "1.9" 19 | 20 | [dev-dependencies] 21 | secp256k1-test = "0.7" 22 | rand = "0.4" 23 | etcommon-hexutil = "0.2" 24 | 25 | [workspace] 26 | members = [ 27 | "./gen/ecmult", 28 | "./gen/genmult", 29 | ] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: gen 2 | 3 | gen: 4 | cd gen/ecmult && cargo run > ../../src/ecmult/const.rs.new 5 | mv src/ecmult/const.rs.new src/ecmult/const.rs 6 | cd gen/genmult && cargo run > ../../src/ecmult/const_gen.rs.new 7 | mv src/ecmult/const_gen.rs.new src/ecmult/const_gen.rs 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SECP256K1 Implementation in Pure Rust 2 | 3 | [![Build Status](https://travis-ci.org/ethereumproject/libsecp256k1-rs.svg?branch=master)](https://travis-ci.org/ethereumproject/libsecp256k1-rs) 4 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](./LICENSE) 5 | [![Cargo](https://img.shields.io/crates/v/libsecp256k1.svg)](https://crates.io/crates/libsecp256k1) 6 | 
[![Documentation](https://docs.rs/libsecp256k1/badge.svg)](https://docs.rs/libsecp256k1) 7 | 8 | SECP256K1 implementation with `no_std` support. Currently we have 9 | implementation for: 10 | 11 | * Convert a private key to a public key. 12 | * Sign messages. 13 | * Signature verification. 14 | * Public key recovery from signed messages. 15 | * Shared secrets. 16 | -------------------------------------------------------------------------------- /gen/ecmult/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libsecp256k1-gen-ecmult" 3 | version = "0.0.0" 4 | authors = ["Wei Tang "] 5 | 6 | [dependencies] 7 | libsecp256k1 = { path = "../.." } -------------------------------------------------------------------------------- /gen/ecmult/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate secp256k1; 2 | 3 | use secp256k1::curve::{Jacobian, Field, AffineStorage, Affine, AFFINE_G}; 4 | use secp256k1::util::{odd_multiples_table, ECMULT_TABLE_SIZE_G, 5 | set_table_gej_var, globalz_set_table_gej}; 6 | 7 | fn odd_multiples_table_storage_var(pre: &mut [AffineStorage], 8 | a: &Jacobian) { 9 | let mut prej: Vec = Vec::with_capacity(pre.len()); 10 | for _ in 0..pre.len() { 11 | prej.push(Jacobian::default()); 12 | } 13 | let mut prea: Vec = Vec::with_capacity(pre.len()); 14 | for _ in 0..pre.len() { 15 | prea.push(Affine::default()); 16 | } 17 | let mut zr: Vec = Vec::with_capacity(pre.len()); 18 | for _ in 0..pre.len() { 19 | zr.push(Field::default()); 20 | } 21 | 22 | odd_multiples_table(&mut prej, &mut zr, a); 23 | set_table_gej_var(&mut prea, &prej, &zr); 24 | 25 | for i in 0..pre.len() { 26 | pre[i] = prea[i].clone().into(); 27 | } 28 | } 29 | 30 | fn main() { 31 | let mut gj = Jacobian::default(); 32 | gj.set_ge(&AFFINE_G); 33 | let mut pre_g = Vec::with_capacity(ECMULT_TABLE_SIZE_G); 34 | for _ in 0..ECMULT_TABLE_SIZE_G { 35 | 
pre_g.push(AffineStorage::default()); 36 | } 37 | odd_multiples_table_storage_var(&mut pre_g, &gj); 38 | println!("["); 39 | for pg in pre_g { 40 | println!(" affine_storage_const!(field_storage_const!({}, {}, {}, {}, {}, {}, {}, {}), field_storage_const!({}, {}, {}, {}, {}, {}, {}, {})),", 41 | pg.x.0[7], pg.x.0[6], pg.x.0[5], pg.x.0[4], pg.x.0[3], pg.x.0[2], pg.x.0[1], pg.x.0[0], 42 | pg.y.0[7], pg.y.0[6], pg.y.0[5], pg.y.0[4], pg.y.0[3], pg.y.0[2], pg.y.0[1], pg.y.0[0]); 43 | } 44 | println!("]"); 45 | } 46 | -------------------------------------------------------------------------------- /gen/genmult/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "libsecp256k1-gen-genmult" 3 | version = "0.0.0" 4 | authors = ["Wei Tang "] 5 | 6 | [dependencies] 7 | libsecp256k1 = { path = "../.." } -------------------------------------------------------------------------------- /gen/genmult/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate secp256k1; 2 | 3 | use secp256k1::curve::{Jacobian, Field, AffineStorage, Affine, AFFINE_G}; 4 | use secp256k1::util::{odd_multiples_table, ECMULT_TABLE_SIZE_G, 5 | set_table_gej_var, globalz_set_table_gej}; 6 | 7 | pub fn set_all_gej_var(a: &[Jacobian]) -> Vec { 8 | let mut az: Vec = Vec::with_capacity(a.len()); 9 | for i in 0..a.len() { 10 | if !a[i].is_infinity() { 11 | az.push(a[i].z.clone()); 12 | } 13 | } 14 | let mut azi: Vec = inv_all_var(&az); 15 | 16 | let mut ret = Vec::with_capacity(a.len()); 17 | for _ in 0..a.len() { 18 | ret.push(Affine::default()); 19 | } 20 | 21 | let mut count = 0; 22 | for i in 0..a.len() { 23 | ret[i].infinity = a[i].infinity; 24 | if !a[i].is_infinity() { 25 | ret[i].set_gej_zinv(&a[i], &azi[count]); 26 | count += 1; 27 | } 28 | } 29 | ret 30 | } 31 | 32 | /// Calculate the (modular) inverses of a batch of field 33 | /// elements. 
Requires the inputs' magnitudes to be at most 8. The 34 | /// output magnitudes are 1 (but not guaranteed to be 35 | /// normalized). The inputs and outputs must not overlap in 36 | /// memory. 37 | pub fn inv_all_var(fields: &[Field]) -> Vec { 38 | if fields.len() == 0 { 39 | return Vec::new(); 40 | } 41 | 42 | let mut ret = Vec::new(); 43 | ret.push(fields[0].clone()); 44 | 45 | for i in 1..fields.len() { 46 | ret.push(Field::default()); 47 | ret[i] = &ret[i - 1] * &fields[i]; 48 | } 49 | 50 | let mut u = ret[fields.len() - 1].inv_var(); 51 | 52 | for i in (1..fields.len()).rev() { 53 | let j = i; 54 | let i = i - 1; 55 | ret[j] = &ret[i] * &u; 56 | u = &u * &fields[j]; 57 | } 58 | 59 | ret[0] = u; 60 | ret 61 | } 62 | 63 | fn main() { 64 | let mut gj = Jacobian::default(); 65 | gj.set_ge(&AFFINE_G); 66 | 67 | // Construct a group element with no known corresponding scalar (nothing up my sleeve). 68 | let mut nums_32 = [0u8; 32]; 69 | debug_assert!("The scalar for this x is unknown".as_bytes().len() == 32); 70 | for (i, v) in "The scalar for this x is unknown".as_bytes().iter().enumerate() { 71 | nums_32[i] = *v; 72 | } 73 | let mut nums_x = Field::default(); 74 | debug_assert!(nums_x.set_b32(&nums_32)); 75 | let mut nums_ge = Affine::default(); 76 | debug_assert!(nums_ge.set_xo_var(&nums_x, false)); 77 | let mut nums_gej = Jacobian::default(); 78 | nums_gej.set_ge(&nums_ge); 79 | nums_gej = nums_gej.add_ge_var(&AFFINE_G, None); 80 | 81 | // Compute prec. 
82 | let mut precj: Vec = Vec::with_capacity(1024); 83 | for _ in 0..1024 { 84 | precj.push(Jacobian::default()); 85 | } 86 | let mut gbase = gj.clone(); 87 | let mut numsbase = nums_gej.clone(); 88 | for j in 0..64 { 89 | precj[j*16] = numsbase.clone(); 90 | for i in 1..16 { 91 | precj[j*16 + i] = precj[j*16 + i - 1].add_var(&gbase, None); 92 | } 93 | for _ in 0..4 { 94 | gbase = gbase.double_var(None); 95 | } 96 | numsbase = numsbase.double_var(None); 97 | if j == 62 { 98 | numsbase = numsbase.neg(); 99 | numsbase = numsbase.add_var(&nums_gej, None); 100 | } 101 | } 102 | let prec = set_all_gej_var(&precj); 103 | println!("["); 104 | for j in 0..64 { 105 | println!(" ["); 106 | for i in 0..16 { 107 | let pg: AffineStorage = prec[j*16 + i].clone().into(); 108 | println!(" affine_storage_const!(field_storage_const!({}, {}, {}, {}, {}, {}, {}, {}), field_storage_const!({}, {}, {}, {}, {}, {}, {}, {})),", 109 | pg.x.0[7], pg.x.0[6], pg.x.0[5], pg.x.0[4], pg.x.0[3], pg.x.0[2], pg.x.0[1], pg.x.0[0], 110 | pg.y.0[7], pg.y.0[6], pg.y.0[5], pg.y.0[4], pg.y.0[3], pg.y.0[2], pg.y.0[1], pg.y.0[0]); 111 | } 112 | println!(" ],"); 113 | } 114 | println!("]"); 115 | } 116 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let pkgs = ( 2 | let 3 | nixpkgs = import ; 4 | pkgs_ = (nixpkgs {}); 5 | rustOverlay = (pkgs_.fetchFromGitHub { 6 | owner = "mozilla"; 7 | repo = "nixpkgs-mozilla"; 8 | rev = "6179dd876578ca2931f864627598ede16ba6cdef"; 9 | sha256 = "1lim10a674621zayz90nhwiynlakxry8fyz1x209g9bdm38zy3av"; 10 | }); 11 | in (nixpkgs { 12 | overlays = [ 13 | (import (builtins.toPath "${rustOverlay}/rust-overlay.nix")) 14 | (self: super: { 15 | rust = { 16 | rustc = super.rustChannels.stable.rust; 17 | cargo = super.rustChannels.stable.cargo; 18 | }; 19 | rustPlatform = super.recurseIntoAttrs (super.makeRustPlatform { 20 | rustc = 
super.rustChannels.stable.rust; 21 | cargo = super.rustChannels.stable.cargo; 22 | }); 23 | }) 24 | ]; 25 | })); 26 | 27 | in with pkgs; 28 | 29 | stdenv.mkDerivation { 30 | name = "libsecp256k1-env"; 31 | buildInputs = [ 32 | gcc rustc cargo gdb openssl pkgconfig 33 | ]; 34 | } 35 | -------------------------------------------------------------------------------- /src/ecdh.rs: -------------------------------------------------------------------------------- 1 | use sha2::Sha256; 2 | use digest::{FixedOutput, Input}; 3 | use group::{Affine, Jacobian}; 4 | use scalar::Scalar; 5 | use ecmult::ECMultContext; 6 | 7 | impl ECMultContext { 8 | pub fn ecdh_raw(&self, point: &Affine, scalar: &Scalar) -> Option<[u8; 32]> { 9 | let mut pt = point.clone(); 10 | let s = scalar.clone(); 11 | 12 | let mut result = [0u8; 32]; 13 | if s.is_zero() { 14 | return None; 15 | } 16 | 17 | let mut res = Jacobian::default(); 18 | self.ecmult_const(&mut res, &pt, &s); 19 | pt.set_gej(&res); 20 | 21 | pt.x.normalize(); 22 | pt.y.normalize(); 23 | 24 | let x = pt.x.b32(); 25 | let y = 0x02 | (if pt.y.is_odd() { 1 } else { 0 }); 26 | 27 | let mut sha = Sha256::default(); 28 | sha.process(&[y]); 29 | sha.process(&x); 30 | let generic = sha.fixed_result(); 31 | 32 | for i in 0..32 { 33 | result[i] = generic[i]; 34 | } 35 | 36 | Some(result) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /src/ecdsa.rs: -------------------------------------------------------------------------------- 1 | use field::Field; 2 | use group::{Affine, Jacobian}; 3 | use scalar::Scalar; 4 | use ecmult::{ECMultContext, ECMultGenContext}; 5 | use Error; 6 | 7 | const P_MINUS_ORDER: Field = field_const!( 8 | 0, 0, 0, 1, 0x45512319, 0x50B75FC4, 0x402DA172, 0x2FC9BAEE 9 | ); 10 | 11 | const ORDER_AS_FE: Field = field_const!( 12 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 13 | 0xBAAEDCE6, 0xAF48A03B, 0xBFD25E8C, 0xD0364141 14 | ); 15 | 16 | impl ECMultContext { 17 | pub 
fn verify_raw( 18 | &self, sigr: &Scalar, sigs: &Scalar, pubkey: &Affine, message: &Scalar 19 | ) -> bool { 20 | let c; 21 | let (sn, u1, u2): (Scalar, Scalar, Scalar); 22 | 23 | if sigr.is_zero() || sigs.is_zero() { 24 | return false; 25 | } 26 | 27 | sn = sigs.inv_var(); 28 | u1 = &sn * message; 29 | u2 = &sn * sigr; 30 | let mut pubkeyj: Jacobian = Jacobian::default(); 31 | pubkeyj.set_ge(pubkey); 32 | let mut pr: Jacobian = Jacobian::default(); 33 | self.ecmult(&mut pr, &pubkeyj, &u2, &u1); 34 | if pr.is_infinity() { 35 | return false; 36 | } 37 | 38 | c = sigr.b32(); 39 | let mut xr: Field = Default::default(); 40 | xr.set_b32(&c); 41 | 42 | if pr.eq_x_var(&xr) { 43 | return true; 44 | } 45 | if xr >= P_MINUS_ORDER { 46 | return false; 47 | } 48 | xr += ORDER_AS_FE; 49 | if pr.eq_x_var(&xr) { 50 | return true; 51 | } 52 | return false; 53 | } 54 | 55 | pub fn recover_raw( 56 | &self, sigr: &Scalar, sigs: &Scalar, rec_id: u8, message: &Scalar 57 | ) -> Result { 58 | debug_assert!(rec_id < 4); 59 | 60 | if sigr.is_zero() || sigs.is_zero() { 61 | return Err(Error::InvalidSignature); 62 | } 63 | 64 | let brx = sigr.b32(); 65 | let mut fx = Field::default(); 66 | let overflow = fx.set_b32(&brx); 67 | debug_assert!(overflow); 68 | 69 | if rec_id & 2 > 0 { 70 | if fx >= P_MINUS_ORDER { 71 | return Err(Error::InvalidSignature); 72 | } 73 | fx += ORDER_AS_FE; 74 | } 75 | let mut x = Affine::default(); 76 | if !x.set_xo_var(&fx, rec_id & 1 > 0) { 77 | return Err(Error::InvalidSignature); 78 | } 79 | let mut xj = Jacobian::default(); 80 | xj.set_ge(&x); 81 | let rn = sigr.inv(); 82 | let mut u1 = &rn * message; 83 | u1 = u1.neg(); 84 | let u2 = &rn * sigs; 85 | let mut qj = Jacobian::default(); 86 | self.ecmult(&mut qj, &xj, &u2, &u1); 87 | 88 | let mut pubkey = Affine::default(); 89 | pubkey.set_gej_var(&qj); 90 | 91 | if pubkey.is_infinity() { 92 | return Err(Error::InvalidSignature); 93 | } else { 94 | return Ok(pubkey); 95 | } 96 | } 97 | } 98 | 99 | impl 
ECMultGenContext { 100 | pub fn sign_raw(&self, seckey: &Scalar, message: &Scalar, nonce: &Scalar) -> Result<(Scalar, Scalar, u8), Error> { 101 | let mut rp = Jacobian::default(); 102 | self.ecmult_gen(&mut rp, nonce); 103 | let mut r = Affine::default(); 104 | r.set_gej(&rp); 105 | r.x.normalize(); 106 | r.y.normalize(); 107 | let b = r.x.b32(); 108 | let mut sigr = Scalar::default(); 109 | let overflow = sigr.set_b32(&b); 110 | debug_assert!(!sigr.is_zero()); 111 | debug_assert!(!overflow); 112 | 113 | let mut recid = (if overflow { 2 } else { 0 }) | (if r.y.is_odd() { 1 } else { 0 }); 114 | let mut n = &sigr * seckey; 115 | n += message; 116 | let mut sigs = nonce.inv(); 117 | sigs *= &n; 118 | n.clear(); 119 | rp.clear(); 120 | r.clear(); 121 | if sigs.is_zero() { 122 | return Err(Error::InvalidMessage); 123 | } 124 | if sigs.is_high() { 125 | sigs = sigs.neg(); 126 | recid = recid ^ 1; 127 | } 128 | return Ok((sigr, sigs, recid)); 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /src/ecmult/mod.rs: -------------------------------------------------------------------------------- 1 | use group::{Affine, Jacobian, AffineStorage, globalz_set_table_gej}; 2 | use field::Field; 3 | use scalar::Scalar; 4 | 5 | pub const WINDOW_A: usize = 5; 6 | pub const WINDOW_G: usize = 16; 7 | pub const ECMULT_TABLE_SIZE_A: usize = 1 << (WINDOW_A - 2); 8 | pub const ECMULT_TABLE_SIZE_G: usize = 1 << (WINDOW_G - 2); 9 | pub const WNAF_BITS: usize = 256; 10 | 11 | /// Context for accelerating the computation of a*P + b*G. 12 | pub struct ECMultContext { 13 | pre_g: [AffineStorage; ECMULT_TABLE_SIZE_G], 14 | } 15 | 16 | /// Context for accelerating the computation of a*G. 17 | pub struct ECMultGenContext { 18 | prec: [[AffineStorage; 16]; 64], 19 | blind: Scalar, 20 | initial: Jacobian, 21 | } 22 | 23 | /// A static ECMult context. 
pub static ECMULT_CONTEXT: ECMultContext = ECMultContext {
    // Precomputed odd multiples of the generator G, baked in at compile
    // time. Regenerated via `make gen` (gen/ecmult writes
    // src/ecmult/const.rs).
    pre_g: include!("const.rs"),
};

/// A static ECMultGen context.
pub static ECMULT_GEN_CONTEXT: ECMultGenContext = ECMultGenContext {
    // Precomputed multiples table for a*G, baked in at compile time.
    // Regenerated via `make gen` (gen/genmult writes src/ecmult/const_gen.rs).
    prec: include!("const_gen.rs"),
    // Fixed blinding scalar and matching starting point used by
    // ecmult_gen. NOTE(review): presumably `initial` corresponds to
    // `blind`*G so the blinding cancels out of the final result — TODO
    // confirm against the generator/ecmult_gen implementation.
    blind: Scalar([2217680822, 850875797, 1046150361, 1330484644,
                   4015777837, 2466086288, 2052467175, 2084507480]),
    initial: Jacobian {
        x: field_const_raw!(586608, 43357028, 207667908, 262670128, 142222828, 38529388, 267186148, 45417712, 115291924, 13447464),
        y: field_const_raw!(12696548, 208302564, 112025180, 191752716, 143238548, 145482948, 228906000, 69755164, 243572800, 210897016),
        z: field_const_raw!(3685368, 75404844, 20246216, 5748944, 73206666, 107661790, 110806176, 73488774, 5707384, 104448710),
        infinity: false,
    }
};

/// Fill `prej` with the first `prej.len()` odd multiples of `a`:
/// prej[i] represents (2*i+1)*a, built by repeatedly adding d = 2*a.
///
/// `zr` receives per-entry z-coordinate data: zr[0] is set to d.z, and
/// zr[i] (i > 0) is whatever `add_ge_var` writes through its
/// `Some(&mut zr[i])` argument — presumably the ratio between
/// consecutive z coordinates, as in upstream libsecp256k1; TODO confirm
/// against `add_ge_var`.
///
/// `prej` and `zr` must have equal, nonzero length, and `a` must not be
/// the point at infinity (all enforced by debug assertions).
pub fn odd_multiples_table(prej: &mut [Jacobian],
                           zr: &mut [Field],
                           a: &Jacobian) {
    debug_assert!(prej.len() == zr.len());
    debug_assert!(prej.len() > 0);
    debug_assert!(!a.is_infinity());

    // d = 2*a; its (x, y) are reused below as an affine summand. This is
    // only valid because every subsequent addition is performed on points
    // expressed relative to d's z coordinate (see set_gej_zinv below).
    let d = a.double_var(None);
    let d_ge = Affine {
        x: d.x.clone(),
        y: d.y.clone(),
        infinity: false,
    };

    // prej[0] = a, but with x/y rescaled by 1/d.z (set_gej_zinv) while
    // keeping a.z as the stored z — i.e. the entry is stated in the same
    // "denominator" as d_ge so the add_ge_var chain below is consistent.
    let mut a_ge = Affine::default();
    a_ge.set_gej_zinv(a, &d.z);
    prej[0].x = a_ge.x;
    prej[0].y = a_ge.y;
    prej[0].z = a.z.clone();
    prej[0].infinity = false;

    zr[0] = d.z.clone();
    // Each step adds 2*a, producing the next odd multiple and its z-ratio.
    for i in 1..prej.len() {
        prej[i] = prej[i-1].add_ge_var(&d_ge, Some(&mut zr[i]));
    }

    // Scale the final entry's z by d.z, compensating for the whole chain
    // having been computed relative to d's z coordinate.
    let l = &prej.last().unwrap().z * &d.z;
    prej.last_mut().unwrap().z = l;
}

// Compute the WINDOW_A table of odd multiples of `a` in affine form
// sharing a single common z coordinate, which is returned via `globalz`.
// Thin wrapper: builds the Jacobian table with `odd_multiples_table`,
// then converts it with `globalz_set_table_gej`.
fn odd_multiples_table_globalz_windowa(pre: &mut [Affine; ECMULT_TABLE_SIZE_A],
                                       globalz: &mut Field,
                                       a: &Jacobian) {
    let mut prej: [Jacobian; ECMULT_TABLE_SIZE_A] = Default::default();
    let mut zr: [Field; ECMULT_TABLE_SIZE_A] = Default::default();

    odd_multiples_table(&mut prej, &mut zr, a);
    globalz_set_table_gej(pre, globalz, &prej, &zr);
}
/// Fetch an odd multiple from a precomputed table, variable time.
/// `n` is a signed odd wNAF digit; positive digits index the table
/// directly, negative digits return the negated mirror entry.
/// `w` is only used by the debug range checks.
fn table_get_ge(r: &mut Affine, pre: &[Affine], n: i32, w: usize) {
    debug_assert!(n & 1 == 1);
    debug_assert!(n >= -((1 << (w-1)) - 1));
    debug_assert!(n <= ((1 << (w-1)) - 1));
    if n > 0 {
        *r = pre[((n-1)/2) as usize].clone();
    } else {
        // Negative digit: mirrored entry, negated.
        *r = pre[((-n-1)/2) as usize].neg();
    }
}

/// Constant-time variant of `table_get_ge`: scans the whole table and
/// selects via `cmov` so the memory access pattern is independent of `n`.
fn table_get_ge_const(r: &mut Affine, pre: &[Affine], n: i32, w: usize) {
    // abs_n = n * (2*(n > 0) - 1), i.e. |n| computed without a branch on `n`.
    let abs_n = n * (if n > 0 { 1 } else { 0 } * 2 - 1);
    let idx_n = abs_n / 2;
    debug_assert!(n & 1 == 1);
    debug_assert!(n >= -((1 << (w-1)) - 1));
    debug_assert!(n <= ((1 << (w-1)) - 1));
    // Touch every entry; only the matching index lands in `r`.
    for m in 0..pre.len() {
        r.x.cmov(&pre[m].x, m == idx_n as usize);
        r.y.cmov(&pre[m].y, m == idx_n as usize);
    }
    r.infinity = false;
    // Conditionally negate y when the digit was negative.
    let neg_y = r.y.neg(1);
    r.y.cmov(&neg_y, n != abs_n);
}

/// As `table_get_ge`, but reads from the compact storage representation.
/// Variable time.
fn table_get_ge_storage(r: &mut Affine, pre: &[AffineStorage], n: i32, w: usize) {
    debug_assert!(n & 1 == 1);
    debug_assert!(n >= -((1 << (w-1)) - 1));
    debug_assert!(n <= ((1 << (w-1)) - 1));
    if n > 0 {
        *r = pre[((n-1)/2) as usize].clone().into();
    } else {
        *r = pre[((-n-1)/2) as usize].clone().into();
        *r = r.neg();
    }
}

/// Convert a scalar to its wNAF representation with window size `w`.
/// Fills `wnaf` with signed odd digits (or zero) and returns the
/// position just past the highest non-zero digit.
pub fn ecmult_wnaf(wnaf: &mut [i32], a: &Scalar, w: usize) -> i32 {
    let mut s = a.clone();
    let mut last_set_bit: i32 = -1;
    let mut bit = 0;
    let mut sign = 1;
    let mut carry = 0;

    debug_assert!(wnaf.len() <= 256);
    debug_assert!(w >= 2 && w <= 31);

    for i in 0..wnaf.len() {
        wnaf[i] = 0;
    }

    // Work with the negation of high scalars so the digit magnitudes
    // stay small; `sign` compensates in the emitted digits.
    if s.bits(255, 1) > 0 {
        s = s.neg();
        sign = -1;
    }

    while bit < wnaf.len() {
        let mut now;
        let mut word;
        // Skip runs of bits equal to the pending carry.
        if s.bits(bit, 1) == carry as u32 {
            bit += 1;
            continue;
        }

        // Take up to `w` bits, clamped at the end of the buffer.
        now = w;
        if now > wnaf.len() - bit {
            now = wnaf.len() - bit;
        }

        word = (s.bits_var(bit, now) as i32) + carry;

        // Force the digit odd and in (-2^(w-1), 2^(w-1)) by borrowing
        // from the next window when the top bit of the word is set.
        carry = (word >> (w-1)) & 1;
        word -= carry << w;

        wnaf[bit] = sign * word;
        last_set_bit = bit as i32;

        bit += now;
    }
    debug_assert!(carry == 0);
    debug_assert!({
        let mut t = true;
        while bit < 256 {
            t = t && (s.bits(bit, 1) == 0);
            bit += 1;
        }
        t
    });
    last_set_bit + 1
}

/// Convert a scalar to a fixed-length wNAF form for constant-time
/// multiplication. Returns the skew (1 or 2) that was added to the
/// scalar; the caller must compensate after the multiplication.
pub fn ecmult_wnaf_const(wnaf: &mut [i32], a: &Scalar, w: usize) -> i32 {
    let mut s = a.clone();
    let mut word = 0;

    /* Note that we cannot handle even numbers by negating them to be
     * odd, as is done in other implementations, since if our scalars
     * were specified to have width < 256 for performance reasons,
     * their negations would have width 256 and we'd lose any
     * performance benefit. Instead, we use a technique from Section
     * 4.2 of the Okeya/Tagaki paper, which is to add either 1 (for
     * even) or 2 (for odd) to the number we are encoding, returning a
     * skew value indicating this, and having the caller compensate
     * after doing the multiplication. */

    /* Negative numbers will be negated to keep their bit
     * representation below the maximum width */
    let flip = s.is_high();
    /* We add 1 to even numbers, 2 to odd ones, noting that negation
     * flips parity */
    let bit = flip ^ !s.is_even();
    /* We check for negative one, since adding 2 to it will cause an
     * overflow */
    let neg_s = s.neg();
    let not_neg_one = !neg_s.is_one();
    s.cadd_bit(if bit { 1 } else { 0 }, not_neg_one);
    /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so
     * caller expects that we added two to it and flipped it. In fact
     * for -1 these operations are identical. We only flipped, but
     * since skewing is required (in the sense that the skew must be 1
     * or 2, never zero) and flipping is not, we need to change our
     * flags to claim that we only skewed. */
    let mut global_sign = s.cond_neg_mut(flip);
    // global_sign *= 2*(not_neg_one) - 1, i.e. flip the sign for -1.
    global_sign *= if not_neg_one { 1 } else { 0 } * 2 - 1;
    let skew = 1 << (if bit { 1 } else { 0 });

    let mut u_last: i32 = s.shr_int(w) as i32;
    let mut u: i32 = 0;
    while word * w < WNAF_BITS {
        u = s.shr_int(w) as i32;
        // Keep every emitted digit odd by borrowing from the next word.
        let even = (u & 1) == 0;
        let sign = 2 * (if u_last > 0 { 1 } else { 0 }) - 1;
        u += sign * if even { 1 } else { 0 };
        u_last -= sign * if even { 1 } else { 0 } * (1 << w);

        wnaf[word] = (u_last as i32 * global_sign as i32) as i32;
        word += 1;

        u_last = u;
    }
    wnaf[word] = u * global_sign as i32;

    debug_assert!(s.is_zero());
    let wnaf_size = (WNAF_BITS + w - 1) / w;
    debug_assert!(word == wnaf_size);

    skew
}

impl ECMultContext {
    /// Double-scalar multiplication: r = na*a + ng*G, variable time.
    pub fn ecmult(
        &self, r: &mut Jacobian, a: &Jacobian, na: &Scalar, ng: &Scalar
    ) {
        let mut tmpa = Affine::default();
        let mut pre_a: [Affine; ECMULT_TABLE_SIZE_A] = Default::default();
        let mut z = Field::default();
        let mut wnaf_na = [0i32; 256];
        let mut wnaf_ng = [0i32; 256];
        let bits_na = ecmult_wnaf(&mut wnaf_na, na, WINDOW_A);
        let mut bits = bits_na;
        // Odd multiples of `a`, all sharing the common denominator `z`.
        odd_multiples_table_globalz_windowa(&mut pre_a, &mut z, a);

        let bits_ng = ecmult_wnaf(&mut wnaf_ng, &ng, WINDOW_G);
        if bits_ng > bits {
            bits = bits_ng;
        }

        // Standard left-to-right double-and-add over the wNAF digits.
        r.set_infinity();
        for i in (0..bits).rev() {
            let mut n;
            *r = r.double_var(None);

            n = wnaf_na[i as usize];
            if i < bits_na && n != 0 {
                table_get_ge(&mut tmpa, &pre_a, n, WINDOW_A);
                *r = r.add_ge_var(&tmpa, None);
            }
            n = wnaf_ng[i as usize];
            if i < bits_ng && n != 0 {
                // Generator table entries are affine w.r.t. 1/z.
                table_get_ge_storage(&mut tmpa, &self.pre_g, n, WINDOW_G);
                *r = r.add_zinv_var(&tmpa, &z);
            }
        }

        // Re-apply the common denominator deferred by the pre_a table.
        if !r.is_infinity() {
            r.z *= &z;
        }
    }

    /// Constant-time scalar multiplication: r = scalar*a.
    pub fn ecmult_const(
        &self, r: &mut Jacobian, a: &Affine, scalar: &Scalar
    ) {
        const WNAF_SIZE: usize = (WNAF_BITS + (WINDOW_A - 1) - 1) / (WINDOW_A - 1);

        let mut tmpa = Affine::default();
        let mut pre_a: [Affine; ECMULT_TABLE_SIZE_A] = Default::default();
        let mut z = Field::default();

        let mut wnaf_1 = [0i32; 1 + WNAF_SIZE];

        let sc = scalar.clone();
        let skew_1 = ecmult_wnaf_const(&mut wnaf_1, &sc, WINDOW_A - 1);

        /* Calculate odd multiples of a. All multiples are brought to
         * the same Z 'denominator', which is stored in Z. Due to
         * secp256k1's isomorphism we can do all operations pretending
         * that the Z coordinate was 1, use affine addition formulae,
         * and correct the Z coordinate of the result once at the end.
         */
        r.set_ge(a);
        odd_multiples_table_globalz_windowa(&mut pre_a, &mut z, r);
        for i in 0..ECMULT_TABLE_SIZE_A {
            pre_a[i].y.normalize_weak();
        }

        /* first loop iteration (separated out so we can directly set
         * r, rather than having it start at infinity, get doubled
         * several times, then have its new value added to it) */
        let i = wnaf_1[WNAF_SIZE];
        debug_assert!(i != 0);
        table_get_ge_const(&mut tmpa, &pre_a, i, WINDOW_A);
        r.set_ge(&tmpa);

        /* remaining loop iterations */
        for i in (0..WNAF_SIZE).rev() {
            for _ in 0..(WINDOW_A - 1) {
                let r2 = r.clone();
                r.double_nonzero_in_place(&r2, None);
            }

            let n = wnaf_1[i];
            table_get_ge_const(&mut tmpa, &pre_a, n, WINDOW_A);
            debug_assert!(n != 0);
            *r = r.add_ge(&tmpa);
        }

        r.z *= &z;

        /* Correct for wNAF skew */
        let mut correction = a.clone();
        let mut correction_1_stor: AffineStorage;
        let a2_stor: AffineStorage;
        let mut tmpj = Jacobian::default();
        tmpj.set_ge(&correction);
        tmpj = tmpj.double_var(None);
        correction.set_gej(&tmpj);
        correction_1_stor = a.clone().into();
        a2_stor = correction.into();

        /* For odd numbers this is 2a (so replace it), for even ones a (so no-op) */
        correction_1_stor.cmov(&a2_stor, skew_1 == 2);

        /* Apply the correction */
        correction = correction_1_stor.into();
        correction = correction.neg();
        *r = r.add_ge(&correction)
    }
}

impl ECMultGenContext {
    /// Multiply the generator by `gn` using the blinded precomputed
    /// table: r = gn*G. Constant time with respect to `gn`.
    pub fn ecmult_gen(
        &self, r: &mut Jacobian, gn: &Scalar
    ) {
        let mut adds = AffineStorage::default();
        // Start from the blinding point; `blind` is removed from the
        // scalar below so the result is still gn*G.
        *r = self.initial.clone();

        let mut gnb = gn + &self.blind;
        let mut add = Affine::default();
        add.infinity = false;

        // 64 windows of 4 bits; each window selects one of 16 table
        // entries with a full constant-time scan.
        for j in 0..64 {
            let mut bits = gnb.bits(j * 4, 4);
            for i in 0..16 {
                adds.cmov(&self.prec[j][i], i as u32 == bits);
            }
            add = adds.clone().into();
            *r = r.add_ge(&add);
            #[allow(unused_assignments)]
            {
                bits = 0;
            }
        }
        // Wipe secret-derived material.
        add.clear();
        gnb.clear();
    }
}
-------------------------------------------------------------------------------- /src/error.rs: --------------------------------------------------------------------------------
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Error {
    InvalidSignature,
    InvalidPublicKey,
    InvalidSecretKey,
    InvalidRecoveryId,
    InvalidMessage
}
-------------------------------------------------------------------------------- /src/field.rs: --------------------------------------------------------------------------------
use core::cmp::Ordering;
use core::ops::{Add, AddAssign, Mul, MulAssign};

// Assert (debug only) that `$x` fits in `$n` bits.
macro_rules! debug_assert_bits {
    ($x: expr, $n: expr) => {
        debug_assert!($x >> $n == 0);
    }
}

// Build a Field from ten raw 26-bit limbs (little-endian limb order);
// the result is not marked normalized.
macro_rules! field_const_raw {
    ($d9: expr, $d8: expr, $d7: expr, $d6: expr, $d5: expr, $d4: expr, $d3: expr, $d2: expr,
     $d1: expr, $d0: expr) => {
        $crate::field::Field {
            n: [$d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7, $d8, $d9],
            magnitude: 1,
            normalized: false
        }
    }
}

// Build a Field from eight 32-bit big-endian words, repacking them
// into the ten 26-bit limb representation at compile time.
macro_rules! field_const {
    ($d7: expr, $d6: expr, $d5: expr, $d4: expr, $d3: expr, $d2: expr, $d1: expr, $d0: expr) => {
        $crate::field::Field {
            n: [
                $d0 & 0x3ffffff,
                ($d0 >> 26) | (($d1 & 0xfffff) << 6),
                ($d1 >> 20) | (($d2 & 0x3fff) << 12),
                ($d2 >> 14) | (($d3 & 0xff) << 18),
                ($d3 >> 8) | (($d4 & 0x3) << 24),
                ($d4 >> 2) & 0x3ffffff,
                ($d4 >> 28) | (($d5 & 0x3fffff) << 4),
                ($d5 >> 22) | (($d6 & 0xffff) << 10),
                ($d6 >> 16) | (($d7 & 0x3ff) << 16),
                ($d7 >> 10)
            ],
            magnitude: 1,
            normalized: true,
        }
    }
}

// Build a FieldStorage from eight 32-bit big-endian words.
macro_rules! field_storage_const {
    ($d7: expr, $d6: expr, $d5: expr, $d4: expr, $d3: expr, $d2: expr, $d1: expr, $d0: expr) => {
        $crate::field::FieldStorage([$d0, $d1, $d2, $d3, $d4, $d5, $d6, $d7])
    }
}

#[derive(Debug, Clone)]
/// Field element for secp256k1.
pub struct Field {
    // Ten 26-bit limbs, least significant first (top limb holds 22 bits).
    pub(crate) n: [u32; 10],
    // Upper bound tracking: each limb is at most `magnitude` times the
    // per-limb maximum (twice that when not normalized).
    pub(crate) magnitude: u32,
    // True when the value is fully reduced modulo p with magnitude 1.
    pub(crate) normalized: bool,
}

impl Field {
    /// Build a field element from eight 32-bit big-endian words.
    pub fn new(
        d7: u32, d6: u32, d5: u32, d4: u32, d3: u32, d2: u32, d1: u32, d0: u32
    ) -> Self {
        field_const!(d7, d6, d5, d4, d3, d2, d1, d0)
    }

    /// Build a normalized field element from a small integer.
    pub fn from_int(a: u32) -> Field {
        let mut f = Field::default();
        f.set_int(a);
        f
    }

    /// Check the internal invariants implied by `magnitude`/`normalized`.
    /// Debug-assertion helper only.
    fn verify(&self) -> bool {
        let m = if self.normalized { 1 } else { 2 } * self.magnitude;
        let mut r = true;
        r = r && (self.n[0] <= 0x3ffffff * m);
        r = r && (self.n[1] <= 0x3ffffff * m);
        r = r && (self.n[2] <= 0x3ffffff * m);
        r = r && (self.n[3] <= 0x3ffffff * m);
        r = r && (self.n[4] <= 0x3ffffff * m);
        r = r && (self.n[5] <= 0x3ffffff * m);
        r = r && (self.n[6] <= 0x3ffffff * m);
        r = r && (self.n[7] <= 0x3ffffff * m);
        r = r && (self.n[8] <= 0x3ffffff * m);
        r = r && (self.n[9] <= 0x03fffff * m);
        r = r && (self.magnitude <= 32);
        if self.normalized {
            r = r && self.magnitude <= 1;
            // A normalized value must also be strictly below p.
            if r && (self.n[9] == 0x03fffff) {
                let mid = self.n[8] & self.n[7] & self.n[6] & self.n[5] & self.n[4] & self.n[3] & self.n[2];
                if mid == 0x3ffffff {
                    r = r && ((self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) <= 0x3ffffff)
                }
            }
        }
        r
    }

    /// Normalize a field element.
pub fn normalize(&mut self) {
    // Work on local copies of the ten 26-bit limbs.
    let mut t0 = self.n[0];
    let mut t1 = self.n[1];
    let mut t2 = self.n[2];
    let mut t3 = self.n[3];
    let mut t4 = self.n[4];
    let mut t5 = self.n[5];
    let mut t6 = self.n[6];
    let mut t7 = self.n[7];
    let mut t8 = self.n[8];
    let mut t9 = self.n[9];

    let mut m: u32;
    // Fold the bits above position 22 of the top limb back into the low
    // limbs (2^256 is congruent to 0x1000003D1 modulo p, split across
    // limbs 0 and 1 as x*0x3d1 and x<<6).
    let mut x = t9 >> 22;
    t9 &= 0x03fffff;

    t0 += x * 0x3d1; t1 += x << 6;
    // Single carry pass; `m` accumulates the AND of limbs 2..8 for the
    // "all-ones middle" test below.
    t1 += t0 >> 26; t0 &= 0x3ffffff;
    t2 += t1 >> 26; t1 &= 0x3ffffff;
    t3 += t2 >> 26; t2 &= 0x3ffffff; m = t2;
    t4 += t3 >> 26; t3 &= 0x3ffffff; m &= t3;
    t5 += t4 >> 26; t4 &= 0x3ffffff; m &= t4;
    t6 += t5 >> 26; t5 &= 0x3ffffff; m &= t5;
    t7 += t6 >> 26; t6 &= 0x3ffffff; m &= t6;
    t8 += t7 >> 26; t7 &= 0x3ffffff; m &= t7;
    t9 += t8 >> 26; t8 &= 0x3ffffff; m &= t8;

    debug_assert!(t9 >> 23 == 0);

    // x is 1 iff the value still needs a final reduction: either the top
    // limb overflowed again, or the value lies in [p, 2^256).
    x = (t9 >> 22) | (if t9 == 0x03fffff { 1 } else { 0 } & if m == 0x3ffffff { 1 } else { 0 } & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff { 1 } else { 0 }));

    // Unconditional second pass (constant time): adds x*(2^256 - p) and
    // re-propagates carries.
    t0 += x * 0x3d1; t1 += x << 6;
    t1 += t0 >> 26; t0 &= 0x3ffffff;
    t2 += t1 >> 26; t1 &= 0x3ffffff;
    t3 += t2 >> 26; t2 &= 0x3ffffff;
    t4 += t3 >> 26; t3 &= 0x3ffffff;
    t5 += t4 >> 26; t4 &= 0x3ffffff;
    t6 += t5 >> 26; t5 &= 0x3ffffff;
    t7 += t6 >> 26; t6 &= 0x3ffffff;
    t8 += t7 >> 26; t7 &= 0x3ffffff;
    t9 += t8 >> 26; t8 &= 0x3ffffff;

    // If the second reduction ran, the top limb must have wrapped exactly.
    debug_assert!(t9 >> 22 == x);

    t9 &= 0x03fffff;

    self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
    self.magnitude = 1;
    self.normalized = true;
    debug_assert!(self.verify());
}

/// Weakly normalize a field element: reduce its magnitude to 1,
/// but don't fully normalize.
150 | pub fn normalize_weak(&mut self) { 151 | let mut t0 = self.n[0]; 152 | let mut t1 = self.n[1]; 153 | let mut t2 = self.n[2]; 154 | let mut t3 = self.n[3]; 155 | let mut t4 = self.n[4]; 156 | let mut t5 = self.n[5]; 157 | let mut t6 = self.n[6]; 158 | let mut t7 = self.n[7]; 159 | let mut t8 = self.n[8]; 160 | let mut t9 = self.n[9]; 161 | 162 | let x = t9 >> 22; t9 &= 0x03fffff; 163 | 164 | t0 += x * 0x3d1; t1 += x << 6; 165 | t1 += t0 >> 26; t0 &= 0x3ffffff; 166 | t2 += t1 >> 26; t1 &= 0x3ffffff; 167 | t3 += t2 >> 26; t2 &= 0x3ffffff; 168 | t4 += t3 >> 26; t3 &= 0x3ffffff; 169 | t5 += t4 >> 26; t4 &= 0x3ffffff; 170 | t6 += t5 >> 26; t5 &= 0x3ffffff; 171 | t7 += t6 >> 26; t6 &= 0x3ffffff; 172 | t8 += t7 >> 26; t7 &= 0x3ffffff; 173 | t9 += t8 >> 26; t8 &= 0x3ffffff; 174 | 175 | debug_assert!(t9 >> 23 == 0); 176 | 177 | self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9]; 178 | self.magnitude = 1; 179 | debug_assert!(self.verify()); 180 | } 181 | 182 | /// Normalize a field element, without constant-time guarantee. 
pub fn normalize_var(&mut self) {
    // Work on local copies of the ten 26-bit limbs.
    let mut t0 = self.n[0];
    let mut t1 = self.n[1];
    let mut t2 = self.n[2];
    let mut t3 = self.n[3];
    let mut t4 = self.n[4];
    let mut t5 = self.n[5];
    let mut t6 = self.n[6];
    let mut t7 = self.n[7];
    let mut t8 = self.n[8];
    let mut t9 = self.n[9];

    let mut m: u32;
    // Fold the bits above position 22 of the top limb back into the low
    // limbs (2^256 == 0x1000003D1 mod p, split as x*0x3d1 and x<<6).
    let mut x = t9 >> 22; t9 &= 0x03fffff;

    t0 += x * 0x3d1; t1 += x << 6;
    // Carry pass; `m` ANDs limbs 2..8 for the near-p detection below.
    t1 += t0 >> 26; t0 &= 0x3ffffff;
    t2 += t1 >> 26; t1 &= 0x3ffffff;
    t3 += t2 >> 26; t2 &= 0x3ffffff; m = t2;
    t4 += t3 >> 26; t3 &= 0x3ffffff; m &= t3;
    t5 += t4 >> 26; t4 &= 0x3ffffff; m &= t4;
    t6 += t5 >> 26; t5 &= 0x3ffffff; m &= t5;
    t7 += t6 >> 26; t6 &= 0x3ffffff; m &= t6;
    t8 += t7 >> 26; t7 &= 0x3ffffff; m &= t7;
    t9 += t8 >> 26; t8 &= 0x3ffffff; m &= t8;

    debug_assert!(t9 >> 23 == 0);

    // x is 1 iff a final reduction is still needed (top-limb overflow, or
    // the value lies in [p, 2^256)).
    x = (t9 >> 22) | (if t9 == 0x03fffff { 1 } else { 0 } & if m == 0x3ffffff { 1 } else { 0 } & (if (t1 + 0x40 + ((t0 + 0x3d1) >> 26)) > 0x3ffffff { 1 } else { 0 }));

    // Variable-time shortcut: only run the second pass when needed. At
    // this point x can only be 0 or 1, so `t0 += 0x3d1` (not x*0x3d1)
    // is sufficient inside the branch.
    if x > 0 {
        t0 += 0x3d1; t1 += x << 6;
        t1 += t0 >> 26; t0 &= 0x3ffffff;
        t2 += t1 >> 26; t1 &= 0x3ffffff;
        t3 += t2 >> 26; t2 &= 0x3ffffff;
        t4 += t3 >> 26; t3 &= 0x3ffffff;
        t5 += t4 >> 26; t4 &= 0x3ffffff;
        t6 += t5 >> 26; t5 &= 0x3ffffff;
        t7 += t6 >> 26; t6 &= 0x3ffffff;
        t8 += t7 >> 26; t7 &= 0x3ffffff;
        t9 += t8 >> 26; t8 &= 0x3ffffff;

        debug_assert!(t9 >> 22 == x);

        t9 &= 0x03fffff;
    }

    self.n = [t0, t1, t2, t3, t4, t5, t6, t7, t8, t9];
    self.magnitude = 1;
    self.normalized = true;
    debug_assert!(self.verify());
}

/// Verify whether a field element represents zero i.e. would
/// normalize to a zero value. The field implementation may
/// optionally normalize the input, but this should not be relied
/// upon.
pub fn normalizes_to_zero(&self) -> bool {
    // Work on local copies of the ten 26-bit limbs.
    let mut t0 = self.n[0];
    let mut t1 = self.n[1];
    let mut t2 = self.n[2];
    let mut t3 = self.n[3];
    let mut t4 = self.n[4];
    let mut t5 = self.n[5];
    let mut t6 = self.n[6];
    let mut t7 = self.n[7];
    let mut t8 = self.n[8];
    let mut t9 = self.n[9];

    // z0 ORs all reduced limbs (zero iff the value reduces to 0).
    // z1 ANDs each limb XORed with the corresponding limb of p, so
    // z1 == 0x3ffffff iff the reduced value equals p exactly
    // (p's limbs are 0x3fffc2f, 0x3ffffbf, 0x3ffffff x7, 0x03fffff).
    let mut z0: u32; let mut z1: u32;

    // Weak reduction, as in `normalize_weak`.
    let x = t9 >> 22; t9 &= 0x03fffff;

    t0 += x * 0x3d1; t1 += x << 6;
    t1 += t0 >> 26; t0 &= 0x3ffffff; z0 = t0; z1 = t0 ^ 0x3d0;
    t2 += t1 >> 26; t1 &= 0x3ffffff; z0 |= t1; z1 &= t1 ^ 0x40;
    t3 += t2 >> 26; t2 &= 0x3ffffff; z0 |= t2; z1 &= t2;
    t4 += t3 >> 26; t3 &= 0x3ffffff; z0 |= t3; z1 &= t3;
    t5 += t4 >> 26; t4 &= 0x3ffffff; z0 |= t4; z1 &= t4;
    t6 += t5 >> 26; t5 &= 0x3ffffff; z0 |= t5; z1 &= t5;
    t7 += t6 >> 26; t6 &= 0x3ffffff; z0 |= t6; z1 &= t6;
    t8 += t7 >> 26; t7 &= 0x3ffffff; z0 |= t7; z1 &= t7;
    t9 += t8 >> 26; t8 &= 0x3ffffff; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3c00000;

    debug_assert!(t9 >> 23 == 0);

    // The weakly-reduced representation of 0 mod p is either 0 or p.
    return z0 == 0 || z1 == 0x3ffffff;
}

/// Verify whether a field element represents zero i.e. would
/// normalize to a zero value. The field implementation may
/// optionally normalize the input, but this should not be relied
/// upon.
pub fn normalizes_to_zero_var(&self) -> bool {
    // Same check as `normalizes_to_zero`, but with a variable-time early
    // exit: if the bottom limb already rules out both 0 and p, stop
    // before touching the remaining limbs.
    let mut t0: u32; let mut t1: u32;
    let mut t2: u32; let mut t3: u32;
    let mut t4: u32; let mut t5: u32;
    let mut t6: u32; let mut t7: u32;
    let mut t8: u32; let mut t9: u32;
    let mut z0: u32; let mut z1: u32;
    let x: u32;

    t0 = self.n[0];
    t9 = self.n[9];

    // Fold the top-limb excess into limb 0 only, enough to test it.
    x = t9 >> 22;
    t0 += x * 0x3d1;

    // z0/z1 as in `normalizes_to_zero`: limb 0 of p is 0x3fffc2f, i.e.
    // 0x3ffffff ^ 0x3d0.
    z0 = t0 & 0x3ffffff;
    z1 = z0 ^ 0x3d0;

    // Early exit: limb 0 matches neither 0 nor p's limb 0.
    if z0 != 0 && z1 != 0x3ffffff {
        return false;
    }

    t1 = self.n[1];
    t2 = self.n[2];
    t3 = self.n[3];
    t4 = self.n[4];
    t5 = self.n[5];
    t6 = self.n[6];
    t7 = self.n[7];
    t8 = self.n[8];

    t9 &= 0x03fffff;
    t1 += x << 6;

    // Finish the carry pass and the accumulated zero / equal-to-p tests.
    t1 += t0 >> 26;
    t2 += t1 >> 26; t1 &= 0x3ffffff; z0 |= t1; z1 &= t1 ^ 0x40;
    t3 += t2 >> 26; t2 &= 0x3ffffff; z0 |= t2; z1 &= t2;
    t4 += t3 >> 26; t3 &= 0x3ffffff; z0 |= t3; z1 &= t3;
    t5 += t4 >> 26; t4 &= 0x3ffffff; z0 |= t4; z1 &= t4;
    t6 += t5 >> 26; t5 &= 0x3ffffff; z0 |= t5; z1 &= t5;
    t7 += t6 >> 26; t6 &= 0x3ffffff; z0 |= t6; z1 &= t6;
    t8 += t7 >> 26; t7 &= 0x3ffffff; z0 |= t7; z1 &= t7;
    t9 += t8 >> 26; t8 &= 0x3ffffff; z0 |= t8; z1 &= t8;
    z0 |= t9; z1 &= t9 ^ 0x3c00000;

    debug_assert!(t9 >> 23 == 0);

    // Value reduces to zero iff it is exactly 0 or exactly p.
    return z0 == 0 || z1 == 0x3ffffff;
}

/// Set a field element equal to a small integer. Resulting field
/// element is normalized.
pub fn set_int(&mut self, a: u32) {
    self.n = [a, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    self.magnitude = 1;
    self.normalized = true;
    debug_assert!(self.verify());
}

/// Verify whether a field element is zero. Requires the input to
/// be normalized.
pub fn is_zero(&self) -> bool {
    debug_assert!(self.normalized);
    debug_assert!(self.verify());
    return (self.n[0] | self.n[1] | self.n[2] | self.n[3] | self.n[4] | self.n[5] | self.n[6] | self.n[7] | self.n[8] | self.n[9]) == 0;
}

/// Check the "oddness" of a field element. Requires the input to
/// be normalized.
pub fn is_odd(&self) -> bool {
    debug_assert!(self.normalized);
    debug_assert!(self.verify());
    return self.n[0] & 1 != 0;
}

/// Sets a field element equal to zero, initializing all fields.
pub fn clear(&mut self) {
    self.magnitude = 0;
    self.normalized = true;
    self.n = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
}

/// Set a field element equal to 32-byte big endian value. If
/// successful, the resulting field element is normalized.
pub fn set_b32(&mut self, a: &[u8; 32]) -> bool {
    // Repack 32 big-endian bytes into ten 26-bit little-endian limbs.
    self.n[0] = (a[31] as u32) | ((a[30] as u32) << 8) | ((a[29] as u32) << 16) | (((a[28] & 0x3) as u32) << 24);
    self.n[1] = (((a[28] >> 2) & 0x3f) as u32) | ((a[27] as u32) << 6) | ((a[26] as u32) << 14) | (((a[25] & 0xf) as u32) << 22);
    self.n[2] = (((a[25] >> 4) & 0xf) as u32) | ((a[24] as u32) << 4) | ((a[23] as u32) << 12) | (((a[22] as u32) & 0x3f) << 20);
    self.n[3] = (((a[22] >> 6) & 0x3) as u32) | ((a[21] as u32) << 2) | ((a[20] as u32) << 10) | ((a[19] as u32) << 18);
    self.n[4] = (a[18] as u32) | ((a[17] as u32) << 8) | ((a[16] as u32) << 16) | (((a[15] & 0x3) as u32) << 24);
    self.n[5] = (((a[15] >> 2) & 0x3f) as u32) | ((a[14] as u32) << 6) | ((a[13] as u32) << 14) | (((a[12] as u32) & 0xf) << 22);
    self.n[6] = (((a[12] >> 4) & 0xf) as u32) | ((a[11] as u32) << 4) | ((a[10] as u32) << 12) | (((a[9] & 0x3f) as u32) << 20);
    self.n[7] = (((a[9] >> 6) & 0x3) as u32) | ((a[8] as u32) << 2) | ((a[7] as u32) << 10) | ((a[6] as u32) << 18);
    self.n[8] = (a[5] as u32) | ((a[4] as u32) << 8) | ((a[3] as u32) << 16) | (((a[2] & 0x3) as u32) << 24);
    self.n[9] = (((a[2] >> 2) & 0x3f) as u32) | ((a[1] as u32) << 6) | ((a[0] as u32) << 14);

    // Reject values >= p (same overflow condition as in `verify`).
    if self.n[9] == 0x03fffff && (self.n[8] & self.n[7] & self.n[6] & self.n[5] & self.n[4] & self.n[3] & self.n[2]) == 0x3ffffff && (self.n[1] + 0x40 + ((self.n[0] + 0x3d1) >> 26)) > 0x3ffffff {
        return false;
    }

    self.magnitude = 1;
    self.normalized = true;
    debug_assert!(self.verify());

    return true;
}

/// Convert a field element to a 32-byte big endian
/// value. Requires the input to be normalized.
pub fn b32(&self) -> [u8; 32] {
    debug_assert!(self.normalized);
    debug_assert!(self.verify());

    let mut r = [0u8; 32];

    // Inverse of `set_b32`: unpack ten 26-bit limbs into 32 big-endian bytes.
    r[0] = ((self.n[9] >> 14) & 0xff) as u8;
    r[1] = ((self.n[9] >> 6) & 0xff) as u8;
    r[2] = (((self.n[9] & 0x3f) << 2) | ((self.n[8] >> 24) & 0x3)) as u8;
    r[3] = ((self.n[8] >> 16) & 0xff) as u8;
    r[4] = ((self.n[8] >> 8) & 0xff) as u8;
    r[5] = (self.n[8] & 0xff) as u8;
    r[6] = ((self.n[7] >> 18) & 0xff) as u8;
    r[7] = ((self.n[7] >> 10) & 0xff) as u8;
    r[8] = ((self.n[7] >> 2) & 0xff) as u8;
    r[9] = (((self.n[7] & 0x3) << 6) | ((self.n[6] >> 20) & 0x3f)) as u8;
    r[10] = ((self.n[6] >> 12) & 0xff) as u8;
    r[11] = ((self.n[6] >> 4) & 0xff) as u8;
    r[12] = (((self.n[6] & 0xf) << 4) | ((self.n[5] >> 22) & 0xf)) as u8;
    r[13] = ((self.n[5] >> 14) & 0xff) as u8;
    r[14] = ((self.n[5] >> 6) & 0xff) as u8;
    r[15] = (((self.n[5] & 0x3f) << 2) | ((self.n[4] >> 24) & 0x3)) as u8;
    r[16] = ((self.n[4] >> 16) & 0xff) as u8;
    r[17] = ((self.n[4] >> 8) & 0xff) as u8;
    r[18] = (self.n[4] & 0xff) as u8;
    r[19] = ((self.n[3] >> 18) & 0xff) as u8;
    r[20] = ((self.n[3] >> 10) & 0xff) as u8;
    r[21] = ((self.n[3] >> 2) & 0xff) as u8;
    r[22] = (((self.n[3] & 0x3) << 6) | ((self.n[2] >> 20) & 0x3f)) as u8;
    r[23] = ((self.n[2] >> 12) & 0xff) as u8;
    r[24] = ((self.n[2] >> 4) & 0xff) as u8;
    r[25] = (((self.n[2] & 0xf) << 4) | ((self.n[1] >> 22) & 0xf)) as u8;
    r[26] = ((self.n[1] >> 14) & 0xff) as u8;
    r[27] = ((self.n[1] >> 6) & 0xff) as u8;
    r[28] = (((self.n[1] & 0x3f) << 2) | ((self.n[0] >> 24) & 0x3)) as u8;
    r[29] = ((self.n[0] >> 16) & 0xff) as u8;
    r[30] = ((self.n[0] >> 8) & 0xff) as u8;
    r[31] = (self.n[0] & 0xff) as u8;

    r
}

/// Set a field element equal to the additive inverse of
/// another. Takes a maximum magnitude of the input as an
/// argument. The magnitude of the output is one higher.
pub fn neg_in_place(&mut self, other: &Field, m: u32) {
    debug_assert!(other.magnitude <= m);
    debug_assert!(other.verify());

    // Compute 2*(m+1)*p - other limb-wise; the per-limb constants
    // (0x3fffc2f, 0x3ffffbf, 0x3ffffff..., 0x03fffff) are p's limbs, so
    // each subtraction cannot underflow given the magnitude bound.
    self.n[0] = 0x3fffc2f * 2 * (m + 1) - other.n[0];
    self.n[1] = 0x3ffffbf * 2 * (m + 1) - other.n[1];
    self.n[2] = 0x3ffffff * 2 * (m + 1) - other.n[2];
    self.n[3] = 0x3ffffff * 2 * (m + 1) - other.n[3];
    self.n[4] = 0x3ffffff * 2 * (m + 1) - other.n[4];
    self.n[5] = 0x3ffffff * 2 * (m + 1) - other.n[5];
    self.n[6] = 0x3ffffff * 2 * (m + 1) - other.n[6];
    self.n[7] = 0x3ffffff * 2 * (m + 1) - other.n[7];
    self.n[8] = 0x3ffffff * 2 * (m + 1) - other.n[8];
    self.n[9] = 0x03fffff * 2 * (m + 1) - other.n[9];

    self.magnitude = m + 1;
    self.normalized = false;
    debug_assert!(self.verify());
}

/// Returns the additive inverse of this field element; see `neg_in_place`.
pub fn neg(&self, m: u32) -> Field {
    let mut ret = Field::default();
    ret.neg_in_place(self, m);
    ret
}

/// Multiplies the passed field element with a small integer
/// constant. Multiplies the magnitude by that small integer.
459 | pub fn mul_int(&mut self, a: u32) { 460 | self.n[0] *= a; 461 | self.n[1] *= a; 462 | self.n[2] *= a; 463 | self.n[3] *= a; 464 | self.n[4] *= a; 465 | self.n[5] *= a; 466 | self.n[6] *= a; 467 | self.n[7] *= a; 468 | self.n[8] *= a; 469 | self.n[9] *= a; 470 | 471 | self.magnitude *= a; 472 | self.normalized = false; 473 | debug_assert!(self.verify()); 474 | } 475 | 476 | /// Compare two field elements. Requires both inputs to be 477 | /// normalized. 478 | pub fn cmp_var(&self, other: &Field) -> Ordering { 479 | // Variable time compare implementation. 480 | debug_assert!(self.normalized); 481 | debug_assert!(other.normalized); 482 | debug_assert!(self.verify()); 483 | debug_assert!(other.verify()); 484 | 485 | for i in (0..10).rev() { 486 | if self.n[i] > other.n[i] { 487 | return Ordering::Greater; 488 | } 489 | if self.n[i] < other.n[i] { 490 | return Ordering::Less; 491 | } 492 | } 493 | return Ordering::Equal; 494 | } 495 | 496 | pub fn eq_var(&self, other: &Field) -> bool { 497 | let mut na = self.neg(1); 498 | na += other; 499 | return na.normalizes_to_zero_var(); 500 | } 501 | 502 | fn mul_inner(&mut self, a: &Field, b: &Field) { 503 | const M: u64 = 0x3ffffff; 504 | const R0: u64 = 0x3d10; 505 | const R1: u64 = 0x400; 506 | 507 | let (mut c, mut d): (u64, u64); 508 | let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64); 509 | let (t9, t1, t0, t2, t3, t4, t5, t6, t7): (u32, u32, u32, u32, u32, u32, u32, u32, u32); 510 | 511 | debug_assert_bits!(a.n[0], 30); 512 | debug_assert_bits!(a.n[1], 30); 513 | debug_assert_bits!(a.n[2], 30); 514 | debug_assert_bits!(a.n[3], 30); 515 | debug_assert_bits!(a.n[4], 30); 516 | debug_assert_bits!(a.n[5], 30); 517 | debug_assert_bits!(a.n[6], 30); 518 | debug_assert_bits!(a.n[7], 30); 519 | debug_assert_bits!(a.n[8], 30); 520 | debug_assert_bits!(a.n[9], 26); 521 | debug_assert_bits!(b.n[0], 30); 522 | debug_assert_bits!(b.n[1], 30); 523 | debug_assert_bits!(b.n[2], 30); 524 | 
debug_assert_bits!(b.n[3], 30); 525 | debug_assert_bits!(b.n[4], 30); 526 | debug_assert_bits!(b.n[5], 30); 527 | debug_assert_bits!(b.n[6], 30); 528 | debug_assert_bits!(b.n[7], 30); 529 | debug_assert_bits!(b.n[8], 30); 530 | debug_assert_bits!(b.n[9], 26); 531 | 532 | // [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. 533 | // px is a shorthand for sum(a[i]*b[x-i], i=0..x). 534 | // Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. 535 | 536 | d = ((a.n[0] as u64) * (b.n[9] as u64)).wrapping_add( 537 | (a.n[1] as u64) * (b.n[8] as u64)).wrapping_add( 538 | (a.n[2] as u64) * (b.n[7] as u64)).wrapping_add( 539 | (a.n[3] as u64) * (b.n[6] as u64)).wrapping_add( 540 | (a.n[4] as u64) * (b.n[5] as u64)).wrapping_add( 541 | (a.n[5] as u64) * (b.n[4] as u64)).wrapping_add( 542 | (a.n[6] as u64) * (b.n[3] as u64)).wrapping_add( 543 | (a.n[7] as u64) * (b.n[2] as u64)).wrapping_add( 544 | (a.n[8] as u64) * (b.n[1] as u64)).wrapping_add( 545 | (a.n[9] as u64) * (b.n[0] as u64)); 546 | // debug_assert_bits!(d, 64); 547 | 548 | /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ 549 | t9 = (d & M) as u32; d >>= 26; 550 | debug_assert_bits!(t9, 26); 551 | debug_assert_bits!(d, 38); 552 | /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */ 553 | 554 | c = (a.n[0] as u64) * (b.n[0] as u64); 555 | debug_assert_bits!(c, 60); 556 | /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */ 557 | 558 | d = d.wrapping_add( 559 | (a.n[1] as u64) * (b.n[9] as u64)).wrapping_add( 560 | (a.n[2] as u64) * (b.n[8] as u64)).wrapping_add( 561 | (a.n[3] as u64) * (b.n[7] as u64)).wrapping_add( 562 | (a.n[4] as u64) * (b.n[6] as u64)).wrapping_add( 563 | (a.n[5] as u64) * (b.n[5] as u64)).wrapping_add( 564 | (a.n[6] as u64) * (b.n[4] as u64)).wrapping_add( 565 | (a.n[7] as u64) * (b.n[3] as u64)).wrapping_add( 566 | (a.n[8] as u64) * (b.n[2] as u64)).wrapping_add( 567 | (a.n[9] as u64) * (b.n[1] as u64)); 568 | debug_assert_bits!(d, 63); 569 | /* [d t9 0 0 0 0 0 0 0 0 c] = 
[p10 p9 0 0 0 0 0 0 0 0 p0] */ 570 | v0 = d & M; d >>= 26; c += v0 * R0; 571 | debug_assert_bits!(v0, 26); 572 | debug_assert_bits!(d, 37); 573 | debug_assert_bits!(c, 61); 574 | /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ 575 | t0 = (c & M) as u32; c >>= 26; c += v0 * R1; 576 | 577 | debug_assert_bits!(t0, 26); 578 | debug_assert_bits!(c, 37); 579 | /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ 580 | /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */ 581 | 582 | c = c.wrapping_add( 583 | (a.n[0] as u64) * (b.n[1] as u64)).wrapping_add( 584 | (a.n[1] as u64) * (b.n[0] as u64)); 585 | debug_assert_bits!(c, 62); 586 | /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */ 587 | d = d.wrapping_add( 588 | (a.n[2] as u64) * (b.n[9] as u64)).wrapping_add( 589 | (a.n[3] as u64) * (b.n[8] as u64)).wrapping_add( 590 | (a.n[4] as u64) * (b.n[7] as u64)).wrapping_add( 591 | (a.n[5] as u64) * (b.n[6] as u64)).wrapping_add( 592 | (a.n[6] as u64) * (b.n[5] as u64)).wrapping_add( 593 | (a.n[7] as u64) * (b.n[4] as u64)).wrapping_add( 594 | (a.n[8] as u64) * (b.n[3] as u64)).wrapping_add( 595 | (a.n[9] as u64) * (b.n[2] as u64)); 596 | debug_assert_bits!(d, 63); 597 | /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ 598 | v1 = d & M; d >>= 26; c += v1 * R0; 599 | debug_assert_bits!(v1, 26); 600 | debug_assert_bits!(d, 37); 601 | debug_assert_bits!(c, 63); 602 | /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ 603 | t1 = (c & M) as u32; c >>= 26; c += v1 * R1; 604 | debug_assert_bits!(t1, 26); 605 | debug_assert_bits!(c, 38); 606 | /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ 607 | /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */ 608 | 609 | c = c.wrapping_add( 610 | (a.n[0] as u64) * (b.n[2] as u64)).wrapping_add( 611 | (a.n[1] as u64) * (b.n[1] as u64)).wrapping_add( 612 | (a.n[2] as u64) * 
(b.n[0] as u64)); 613 | debug_assert_bits!(c, 62); 614 | /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ 615 | d = d.wrapping_add( 616 | (a.n[3] as u64) * (b.n[9] as u64)).wrapping_add( 617 | (a.n[4] as u64) * (b.n[8] as u64)).wrapping_add( 618 | (a.n[5] as u64) * (b.n[7] as u64)).wrapping_add( 619 | (a.n[6] as u64) * (b.n[6] as u64)).wrapping_add( 620 | (a.n[7] as u64) * (b.n[5] as u64)).wrapping_add( 621 | (a.n[8] as u64) * (b.n[4] as u64)).wrapping_add( 622 | (a.n[9] as u64) * (b.n[3] as u64)); 623 | debug_assert_bits!(d, 63); 624 | /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ 625 | v2 = d & M; d >>= 26; c += v2 * R0; 626 | debug_assert_bits!(v2, 26); 627 | debug_assert_bits!(d, 37); 628 | debug_assert_bits!(c, 63); 629 | /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ 630 | t2 = (c & M) as u32; c >>= 26; c += v2 * R1; 631 | debug_assert_bits!(t2, 26); 632 | debug_assert_bits!(c, 38); 633 | /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ 634 | /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */ 635 | 636 | c = c.wrapping_add( 637 | (a.n[0] as u64) * (b.n[3] as u64)).wrapping_add( 638 | (a.n[1] as u64) * (b.n[2] as u64)).wrapping_add( 639 | (a.n[2] as u64) * (b.n[1] as u64)).wrapping_add( 640 | (a.n[3] as u64) * (b.n[0] as u64)); 641 | debug_assert_bits!(c, 63); 642 | /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ 643 | d = d.wrapping_add( 644 | (a.n[4] as u64) * (b.n[9] as u64)).wrapping_add( 645 | (a.n[5] as u64) * (b.n[8] as u64)).wrapping_add( 646 | (a.n[6] as u64) * (b.n[7] as u64)).wrapping_add( 647 | (a.n[7] as u64) * (b.n[6] as u64)).wrapping_add( 648 | (a.n[8] as u64) * (b.n[5] as u64)).wrapping_add( 649 | (a.n[9] as u64) * (b.n[4] as u64)); 650 | debug_assert_bits!(d, 63); 651 | /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 
p0] */ 652 | v3 = d & M; d >>= 26; c += v3 * R0; 653 | debug_assert_bits!(v3, 26); 654 | debug_assert_bits!(d, 37); 655 | // debug_assert_bits!(c, 64); 656 | /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ 657 | t3 = (c & M) as u32; c >>= 26; c += v3 * R1; 658 | debug_assert_bits!(t3, 26); 659 | debug_assert_bits!(c, 39); 660 | /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ 661 | /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */ 662 | 663 | c = c.wrapping_add( 664 | (a.n[0] as u64) * (b.n[4] as u64)).wrapping_add( 665 | (a.n[1] as u64) * (b.n[3] as u64)).wrapping_add( 666 | (a.n[2] as u64) * (b.n[2] as u64)).wrapping_add( 667 | (a.n[3] as u64) * (b.n[1] as u64)).wrapping_add( 668 | (a.n[4] as u64) * (b.n[0] as u64)); 669 | debug_assert_bits!(c, 63); 670 | /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ 671 | d = d.wrapping_add( 672 | (a.n[5] as u64) * (b.n[9] as u64)).wrapping_add( 673 | (a.n[6] as u64) * (b.n[8] as u64)).wrapping_add( 674 | (a.n[7] as u64) * (b.n[7] as u64)).wrapping_add( 675 | (a.n[8] as u64) * (b.n[6] as u64)).wrapping_add( 676 | (a.n[9] as u64) * (b.n[5] as u64)); 677 | debug_assert_bits!(d, 62); 678 | /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ 679 | v4 = d & M; d >>= 26; c += v4 * R0; 680 | debug_assert_bits!(v4, 26); 681 | debug_assert_bits!(d, 36); 682 | // debug_assert_bits!(c, 64); 683 | /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ 684 | t4 = (c & M) as u32; c >>= 26; c += v4 * R1; 685 | debug_assert_bits!(t4, 26); 686 | debug_assert_bits!(c, 39); 687 | /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */ 688 | /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] 
*/ 689 | 690 | c = c.wrapping_add( 691 | (a.n[0] as u64) * (b.n[5] as u64)).wrapping_add( 692 | (a.n[1] as u64) * (b.n[4] as u64)).wrapping_add( 693 | (a.n[2] as u64) * (b.n[3] as u64)).wrapping_add( 694 | (a.n[3] as u64) * (b.n[2] as u64)).wrapping_add( 695 | (a.n[4] as u64) * (b.n[1] as u64)).wrapping_add( 696 | (a.n[5] as u64) * (b.n[0] as u64)); 697 | debug_assert_bits!(c, 63); 698 | /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ 699 | d = d.wrapping_add( 700 | (a.n[6] as u64) * (b.n[9] as u64)).wrapping_add( 701 | (a.n[7] as u64) * (b.n[8] as u64)).wrapping_add( 702 | (a.n[8] as u64) * (b.n[7] as u64)).wrapping_add( 703 | (a.n[9] as u64) * (b.n[6] as u64)); 704 | debug_assert_bits!(d, 62); 705 | /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ 706 | v5 = d & M; d >>= 26; c += v5 * R0; 707 | debug_assert_bits!(v5, 26); 708 | debug_assert_bits!(d, 36); 709 | // debug_assert_bits!(c, 64); 710 | /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ 711 | t5 = (c & M) as u32; c >>= 26; c += v5 * R1; 712 | debug_assert_bits!(t5, 26); 713 | debug_assert_bits!(c, 39); 714 | /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ 715 | /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */ 716 | 717 | c = c.wrapping_add( 718 | (a.n[0] as u64) * (b.n[6] as u64)).wrapping_add( 719 | (a.n[1] as u64) * (b.n[5] as u64)).wrapping_add( 720 | (a.n[2] as u64) * (b.n[4] as u64)).wrapping_add( 721 | (a.n[3] as u64) * (b.n[3] as u64)).wrapping_add( 722 | (a.n[4] as u64) * (b.n[2] as u64)).wrapping_add( 723 | (a.n[5] as u64) * (b.n[1] as u64)).wrapping_add( 724 | (a.n[6] as u64) * (b.n[0] as u64)); 725 | debug_assert_bits!(c, 63); 726 | /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 
p5 p4 p3 p2 p1 p0] */ 727 | d = d.wrapping_add( 728 | (a.n[7] as u64) * (b.n[9] as u64)).wrapping_add( 729 | (a.n[8] as u64) * (b.n[8] as u64)).wrapping_add( 730 | (a.n[9] as u64) * (b.n[7] as u64)); 731 | debug_assert_bits!(d, 61); 732 | /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ 733 | v6 = d & M; d >>= 26; c += v6 * R0; 734 | debug_assert_bits!(v6, 26); 735 | debug_assert_bits!(d, 35); 736 | // debug_assert_bits!(c, 64); 737 | /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ 738 | t6 = (c & M) as u32; c >>= 26; c += v6 * R1; 739 | debug_assert_bits!(t6, 26); 740 | debug_assert_bits!(c, 39); 741 | /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ 742 | /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */ 743 | 744 | c = c.wrapping_add( 745 | (a.n[0] as u64) * (b.n[7] as u64)).wrapping_add( 746 | (a.n[1] as u64) * (b.n[6] as u64)).wrapping_add( 747 | (a.n[2] as u64) * (b.n[5] as u64)).wrapping_add( 748 | (a.n[3] as u64) * (b.n[4] as u64)).wrapping_add( 749 | (a.n[4] as u64) * (b.n[3] as u64)).wrapping_add( 750 | (a.n[5] as u64) * (b.n[2] as u64)).wrapping_add( 751 | (a.n[6] as u64) * (b.n[1] as u64)).wrapping_add( 752 | (a.n[7] as u64) * (b.n[0] as u64)); 753 | // debug_assert_bits!(c, 64); 754 | debug_assert!(c <= 0x8000007c00000007); 755 | /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ 756 | d = d.wrapping_add( 757 | (a.n[8] as u64) * (b.n[9] as u64)).wrapping_add( 758 | (a.n[9] as u64) * (b.n[8] as u64)); 759 | debug_assert_bits!(d, 58); 760 | /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ 761 | v7 = d & M; d >>= 26; c += v7 * R0; 762 | debug_assert_bits!(v7, 26); 
763 | debug_assert_bits!(d, 32); 764 | // debug_assert_bits!(c, 64); 765 | debug_assert!(c <= 0x800001703fffc2f7); 766 | /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ 767 | t7 = (c & M) as u32; c >>= 26; c += v7 * R1; 768 | debug_assert_bits!(t7, 26); 769 | debug_assert_bits!(c, 38); 770 | /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ 771 | /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */ 772 | 773 | c = c.wrapping_add( 774 | (a.n[0] as u64) * (b.n[8] as u64)).wrapping_add( 775 | (a.n[1] as u64) * (b.n[7] as u64)).wrapping_add( 776 | (a.n[2] as u64) * (b.n[6] as u64)).wrapping_add( 777 | (a.n[3] as u64) * (b.n[5] as u64)).wrapping_add( 778 | (a.n[4] as u64) * (b.n[4] as u64)).wrapping_add( 779 | (a.n[5] as u64) * (b.n[3] as u64)).wrapping_add( 780 | (a.n[6] as u64) * (b.n[2] as u64)).wrapping_add( 781 | (a.n[7] as u64) * (b.n[1] as u64)).wrapping_add( 782 | (a.n[8] as u64) * (b.n[0] as u64)); 783 | // debug_assert_bits!(c, 64); 784 | debug_assert!(c <= 0x9000007b80000008); 785 | /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 786 | d = d.wrapping_add((a.n[9] as u64) * (b.n[9] as u64)); 787 | debug_assert_bits!(d, 57); 788 | /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 789 | v8 = d & M; d >>= 26; c += v8 * R0; 790 | debug_assert_bits!(v8, 26); 791 | debug_assert_bits!(d, 31); 792 | // debug_assert_bits!(c, 64); 793 | debug_assert!(c <= 0x9000016fbfffc2f8); 794 | /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 795 | 796 | self.n[3] = t3; 797 | debug_assert_bits!(self.n[3], 26); 798 | /* [d u8 0 0 0 
0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 799 | self.n[4] = t4; 800 | debug_assert_bits!(self.n[4], 26); 801 | /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 802 | self.n[5] = t5; 803 | debug_assert_bits!(self.n[5], 26); 804 | /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 805 | self.n[6] = t6; 806 | debug_assert_bits!(self.n[6], 26); 807 | /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 808 | self.n[7] = t7; 809 | debug_assert_bits!(self.n[7], 26); 810 | /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 811 | 812 | self.n[8] = (c & M) as u32; c >>= 26; c += v8 * R1; 813 | debug_assert_bits!(self.n[8], 26); 814 | debug_assert_bits!(c, 39); 815 | /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 816 | /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 817 | c += d * R0 + t9 as u64; 818 | debug_assert_bits!(c, 45); 819 | /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 820 | self.n[9] = (c & (M >> 4)) as u32; c >>= 22; c += d * (R1 << 4); 821 | debug_assert_bits!(self.n[9], 22); 822 | debug_assert_bits!(c, 46); 823 | /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */ 824 | /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 
p0] */
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */

        // Final carry propagation for the low limbs t0..t2; `c` still holds
        // the overflow folded back from the top of the product via R0/R1.
        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[0] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003ffffbf);
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[1] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
        /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    }

    /// Raw 10x26-bit limb squaring: computes `a * a` into `self.n`,
    /// folding limbs above the field size back in with the R0/R1
    /// reduction constants, in the same style as `mul_inner`. Callers
    /// (`sqr_in_place`) are responsible for updating the
    /// `magnitude`/`normalized` bookkeeping afterwards. The structure
    /// follows upstream libsecp256k1's `secp256k1_fe_sqr_inner`.
    ///
    /// NOTE: the invariant-trace comments below refer to the carried
    /// values as `u0..u8`; the code binds them as `v0..v8`.
    fn sqr_inner(&mut self, a: &Field) {
        const M: u64 = 0x3ffffff;
        const R0: u64 = 0x3d10;
        const R1: u64 = 0x400;

        let (mut c, mut d): (u64, u64);
        let (v0, v1, v2, v3, v4, v5, v6, v7, v8): (u64, u64, u64, u64, u64, u64, u64, u64, u64);
        let (t9, t0, t1, t2, t3, t4, t5, t6, t7): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        // Input limbs must fit 30 bits (top limb 26) so the doubled
        // cross products below cannot overflow.
        debug_assert_bits!(a.n[0], 30);
        debug_assert_bits!(a.n[1], 30);
        debug_assert_bits!(a.n[2], 30);
        debug_assert_bits!(a.n[3], 30);
        debug_assert_bits!(a.n[4], 30);
        debug_assert_bits!(a.n[5], 30);
        debug_assert_bits!(a.n[6], 30);
        debug_assert_bits!(a.n[7], 30);
        debug_assert_bits!(a.n[8], 30);
        debug_assert_bits!(a.n[9], 26);

        // [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n.
        // px is a shorthand for sum(a.n[i]*a.n[x-i], i=0..x).
        // Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0].

        d = (((a.n[0]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[5] as u64));
        // debug_assert_bits!(d, 64);
        /* [d 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */
        t9 = (d & M) as u32; d >>= 26;
        debug_assert_bits!(t9, 26);
        debug_assert_bits!(d, 38);
        /* [d t9 0 0 0 0 0 0 0 0 0] = [p9 0 0 0 0 0 0 0 0 0] */

        c = (a.n[0] as u64) * (a.n[0] as u64);
        debug_assert_bits!(c, 60);
        /* [d t9 0 0 0 0 0 0 0 0 c] = [p9 0 0 0 0 0 0 0 0 p0] */
        d = d.wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            (a.n[5] as u64) * (a.n[5] as u64));
        debug_assert_bits!(d, 63);
        /* [d t9 0 0 0 0 0 0 0 0 c] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
        v0 = d & M; d >>= 26; c += v0 * R0;
        debug_assert_bits!(v0, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 61);
        /* [d u0 t9 0 0 0 0 0 0 0 0 c-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
        t0 = (c & M) as u32; c >>= 26; c += v0 * R1;
        debug_assert_bits!(t0, 26);
        debug_assert_bits!(c, 37);
        /* [d u0 t9 0 0 0 0 0 0 0 c-u0*R1 t0-u0*R0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */
        /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 0 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p10 p9 0 0 0 0 0 0 0 p1 p0] */
        d = d.wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        /* [d 0 t9 0 0 0 0 0 0 0 c t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
        v1 = d & M; d >>= 26; c += v1 * R0;
        debug_assert_bits!(v1, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        /* [d u1 0 t9 0 0 0 0 0 0 0 c-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
        t1 = (c & M) as u32; c >>= 26; c += v1 * R1;
        debug_assert_bits!(t1, 26);
        debug_assert_bits!(c, 38);
        /* [d u1 0 t9 0 0 0 0 0 0 c-u1*R1 t1-u1*R0 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */
        /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 0 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[2] as u64)).wrapping_add(
            (a.n[1] as u64) * (a.n[1] as u64));
        debug_assert_bits!(c, 62);
        /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
        d = d.wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            (a.n[6] as u64) * (a.n[6] as u64));
        debug_assert_bits!(d, 63);
        /* [d 0 0 t9 0 0 0 0 0 0 c t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
        v2 = d & M; d >>= 26; c += v2 * R0;
        debug_assert_bits!(v2, 26);
        debug_assert_bits!(d, 37);
        debug_assert_bits!(c, 63);
        /* [d u2 0 0 t9 0 0 0 0 0 0 c-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
        t2 = (c & M) as u32; c >>= 26; c += v2 * R1;
        debug_assert_bits!(t2, 26);
        debug_assert_bits!(c, 38);
        /* [d u2 0 0 t9 0 0 0 0 0 c-u2*R1 t2-u2*R0 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */
        /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 0 p2 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[3] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
        d = d.wrapping_add(
            ((a.n[4]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[6]*2) as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 63);
        /* [d 0 0 0 t9 0 0 0 0 0 c t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
        v3 = d & M; d >>= 26; c += v3 * R0;
        debug_assert_bits!(v3, 26);
        debug_assert_bits!(d, 37);
        // debug_assert_bits!(c, 64);
        /* [d u3 0 0 0 t9 0 0 0 0 0 c-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
        t3 = (c & M) as u32; c >>= 26; c += v3 * R1;
        debug_assert_bits!(t3, 26);
        debug_assert_bits!(c, 39);
        /* [d u3 0 0 0 t9 0 0 0 0 c-u3*R1 t3-u3*R0 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */
        /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 0 p3 p2 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[4] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[3] as u64)).wrapping_add(
            (a.n[2] as u64) * (a.n[2] as u64));
        debug_assert_bits!(c, 63);
        /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
        d = d.wrapping_add(
            ((a.n[5]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[6]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            (a.n[7] as u64) * (a.n[7] as u64));
        debug_assert_bits!(d, 62);
        /* [d 0 0 0 0 t9 0 0 0 0 c t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
        v4 = d & M; d >>= 26; c += v4 * R0;
        debug_assert_bits!(v4, 26);
        debug_assert_bits!(d, 36);
        // debug_assert_bits!(c, 64);
        /* [d u4 0 0 0 0 t9 0 0 0 0 c-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
        t4 = (c & M) as u32; c >>= 26; c += v4 * R1;
        debug_assert_bits!(t4, 26);
        debug_assert_bits!(c, 39);
        /* [d u4 0 0 0 0 t9 0 0 0 c-u4*R1 t4-u4*R0 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */
        /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 0 p4 p3 p2 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[4] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
        d = d.wrapping_add(
            ((a.n[6]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            ((a.n[7]*2) as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 62);
        /* [d 0 0 0 0 0 t9 0 0 0 c t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
        v5 = d & M; d >>= 26; c += v5 * R0;
        debug_assert_bits!(v5, 26);
        debug_assert_bits!(d, 36);
        // debug_assert_bits!(c, 64);
        /* [d u5 0 0 0 0 0 t9 0 0 0 c-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
        t5 = (c & M) as u32; c >>= 26; c += v5 * R1;
        debug_assert_bits!(t5, 26);
        debug_assert_bits!(c, 39);
        /* [d u5 0 0 0 0 0 t9 0 0 c-u5*R1 t5-u5*R0 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */
        /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 0 p5 p4 p3 p2 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[4] as u64)).wrapping_add(
            (a.n[3] as u64) * (a.n[3] as u64));
        debug_assert_bits!(c, 63);
        /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
        d = d.wrapping_add(
            ((a.n[7]*2) as u64) * (a.n[9] as u64)).wrapping_add(
            (a.n[8] as u64) * (a.n[8] as u64));
        debug_assert_bits!(d, 61);
        /* [d 0 0 0 0 0 0 t9 0 0 c t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
        v6 = d & M; d >>= 26; c += v6 * R0;
        debug_assert_bits!(v6, 26);
        debug_assert_bits!(d, 35);
        // debug_assert_bits!(c, 64);
        /* [d u6 0 0 0 0 0 0 t9 0 0 c-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
        t6 = (c & M) as u32; c >>= 26; c += v6 * R1;
        debug_assert_bits!(t6, 26);
        debug_assert_bits!(c, 39);
        /* [d u6 0 0 0 0 0 0 t9 0 c-u6*R1 t6-u6*R0 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */
        /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 0 p6 p5 p4 p3 p2 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[4] as u64));
        // debug_assert_bits!(c, 64);
        debug_assert!(c <= 0x8000007C00000007);
        /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
        d = d.wrapping_add(
            ((a.n[8]*2) as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 58);
        /* [d 0 0 0 0 0 0 0 t9 0 c t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
        v7 = d & M; d >>= 26; c += v7 * R0;
        debug_assert_bits!(v7, 26);
        debug_assert_bits!(d, 32);
        /* debug_assert_bits!(c, 64); */
        debug_assert!(c <= 0x800001703FFFC2F7);
        /* [d u7 0 0 0 0 0 0 0 t9 0 c-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
        t7 = (c & M) as u32; c >>= 26; c += v7 * R1;
        debug_assert_bits!(t7, 26);
        debug_assert_bits!(c, 38);
        /* [d u7 0 0 0 0 0 0 0 t9 c-u7*R1 t7-u7*R0 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */
        /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 0 p7 p6 p5 p4 p3 p2 p1 p0] */

        c = c.wrapping_add(
            ((a.n[0]*2) as u64) * (a.n[8] as u64)).wrapping_add(
            ((a.n[1]*2) as u64) * (a.n[7] as u64)).wrapping_add(
            ((a.n[2]*2) as u64) * (a.n[6] as u64)).wrapping_add(
            ((a.n[3]*2) as u64) * (a.n[5] as u64)).wrapping_add(
            (a.n[4] as u64) * (a.n[4] as u64));
        // debug_assert_bits!(c, 64);
        debug_assert!(c <= 0x9000007B80000008);
        /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        d = d.wrapping_add(
            (a.n[9] as u64) * (a.n[9] as u64));
        debug_assert_bits!(d, 57);
        /* [d 0 0 0 0 0 0 0 0 t9 c t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        v8 = d & M; d >>= 26; c += v8 * R0;
        debug_assert_bits!(v8, 26);
        debug_assert_bits!(d, 31);
        /* debug_assert_bits!(c, 64); */
        debug_assert!(c <= 0x9000016FBFFFC2F8);
        /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 t3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */

        // Write out the middle limbs, then finish the carry chain through
        // n[8], n[9] and back around to n[0]..n[2].
        self.n[3] = t3;
        debug_assert_bits!(self.n[3], 26);
        /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 t4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[4] = t4;
        debug_assert_bits!(self.n[4], 26);
        /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 t5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[5] = t5;
        debug_assert_bits!(self.n[5], 26);
        /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 t6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[6] = t6;
        debug_assert_bits!(self.n[6], 26);
        /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 t7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[7] = t7;
        debug_assert_bits!(self.n[7], 26);
        /* [d u8 0 0 0 0 0 0 0 0 t9 c-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */

        self.n[8] = (c & M) as u32; c >>= 26; c += v8 * R1;
        debug_assert_bits!(self.n[8], 26);
        debug_assert_bits!(c, 39);
        /* [d u8 0 0 0 0 0 0 0 0 t9+c-u8*R1 r8-u8*R0 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        /* [d 0 0 0 0 0 0 0 0 0 t9+c r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        c += d * R0 + t9 as u64;
        debug_assert_bits!(c, 45);
        /* [d 0 0 0 0 0 0 0 0 0 c-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[9] = (c & (M >> 4)) as u32; c >>= 22; c += d * (R1 << 4);
        debug_assert_bits!(self.n[9], 22);
        debug_assert_bits!(c, 46);
        /* [d 0 0 0 0 0 0 0 0 r9+((c-d*R1<<4)<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        /* [d 0 0 0 0 0 0 0 -d*R1 r9+(c<<22)-d*R0 r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 t0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */

        d = c * (R0 >> 4) + t0 as u64;
        debug_assert_bits!(d, 56);
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1 d-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[0] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[0], 26);
        debug_assert_bits!(d, 30);
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 t1+d r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        d += c * (R1 >> 4) + t1 as u64;
        debug_assert_bits!(d, 53);
        debug_assert!(d <= 0x10000003FFFFBF);
        /* [r9+(c<<22) r8 r7 r6 r5 r4 r3 t2 d-c*R1>>4 r0-c*R0>>4] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        /* [r9 r8 r7 r6 r5 r4 r3 t2 d r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[1] = (d & M) as u32; d >>= 26;
        debug_assert_bits!(self.n[1], 26);
        debug_assert_bits!(d, 27);
        debug_assert!(d <= 0x4000000);
        /* [r9 r8 r7 r6 r5 r4 r3 t2+d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        d += t2 as u64;
        debug_assert_bits!(d, 27);
        /* [r9 r8 r7 r6 r5 r4 r3 d r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
        self.n[2] = d as u32;
        debug_assert_bits!(self.n[2], 27);
        /* [r9 r8 r7 r6 r5 r4 r3 r2 r1 r0] = [p18 p17 p16 p15 p14 p13 p12 p11 p10 p9 p8 p7 p6 p5 p4 p3 p2 p1 p0] */
    }

    /// Sets a field element to be the product of two others. Requires
    /// the inputs' magnitudes to be at most 8. The output magnitude
    /// is 1 (but not guaranteed to be normalized).
1148 | pub fn mul_in_place(&mut self, a: &Field, b: &Field) { 1149 | debug_assert!(a.magnitude <= 8); 1150 | debug_assert!(b.magnitude <= 8); 1151 | debug_assert!(a.verify()); 1152 | debug_assert!(b.verify()); 1153 | self.mul_inner(a, b); 1154 | self.magnitude = 1; 1155 | self.normalized = false; 1156 | debug_assert!(self.verify()); 1157 | } 1158 | 1159 | /// Sets a field element to be the square of another. Requires the 1160 | /// input's magnitude to be at most 8. The output magnitude is 1 1161 | /// (but not guaranteed to be normalized). 1162 | pub fn sqr_in_place(&mut self, a: &Field) { 1163 | debug_assert!(a.magnitude <= 8); 1164 | debug_assert!(a.verify()); 1165 | self.sqr_inner(a); 1166 | self.magnitude = 1; 1167 | self.normalized = false; 1168 | debug_assert!(a.verify()); 1169 | } 1170 | 1171 | pub fn sqr(&self) -> Field { 1172 | let mut ret = Field::default(); 1173 | ret.sqr_in_place(self); 1174 | ret 1175 | } 1176 | 1177 | /// If a has a square root, it is computed in r and 1 is 1178 | /// returned. If a does not have a square root, the root of its 1179 | /// negation is computed and 0 is returned. The input's magnitude 1180 | /// can be at most 8. The output magnitude is 1 (but not 1181 | /// guaranteed to be normalized). The result in r will always be a 1182 | /// square itself. 
1183 | pub fn sqrt(&self) -> (Field, bool) { 1184 | let mut x2 = self.sqr(); 1185 | x2 *= self; 1186 | 1187 | let mut x3 = x2.sqr(); 1188 | x3 *= self; 1189 | 1190 | let mut x6 = x3.clone(); 1191 | for _ in 0..3 { 1192 | x6 = x6.sqr(); 1193 | } 1194 | x6 *= &x3; 1195 | 1196 | let mut x9 = x6.clone(); 1197 | for _ in 0..3 { 1198 | x9 = x9.sqr(); 1199 | } 1200 | x9 *= &x3; 1201 | 1202 | let mut x11 = x9.clone(); 1203 | for _ in 0..2 { 1204 | x11 = x11.sqr(); 1205 | } 1206 | x11 *= &x2; 1207 | 1208 | let mut x22 = x11.clone(); 1209 | for _ in 0..11 { 1210 | x22 = x22.sqr(); 1211 | } 1212 | x22 *= &x11; 1213 | 1214 | let mut x44 = x22.clone(); 1215 | for _ in 0..22 { 1216 | x44 = x44.sqr(); 1217 | } 1218 | x44 *= &x22; 1219 | 1220 | let mut x88 = x44.clone(); 1221 | for _ in 0..44 { 1222 | x88 = x88.sqr(); 1223 | } 1224 | x88 *= &x44; 1225 | 1226 | let mut x176 = x88.clone(); 1227 | for _ in 0..88 { 1228 | x176 = x176.sqr(); 1229 | } 1230 | x176 *= &x88; 1231 | 1232 | let mut x220 = x176.clone(); 1233 | for _ in 0..44 { 1234 | x220 = x220.sqr(); 1235 | } 1236 | x220 *= &x44; 1237 | 1238 | let mut x223 = x220.clone(); 1239 | for _ in 0..3 { 1240 | x223 = x223.sqr(); 1241 | } 1242 | x223 *= &x3; 1243 | 1244 | let mut t1 = x223; 1245 | for _ in 0..23 { 1246 | t1 = t1.sqr(); 1247 | } 1248 | t1 *= &x22; 1249 | for _ in 0..6 { 1250 | t1 = t1.sqr(); 1251 | } 1252 | t1 *= &x2; 1253 | t1 = t1.sqr(); 1254 | let r = t1.sqr(); 1255 | 1256 | t1 = r.sqr(); 1257 | (r, &t1 == self) 1258 | } 1259 | 1260 | /// Sets a field element to be the (modular) inverse of 1261 | /// another. Requires the input's magnitude to be at most 8. The 1262 | /// output magnitude is 1 (but not guaranteed to be normalized). 
1263 | pub fn inv(&self) -> Field { 1264 | let mut x2 = self.sqr(); 1265 | x2 *= self; 1266 | 1267 | let mut x3 = x2.sqr(); 1268 | x3 *= self; 1269 | 1270 | let mut x6 = x3.clone(); 1271 | for _ in 0..3 { 1272 | x6 = x6.sqr(); 1273 | } 1274 | x6 *= &x3; 1275 | 1276 | let mut x9 = x6.clone(); 1277 | for _ in 0..3 { 1278 | x9 = x9.sqr(); 1279 | } 1280 | x9 *= &x3; 1281 | 1282 | let mut x11 = x9.clone(); 1283 | for _ in 0..2 { 1284 | x11 = x11.sqr(); 1285 | } 1286 | x11 *= &x2; 1287 | 1288 | let mut x22 = x11.clone(); 1289 | for _ in 0..11 { 1290 | x22 = x22.sqr(); 1291 | } 1292 | x22 *= &x11; 1293 | 1294 | let mut x44 = x22.clone(); 1295 | for _ in 0..22 { 1296 | x44 = x44.sqr(); 1297 | } 1298 | x44 *= &x22; 1299 | 1300 | let mut x88 = x44.clone(); 1301 | for _ in 0..44 { 1302 | x88 = x88.sqr(); 1303 | } 1304 | x88 *= &x44; 1305 | 1306 | let mut x176 = x88.clone(); 1307 | for _ in 0..88 { 1308 | x176 = x176.sqr(); 1309 | } 1310 | x176 *= &x88; 1311 | 1312 | let mut x220 = x176.clone(); 1313 | for _ in 0..44 { 1314 | x220 = x220.sqr(); 1315 | } 1316 | x220 *= &x44; 1317 | 1318 | let mut x223 = x220.clone(); 1319 | for _ in 0..3 { 1320 | x223 = x223.sqr(); 1321 | } 1322 | x223 *= &x3; 1323 | 1324 | let mut t1 = x223.clone(); 1325 | for _ in 0..23 { 1326 | t1 = t1.sqr(); 1327 | } 1328 | t1 *= &x22; 1329 | for _ in 0..5 { 1330 | t1 = t1.sqr(); 1331 | } 1332 | t1 *= self; 1333 | for _ in 0..3 { 1334 | t1 = t1.sqr(); 1335 | } 1336 | t1 *= &x2; 1337 | for _ in 0..2 { 1338 | t1 = t1.sqr(); 1339 | } 1340 | let r = self * &t1; 1341 | r 1342 | } 1343 | 1344 | /// Potentially faster version of secp256k1_fe_inv, without 1345 | /// constant-time guarantee. 1346 | pub fn inv_var(&self) -> Field { 1347 | self.inv() 1348 | } 1349 | 1350 | /// Checks whether a field element is a quadratic residue. 1351 | pub fn is_quad_var(&self) -> bool { 1352 | let (_, ret) = self.sqrt(); 1353 | ret 1354 | } 1355 | 1356 | /// If flag is true, set *r equal to *a; otherwise leave 1357 | /// it. 
Constant-time. 1358 | pub fn cmov(&mut self, other: &Field, flag: bool) { 1359 | self.n[0] = if flag { other.n[0] } else { self.n[0] }; 1360 | self.n[1] = if flag { other.n[1] } else { self.n[1] }; 1361 | self.n[2] = if flag { other.n[2] } else { self.n[2] }; 1362 | self.n[3] = if flag { other.n[3] } else { self.n[3] }; 1363 | self.n[4] = if flag { other.n[4] } else { self.n[4] }; 1364 | self.n[5] = if flag { other.n[5] } else { self.n[5] }; 1365 | self.n[6] = if flag { other.n[6] } else { self.n[6] }; 1366 | self.n[7] = if flag { other.n[7] } else { self.n[7] }; 1367 | self.n[8] = if flag { other.n[8] } else { self.n[8] }; 1368 | self.n[9] = if flag { other.n[9] } else { self.n[9] }; 1369 | self.magnitude = if flag { other.magnitude } else { self.magnitude }; 1370 | self.normalized = if flag { other.normalized } else { self.normalized }; 1371 | } 1372 | } 1373 | 1374 | impl Default for Field { 1375 | fn default() -> Field { 1376 | Self { 1377 | n: [0u32; 10], 1378 | magnitude: 0, 1379 | normalized: true, 1380 | } 1381 | } 1382 | } 1383 | 1384 | impl Add for Field { 1385 | type Output = Field; 1386 | fn add(self, other: Field) -> Field { 1387 | let mut ret = self.clone(); 1388 | ret.add_assign(&other); 1389 | ret 1390 | } 1391 | } 1392 | 1393 | impl<'a, 'b> Add<&'a Field> for &'b Field { 1394 | type Output = Field; 1395 | fn add(self, other: &'a Field) -> Field { 1396 | let mut ret = self.clone(); 1397 | ret.add_assign(other); 1398 | ret 1399 | } 1400 | } 1401 | 1402 | impl<'a> AddAssign<&'a Field> for Field { 1403 | fn add_assign(&mut self, other: &'a Field) { 1404 | self.n[0] += other.n[0]; 1405 | self.n[1] += other.n[1]; 1406 | self.n[2] += other.n[2]; 1407 | self.n[3] += other.n[3]; 1408 | self.n[4] += other.n[4]; 1409 | self.n[5] += other.n[5]; 1410 | self.n[6] += other.n[6]; 1411 | self.n[7] += other.n[7]; 1412 | self.n[8] += other.n[8]; 1413 | self.n[9] += other.n[9]; 1414 | 1415 | self.magnitude += other.magnitude; 1416 | self.normalized = false; 1417 | 
debug_assert!(self.verify()); 1418 | } 1419 | } 1420 | 1421 | impl AddAssign for Field { 1422 | fn add_assign(&mut self, other: Field) { 1423 | self.add_assign(&other) 1424 | } 1425 | } 1426 | 1427 | impl Mul for Field { 1428 | type Output = Field; 1429 | fn mul(self, other: Field) -> Field { 1430 | let mut ret = Field::default(); 1431 | ret.mul_in_place(&self, &other); 1432 | ret 1433 | } 1434 | } 1435 | 1436 | impl<'a, 'b> Mul<&'a Field> for &'b Field { 1437 | type Output = Field; 1438 | fn mul(self, other: &'a Field) -> Field { 1439 | let mut ret = Field::default(); 1440 | ret.mul_in_place(self, other); 1441 | ret 1442 | } 1443 | } 1444 | 1445 | impl<'a> MulAssign<&'a Field> for Field { 1446 | fn mul_assign(&mut self, other: &'a Field) { 1447 | let mut ret = Field::default(); 1448 | ret.mul_in_place(self, other); 1449 | *self = ret; 1450 | } 1451 | } 1452 | 1453 | impl MulAssign for Field { 1454 | fn mul_assign(&mut self, other: Field) { 1455 | self.mul_assign(&other) 1456 | } 1457 | } 1458 | 1459 | impl PartialEq for Field { 1460 | fn eq(&self, other: &Field) -> bool { 1461 | let mut na = self.neg(1); 1462 | na += other; 1463 | return na.normalizes_to_zero(); 1464 | } 1465 | } 1466 | 1467 | impl Eq for Field { } 1468 | 1469 | impl Ord for Field { 1470 | fn cmp(&self, other: &Field) -> Ordering { 1471 | self.cmp_var(other) 1472 | } 1473 | } 1474 | 1475 | impl PartialOrd for Field { 1476 | fn partial_cmp(&self, other: &Field) -> Option { 1477 | Some(self.cmp(other)) 1478 | } 1479 | } 1480 | 1481 | #[derive(Debug, Clone, Eq, PartialEq)] 1482 | /// Compact field element storage. 
1483 | pub struct FieldStorage(pub [u32; 8]); 1484 | 1485 | impl Default for FieldStorage { 1486 | fn default() -> FieldStorage { 1487 | FieldStorage([0; 8]) 1488 | } 1489 | } 1490 | 1491 | impl FieldStorage { 1492 | pub fn new( 1493 | d7: u32, d6: u32, d5: u32, d4: u32, d3: u32, d2: u32, d1: u32, d0: u32 1494 | ) -> Self { 1495 | field_storage_const!(d7, d6, d5, d4, d3, d2, d1, d0) 1496 | } 1497 | 1498 | pub fn cmov(&mut self, other: &FieldStorage, flag: bool) { 1499 | self.0[0] = if flag { other.0[0] } else { self.0[0] }; 1500 | self.0[1] = if flag { other.0[1] } else { self.0[1] }; 1501 | self.0[2] = if flag { other.0[2] } else { self.0[2] }; 1502 | self.0[3] = if flag { other.0[3] } else { self.0[3] }; 1503 | self.0[4] = if flag { other.0[4] } else { self.0[4] }; 1504 | self.0[5] = if flag { other.0[5] } else { self.0[5] }; 1505 | self.0[6] = if flag { other.0[6] } else { self.0[6] }; 1506 | self.0[7] = if flag { other.0[7] } else { self.0[7] }; 1507 | } 1508 | } 1509 | 1510 | impl From for Field { 1511 | fn from(a: FieldStorage) -> Field { 1512 | let mut r = Field::default(); 1513 | 1514 | r.n[0] = a.0[0] & 0x3FFFFFF; 1515 | r.n[1] = a.0[0] >> 26 | ((a.0[1] << 6) & 0x3FFFFFF); 1516 | r.n[2] = a.0[1] >> 20 | ((a.0[2] << 12) & 0x3FFFFFF); 1517 | r.n[3] = a.0[2] >> 14 | ((a.0[3] << 18) & 0x3FFFFFF); 1518 | r.n[4] = a.0[3] >> 8 | ((a.0[4] << 24) & 0x3FFFFFF); 1519 | r.n[5] = (a.0[4] >> 2) & 0x3FFFFFF; 1520 | r.n[6] = a.0[4] >> 28 | ((a.0[5] << 4) & 0x3FFFFFF); 1521 | r.n[7] = a.0[5] >> 22 | ((a.0[6] << 10) & 0x3FFFFFF); 1522 | r.n[8] = a.0[6] >> 16 | ((a.0[7] << 16) & 0x3FFFFFF); 1523 | r.n[9] = a.0[7] >> 10; 1524 | 1525 | r.magnitude = 1; 1526 | r.normalized = true; 1527 | 1528 | r 1529 | } 1530 | } 1531 | 1532 | impl Into for Field { 1533 | fn into(self) -> FieldStorage { 1534 | debug_assert!(self.normalized); 1535 | let mut r = FieldStorage::default(); 1536 | 1537 | r.0[0] = self.n[0] | self.n[1] << 26; 1538 | r.0[1] = self.n[1] >> 6 | self.n[2] << 20; 1539 | 
r.0[2] = self.n[2] >> 12 | self.n[3] << 14; 1540 | r.0[3] = self.n[3] >> 18 | self.n[4] << 8; 1541 | r.0[4] = self.n[4] >> 24 | self.n[5] << 2 | self.n[6] << 28; 1542 | r.0[5] = self.n[6] >> 4 | self.n[7] << 22; 1543 | r.0[6] = self.n[7] >> 10 | self.n[8] << 16; 1544 | r.0[7] = self.n[8] >> 16 | self.n[9] << 10; 1545 | 1546 | r 1547 | } 1548 | } 1549 | -------------------------------------------------------------------------------- /src/group.rs: -------------------------------------------------------------------------------- 1 | use field::{Field, FieldStorage}; 2 | 3 | #[macro_export] 4 | /// Define an affine group element constant. 5 | macro_rules! affine_const { 6 | ($x: expr, $y: expr) => { 7 | $crate::curve::Affine { 8 | x: $x, y: $y, infinity: false, 9 | } 10 | } 11 | } 12 | 13 | #[macro_export] 14 | /// Define a jacobian group element constant. 15 | macro_rules! jacobian_const { 16 | ($x: expr, $y: expr) => { 17 | $crate::curve::Jacobian { 18 | x: $x, y: $y, infinity: false, 19 | z: field_const!(0, 0, 0, 0, 0, 0, 0, 1), 20 | } 21 | } 22 | } 23 | 24 | #[macro_export] 25 | /// Define an affine group storage constant. 26 | macro_rules! affine_storage_const { 27 | ($x: expr, $y: expr) => { 28 | $crate::curve::AffineStorage { 29 | x: $x, y: $y, 30 | } 31 | } 32 | } 33 | 34 | #[derive(Debug, Clone, Eq, PartialEq)] 35 | /// A group element of the secp256k1 curve, in affine coordinates. 36 | pub struct Affine { 37 | pub x: Field, 38 | pub y: Field, 39 | pub infinity: bool, 40 | } 41 | 42 | #[derive(Debug, Clone, Eq, PartialEq)] 43 | /// A group element of the secp256k1 curve, in jacobian coordinates. 44 | pub struct Jacobian { 45 | pub x: Field, 46 | pub y: Field, 47 | pub z: Field, 48 | pub infinity: bool, 49 | } 50 | 51 | #[derive(Debug, Clone, Eq, PartialEq)] 52 | /// Affine coordinate group element compact storage. 
pub struct AffineStorage {
    pub x: FieldStorage,
    pub y: FieldStorage,
}

impl Default for Affine {
    // NOTE(review): the default is (0, 0) with infinity == false,
    // which is NOT a valid curve point; callers are expected to
    // overwrite it before use.
    fn default() -> Affine {
        Affine {
            x: Field::default(),
            y: Field::default(),
            infinity: false,
        }
    }
}

impl Default for Jacobian {
    fn default() -> Jacobian {
        Jacobian {
            x: Field::default(),
            y: Field::default(),
            z: Field::default(),
            infinity: false,
        }
    }
}

impl Default for AffineStorage {
    fn default() -> AffineStorage {
        AffineStorage {
            x: FieldStorage::default(),
            y: FieldStorage::default(),
        }
    }
}

/// The affine point at infinity.
pub static AFFINE_INFINITY: Affine = Affine {
    x: field_const!(0, 0, 0, 0, 0, 0, 0, 0),
    y: field_const!(0, 0, 0, 0, 0, 0, 0, 0),
    infinity: true,
};

/// The jacobian point at infinity.
pub static JACOBIAN_INFINITY: Jacobian = Jacobian {
    x: field_const!(0, 0, 0, 0, 0, 0, 0, 0),
    y: field_const!(0, 0, 0, 0, 0, 0, 0, 0),
    z: field_const!(0, 0, 0, 0, 0, 0, 0, 0),
    infinity: true,
};

/// The secp256k1 generator point G.
pub static AFFINE_G: Affine = affine_const!(
    field_const!(
        0x79BE667E, 0xF9DCBBAC, 0x55A06295, 0xCE870B07,
        0x029BFCDB, 0x2DCE28D9, 0x59F2815B, 0x16F81798
    ),
    field_const!(
        0x483ADA77, 0x26A3C465, 0x5DA4FBFC, 0x0E1108A8,
        0xFD17B448, 0xA6855419, 0x9C47D08F, 0xFB10D4B8
    )
);

/// The constant term b of the curve equation y^2 = x^3 + b.
pub const CURVE_B: u32 = 7;

impl Affine {
    /// Set a group element equal to the point with given X and Y
    /// coordinates.
    pub fn set_xy(&mut self, x: &Field, y: &Field) {
        self.infinity = false;
        self.x = x.clone();
        self.y = y.clone();
    }

    /// Set a group element (affine) equal to the point with the given
    /// X coordinate and a Y coordinate that is a quadratic residue
    /// modulo p. The return value is true iff a coordinate with the
    /// given X coordinate exists.
    pub fn set_xquad(&mut self, x: &Field) -> bool {
        self.x = x.clone();
        let x2 = x.sqr();
        let x3 = x * &x2;
        self.infinity = false;
        // y = sqrt(x^3 + b); `ret` reports whether the root exists.
        let mut c = Field::default();
        c.set_int(CURVE_B);
        c += &x3;
        let (v, ret) = c.sqrt();
        self.y = v;
        ret
    }

    /// Set a group element (affine) equal to the point with the given
    /// X coordinate, and given oddness for Y. Return value indicates
    /// whether the result is valid.
    pub fn set_xo_var(&mut self, x: &Field, odd: bool) -> bool {
        if !self.set_xquad(x) {
            return false;
        }
        self.y.normalize_var();
        // Flip to the other root if the parity does not match.
        if self.y.is_odd() != odd {
            self.y = self.y.neg(1);
        }
        return true;
    }

    /// Check whether a group element is the point at infinity.
    pub fn is_infinity(&self) -> bool {
        self.infinity
    }

    /// Check whether a group element is valid (i.e., on the curve).
    pub fn is_valid_var(&self) -> bool {
        if self.is_infinity() {
            return false;
        }
        // Valid iff y^2 == x^3 + b.
        let y2 = self.y.sqr();
        let mut x3 = self.x.sqr();
        x3 *= &self.x;
        let mut c = Field::default();
        c.set_int(CURVE_B);
        x3 += &c;
        x3.normalize_weak();
        y2.eq_var(&x3)
    }

    /// Set self to the negation (mirror around the X axis) of other.
    pub fn neg_in_place(&mut self, other: &Affine) {
        *self = other.clone();
        self.y.normalize_weak();
        self.y = self.y.neg(1);
    }

    /// Return the negation of this point.
    pub fn neg(&self) -> Affine {
        let mut ret = Affine::default();
        ret.neg_in_place(self);
        ret
    }

    /// Set a group element equal to another which is given in
    /// jacobian coordinates.
188 | pub fn set_gej(&mut self, a: &Jacobian) { 189 | self.infinity = a.infinity; 190 | let mut a = a.clone(); 191 | a.z = a.z.inv(); 192 | let z2 = a.z.sqr(); 193 | let z3 = &a.z * &z2; 194 | a.x *= &z2; 195 | a.y *= &z3; 196 | a.z.set_int(1); 197 | self.x = a.x; 198 | self.y = a.y; 199 | } 200 | 201 | pub fn set_gej_var(&mut self, a: &Jacobian) { 202 | let mut a = a.clone(); 203 | self.infinity = a.infinity; 204 | if a.is_infinity() { 205 | return; 206 | } 207 | a.z = a.z.inv_var(); 208 | let z2 = a.z.sqr(); 209 | let z3 = &a.z * &z2; 210 | a.x *= &z2; 211 | a.y *= &z3; 212 | a.z.set_int(1); 213 | self.x = a.x; 214 | self.y = a.y; 215 | } 216 | 217 | pub fn set_gej_zinv(&mut self, a: &Jacobian, zi: &Field) { 218 | let zi2 = zi.sqr(); 219 | let zi3 = &zi2 * &zi; 220 | self.x = &a.x * &zi2; 221 | self.y = &a.y * &zi3; 222 | self.infinity = a.infinity; 223 | } 224 | 225 | /// Clear a secp256k1_ge to prevent leaking sensitive information. 226 | pub fn clear(&mut self) { 227 | self.infinity = false; 228 | self.x.clear(); 229 | self.y.clear(); 230 | } 231 | } 232 | 233 | pub fn set_table_gej_var(r: &mut [Affine], a: &[Jacobian], zr: &[Field]) { 234 | debug_assert!(r.len() == a.len()); 235 | 236 | let mut i = r.len() - 1; 237 | let mut zi: Field; 238 | 239 | if r.len() > 0 { 240 | zi = a[i].z.inv(); 241 | r[i].set_gej_zinv(&a[i], &zi); 242 | 243 | while i > 0 { 244 | zi *= &zr[i]; 245 | i -= 1; 246 | r[i].set_gej_zinv(&a[i], &zi); 247 | } 248 | } 249 | } 250 | 251 | pub fn globalz_set_table_gej( 252 | r: &mut [Affine], globalz: &mut Field, a: &[Jacobian], zr: &[Field] 253 | ) { 254 | debug_assert!(r.len() == a.len() && a.len() == zr.len()); 255 | 256 | let mut i = r.len() - 1; 257 | let mut zs: Field; 258 | 259 | if r.len() > 0 { 260 | r[i].x = a[i].x.clone(); 261 | r[i].y = a[i].y.clone(); 262 | *globalz = a[i].z.clone(); 263 | r[i].infinity = false; 264 | zs = zr[i].clone(); 265 | 266 | while i > 0 { 267 | if i != r.len() - 1 { 268 | zs *= &zr[i]; 269 | } 270 | i -= 
1; 271 | r[i].set_gej_zinv(&a[i], &zs); 272 | } 273 | } 274 | } 275 | 276 | impl Jacobian { 277 | /// Set a group element (jacobian) equal to the point at infinity. 278 | pub fn set_infinity(&mut self) { 279 | self.infinity = true; 280 | self.x.clear(); 281 | self.y.clear(); 282 | self.z.clear(); 283 | } 284 | 285 | /// Set a group element (jacobian) equal to another which is given 286 | /// in affine coordinates. 287 | pub fn set_ge(&mut self, a: &Affine) { 288 | self.infinity = a.infinity; 289 | self.x = a.x.clone(); 290 | self.y = a.y.clone(); 291 | self.z.set_int(1); 292 | } 293 | 294 | /// Compare the X coordinate of a group element (jacobian). 295 | pub fn eq_x_var(&self, x: &Field) -> bool { 296 | debug_assert!(!self.is_infinity()); 297 | let mut r = self.z.sqr(); 298 | r *= x; 299 | let mut r2 = self.x.clone(); 300 | r2.normalize_weak(); 301 | return r.eq_var(&r2); 302 | } 303 | 304 | /// Set r equal to the inverse of a (i.e., mirrored around the X 305 | /// axis). 306 | pub fn neg_in_place(&mut self, a: &Jacobian) { 307 | self.infinity = a.infinity; 308 | self.x = a.x.clone(); 309 | self.y = a.y.clone(); 310 | self.z = a.z.clone(); 311 | self.y.normalize_weak(); 312 | self.y = self.y.neg(1); 313 | } 314 | 315 | pub fn neg(&self) -> Jacobian { 316 | let mut ret = Jacobian::default(); 317 | ret.neg_in_place(self); 318 | ret 319 | } 320 | 321 | /// Check whether a group element is the point at infinity. 322 | pub fn is_infinity(&self) -> bool { 323 | self.infinity 324 | } 325 | 326 | /// Check whether a group element's y coordinate is a quadratic residue. 327 | pub fn has_quad_y_var(&self) -> bool { 328 | if self.infinity { 329 | return false; 330 | } 331 | 332 | let yz = &self.y * &self.z; 333 | return yz.is_quad_var(); 334 | } 335 | 336 | /// Set r equal to the double of a. If rzr is not-NULL, r->z = 337 | /// a->z * *rzr (where infinity means an implicit z = 0). a may 338 | /// not be zero. Constant time. 
339 | pub fn double_nonzero_in_place(&mut self, a: &Jacobian, rzr: Option<&mut Field>) { 340 | debug_assert!(!self.is_infinity()); 341 | self.double_var_in_place(a, rzr); 342 | } 343 | 344 | /// Set r equal to the double of a. If rzr is not-NULL, r->z = 345 | /// a->z * *rzr (where infinity means an implicit z = 0). 346 | pub fn double_var_in_place(&mut self, a: &Jacobian, rzr: Option<&mut Field>) { 347 | self.infinity = a.infinity; 348 | if self.infinity { 349 | if let Some(rzr) = rzr { 350 | rzr.set_int(1); 351 | } 352 | return; 353 | } 354 | 355 | if let Some(rzr) = rzr { 356 | *rzr = a.y.clone(); 357 | rzr.normalize_weak(); 358 | rzr.mul_int(2); 359 | } 360 | 361 | self.z = &a.z * &a.y; 362 | self.z.mul_int(2); 363 | let mut t1 = a.x.sqr(); 364 | t1.mul_int(3); 365 | let mut t2 = t1.sqr(); 366 | let mut t3 = a.y.sqr(); 367 | t3.mul_int(2); 368 | let mut t4 = t3.sqr(); 369 | t4.mul_int(2); 370 | t3 *= &a.x; 371 | self.x = t3.clone(); 372 | self.x.mul_int(4); 373 | self.x = self.x.neg(4); 374 | self.x += &t2; 375 | t2 = t2.neg(1); 376 | t3.mul_int(6); 377 | t3 += &t2; 378 | self.y = &t1 * &t3; 379 | t2 = t4.neg(2); 380 | self.y += &t2; 381 | } 382 | 383 | pub fn double_var(&self, rzr: Option<&mut Field>) -> Jacobian { 384 | let mut ret = Jacobian::default(); 385 | ret.double_var_in_place(&self, rzr); 386 | ret 387 | } 388 | 389 | /// Set r equal to the sum of a and b. If rzr is non-NULL, r->z = 390 | /// a->z * *rzr (a cannot be infinity in that case). 
    pub fn add_var_in_place(&mut self, a: &Jacobian, b: &Jacobian, rzr: Option<&mut Field>) {
        // Infinity short-circuits: a + inf = a, inf + b = b.
        if a.is_infinity() {
            debug_assert!(rzr.is_none());
            *self = b.clone();
            return;
        }
        if b.is_infinity() {
            if let Some(rzr) = rzr {
                rzr.set_int(1);
            }
            *self = a.clone();
            return;
        }

        self.infinity = false;
        // Bring both points to a common denominator:
        // u1 = X1*Z2^2, u2 = X2*Z1^2, s1 = Y1*Z2^3, s2 = Y2*Z1^3.
        let z22 = b.z.sqr();
        let z12 = a.z.sqr();
        let u1 = &a.x * &z22;
        let u2 = &b.x * &z12;
        let mut s1 = &a.y * &z22; s1 *= &b.z;
        let mut s2 = &b.y * &z12; s2 *= &a.z;
        // h = u2 - u1, i = s2 - s1.
        let mut h = u1.neg(1); h += &u2;
        let mut i = s1.neg(1); i += &s2;
        if h.normalizes_to_zero_var() {
            if i.normalizes_to_zero_var() {
                // Same X and same Y: this is a doubling.
                self.double_var_in_place(a, rzr);
            } else {
                // Same X, opposite Y: result is infinity.
                if let Some(rzr) = rzr {
                    rzr.set_int(0);
                }
                self.infinity = true;
            }
            return;
        }
        let i2 = i.sqr();
        let h2 = h.sqr();
        let mut h3 = &h * &h2;
        // The z-ratio is h * Z2 (so r.z = a.z * (h * b.z)).
        h *= &b.z;
        if let Some(rzr) = rzr {
            *rzr = h.clone();
        }
        self.z = &a.z * &h;
        // X3 = i^2 - h^3 - 2*u1*h^2; Y3 = i*(u1*h^2 - X3) - s1*h^3.
        // The neg(..) magnitude arguments track limb magnitudes, so
        // the exact statement order matters.
        let t = &u1 * &h2;
        self.x = t.clone(); self.x.mul_int(2); self.x += &h3;
        self.x = self.x.neg(3); self.x += &i2;
        self.y = self.x.neg(5); self.y += &t; self.y *= &i;
        h3 *= &s1; h3 = h3.neg(1);
        self.y += &h3;
    }

    /// Return self + b; see `add_var_in_place`.
    pub fn add_var(&self, b: &Jacobian, rzr: Option<&mut Field>) -> Jacobian {
        let mut ret = Jacobian::default();
        ret.add_var_in_place(self, b, rzr);
        ret
    }

    /// Set r equal to the sum of a and b (with b given in affine
    /// coordinates, and not infinity).
449 | pub fn add_ge_in_place(&mut self, a: &Jacobian, b: &Affine) { 450 | const FE1: Field = field_const!(0, 0, 0, 0, 0, 0, 0, 0); 451 | 452 | debug_assert!(!b.infinity); 453 | 454 | let zz = a.z.sqr(); 455 | let mut u1 = a.x.clone(); u1.normalize_weak(); 456 | let u2 = &b.x * &zz; 457 | let mut s1 = a.y.clone(); s1.normalize_weak(); 458 | let mut s2 = &b.y * &zz; 459 | s2 *= &a.z; 460 | let mut t = u1.clone(); t += &u2; 461 | let mut m = s1.clone(); m += &s2; 462 | let mut rr = t.sqr(); 463 | let mut m_alt = u2.neg(1); 464 | let tt = &u1 * &m_alt; 465 | rr += &tt; 466 | let degenerate = m.normalizes_to_zero() && rr.normalizes_to_zero(); 467 | let mut rr_alt = s1.clone(); 468 | rr_alt.mul_int(2); 469 | m_alt += &u1; 470 | 471 | rr_alt.cmov(&rr, !degenerate); 472 | m_alt.cmov(&m, !degenerate); 473 | 474 | let mut n = m_alt.sqr(); 475 | let mut q = &n * &t; 476 | 477 | n = n.sqr(); 478 | n.cmov(&m, degenerate); 479 | t = rr_alt.sqr(); 480 | self.z = &a.z * &m_alt; 481 | let infinity = { 482 | let p = self.z.normalizes_to_zero(); 483 | let q = a.infinity; 484 | 485 | match (p, q) { 486 | (true, true) => false, 487 | (true, false) => true, 488 | (false, true) => false, 489 | (false, false) => false, 490 | } 491 | }; 492 | self.z.mul_int(2); 493 | q = q.neg(1); 494 | t += &q; 495 | t.normalize_weak(); 496 | self.x = t.clone(); 497 | t.mul_int(2); 498 | t += &q; 499 | t *= &rr_alt; 500 | t += &n; 501 | self.y = t.neg(3); 502 | self.y.normalize_weak(); 503 | self.x.mul_int(4); 504 | self.y.mul_int(4); 505 | 506 | self.x.cmov(&b.x, a.infinity); 507 | self.y.cmov(&b.y, a.infinity); 508 | self.z.cmov(&FE1, a.infinity); 509 | self.infinity = infinity; 510 | } 511 | 512 | pub fn add_ge(&self, b: &Affine) -> Jacobian { 513 | let mut ret = Jacobian::default(); 514 | ret.add_ge_in_place(self, b); 515 | ret 516 | } 517 | 518 | /// Set r equal to the sum of a and b (with b given in affine 519 | /// coordinates). This is more efficient than 520 | /// secp256k1_gej_add_var. 
    /// It is identical to secp256k1_gej_add_ge
    /// but without constant-time guarantee, and b is allowed to be
    /// infinity. If rzr is non-NULL, r->z = a->z * *rzr (a cannot be
    /// infinity in that case).
    pub fn add_ge_var_in_place(&mut self, a: &Jacobian, b: &Affine, rzr: Option<&mut Field>) {
        // Infinity short-circuits.
        if a.is_infinity() {
            debug_assert!(rzr.is_none());
            self.set_ge(b);
            return;
        }
        if b.is_infinity() {
            if let Some(rzr) = rzr {
                rzr.set_int(1);
            }
            *self = a.clone();
            return;
        }
        self.infinity = false;

        // Mixed addition with b.z == 1:
        // u1 = X1, u2 = X2*Z1^2, s1 = Y1, s2 = Y2*Z1^3.
        let z12 = a.z.sqr();
        let mut u1 = a.x.clone(); u1.normalize_weak();
        let u2 = &b.x * &z12;
        let mut s1 = a.y.clone(); s1.normalize_weak();
        let mut s2 = &b.y * &z12; s2 *= &a.z;
        let mut h = u1.neg(1); h += &u2;
        let mut i = s1.neg(1); i += &s2;
        if h.normalizes_to_zero_var() {
            if i.normalizes_to_zero_var() {
                // Same point: doubling.
                self.double_var_in_place(a, rzr);
            } else {
                // Opposite points: infinity.
                if let Some(rzr) = rzr {
                    rzr.set_int(0);
                }
                self.infinity = true;
            }
            return;
        }
        let i2 = i.sqr();
        let h2 = h.sqr();
        let mut h3 = &h * &h2;
        // With b.z == 1 the z-ratio is simply h.
        if let Some(rzr) = rzr {
            *rzr = h.clone();
        }
        self.z = &a.z * &h;
        // Same magnitude-tracked formulas as in `add_var_in_place`.
        let t = &u1 * &h2;
        self.x = t.clone(); self.x.mul_int(2); self.x += &h3;
        self.x = self.x.neg(3); self.x += &i2;
        self.y = self.x.neg(5); self.y += &t; self.y *= &i;
        h3 *= &s1; h3 = h3.neg(1);
        self.y += &h3;
    }

    /// Return self + b (b affine, variable time); see
    /// `add_ge_var_in_place`.
    pub fn add_ge_var(&self, b: &Affine, rzr: Option<&mut Field>) -> Jacobian {
        let mut ret = Jacobian::default();
        ret.add_ge_var_in_place(&self, b, rzr);
        ret
    }

    /// Set r equal to the sum of a and b (with the inverse of b's Z
    /// coordinate passed as bzinv).
580 | pub fn add_zinv_var_in_place(&mut self, a: &Jacobian, b: &Affine, bzinv: &Field) { 581 | if b.is_infinity() { 582 | *self = a.clone(); 583 | return; 584 | } 585 | if a.is_infinity() { 586 | self.infinity = b.infinity; 587 | let bzinv2 = bzinv.sqr(); 588 | let bzinv3 = &bzinv2 * bzinv; 589 | self.x = &b.x * &bzinv2; 590 | self.y = &b.y * &bzinv3; 591 | self.z.set_int(1); 592 | return; 593 | } 594 | self.infinity = false; 595 | 596 | let az = &a.z * &bzinv; 597 | let z12 = az.sqr(); 598 | let mut u1 = a.x.clone(); u1.normalize_weak(); 599 | let u2 = &b.x * &z12; 600 | let mut s1 = a.y.clone(); s1.normalize_weak(); 601 | let mut s2 = &b.y * &z12; s2 *= &az; 602 | let mut h = u1.neg(1); h += &u2; 603 | let mut i = s1.neg(1); i += &s2; 604 | if h.normalizes_to_zero_var() { 605 | if i.normalizes_to_zero_var() { 606 | self.double_var_in_place(a, None); 607 | } else { 608 | self.infinity = true; 609 | } 610 | return; 611 | } 612 | let i2 = i.sqr(); 613 | let h2 = h.sqr(); 614 | let mut h3 = &h * &h2; 615 | self.z = a.z.clone(); self.z *= &h; 616 | let t = &u1 * &h2; 617 | self.x = t.clone(); self.x.mul_int(2); self.x += &h3; 618 | self.x = self.x.neg(3); self.x += &i2; 619 | self.y = self.x.neg(5); self.y += &t; self.y *= &i; 620 | h3 *= &s1; h3 = h3.neg(1); 621 | self.y += &h3; 622 | } 623 | 624 | pub fn add_zinv_var(&mut self, b: &Affine, bzinv: &Field) -> Jacobian { 625 | let mut ret = Jacobian::default(); 626 | ret.add_zinv_var_in_place(&self, b, bzinv); 627 | ret 628 | } 629 | 630 | /// Clear a secp256k1_gej to prevent leaking sensitive 631 | /// information. 632 | pub fn clear(&mut self) { 633 | self.infinity = false; 634 | self.x.clear(); 635 | self.y.clear(); 636 | self.z.clear(); 637 | } 638 | 639 | /// Rescale a jacobian point by b which must be 640 | /// non-zero. Constant-time. 
641 | pub fn rescale(&mut self, s: &Field) { 642 | debug_assert!(!s.is_zero()); 643 | let zz = s.sqr(); 644 | self.x *= &zz; 645 | self.y *= &zz; 646 | self.y *= s; 647 | self.z *= s; 648 | } 649 | } 650 | 651 | impl From for Affine { 652 | fn from(a: AffineStorage) -> Affine { 653 | affine_const!( 654 | a.x.into(), 655 | a.y.into() 656 | ) 657 | } 658 | } 659 | 660 | impl Into for Affine { 661 | fn into(mut self) -> AffineStorage { 662 | debug_assert!(!self.is_infinity()); 663 | self.x.normalize(); 664 | self.y.normalize(); 665 | affine_storage_const!( 666 | self.x.into(), 667 | self.y.into() 668 | ) 669 | } 670 | } 671 | 672 | impl AffineStorage { 673 | /// If flag is true, set *r equal to *a; otherwise leave 674 | /// it. Constant-time. 675 | pub fn cmov(&mut self, a: &AffineStorage, flag: bool) { 676 | self.x.cmov(&a.x, flag); 677 | self.y.cmov(&a.y, flag); 678 | } 679 | } 680 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Pure Rust implementation of the secp256k1 curve and fast ECDSA 2 | //! signatures. The secp256k1 curve is used excusively in Bitcoin and 3 | //! Ethereum alike cryptocurrencies. 
#![deny(unused_import_braces, unused_imports,
        unused_comparisons, unused_must_use,
        unused_variables, non_shorthand_field_patterns,
        unreachable_code, unused_parens)]

#![no_std]
extern crate hmac_drbg;
extern crate typenum;
extern crate digest;
extern crate sha2;
extern crate rand;

// `field` must come first and be `#[macro_use]` so that
// `field_const!` is visible to `group`.
#[macro_use]
mod field;
#[macro_use]
mod group;
mod scalar;
mod ecmult;
mod ecdsa;
mod ecdh;
mod error;

use hmac_drbg::HmacDRBG;
use sha2::Sha256;
use typenum::U32;

use field::Field;
use group::{Affine, Jacobian};
use scalar::Scalar;

use ecmult::{ECMULT_CONTEXT, ECMULT_GEN_CONTEXT};

use rand::Rng;

pub use error::Error;

/// Curve related structs.
pub mod curve {
    pub use field::Field;
    pub use group::{Affine, Jacobian, AffineStorage, AFFINE_G, CURVE_B};
    pub use scalar::Scalar;

    pub use ecmult::{ECMultContext, ECMultGenContext,
                     ECMULT_CONTEXT, ECMULT_GEN_CONTEXT};
}

/// Utilities to manipulate the secp256k1 curve parameters.
pub mod util {
    // SEC 1 public-key serialization prefix bytes.
    pub const TAG_PUBKEY_EVEN: u8 = 0x02;
    pub const TAG_PUBKEY_ODD: u8 = 0x03;
    pub const TAG_PUBKEY_UNCOMPRESSED: u8 = 0x04;
    pub const TAG_PUBKEY_HYBRID_EVEN: u8 = 0x06;
    pub const TAG_PUBKEY_HYBRID_ODD: u8 = 0x07;

    pub use group::{AFFINE_INFINITY, JACOBIAN_INFINITY,
                    set_table_gej_var, globalz_set_table_gej};
    pub use ecmult::{WINDOW_A, WINDOW_G, ECMULT_TABLE_SIZE_A, ECMULT_TABLE_SIZE_G,
                     odd_multiples_table};
}

#[derive(Debug, Clone, Eq, PartialEq)]
/// Public key on a secp256k1 curve.
pub struct PublicKey(Affine);
#[derive(Debug, Clone, Eq, PartialEq)]
/// Secret key (256-bit) on a secp256k1 curve.
pub struct SecretKey(Scalar);
#[derive(Debug, Clone, Eq, PartialEq)]
/// An ECDSA signature.
73 | pub struct Signature { 74 | pub r: Scalar, 75 | pub s: Scalar 76 | } 77 | #[derive(Debug, Clone, Copy, Eq, PartialEq)] 78 | /// Tag used for public key recovery from signatures. 79 | pub struct RecoveryId(u8); 80 | #[derive(Debug, Clone, Eq, PartialEq)] 81 | /// Hashed message input to an ECDSA signature. 82 | pub struct Message(pub Scalar); 83 | #[derive(Debug, Clone, Eq, PartialEq)] 84 | /// Shared secret using ECDH. 85 | pub struct SharedSecret([u8; 32]); 86 | 87 | impl PublicKey { 88 | pub fn from_secret_key(seckey: &SecretKey) -> PublicKey { 89 | let mut pj = Jacobian::default(); 90 | ECMULT_GEN_CONTEXT.ecmult_gen(&mut pj, &seckey.0); 91 | let mut p = Affine::default(); 92 | p.set_gej(&pj); 93 | PublicKey(p) 94 | } 95 | 96 | pub fn parse(p: &[u8; 65]) -> Result { 97 | use util::{TAG_PUBKEY_HYBRID_EVEN, TAG_PUBKEY_HYBRID_ODD}; 98 | 99 | if !(p[0] == 0x04 || p[0] == 0x06 || p[0] == 0x07) { 100 | return Err(Error::InvalidPublicKey); 101 | } 102 | let mut x = Field::default(); 103 | let mut y = Field::default(); 104 | let mut data = [0u8; 32]; 105 | for i in 0..32 { 106 | data[i] = p[i+1]; 107 | } 108 | if !x.set_b32(&data) { 109 | return Err(Error::InvalidPublicKey); 110 | } 111 | for i in 0..32 { 112 | data[i] = p[i+33]; 113 | } 114 | if !y.set_b32(&data) { 115 | return Err(Error::InvalidPublicKey); 116 | } 117 | let mut elem = Affine::default(); 118 | elem.set_xy(&x, &y); 119 | if (p[0] == TAG_PUBKEY_HYBRID_EVEN || p[0] == TAG_PUBKEY_HYBRID_ODD) && 120 | (y.is_odd() != (p[0] == TAG_PUBKEY_HYBRID_ODD)) 121 | { 122 | return Err(Error::InvalidPublicKey); 123 | } 124 | if elem.is_infinity() { 125 | return Err(Error::InvalidPublicKey); 126 | } 127 | if elem.is_valid_var() { 128 | return Ok(PublicKey(elem)); 129 | } else { 130 | return Err(Error::InvalidPublicKey); 131 | } 132 | } 133 | 134 | pub fn serialize(&self) -> [u8; 65] { 135 | use util::TAG_PUBKEY_UNCOMPRESSED; 136 | 137 | debug_assert!(!self.0.is_infinity()); 138 | 139 | let mut ret = [0u8; 65]; 140 | 
let mut elem = self.0.clone(); 141 | 142 | elem.x.normalize_var(); 143 | elem.y.normalize_var(); 144 | let d = elem.x.b32(); 145 | for i in 0..32 { 146 | ret[1+i] = d[i]; 147 | } 148 | let d = elem.y.b32(); 149 | for i in 0..32 { 150 | ret[33+i] = d[i]; 151 | } 152 | ret[0] = TAG_PUBKEY_UNCOMPRESSED; 153 | 154 | ret 155 | } 156 | } 157 | 158 | impl Into for PublicKey { 159 | fn into(self) -> Affine { 160 | self.0 161 | } 162 | } 163 | 164 | impl SecretKey { 165 | pub fn parse(p: &[u8; 32]) -> Result { 166 | let mut elem = Scalar::default(); 167 | if !elem.set_b32(p) && !elem.is_zero() { 168 | Ok(SecretKey(elem)) 169 | } else { 170 | Err(Error::InvalidSecretKey) 171 | } 172 | } 173 | 174 | pub fn random(rng: &mut R) -> SecretKey { 175 | loop { 176 | let mut ret = [0u8; 32]; 177 | rng.fill_bytes(&mut ret); 178 | 179 | match Self::parse(&ret) { 180 | Ok(key) => return key, 181 | Err(_) => (), 182 | } 183 | } 184 | } 185 | 186 | pub fn serialize(&self) -> [u8; 32] { 187 | self.0.b32() 188 | } 189 | } 190 | 191 | impl Into for SecretKey { 192 | fn into(self) -> Scalar { 193 | self.0 194 | } 195 | } 196 | 197 | impl Signature { 198 | pub fn parse(p: &[u8; 64]) -> Signature { 199 | let mut r = Scalar::default(); 200 | let mut s = Scalar::default(); 201 | 202 | let mut data = [0u8; 32]; 203 | for i in 0..32 { 204 | data[i] = p[i]; 205 | } 206 | r.set_b32(&data); 207 | for i in 0..32 { 208 | data[i] = p[i+32]; 209 | } 210 | s.set_b32(&data); 211 | 212 | Signature { r, s } 213 | } 214 | 215 | pub fn serialize(&self) -> [u8; 64] { 216 | let mut ret = [0u8; 64]; 217 | 218 | let ra = self.r.b32(); 219 | for i in 0..32 { 220 | ret[i] = ra[i]; 221 | } 222 | let sa = self.s.b32(); 223 | for i in 0..32 { 224 | ret[i+32] = sa[i]; 225 | } 226 | 227 | ret 228 | } 229 | } 230 | 231 | impl Message { 232 | pub fn parse(p: &[u8; 32]) -> Message { 233 | let mut m = Scalar::default(); 234 | m.set_b32(p); 235 | 236 | Message(m) 237 | } 238 | 239 | pub fn serialize(&self) -> [u8; 32] { 240 | 
self.0.b32() 241 | } 242 | } 243 | 244 | impl RecoveryId { 245 | pub fn parse(p: u8) -> Result { 246 | if p < 4 { 247 | Ok(RecoveryId(p)) 248 | } else { 249 | Err(Error::InvalidRecoveryId) 250 | } 251 | } 252 | 253 | pub fn serialize(&self) -> u8 { 254 | self.0 255 | } 256 | } 257 | 258 | impl Into for RecoveryId { 259 | fn into(self) -> u8 { 260 | self.0 261 | } 262 | } 263 | 264 | impl Into for RecoveryId { 265 | fn into(self) -> i32 { 266 | self.0 as i32 267 | } 268 | } 269 | 270 | impl SharedSecret { 271 | pub fn new(pubkey: &PublicKey, seckey: &SecretKey) -> Result { 272 | let inner = match ECMULT_CONTEXT.ecdh_raw(&pubkey.0, &seckey.0) { 273 | Some(val) => val, 274 | None => return Err(Error::InvalidSecretKey), 275 | }; 276 | 277 | Ok(SharedSecret(inner)) 278 | } 279 | } 280 | 281 | impl AsRef<[u8]> for SharedSecret { 282 | fn as_ref(&self) -> &[u8] { 283 | &self.0 284 | } 285 | } 286 | 287 | /// Check signature is a valid message signed by public key. 288 | pub fn verify(message: &Message, signature: &Signature, pubkey: &PublicKey) -> bool { 289 | ECMULT_CONTEXT.verify_raw(&signature.r, &signature.s, &pubkey.0, &message.0) 290 | } 291 | 292 | /// Recover public key from a signed message. 293 | pub fn recover(message: &Message, signature: &Signature, recovery_id: &RecoveryId) -> Result { 294 | ECMULT_CONTEXT.recover_raw(&signature.r, &signature.s, recovery_id.0, &message.0).map(|v| PublicKey(v)) 295 | } 296 | 297 | /// Sign a message using the secret key. 
298 | pub fn sign(message: &Message, seckey: &SecretKey) -> Result<(Signature, RecoveryId), Error> { 299 | let seckey_b32 = seckey.0.b32(); 300 | let message_b32 = message.0.b32(); 301 | 302 | let mut drbg = HmacDRBG::::new(&seckey_b32, &message_b32, &[]); 303 | let generated = drbg.generate::(None); 304 | let mut generated_arr = [0u8; 32]; 305 | for i in 0..32 { 306 | generated_arr[i] = generated[i]; 307 | } 308 | let mut nonce = Scalar::default(); 309 | let mut overflow = nonce.set_b32(&generated_arr); 310 | 311 | while overflow || nonce.is_zero() { 312 | let generated = drbg.generate::(None); 313 | let mut generated_arr = [0u8; 32]; 314 | for i in 0..32 { 315 | generated_arr[i] = generated[i]; 316 | } 317 | overflow = nonce.set_b32(&generated_arr); 318 | } 319 | 320 | let result = ECMULT_GEN_CONTEXT.sign_raw(&seckey.0, &message.0, &nonce); 321 | #[allow(unused_assignments)] 322 | { 323 | nonce = Scalar::default(); 324 | generated_arr = [0u8; 32]; 325 | } 326 | if let Ok((sigr, sigs, recid)) = result { 327 | return Ok((Signature { 328 | r: sigr, 329 | s: sigs, 330 | }, RecoveryId(recid))); 331 | } else { 332 | return Err(result.err().unwrap()); 333 | } 334 | } 335 | -------------------------------------------------------------------------------- /src/scalar.rs: -------------------------------------------------------------------------------- 1 | use core::ops::{Add, AddAssign, Mul, MulAssign}; 2 | 3 | const SECP256K1_N_0: u32 = 0xD0364141; 4 | const SECP256K1_N_1: u32 = 0xBFD25E8C; 5 | const SECP256K1_N_2: u32 = 0xAF48A03B; 6 | const SECP256K1_N_3: u32 = 0xBAAEDCE6; 7 | const SECP256K1_N_4: u32 = 0xFFFFFFFE; 8 | const SECP256K1_N_5: u32 = 0xFFFFFFFF; 9 | const SECP256K1_N_6: u32 = 0xFFFFFFFF; 10 | const SECP256K1_N_7: u32 = 0xFFFFFFFF; 11 | 12 | const SECP256K1_N_C_0: u32 = !SECP256K1_N_0 + 1; 13 | const SECP256K1_N_C_1: u32 = !SECP256K1_N_1; 14 | const SECP256K1_N_C_2: u32 = !SECP256K1_N_2; 15 | const SECP256K1_N_C_3: u32 = !SECP256K1_N_3; 16 | const 
SECP256K1_N_C_4: u32 = 1; 17 | 18 | const SECP256K1_N_H_0: u32 = 0x681B20A0; 19 | const SECP256K1_N_H_1: u32 = 0xDFE92F46; 20 | const SECP256K1_N_H_2: u32 = 0x57A4501D; 21 | const SECP256K1_N_H_3: u32 = 0x5D576E73; 22 | const SECP256K1_N_H_4: u32 = 0xFFFFFFFF; 23 | const SECP256K1_N_H_5: u32 = 0xFFFFFFFF; 24 | const SECP256K1_N_H_6: u32 = 0xFFFFFFFF; 25 | const SECP256K1_N_H_7: u32 = 0x7FFFFFFF; 26 | 27 | #[derive(Debug, Clone, Eq, PartialEq)] 28 | /// A 256-bit scalar value. 29 | pub struct Scalar(pub [u32; 8]); 30 | 31 | impl Scalar { 32 | /// Clear a scalar to prevent the leak of sensitive data. 33 | pub fn clear(&mut self) { 34 | self.0 = [0u32; 8]; 35 | } 36 | 37 | /// Set a scalar to an unsigned integer. 38 | pub fn set_int(&mut self, v: u32) { 39 | self.0 = [v, 0, 0, 0, 0, 0, 0, 0]; 40 | } 41 | 42 | /// Access bits from a scalar. All requested bits must belong to 43 | /// the same 32-bit limb. 44 | pub fn bits(&self, offset: usize, count: usize) -> u32 { 45 | debug_assert!((offset + count - 1) >> 5 == offset >> 5); 46 | (self.0[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1) 47 | } 48 | 49 | /// Access bits from a scalar. Not constant time. 50 | pub fn bits_var(&self, offset: usize, count: usize) -> u32 { 51 | debug_assert!(count < 32); 52 | debug_assert!(offset + count <= 256); 53 | if (offset + count - 1) >> 5 == offset >> 5 { 54 | return self.bits(offset, count); 55 | } else { 56 | debug_assert!((offset >> 5) + 1 < 8); 57 | return ((self.0[offset >> 5] >> (offset & 0x1f)) | (self.0[(offset >> 5) + 1] << (32 - (offset & 0x1f)))) & ((1 << count) - 1); 58 | } 59 | } 60 | 61 | fn check_overflow(&self) -> bool { 62 | let mut yes: bool = false; 63 | let mut no: bool = false; 64 | no = no || (self.0[7] < SECP256K1_N_7); /* No need for a > check. */ 65 | no = no || (self.0[6] < SECP256K1_N_6); /* No need for a > check. */ 66 | no = no || (self.0[5] < SECP256K1_N_5); /* No need for a > check. 
*/ 67 | no = no || (self.0[4] < SECP256K1_N_4); 68 | yes = yes || ((self.0[4] > SECP256K1_N_4) && !no); 69 | no = no || ((self.0[3] < SECP256K1_N_3) && !yes); 70 | yes = yes || ((self.0[3] > SECP256K1_N_3) && !no); 71 | no = no || ((self.0[2] < SECP256K1_N_2) && !yes); 72 | yes = yes || ((self.0[2] > SECP256K1_N_2) && !no); 73 | no = no || ((self.0[1] < SECP256K1_N_1) && !yes); 74 | yes = yes || ((self.0[1] > SECP256K1_N_1) && !no); 75 | yes = yes || ((self.0[0] >= SECP256K1_N_0) && !no); 76 | return yes; 77 | } 78 | 79 | fn reduce(&mut self, overflow: bool) -> bool { 80 | let o: u64 = if overflow { 1 } else { 0 }; 81 | let mut t: u64; 82 | t = (self.0[0] as u64) + o * (SECP256K1_N_C_0 as u64); 83 | self.0[0] = (t & 0xFFFFFFFF) as u32; t >>= 32; 84 | t += (self.0[1] as u64) + o * (SECP256K1_N_C_1 as u64); 85 | self.0[1] = (t & 0xFFFFFFFF) as u32; t >>= 32; 86 | t += (self.0[2] as u64) + o * (SECP256K1_N_C_2 as u64); 87 | self.0[2] = (t & 0xFFFFFFFF) as u32; t >>= 32; 88 | t += (self.0[3] as u64) + o * (SECP256K1_N_C_3 as u64); 89 | self.0[3] = (t & 0xFFFFFFFF) as u32; t >>= 32; 90 | t += (self.0[4] as u64) + o * (SECP256K1_N_C_4 as u64); 91 | self.0[4] = (t & 0xFFFFFFFF) as u32; t >>= 32; 92 | t += self.0[5] as u64; 93 | self.0[5] = (t & 0xFFFFFFFF) as u32; t >>= 32; 94 | t += self.0[6] as u64; 95 | self.0[6] = (t & 0xFFFFFFFF) as u32; t >>= 32; 96 | t += self.0[7] as u64; 97 | self.0[7] = (t & 0xFFFFFFFF) as u32; 98 | overflow 99 | } 100 | 101 | /// Add two scalars together (modulo the group order). Returns 102 | /// whether it overflowed. 
103 | pub fn add_in_place(&mut self, a: &Scalar, b: &Scalar) -> bool { 104 | let overflow: u64; 105 | let mut t: u64 = (a.0[0] as u64) + (b.0[0] as u64); 106 | self.0[0] = (t & 0xFFFFFFFF) as u32; t >>= 32; 107 | t += (a.0[1] as u64) + (b.0[1] as u64); 108 | self.0[1] = (t & 0xFFFFFFFF) as u32; t >>= 32; 109 | t += (a.0[2] as u64) + (b.0[2] as u64); 110 | self.0[2] = (t & 0xFFFFFFFF) as u32; t >>= 32; 111 | t += (a.0[3] as u64) + (b.0[3] as u64); 112 | self.0[3] = (t & 0xFFFFFFFF) as u32; t >>= 32; 113 | t += (a.0[4] as u64) + (b.0[4] as u64); 114 | self.0[4] = (t & 0xFFFFFFFF) as u32; t >>= 32; 115 | t += (a.0[5] as u64) + (b.0[5] as u64); 116 | self.0[5] = (t & 0xFFFFFFFF) as u32; t >>= 32; 117 | t += (a.0[6] as u64) + (b.0[6] as u64); 118 | self.0[6] = (t & 0xFFFFFFFF) as u32; t >>= 32; 119 | t += (a.0[7] as u64) + (b.0[7] as u64); 120 | self.0[7] = (t & 0xFFFFFFFF) as u32; t >>= 32; 121 | overflow = t + if self.check_overflow() { 1 } else { 0 }; 122 | debug_assert!(overflow == 0 || overflow == 1); 123 | self.reduce(overflow == 1); 124 | return overflow == 1; 125 | } 126 | 127 | /// Conditionally add a power of two to a scalar. The result is 128 | /// not allowed to overflow. 
129 | pub fn cadd_bit(&mut self, mut bit: usize, flag: bool) { 130 | let mut t: u64; 131 | debug_assert!(bit < 256); 132 | bit += if flag { 0 } else { usize::max_value() } & 0x100; 133 | t = (self.0[0] as u64) + ((if (bit >> 5) == 0 { 1 } else { 0 }) << (bit & 0x1F)); 134 | self.0[0] = (t & 0xFFFFFFFF) as u32; t >>= 32; 135 | t += (self.0[1] as u64) + ((if (bit >> 5) == 1 { 1 } else { 0 }) << (bit & 0x1F)); 136 | self.0[1] = (t & 0xFFFFFFFF) as u32; t >>= 32; 137 | t += (self.0[2] as u64) + ((if (bit >> 5) == 2 { 1 } else { 0 }) << (bit & 0x1F)); 138 | self.0[2] = (t & 0xFFFFFFFF) as u32; t >>= 32; 139 | t += (self.0[3] as u64) + ((if (bit >> 5) == 3 { 1 } else { 0 }) << (bit & 0x1F)); 140 | self.0[3] = (t & 0xFFFFFFFF) as u32; t >>= 32; 141 | t += (self.0[4] as u64) + ((if (bit >> 5) == 4 { 1 } else { 0 }) << (bit & 0x1F)); 142 | self.0[4] = (t & 0xFFFFFFFF) as u32; t >>= 32; 143 | t += (self.0[5] as u64) + ((if (bit >> 5) == 5 { 1 } else { 0 }) << (bit & 0x1F)); 144 | self.0[5] = (t & 0xFFFFFFFF) as u32; t >>= 32; 145 | t += (self.0[6] as u64) + ((if (bit >> 5) == 6 { 1 } else { 0 }) << (bit & 0x1F)); 146 | self.0[6] = (t & 0xFFFFFFFF) as u32; t >>= 32; 147 | t += (self.0[7] as u64) + ((if (bit >> 5) == 7 { 1 } else { 0 }) << (bit & 0x1F)); 148 | self.0[7] = (t & 0xFFFFFFFF) as u32; 149 | debug_assert!((t >> 32) == 0); 150 | debug_assert!(!self.check_overflow()); 151 | } 152 | 153 | /// Set a scalar from a big endian byte array. 
154 | pub fn set_b32(&mut self, b32: &[u8; 32]) -> bool { 155 | self.0[0] = (b32[31] as u32) | ((b32[30] as u32) << 8) | ((b32[29] as u32) << 16) | ((b32[28] as u32) << 24); 156 | self.0[1] = (b32[27] as u32) | ((b32[26] as u32) << 8) | ((b32[25] as u32) << 16) | ((b32[24] as u32) << 24); 157 | self.0[2] = (b32[23] as u32) | ((b32[22] as u32) << 8) | ((b32[21] as u32) << 16) | ((b32[20] as u32) << 24); 158 | self.0[3] = (b32[19] as u32) | ((b32[18] as u32) << 8) | ((b32[17] as u32) << 16) | ((b32[16] as u32) << 24); 159 | self.0[4] = (b32[15] as u32) | ((b32[14] as u32) << 8) | ((b32[13] as u32) << 16) | ((b32[12] as u32) << 24); 160 | self.0[5] = (b32[11] as u32) | ((b32[10] as u32) << 8) | ((b32[9] as u32) << 16) | ((b32[8] as u32) << 24); 161 | self.0[6] = (b32[7] as u32) | ((b32[6] as u32) << 8) | ((b32[5] as u32) << 16) | ((b32[4] as u32) << 24); 162 | self.0[7] = (b32[3] as u32) | ((b32[2] as u32) << 8) | ((b32[1] as u32) << 16) | ((b32[0] as u32) << 24); 163 | 164 | let overflow = self.check_overflow(); 165 | self.reduce(overflow) 166 | } 167 | 168 | /// Convert a scalar to a byte array. 
169 | pub fn b32(&self) -> [u8; 32] { 170 | let mut bin = [0u8; 32]; 171 | bin[0] = (self.0[7] >> 24) as u8; bin[1] = (self.0[7] >> 16) as u8; bin[2] = (self.0[7] >> 8) as u8; bin[3] = (self.0[7]) as u8; 172 | bin[4] = (self.0[6] >> 24) as u8; bin[5] = (self.0[6] >> 16) as u8; bin[6] = (self.0[6] >> 8) as u8; bin[7] = (self.0[6]) as u8; 173 | bin[8] = (self.0[5] >> 24) as u8; bin[9] = (self.0[5] >> 16) as u8; bin[10] = (self.0[5] >> 8) as u8; bin[11] = (self.0[5]) as u8; 174 | bin[12] = (self.0[4] >> 24) as u8; bin[13] = (self.0[4] >> 16) as u8; bin[14] = (self.0[4] >> 8) as u8; bin[15] = (self.0[4]) as u8; 175 | bin[16] = (self.0[3] >> 24) as u8; bin[17] = (self.0[3] >> 16) as u8; bin[18] = (self.0[3] >> 8) as u8; bin[19] = (self.0[3]) as u8; 176 | bin[20] = (self.0[2] >> 24) as u8; bin[21] = (self.0[2] >> 16) as u8; bin[22] = (self.0[2] >> 8) as u8; bin[23] = (self.0[2]) as u8; 177 | bin[24] = (self.0[1] >> 24) as u8; bin[25] = (self.0[1] >> 16) as u8; bin[26] = (self.0[1] >> 8) as u8; bin[27] = (self.0[1]) as u8; 178 | bin[28] = (self.0[0] >> 24) as u8; bin[29] = (self.0[0] >> 16) as u8; bin[30] = (self.0[0] >> 8) as u8; bin[31] = (self.0[0]) as u8; 179 | bin 180 | } 181 | 182 | /// Check whether a scalar equals zero. 183 | pub fn is_zero(&self) -> bool { 184 | (self.0[0] | self.0[1] | self.0[2] | self.0[3] | self.0[4] | self.0[5] | self.0[6] | self.0[7]) == 0 185 | } 186 | 187 | /// Compute the complement of a scalar (modulo the group order). 
188 | pub fn neg_in_place(&mut self, a: &Scalar) { 189 | let nonzero: u64 = 0xFFFFFFFF * if !a.is_zero() { 1 } else { 0 }; 190 | let mut t: u64 = (!a.0[0]) as u64 + (SECP256K1_N_0 + 1) as u64; 191 | self.0[0] = (t & nonzero) as u32; t >>= 32; 192 | t += (!a.0[1]) as u64 + SECP256K1_N_1 as u64; 193 | self.0[1] = (t & nonzero) as u32; t >>= 32; 194 | t += (!a.0[2]) as u64 + SECP256K1_N_2 as u64; 195 | self.0[2] = (t & nonzero) as u32; t >>= 32; 196 | t += (!a.0[3]) as u64 + SECP256K1_N_3 as u64; 197 | self.0[3] = (t & nonzero) as u32; t >>= 32; 198 | t += (!a.0[4]) as u64 + SECP256K1_N_4 as u64; 199 | self.0[4] = (t & nonzero) as u32; t >>= 32; 200 | t += (!a.0[5]) as u64 + SECP256K1_N_5 as u64; 201 | self.0[5] = (t & nonzero) as u32; t >>= 32; 202 | t += (!a.0[6]) as u64 + SECP256K1_N_6 as u64; 203 | self.0[6] = (t & nonzero) as u32; t >>= 32; 204 | t += (!a.0[7]) as u64 + SECP256K1_N_7 as u64; 205 | self.0[7] = (t & nonzero) as u32; 206 | } 207 | 208 | pub fn neg(&self) -> Scalar { 209 | let mut ret = Scalar::default(); 210 | ret.neg_in_place(self); 211 | ret 212 | } 213 | 214 | /// Check whether a scalar equals one. 215 | pub fn is_one(&self) -> bool { 216 | ((self.0[0] ^ 1) | self.0[1] | self.0[2] | self.0[3] | self.0[4] | self.0[5] | self.0[6] | self.0[7]) == 0 217 | } 218 | 219 | /// Check whether a scalar is higher than the group order divided 220 | /// by 2. 221 | pub fn is_high(&self) -> bool { 222 | let mut yes: bool = false; 223 | let mut no: bool = false; 224 | no = no || (self.0[7] < SECP256K1_N_H_7); 225 | yes = yes || ((self.0[7] > SECP256K1_N_H_7) & !no); 226 | no = no || ((self.0[6] < SECP256K1_N_H_6) & !yes); /* No need for a > check. */ 227 | no = no || ((self.0[5] < SECP256K1_N_H_5) & !yes); /* No need for a > check. */ 228 | no = no || ((self.0[4] < SECP256K1_N_H_4) & !yes); /* No need for a > check. 
*/ 229 | no = no || ((self.0[3] < SECP256K1_N_H_3) & !yes); 230 | yes = yes || ((self.0[3] > SECP256K1_N_H_3) && !no); 231 | no = no || ((self.0[2] < SECP256K1_N_H_2) && !yes); 232 | yes = yes || ((self.0[2] > SECP256K1_N_H_2) && !no); 233 | no = no || ((self.0[1] < SECP256K1_N_H_1) && !yes); 234 | yes = yes || ((self.0[1] > SECP256K1_N_H_1) && !no); 235 | yes = yes || ((self.0[0] >= SECP256K1_N_H_0) && !no); 236 | return yes; 237 | } 238 | 239 | /// Conditionally negate a number, in constant time. Returns -1 if 240 | /// the number was negated, 1 otherwise. 241 | pub fn cond_neg_mut(&mut self, flag: bool) -> isize { 242 | let mask = if flag { u32::max_value() } else { 0 }; 243 | let nonzero: u64 = 0xFFFFFFFF * if !self.is_zero() { 1 } else { 0 }; 244 | let mut t: u64 = (self.0[0] ^ mask) as u64 + ((SECP256K1_N_0 + 1) & mask) as u64; 245 | self.0[0] = (t & nonzero) as u32; t >>= 32; 246 | t += (self.0[1] ^ mask) as u64 + (SECP256K1_N_1 & mask) as u64; 247 | self.0[1] = (t & nonzero) as u32; t >>= 32; 248 | t += (self.0[2] ^ mask) as u64 + (SECP256K1_N_2 & mask) as u64; 249 | self.0[2] = (t & nonzero) as u32; t >>= 32; 250 | t += (self.0[3] ^ mask) as u64 + (SECP256K1_N_3 & mask) as u64; 251 | self.0[3] = (t & nonzero) as u32; t >>= 32; 252 | t += (self.0[4] ^ mask) as u64 + (SECP256K1_N_4 & mask) as u64; 253 | self.0[4] = (t & nonzero) as u32; t >>= 32; 254 | t += (self.0[5] ^ mask) as u64 + (SECP256K1_N_5 & mask) as u64; 255 | self.0[5] = (t & nonzero) as u32; t >>= 32; 256 | t += (self.0[6] ^ mask) as u64 + (SECP256K1_N_6 & mask) as u64; 257 | self.0[6] = (t & nonzero) as u32; t >>= 32; 258 | t += (self.0[7] ^ mask) as u64 + (SECP256K1_N_7 & mask) as u64; 259 | self.0[7] = (t & nonzero) as u32; 260 | 261 | if mask == 0 { 262 | return 1; 263 | } else { 264 | return -1; 265 | } 266 | } 267 | } 268 | 269 | macro_rules! define_ops { 270 | ($c0: ident, $c1: ident, $c2: ident) => { 271 | #[allow(unused_macros)] 272 | macro_rules! 
muladd { 273 | ($a: expr, $b: expr) => { 274 | let a = $a; let b = $b; 275 | let t = (a as u64) * (b as u64); 276 | let mut th = (t >> 32) as u32; 277 | let tl = t as u32; 278 | $c0 = $c0.wrapping_add(tl); 279 | th = th.wrapping_add(if $c0 < tl { 1 } else { 0 }); 280 | $c1 = $c1.wrapping_add(th); 281 | $c2 = $c2.wrapping_add(if $c1 < th { 1 } else { 0 }); 282 | debug_assert!($c1 >= th || $c2 != 0); 283 | } 284 | } 285 | 286 | #[allow(unused_macros)] 287 | macro_rules! muladd_fast { 288 | ($a: expr, $b: expr) => { 289 | let a = $a; let b = $b; 290 | let t = (a as u64) * (b as u64); 291 | let mut th = (t >> 32) as u32; 292 | let tl = t as u32; 293 | $c0 = $c0.wrapping_add(tl); 294 | th = th.wrapping_add(if $c0 < tl { 1 } else { 0 }); 295 | $c1 = $c1.wrapping_add(th); 296 | debug_assert!($c1 >= th); 297 | } 298 | } 299 | 300 | #[allow(unused_macros)] 301 | macro_rules! muladd2 { 302 | ($a: expr, $b: expr) => { 303 | let a = $a; let b = $b; 304 | let t = (a as u64) * (b as u64); 305 | let th = (t >> 32) as u32; 306 | let tl = t as u32; 307 | let mut th2 = th.wrapping_add(th); 308 | $c2 = $c2.wrapping_add(if th2 < th { 1 } else { 0 }); 309 | debug_assert!(th2 >= th || $c2 != 0); 310 | let tl2 = tl.wrapping_add(tl); 311 | th2 = th2.wrapping_add(if tl2 < tl { 1 } else { 0 }); 312 | $c0 = $c0.wrapping_add(tl2); 313 | th2 = th2.wrapping_add(if $c0 < tl2 { 1 } else { 0 }); 314 | $c2 = $c2.wrapping_add(if $c0 < tl2 && th2 == 0 { 1 } else { 0 }); 315 | debug_assert!($c0 >= tl2 || th2 != 0 || $c2 != 0); 316 | $c1 = $c1.wrapping_add(th2); 317 | $c2 = $c2.wrapping_add(if $c1 < th2 { 1 } else { 0 }); 318 | debug_assert!($c1 >= th2 || $c2 != 0); 319 | } 320 | } 321 | 322 | #[allow(unused_macros)] 323 | macro_rules! 
sumadd {
            ($a: expr) => {
                // c2:c1:c0 += a, with full carry propagation.
                let a = $a;
                $c0 = $c0.wrapping_add(a);
                let over = if $c0 < a { 1 } else { 0 };
                $c1 = $c1.wrapping_add(over);
                $c2 = $c2.wrapping_add(if $c1 < over { 1 } else { 0 });
            }
        }

        #[allow(unused_macros)]
        macro_rules! sumadd_fast {
            ($a: expr) => {
                // c1:c0 += a; c2 must be zero and no final carry occurs.
                let a = $a;
                $c0 = $c0.wrapping_add(a);
                $c1 = $c1.wrapping_add(if $c0 < a { 1 } else { 0 });
                debug_assert!($c1 != 0 || $c0 >= a);
                debug_assert!($c2 == 0);
            }
        }

        #[allow(unused_macros)]
        macro_rules! extract {
            () => {
                // Pop the low 32 bits off the accumulator, shifting it down.
                {
                    #[allow(unused_assignments)]
                    {
                        let n = $c0;
                        $c0 = $c1;
                        $c1 = $c2;
                        $c2 = 0;
                        n
                    }
                }
            }
        }

        #[allow(unused_macros)]
        macro_rules! extract_fast {
            () => {
                {
                    #[allow(unused_assignments)]
                    {
                        let n = $c0;
                        $c0 = $c1;
                        $c1 = 0;
                        debug_assert!($c2 == 0);
                        n
                    }
                }
            }
        }
    }
}

impl Scalar {
    /// Reduce a 512-bit value `l` modulo the group order into `self`.
    fn reduce_512(&mut self, l: &[u32; 16]) {
        let (mut c0, mut c1, mut c2): (u32, u32, u32);
        define_ops!(c0, c1, c2);

        let mut c: u64;
        let (n0, n1, n2, n3, n4, n5, n6, n7) = (l[8], l[9], l[10], l[11], l[12], l[13], l[14], l[15]);
        let (m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12): (u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32, u32);
        let (p0, p1, p2, p3, p4, p5, p6, p7, p8): (u32, u32, u32, u32, u32, u32, u32, u32, u32);

        /* Reduce 512 bits into 385: m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */
        c0 = l[0]; c1 = 0; c2 = 0;
        muladd_fast!(n0, SECP256K1_N_C_0);
        m0 = extract_fast!();
        sumadd_fast!(l[1]);
        muladd!(n1, SECP256K1_N_C_0);
        muladd!(n0, SECP256K1_N_C_1);
        m1 = extract!();
        sumadd!(l[2]);
        muladd!(n2, SECP256K1_N_C_0);
        muladd!(n1, SECP256K1_N_C_1);
        muladd!(n0, SECP256K1_N_C_2);
        m2 = extract!();
        sumadd!(l[3]);
        muladd!(n3, SECP256K1_N_C_0);
        muladd!(n2, SECP256K1_N_C_1);
        muladd!(n1, SECP256K1_N_C_2);
        muladd!(n0, SECP256K1_N_C_3);
        m3 = extract!();
        sumadd!(l[4]);
        muladd!(n4, SECP256K1_N_C_0);
        muladd!(n3, SECP256K1_N_C_1);
        muladd!(n2, SECP256K1_N_C_2);
        muladd!(n1, SECP256K1_N_C_3);
        sumadd!(n0);
        m4 = extract!();
        sumadd!(l[5]);
        muladd!(n5, SECP256K1_N_C_0);
        muladd!(n4, SECP256K1_N_C_1);
        muladd!(n3, SECP256K1_N_C_2);
        muladd!(n2, SECP256K1_N_C_3);
        sumadd!(n1);
        m5 = extract!();
        sumadd!(l[6]);
        muladd!(n6, SECP256K1_N_C_0);
        muladd!(n5, SECP256K1_N_C_1);
        muladd!(n4, SECP256K1_N_C_2);
        muladd!(n3, SECP256K1_N_C_3);
        sumadd!(n2);
        m6 = extract!();
        sumadd!(l[7]);
        muladd!(n7, SECP256K1_N_C_0);
        muladd!(n6, SECP256K1_N_C_1);
        muladd!(n5, SECP256K1_N_C_2);
        muladd!(n4, SECP256K1_N_C_3);
        sumadd!(n3);
        m7 = extract!();
        muladd!(n7, SECP256K1_N_C_1);
        muladd!(n6, SECP256K1_N_C_2);
        muladd!(n5, SECP256K1_N_C_3);
        sumadd!(n4);
        m8 = extract!();
        muladd!(n7, SECP256K1_N_C_2);
        muladd!(n6, SECP256K1_N_C_3);
        sumadd!(n5);
        m9 = extract!();
        muladd!(n7, SECP256K1_N_C_3);
        sumadd!(n6);
        m10 = extract!();
        sumadd_fast!(n7);
        m11 = extract_fast!();
        debug_assert!(c0 <= 1);
        m12 = c0;

        /* Reduce 385 bits into 258: p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */
        c0 = m0; c1 = 0; c2 = 0;
        muladd_fast!(m8, SECP256K1_N_C_0);
        p0 = extract_fast!();
        sumadd_fast!(m1);
        muladd!(m9, SECP256K1_N_C_0);
        muladd!(m8, SECP256K1_N_C_1);
        p1 = extract!();
        sumadd!(m2);
        muladd!(m10, SECP256K1_N_C_0);
        muladd!(m9, SECP256K1_N_C_1);
        muladd!(m8, SECP256K1_N_C_2);
        p2 = extract!();
        sumadd!(m3);
        muladd!(m11, SECP256K1_N_C_0);
        muladd!(m10, SECP256K1_N_C_1);
        muladd!(m9, SECP256K1_N_C_2);
        muladd!(m8, SECP256K1_N_C_3);
        p3 = extract!();
        sumadd!(m4);
        muladd!(m12, SECP256K1_N_C_0);
        muladd!(m11, SECP256K1_N_C_1);
        muladd!(m10, SECP256K1_N_C_2);
        muladd!(m9, SECP256K1_N_C_3);
        sumadd!(m8);
        p4 = extract!();
        sumadd!(m5);
        muladd!(m12, SECP256K1_N_C_1);
        muladd!(m11, SECP256K1_N_C_2);
        muladd!(m10, SECP256K1_N_C_3);
        sumadd!(m9);
        p5 = extract!();
        sumadd!(m6);
        muladd!(m12, SECP256K1_N_C_2);
        muladd!(m11, SECP256K1_N_C_3);
        sumadd!(m10);
        p6 = extract!();
        sumadd_fast!(m7);
        muladd_fast!(m12, SECP256K1_N_C_3);
        sumadd_fast!(m11);
        p7 = extract_fast!();
        p8 = c0 + m12;
        debug_assert!(p8 <= 2);

        /* Reduce 258 bits into 256: r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */
        c = p0 as u64 + SECP256K1_N_C_0 as u64 * p8 as u64;
        self.0[0] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p1 as u64 + SECP256K1_N_C_1 as u64 * p8 as u64;
        self.0[1] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p2 as u64 + SECP256K1_N_C_2 as u64 * p8 as u64;
        self.0[2] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p3 as u64 + SECP256K1_N_C_3 as u64 * p8 as u64;
        self.0[3] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p4 as u64 + p8 as u64;
        self.0[4] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p5 as u64;
        self.0[5] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p6 as u64;
        self.0[6] = (c & 0xFFFFFFFF) as u32; c >>= 32;
        c += p7 as u64;
        self.0[7] = (c & 0xFFFFFFFF) as u32; c >>= 32;

        /* Final conditional subtraction of the group order. */
        let overflow = self.check_overflow();
        debug_assert!(c + if overflow { 1 } else { 0 } <= 1);
        self.reduce(c + if overflow { 1 } else { 0 } == 1);
    }

    /// Schoolbook 8x8-limb multiplication: l[0..15] = self * b.
    fn mul_512(&self, b: &Scalar, l: &mut [u32; 16]) {
        let (mut c0, mut c1, mut c2): (u32, u32, u32) = (0, 0, 0);
        define_ops!(c0, c1, c2);

        /* l[0..15] = a[0..7] * b[0..7]. */
        muladd_fast!(self.0[0], b.0[0]);
        l[0] = extract_fast!();
        muladd!(self.0[0], b.0[1]);
        muladd!(self.0[1], b.0[0]);
        l[1] = extract!();
        muladd!(self.0[0], b.0[2]);
        muladd!(self.0[1], b.0[1]);
        muladd!(self.0[2], b.0[0]);
        l[2] = extract!();
        muladd!(self.0[0], b.0[3]);
        muladd!(self.0[1], b.0[2]);
        muladd!(self.0[2], b.0[1]);
        muladd!(self.0[3], b.0[0]);
        l[3] = extract!();
        muladd!(self.0[0], b.0[4]);
        muladd!(self.0[1], b.0[3]);
        muladd!(self.0[2], b.0[2]);
        muladd!(self.0[3], b.0[1]);
        muladd!(self.0[4], b.0[0]);
        l[4] = extract!();
        muladd!(self.0[0], b.0[5]);
        muladd!(self.0[1], b.0[4]);
        muladd!(self.0[2], b.0[3]);
        muladd!(self.0[3], b.0[2]);
        muladd!(self.0[4], b.0[1]);
        muladd!(self.0[5], b.0[0]);
        l[5] = extract!();
        muladd!(self.0[0], b.0[6]);
        muladd!(self.0[1], b.0[5]);
        muladd!(self.0[2], b.0[4]);
        muladd!(self.0[3], b.0[3]);
        muladd!(self.0[4], b.0[2]);
        muladd!(self.0[5], b.0[1]);
        muladd!(self.0[6], b.0[0]);
        l[6] = extract!();
        muladd!(self.0[0], b.0[7]);
        muladd!(self.0[1], b.0[6]);
        muladd!(self.0[2], b.0[5]);
        muladd!(self.0[3], b.0[4]);
        muladd!(self.0[4], b.0[3]);
        muladd!(self.0[5], b.0[2]);
        muladd!(self.0[6], b.0[1]);
        muladd!(self.0[7], b.0[0]);
        l[7] = extract!();
        muladd!(self.0[1], b.0[7]);
        muladd!(self.0[2], b.0[6]);
        muladd!(self.0[3], b.0[5]);
        muladd!(self.0[4], b.0[4]);
        muladd!(self.0[5], b.0[3]);
        muladd!(self.0[6], b.0[2]);
        muladd!(self.0[7], b.0[1]);
        l[8] = extract!();
        muladd!(self.0[2], b.0[7]);
        muladd!(self.0[3], b.0[6]);
        muladd!(self.0[4], b.0[5]);
        muladd!(self.0[5], b.0[4]);
        muladd!(self.0[6], b.0[3]);
        muladd!(self.0[7], b.0[2]);
        l[9] = extract!();
        muladd!(self.0[3], b.0[7]);
        muladd!(self.0[4], b.0[6]);
        muladd!(self.0[5], b.0[5]);
        muladd!(self.0[6], b.0[4]);
        muladd!(self.0[7], b.0[3]);
        l[10] = extract!();
        muladd!(self.0[4], b.0[7]);
        muladd!(self.0[5], b.0[6]);
        muladd!(self.0[6], b.0[5]);
        muladd!(self.0[7], b.0[4]);
        l[11] = extract!();
        muladd!(self.0[5], b.0[7]);
        muladd!(self.0[6], b.0[6]);
        muladd!(self.0[7], b.0[5]);
        l[12] = extract!();
        muladd!(self.0[6], b.0[7]);
        muladd!(self.0[7], b.0[6]);
        l[13] = extract!();
        muladd_fast!(self.0[7], b.0[7]);
        l[14] = extract_fast!();
        debug_assert!(c1 == 0);
        l[15] = c0;
    }

    /// Squaring specialization of `mul_512`, doubling the off-diagonal
    /// cross terms: l[0..15] = self^2.
    fn sqr_512(&self, l: &mut [u32; 16]) {
        let (mut c0, mut c1, mut c2): (u32, u32, u32) = (0, 0, 0);
        define_ops!(c0, c1, c2);

        /* l[0..15] = a[0..7]^2. */
        muladd_fast!(self.0[0], self.0[0]);
        l[0] = extract_fast!();
        muladd2!(self.0[0], self.0[1]);
        l[1] = extract!();
        muladd2!(self.0[0], self.0[2]);
        muladd!(self.0[1], self.0[1]);
        l[2] = extract!();
        muladd2!(self.0[0], self.0[3]);
        muladd2!(self.0[1], self.0[2]);
        l[3] = extract!();
        muladd2!(self.0[0], self.0[4]);
        muladd2!(self.0[1], self.0[3]);
        muladd!(self.0[2], self.0[2]);
        l[4] = extract!();
        muladd2!(self.0[0], self.0[5]);
        muladd2!(self.0[1], self.0[4]);
        muladd2!(self.0[2], self.0[3]);
        l[5] = extract!();
        muladd2!(self.0[0], self.0[6]);
        muladd2!(self.0[1], self.0[5]);
        muladd2!(self.0[2], self.0[4]);
        muladd!(self.0[3], self.0[3]);
        l[6] = extract!();
        muladd2!(self.0[0], self.0[7]);
        muladd2!(self.0[1], self.0[6]);
        muladd2!(self.0[2], self.0[5]);
        muladd2!(self.0[3], self.0[4]);
        l[7] = extract!();
        muladd2!(self.0[1], self.0[7]);
        muladd2!(self.0[2], self.0[6]);
        muladd2!(self.0[3], self.0[5]);
        muladd!(self.0[4], self.0[4]);
        l[8] = extract!();
        muladd2!(self.0[2], self.0[7]);
        muladd2!(self.0[3], self.0[6]);
        muladd2!(self.0[4], self.0[5]);
        l[9] = extract!();
        muladd2!(self.0[3], self.0[7]);
        muladd2!(self.0[4], self.0[6]);
        muladd!(self.0[5], self.0[5]);
        l[10] = extract!();
        muladd2!(self.0[4], self.0[7]);
        muladd2!(self.0[5], self.0[6]);
        l[11] = extract!();
        muladd2!(self.0[5], self.0[7]);
        muladd!(self.0[6], self.0[6]);
        l[12] = extract!();
        muladd2!(self.0[6], self.0[7]);
        l[13] = extract!();
        muladd_fast!(self.0[7], self.0[7]);
        l[14] = extract_fast!();
        debug_assert!(c1 == 0);
        l[15] = c0;
    }

    /// `self = a * b` modulo the group order.
    pub fn mul_in_place(&mut self, a: &Scalar, b: &Scalar) {
        let mut l = [0u32; 16];
        a.mul_512(b, &mut l);
        self.reduce_512(&l);
    }

    /// Shift a scalar right by some amount strictly between 0 and 16,
    /// returning the low bits that were shifted off.
    pub fn shr_int(&mut self, n: usize) -> u32 {
        debug_assert!(n > 0);
        debug_assert!(n < 16);
        let ret = self.0[0] & ((1 << n) - 1);
        self.0[0] = (self.0[0] >> n) + (self.0[1] << (32 - n));
        self.0[1] = (self.0[1] >> n) + (self.0[2] << (32 - n));
        self.0[2] = (self.0[2] >> n) + (self.0[3] << (32 - n));
        self.0[3] = (self.0[3] >> n) + (self.0[4] << (32 - n));
        self.0[4] = (self.0[4] >> n) + (self.0[5] << (32 - n));
        self.0[5] = (self.0[5] >> n) + (self.0[6] << (32 - n));
        self.0[6] = (self.0[6] >> n) + (self.0[7] << (32 - n));
        self.0[7] = self.0[7] >> n;
        ret
    }

    /// `self = a^2` modulo the group order.
    pub fn sqr_in_place(&mut self, a: &Scalar) {
        let mut l = [0u32; 16];
        a.sqr_512(&mut l);
        self.reduce_512(&l);
    }

    pub fn sqr(&self) -> Scalar {
        let mut ret = Scalar::default();
        ret.sqr_in_place(self);
        ret
    }

    /// Compute the modular inverse of `x` via a fixed addition chain
    /// (exponentiation by n - 2), keeping the operation constant time.
    pub fn inv_in_place(&mut self, x: &Scalar) {
        let u2 = x.sqr();
        let x2 = &u2 * x;
        let
u5 = &u2 * &x2; 708 | let x3 = &u5 * &u2; 709 | let u9 = &x3 * &u2; 710 | let u11 = &u9 * &u2; 711 | let u13 = &u11 * &u2; 712 | 713 | let mut x6 = u13.sqr(); 714 | x6 = x6.sqr(); 715 | x6 *= &u11; 716 | 717 | let mut x8 = x6.sqr(); 718 | x8 = x8.sqr(); 719 | x8 *= &x2; 720 | 721 | let mut x14 = x8.sqr(); 722 | for _ in 0..5 { 723 | x14 = x14.sqr(); 724 | } 725 | x14 *= &x6; 726 | 727 | let mut x28 = x14.sqr(); 728 | for _ in 0..13 { 729 | x28 = x28.sqr(); 730 | } 731 | x28 *= &x14; 732 | 733 | let mut x56 = x28.sqr(); 734 | for _ in 0..27 { 735 | x56 = x56.sqr(); 736 | } 737 | x56 *= &x28; 738 | 739 | let mut x112 = x56.sqr(); 740 | for _ in 0..55 { 741 | x112 = x112.sqr(); 742 | } 743 | x112 *= &x56; 744 | 745 | let mut x126 = x112.sqr(); 746 | for _ in 0..13 { 747 | x126 = x126.sqr(); 748 | } 749 | x126 *= &x14; 750 | 751 | let mut t = x126; 752 | for _ in 0..3 { 753 | t = t.sqr(); 754 | } 755 | t *= &u5; 756 | for _ in 0..4 { 757 | t = t.sqr(); 758 | } 759 | t *= &x3; 760 | for _ in 0..4 { 761 | t = t.sqr(); 762 | } 763 | t *= &u5; 764 | for _ in 0..5 { 765 | t = t.sqr(); 766 | } 767 | t *= &u11; 768 | for _ in 0..4 { 769 | t = t.sqr(); 770 | } 771 | t *= &u11; 772 | for _ in 0..4 { 773 | t = t.sqr(); 774 | } 775 | t *= &x3; 776 | for _ in 0..5 { 777 | t = t.sqr(); 778 | } 779 | t *= &x3; 780 | for _ in 0..6 { 781 | t = t.sqr(); 782 | } 783 | t *= &u13; 784 | for _ in 0..4 { 785 | t = t.sqr(); 786 | } 787 | t *= &u5; 788 | for _ in 0..3 { 789 | t = t.sqr(); 790 | } 791 | t *= &x3; 792 | for _ in 0..5 { 793 | t = t.sqr(); 794 | } 795 | t *= &u9; 796 | for _ in 0..6 { 797 | t = t.sqr(); 798 | } 799 | t *= &u5; 800 | for _ in 0..10 { 801 | t = t.sqr(); 802 | } 803 | t *= &x3; 804 | for _ in 0..4 { 805 | t = t.sqr(); 806 | } 807 | t *= &x3; 808 | for _ in 0..9 { 809 | t = t.sqr(); 810 | } 811 | t *= &x8; 812 | for _ in 0..5 { 813 | t = t.sqr(); 814 | } 815 | t *= &u9; 816 | for _ in 0..6 { 817 | t = t.sqr(); 818 | } 819 | t *= &u11; 820 | for _ in 0..4 { 821 | t = 
t.sqr(); 822 | } 823 | t *= &u13; 824 | for _ in 0..5 { 825 | t = t.sqr(); 826 | } 827 | t *= &x2; 828 | for _ in 0..6 { 829 | t = t.sqr(); 830 | } 831 | t *= &u13; 832 | for _ in 0..10 { 833 | t = t.sqr(); 834 | } 835 | t *= &u13; 836 | for _ in 0..4 { 837 | t = t.sqr(); 838 | } 839 | t *= &u9; 840 | for _ in 0..6 { 841 | t = t.sqr(); 842 | } 843 | t *= x; 844 | for _ in 0..8 { 845 | t = t.sqr(); 846 | } 847 | *self = &t * &x6; 848 | } 849 | 850 | pub fn inv(&self) -> Scalar { 851 | let mut ret = Scalar::default(); 852 | ret.inv_in_place(self); 853 | ret 854 | } 855 | 856 | pub fn inv_var(&self) -> Scalar { 857 | self.inv() 858 | } 859 | 860 | pub fn is_even(&self) -> bool { 861 | return self.0[0] & 1 == 0; 862 | } 863 | } 864 | 865 | impl Default for Scalar { 866 | fn default() -> Scalar { 867 | Scalar([0u32; 8]) 868 | } 869 | } 870 | 871 | impl Add for Scalar { 872 | type Output = Scalar; 873 | fn add(self, other: Scalar) -> Scalar { 874 | let mut ret = Scalar::default(); 875 | ret.add_in_place(&self, &other); 876 | ret 877 | } 878 | } 879 | 880 | impl<'a, 'b> Add<&'a Scalar> for &'b Scalar { 881 | type Output = Scalar; 882 | fn add(self, other: &'a Scalar) -> Scalar { 883 | let mut ret = Scalar::default(); 884 | ret.add_in_place(self, other); 885 | ret 886 | } 887 | } 888 | 889 | impl<'a> AddAssign<&'a Scalar> for Scalar { 890 | fn add_assign(&mut self, other: &'a Scalar) { 891 | let mut ret = Scalar::default(); 892 | ret.add_in_place(self, other); 893 | *self = ret; 894 | } 895 | } 896 | 897 | impl AddAssign for Scalar { 898 | fn add_assign(&mut self, other: Scalar) { 899 | self.add_assign(&other) 900 | } 901 | } 902 | 903 | impl Mul for Scalar { 904 | type Output = Scalar; 905 | fn mul(self, other: Scalar) -> Scalar { 906 | let mut ret = Scalar::default(); 907 | ret.mul_in_place(&self, &other); 908 | ret 909 | } 910 | } 911 | 912 | impl<'a, 'b> Mul<&'a Scalar> for &'b Scalar { 913 | type Output = Scalar; 914 | fn mul(self, other: &'a Scalar) -> Scalar { 915 | 
let mut ret = Scalar::default();
        ret.mul_in_place(self, other);
        ret
    }
}

/// `*=` with a borrowed right-hand side. Computes into a temporary
/// because `mul_in_place` may not alias its output with an input.
impl<'a> MulAssign<&'a Scalar> for Scalar {
    fn mul_assign(&mut self, other: &'a Scalar) {
        let mut ret = Scalar::default();
        ret.mul_in_place(self, other);
        *self = ret;
    }
}

/// `*=` with an owned right-hand side; forwards to the borrowed impl.
impl MulAssign for Scalar {
    fn mul_assign(&mut self, other: Scalar) {
        self.mul_assign(&other)
    }
}
--------------------------------------------------------------------------------
/tests/verify.rs:
--------------------------------------------------------------------------------
extern crate secp256k1;
extern crate secp256k1_test;
extern crate rand;

use secp256k1::*;
use secp256k1::curve::*;
use secp256k1_test::{Secp256k1, Message as SecpMessage, RecoverableSignature as SecpRecoverableSignature, RecoveryId as SecpRecoveryId, Signature as SecpSignature};
use secp256k1_test::ecdh::{SharedSecret as SecpSharedSecret};
use secp256k1_test::key;
use rand::thread_rng;

// Sign with the reference C-binding implementation (`secp256k1_test`),
// then check this crate's `verify` accepts the signature and that
// `verify_raw` rejects a corrupted `r` component.
#[test]
fn test_verify() {
    let secp256k1 = Secp256k1::new();

    let message_arr = [5u8; 32];
    let (privkey, pubkey) = secp256k1.generate_keypair(&mut thread_rng()).unwrap();
    let message = SecpMessage::from_slice(&message_arr).unwrap();
    let signature = secp256k1.sign(&message, &privkey).unwrap();

    // Re-encode the reference pubkey into this crate's fixed 65-byte
    // uncompressed form.
    let pubkey_arr = pubkey.serialize_vec(&secp256k1, false);
    assert!(pubkey_arr.len() == 65);
    let mut pubkey_a = [0u8; 65];
    for i in 0..65 {
        pubkey_a[i] = pubkey_arr[i];
    }

    let ctx_pubkey = PublicKey::parse(&pubkey_a).unwrap();
    // (was `let mut` — the binding is never mutated)
    let ctx_message = Message::parse(&message_arr);
    let signature_arr = signature.serialize_compact(&secp256k1);
    assert!(signature_arr.len() == 64);
    let mut signature_a = [0u8; 64];
    for i in 0..64 {
        signature_a[i] = signature_arr[i];
    }
    let ctx_sig = Signature::parse(&signature_a);
secp256k1.verify(&message, &signature, &pubkey).unwrap();
    assert!(verify(&ctx_message, &ctx_sig, &ctx_pubkey));
    // Corrupt `r` to 0 and 1 in turn; verification must fail (guarded
    // in case the random signature already had that `r`).
    let mut f_ctx_sig = ctx_sig.clone();
    f_ctx_sig.r.set_int(0);
    if f_ctx_sig.r != ctx_sig.r {
        assert!(!ECMULT_CONTEXT.verify_raw(&f_ctx_sig.r, &ctx_sig.s, &ctx_pubkey.clone().into(), &ctx_message.0));
    }
    f_ctx_sig.r.set_int(1);
    if f_ctx_sig.r != ctx_sig.r {
        assert!(!ECMULT_CONTEXT.verify_raw(&f_ctx_sig.r, &ctx_sig.s, &ctx_pubkey.clone().into(), &ctx_message.0));
    }
}

// Recoverable signature produced by the reference implementation must
// recover to the same public key through this crate's `recover`.
#[test]
fn test_recover() {
    let secp256k1 = Secp256k1::new();

    let message_arr = [5u8; 32];
    let (privkey, pubkey) = secp256k1.generate_keypair(&mut thread_rng()).unwrap();
    let message = SecpMessage::from_slice(&message_arr).unwrap();
    let signature = secp256k1.sign_recoverable(&message, &privkey).unwrap();

    let pubkey_arr = pubkey.serialize_vec(&secp256k1, false);
    assert!(pubkey_arr.len() == 65);
    let mut pubkey_a = [0u8; 65];
    for i in 0..65 {
        pubkey_a[i] = pubkey_arr[i];
    }

    // (was `let mut` — the binding is never mutated)
    let ctx_message = Message::parse(&message_arr);
    let (rec_id, signature_arr) = signature.serialize_compact(&secp256k1);
    assert!(signature_arr.len() == 64);
    let mut signature_a = [0u8; 64];
    for i in 0..64 {
        signature_a[i] = signature_arr[i];
    }
    let ctx_sig = Signature::parse(&signature_a);

    // secp256k1.recover(&message, &signature).unwrap();
    let ctx_pubkey = recover(&ctx_message, &ctx_sig, &RecoveryId::parse(rec_id.to_i32() as u8).unwrap()).unwrap();
    let sp = ctx_pubkey.serialize();

    let sps: &[u8] = &sp;
    let gps: &[u8] = &pubkey_a;
    assert_eq!(sps, gps);
}

// Known-answer test: secret key 1 must map to the secp256k1 generator
// point G (uncompressed encoding 0x04 || Gx || Gy).
#[test]
fn test_convert_key1() {
    let secret: [u8; 32] = [
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
        0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x01,
    ];
    let expected: &[u8] = &[
        0x04,0x79,0xbe,0x66,0x7e,0xf9,0xdc,0xbb,0xac,0x55,0xa0,0x62,0x95,
        0xce,0x87,0x0b,0x07,0x02,0x9b,0xfc,0xdb,0x2d,0xce,0x28,0xd9,0x59,
        0xf2,0x81,0x5b,0x16,0xf8,0x17,0x98,0x48,0x3a,0xda,0x77,0x26,0xa3,
        0xc4,0x65,0x5d,0xa4,0xfb,0xfc,0x0e,0x11,0x08,0xa8,0xfd,0x17,0xb4,
        0x48,0xa6,0x85,0x54,0x19,0x9c,0x47,0xd0,0x8f,0xfb,0x10,0xd4,0xb8
    ];
    let seckey = SecretKey::parse(&secret).unwrap();
    let pubkey = PublicKey::from_secret_key(&seckey);
    let public = pubkey.serialize();
    let pubkey_a: &[u8] = &public;

    assert_eq!(expected, pubkey_a);
}

// Known-answer test for an arbitrary fixed secret key.
#[test]
fn test_convert_key2() {
    let secret: [u8; 32] = [
        0x4d,0x5d,0xb4,0x10,0x7d,0x23,0x7d,0xf6,0xa3,0xd5,0x8e,0xe5,0xf7,
        0x0a,0xe6,0x3d,0x73,0xd7,0x65,0x8d,0x40,0x26,0xf2,0xee,0xfd,0x2f,
        0x20,0x4c,0x81,0x68,0x2c,0xb7
    ];
    let expected: &[u8] = &[
        0x04,0x3f,0xa8,0xc0,0x8c,0x65,0xa8,0x3f,0x6b,0x4e,0xa3,0xe0,0x4e,
        0x1c,0xc7,0x0c,0xbe,0x3c,0xd3,0x91,0x49,0x9e,0x3e,0x05,0xab,0x7d,
        0xed,0xf2,0x8a,0xff,0x9a,0xfc,0x53,0x82,0x00,0xff,0x93,0xe3,0xf2,
        0xb2,0xcb,0x50,0x29,0xf0,0x3c,0x7e,0xbe,0xe8,0x20,0xd6,0x3a,0x4c,
        0x5a,0x95,0x41,0xc8,0x3a,0xce,0xbe,0x29,0x3f,0x54,0xca,0xcf,0x0e
    ];
    let seckey = SecretKey::parse(&secret).unwrap();
    let pubkey = PublicKey::from_secret_key(&seckey);
    let public = pubkey.serialize();
    let pubkey_a: &[u8] = &public;

    assert_eq!(expected, pubkey_a);
}

// Random keypair: the public key derived by this crate must match the
// reference implementation's uncompressed serialization.
#[test]
fn test_convert_anykey() {
    let secp256k1 = Secp256k1::new();
    let (secp_privkey, secp_pubkey) = secp256k1.generate_keypair(&mut thread_rng()).unwrap();

    let mut secret = [0u8; 32];
    for i in 0..32 {
        secret[i] = secp_privkey[i];
    }

    let seckey = SecretKey::parse(&secret).unwrap();
    let pubkey = PublicKey::from_secret_key(&seckey);
    let
public = pubkey.serialize();
    let pubkey_r: &[u8] = &public;

    let secp_pubkey_arr = secp_pubkey.serialize_vec(&secp256k1, false);
    assert!(secp_pubkey_arr.len() == 65);
    let mut secp_pubkey_a = [0u8; 65];
    for i in 0..65 {
        secp_pubkey_a[i] = secp_pubkey_arr[i];
    }
    let secp_pubkey_r: &[u8] = &secp_pubkey_a;

    assert_eq!(secp_pubkey_r, pubkey_r);
}

// Full round trip: sign with this crate, then verify and recover both
// locally and through the reference implementation.
#[test]
fn test_sign_verify() {
    let secp256k1 = Secp256k1::new();

    let message_arr = [6u8; 32];
    let (secp_privkey, secp_pubkey) = secp256k1.generate_keypair(&mut thread_rng()).unwrap();

    let secp_message = SecpMessage::from_slice(&message_arr).unwrap();
    let pubkey_arr = secp_pubkey.serialize_vec(&secp256k1, false);
    assert!(pubkey_arr.len() == 65);
    let mut pubkey_a = [0u8; 65];
    for i in 0..65 {
        pubkey_a[i] = pubkey_arr[i];
    }
    let pubkey = PublicKey::parse(&pubkey_a).unwrap();
    let mut seckey_a = [0u8; 32];
    for i in 0..32 {
        seckey_a[i] = secp_privkey[i];
    }
    let seckey = SecretKey::parse(&seckey_a).unwrap();
    let message = Message::parse(&message_arr);

    let (sig, recid) = sign(&message, &seckey).unwrap();

    // Self verify
    assert!(verify(&message, &sig, &pubkey));

    // Self recover
    let recovered_pubkey = recover(&message, &sig, &recid).unwrap();
    let rpa = recovered_pubkey.serialize();
    let opa = pubkey.serialize();
    let rpr: &[u8] = &rpa;
    let opr: &[u8] = &opa;
    assert_eq!(rpr, opr);

    // Re-encode our signature for the reference implementation.
    let signature_a = sig.serialize();
    let secp_recid = SecpRecoveryId::from_i32(recid.into()).unwrap();
    let secp_rec_signature = SecpRecoverableSignature::from_compact(&secp256k1, &signature_a, secp_recid).unwrap();
    let secp_signature = SecpSignature::from_compact(&secp256k1, &signature_a).unwrap();

    // External verify
secp256k1.verify(&secp_message, &secp_signature, &secp_pubkey).unwrap(); 197 | 198 | // External recover 199 | let recovered_pubkey = secp256k1.recover(&secp_message, &secp_rec_signature).unwrap(); 200 | let rpa = recovered_pubkey.serialize_vec(&secp256k1, false); 201 | let rpr: &[u8] = &rpa; 202 | assert_eq!(rpr, opr); 203 | } 204 | 205 | #[test] 206 | fn test_failing_sign_verify() { 207 | let seckey_a: [u8; 32] = [169, 195, 92, 103, 2, 159, 75, 46, 158, 79, 249, 49, 208, 28, 48, 210, 5, 47, 136, 77, 21, 51, 224, 54, 213, 165, 90, 122, 233, 199, 0, 248]; 208 | let seckey = SecretKey::parse(&seckey_a).unwrap(); 209 | let pubkey = PublicKey::from_secret_key(&seckey); 210 | let message_arr = [6u8; 32]; 211 | let message = Message::parse(&message_arr); 212 | 213 | let (sig, recid) = sign(&message, &seckey).unwrap(); 214 | let tmp: u8 = recid.into(); 215 | assert_eq!(tmp, 1u8); 216 | 217 | let recovered_pubkey = recover(&message, &sig, &recid).unwrap(); 218 | let rpa = recovered_pubkey.serialize(); 219 | let opa = pubkey.serialize(); 220 | let rpr: &[u8] = &rpa; 221 | let opr: &[u8] = &opa; 222 | assert_eq!(rpr, opr); 223 | } 224 | 225 | fn genkey(secp256k1: &Secp256k1) -> (key::PublicKey, key::SecretKey, PublicKey, SecretKey) { 226 | let (secp_privkey, secp_pubkey) = secp256k1.generate_keypair(&mut thread_rng()).unwrap(); 227 | let pubkey_arr = secp_pubkey.serialize_vec(&secp256k1, false); 228 | assert!(pubkey_arr.len() == 65); 229 | let mut pubkey_a = [0u8; 65]; 230 | for i in 0..65 { 231 | pubkey_a[i] = pubkey_arr[i]; 232 | } 233 | let pubkey = PublicKey::parse(&pubkey_a).unwrap(); 234 | let mut seckey_a = [0u8; 32]; 235 | for i in 0..32 { 236 | seckey_a[i] = secp_privkey[i]; 237 | } 238 | let seckey = SecretKey::parse(&seckey_a).unwrap(); 239 | 240 | (secp_pubkey, secp_privkey, pubkey, seckey) 241 | } 242 | 243 | #[test] 244 | fn test_shared_secret() { 245 | let secp256k1 = Secp256k1::new(); 246 | 247 | let (spub1, ssec1, pub1, sec1) = genkey(&secp256k1); 248 | let 
(spub2, ssec2, pub2, sec2) = genkey(&secp256k1);

    // ECDH(pub1, sec2) and ECDH(pub2, sec1) must agree.
    let shared1 = SharedSecret::new(&pub1, &sec2).unwrap();
    let shared2 = SharedSecret::new(&pub2, &sec1).unwrap();

    let secp_shared1 = SecpSharedSecret::new(&secp256k1, &spub1, &ssec2);
    let secp_shared2 = SecpSharedSecret::new(&secp256k1, &spub2, &ssec1);

    assert_eq!(shared1, shared2);

    // Compare each of the 32 secret bytes against the reference result.
    for i in 0..32 {
        assert_eq!(shared1.as_ref()[i], secp_shared1[i]);
    }

    for i in 0..32 {
        assert_eq!(shared2.as_ref()[i], secp_shared2[i]);
    }
}
--------------------------------------------------------------------------------