├── .gitignore ├── Cargo.toml ├── LICENSE.txt ├── README.md └── src ├── barriers.rs └── lib.rs /.gitignore: -------------------------------------------------------------------------------- 1 | *.swo 2 | *.swp 3 | 4 | /Cargo.lock 5 | /target 6 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "timing-shield" 3 | version = "0.3.0" 4 | authors = ["Tim McLean "] 5 | description = "Comprehensive timing leak protection for Rust." 6 | homepage = "https://www.chosenplaintext.ca/open-source/rust-timing-shield/" 7 | repository = "https://github.com/timmclean/rust-timing-shield" 8 | categories = ["cryptography"] 9 | keywords = ["constant-time", "constant", "time", "crypto"] 10 | license = "MIT" 11 | readme = "README.md" 12 | edition = "2021" 13 | 14 | [dev-dependencies] 15 | quickcheck = "0.9.2" 16 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Tim McLean 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 9 | of the Software, and to permit persons to whom the Software is furnished to do 10 | so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-timing-shield 2 | 3 | Comprehensive timing attack protection for Rust programs. 4 | 5 | One of the fundamental challenges of writing software that operates on sensitive information 6 | is preventing *timing leaks*. A timing leak is when there exists a relationship between the 7 | values of secret variables in your program and the execution time of your code or other code 8 | running on the same hardware. Attackers who are aware of this relationship can use a 9 | high-resolution timer to learn secret information that they would not normally be able to 10 | access (e.g. extract an SSL key from a web server). 11 | 12 | To prevent timing leaks in cryptography code, it is best practice to write code that is 13 | *constant-time*. For a full background on writing constant-time code, see 14 | [A beginner's guide to constant-time 15 | cryptography](https://www.chosenplaintext.ca/articles/beginners-guide-constant-time-cryptography.html). 16 | 17 | rust-timing-shield is a framework for writing code without timing leaks. 
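As a quick taste of the API, here is a minimal sketch using the `TpU32`, `TpBool`, and `TpEq` types defined in this crate (the `check_pin` function itself is purely illustrative):

```rust
use timing_shield::*;

fn check_pin(entered: u32, secret: u32) -> bool {
    // Protect secret values as early as possible
    let entered = TpU32::protect(entered);
    let secret = TpU32::protect(secret);

    // Constant-time comparison; no branch is taken on the secret value
    let matches: TpBool = entered.tp_eq(&secret);

    // Only expose the final, public result
    matches.expose()
}
```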
18 | See the 19 | [Getting Started page](https://www.chosenplaintext.ca/open-source/rust-timing-shield/getting-started) 20 | or the 21 | [project home page](https://www.chosenplaintext.ca/open-source/rust-timing-shield) 22 | for more information. 23 | 24 | ## Reporting security vulnerabilities 25 | 26 | Please visit the [Security 27 | page](https://www.chosenplaintext.ca/open-source/rust-timing-shield/security) 28 | for more information. 29 | 30 | ## License 31 | 32 | The MIT License (MIT) 33 | 34 | Copyright (c) 2017-2022 Tim McLean 35 | 36 | Permission is hereby granted, free of charge, to any person obtaining a copy of 37 | this software and associated documentation files (the "Software"), to deal in 38 | the Software without restriction, including without limitation the rights to 39 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 40 | of the Software, and to permit persons to whom the Software is furnished to do 41 | so, subject to the following conditions: 42 | 43 | The above copyright notice and this permission notice shall be included in all 44 | copies or substantial portions of the Software. 45 | 46 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 47 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 48 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 49 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 50 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 51 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 52 | SOFTWARE. 53 | -------------------------------------------------------------------------------- /src/barriers.rs: -------------------------------------------------------------------------------- 1 | /// Identity function accepting a `u8` as input and outputting that same `u8` as 2 | /// output, while blocking the compiler from applying optimizations across this node in the data 3 | /// dependence graph. 4 | /// 5 | /// This is an **internal utility** of rust-timing-shield that users of the framework will not 6 | /// normally need to use. 7 | /// 8 | /// # Background 9 | /// 10 | /// rust-timing-shield has essentially two goals: (1) prevent the programmer from writing code that 11 | /// they shouldn't, and (2) prevent the compiler from transforming a programmer's secure code 12 | /// into vulnerable code. This function is a small primitive that assists in accomplishing the 2nd 13 | /// goal. 14 | /// 15 | /// An important rule of protecting code from timing attacks is to ensure that a program never 16 | /// branches on a secret value. In rust-timing-shield terminology, we should never emit a 17 | /// branch conditional on a `TpBool`. The Rust compiler uses many LLVM optimization passes that may 18 | /// produce a conditional branch where none was written by the programmer, so we must remove the 19 | /// compiler's ability to identify situations where a conditional branch could reasonably be 20 | /// introduced. The most problematic optimizations identified so far output [an LLVM IR `select` 21 | /// instruction](https://llvm.org/docs/LangRef.html#select-instruction). 22 | /// 23 | /// Of course, it is legal for the compiler to emit `select` instructions in any situation as long 24 | /// as the output and side effects of the code remain the same. 
For example, it could expand this 25 | /// code: 26 | /// 27 | /// ``` 28 | /// fn add(a: u8, b: u8) -> u8 { 29 | /// a + b 30 | /// } 31 | /// ``` 32 | /// 33 | /// to: 34 | /// 35 | /// ``` 36 | /// fn add(a: u8, b: u8) -> u8 { 37 | /// if a == 5 && b == 5 { 38 | /// return 10; 39 | /// } 40 | /// if a == 7 && b == 5 { 41 | /// return 12; 42 | /// } 43 | /// a + b 44 | /// } 45 | /// ``` 46 | /// 47 | /// The current implementation of rust-timing-shield assumes that this is ridiculous and wouldn't 48 | /// happen. In particular, an assumption is made that the compiler will only insert a `select` for 49 | /// "values of boolean origin". For example, this branchless code: 50 | /// 51 | /// ``` 52 | /// fn add_bool(number: u8, b: bool) -> u8 { 53 | /// number + (b as u8) 54 | /// } 55 | /// ``` 56 | /// 57 | /// could reasonably, under this assumption, be rewritten with a branch: 58 | /// 59 | /// ``` 60 | /// fn add_bool(number: u8, b: bool) -> u8 { 61 | /// if b { 62 | /// return number + 1; 63 | /// } 64 | /// 65 | /// number 66 | /// } 67 | /// ``` 68 | /// 69 | /// since `b as u8` is a "value of boolean origin". Based on the empirical tests of the Rust 70 | /// compiler and a review of how LLVM optimization passes are implemented, this assumption appears 71 | /// to be reasonable. 72 | /// 73 | /// Now, with this assumption in place, rust-timing-shield only needs to ensure that all 74 | /// timing-protected values either are not of boolean origin or hide their origin from the 75 | /// compiler. This is accomplished in two steps: 76 | /// 77 | /// 1. All timing-protected boolean values (`TpBool`) are stored as `u8` values and never as 78 | /// `bool`, even in intermediate computations. 79 | /// 2. `TpBool::protect(bool)` converts the `bool` to a `u8` and uses an optimization barrier (this 80 | /// function) to hide the origin of the `u8`. 81 | /// 82 | /// # Usage 83 | /// 84 | /// `optimization_barrier_u8` is a low-cost (but not zero-cost) way to hide a value's origin from the compiler. If a 85 | /// value passes through an optimization barrier, the compiler will be able to perform 86 | /// optimizations on computations prior to the barrier and optimizations on computations after the 87 | /// barrier, but will be unable to perform optimizations *across* the barrier. 88 | /// 89 | /// It is important that the barrier interrupt the flow of a value from one part of the program to 90 | /// the next. As an example of incorrect usage: 91 | /// 92 | /// ``` 93 | /// # use timing_shield::barriers::optimization_barrier_u8; 94 | /// # 95 | /// fn add_bool(number: u8, secret_condition: bool) -> u8 { 96 | /// let secret_condition_u8 = secret_condition as u8; 97 | /// 98 | /// // WRONG: barrier does not interrupt data flow 99 | /// optimization_barrier_u8(secret_condition_u8); 100 | /// 101 | /// number + secret_condition_u8 102 | /// } 103 | /// ``` 104 | /// 105 | /// Here, the return value from `optimization_barrier_u8` is unused, so the optimization barrier 106 | /// has no effect. 
An optimization barrier must be the single connection between two parts of the 107 | /// program's data dependence graph: 108 | /// 109 | /// ``` 110 | /// # use timing_shield::barriers::optimization_barrier_u8; 111 | /// # 112 | /// fn add_bool(number: u8, secret_condition: bool) -> u8 { 113 | /// let secret_condition_u8 = secret_condition as u8; 114 | /// 115 | /// // Override the previous definition to avoid accidentally using the pre-barrier value 116 | /// let secret_condition_u8 = optimization_barrier_u8(secret_condition_u8); 117 | /// 118 | /// number + secret_condition_u8 119 | /// } 120 | /// ``` 121 | /// 122 | /// In rust-timing-shield, optimization barriers are used to hide when a `u8` is of boolean origin. 123 | /// The use of a barrier prevents the compiler from identifying that the `u8` value after the 124 | /// barrier is the same value that was cast from a `bool` before the barrier. This suppresses the 125 | /// many optimizations that would transform the timing-leak-proof branchless computations with `u8` 126 | /// values that rust-timing-shield produces into branching computations that leak boolean values. 127 | /// 128 | /// # Performance considerations 129 | /// 130 | /// `optimization_barrier_u8` is currently implemented as an empty inline assembly block. The `u8` 131 | /// input value is provided as an input/output register to the assembly block and then immediately 132 | /// returned as is. Although the assembly block is a no-op, LLVM is forced to assume that the value 133 | /// may have changed and can make no assumptions about what the output may be. The Rust Unstable 134 | /// reference [describes the assembly block as a black 135 | /// box](https://doc.rust-lang.org/unstable-book/library-features/asm.html): 136 | /// 137 | /// > “The compiler cannot assume that the instructions in the asm are the ones that will 138 | /// actually end up executed. This effectively means that the compiler must treat the `asm!` as a 139 | /// black box and only take the interface specification into account, not the instructions 140 | /// themselves.” 141 | /// 142 | /// Since no actual assembler instructions are provided, it might seem that this function call 143 | /// would have zero overhead after inlining. However, there are other considerations that may 144 | /// affect performance: 145 | /// 146 | /// - The barrier will (obviously) prevent matching on code patterns that span across the barrier. 147 | /// This is intended. 148 | /// - The barrier will force the compiler to schedule an actual register to hold the value 149 | /// temporarily. 150 | /// - The barrier will force the value into a single register, which may impair the compiler's 151 | /// ability to perform optimizations such as auto-vectorization. 152 | /// - Constant folding cannot proceed past a barrier. 153 | /// 154 | /// For these reasons, optimization barriers are only used where necessary to minimize any 155 | /// potential impact on performance, keeping rust-timing-shield zero-cost for as many applications 156 | /// as possible. 157 | #[inline(always)] 158 | pub fn optimization_barrier_u8(mut value: u8) -> u8 { 159 | unsafe { 160 | std::arch::asm!( 161 | // Rust requires us to use every register defined, so we use it inside of a comment. 162 | "/* optimization_barrier_u8 {unused} */", 163 | 164 | // Define a single input/output register called "unused". 165 | // The Rust compiler will perceive this as a mutation of `value`. 
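// `reg_byte` selects an 8-bit general-purpose register (such as `al`), matching the `u8` operand.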
166 | unused = inout(reg_byte) value, 167 | 168 | // By guaranteeing more invariants we improve the compiler's ability to optimize. 169 | // Since the assembly block is a no-op, we easily uphold all of these invariants. 170 | options(pure, nomem, nostack, preserves_flags) 171 | ); 172 | } 173 | 174 | value 175 | } 176 | 177 | #[cfg(test)] 178 | mod tests { 179 | use super::*; 180 | use quickcheck::quickcheck; 181 | 182 | quickcheck! { 183 | fn optimization_barrier_is_identity(value: u8) -> bool { 184 | optimization_barrier_u8(value) == value 185 | } 186 | } 187 | 188 | // TODO I would like to add a test that checks the compiled LLVM IR for `select` instructions. 189 | // This would help confirm that the optimization barrier is working correctly. 190 | // 191 | // According to my tests, the following function compiles to a single `select`: 192 | // 193 | // pub fn select(cond: bool, a: u32, b: u32) -> u32 { 194 | // let mask = (cond as u32) - 1; 195 | // 196 | // (a & (!mask)) | (b & mask) 197 | // } 198 | // 199 | // while this function is branchless in LLVM IR and x86_64: 200 | // 201 | // pub fn select(cond: bool, a: u32, b: u32) -> u32 { 202 | // let mask = (optimization_barrier_u8(cond as u8) as u32) - 1; 203 | // 204 | // (a & (!mask)) | (b & mask) 205 | // } 206 | // 207 | // Tested with `cargo llvm-ir` from the `cargo-asm` tool. 208 | } 209 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2017-2022 Tim McLean 2 | 3 | //! Comprehensive timing attack protection for Rust programs. 4 | //! 5 | //! Project home page: 6 | //! 7 | //! One of the fundamental challenges of writing software that operates on sensitive information 8 | //! is preventing *timing leaks*. A timing leak is when there exists a relationship between the 9 | //! values of secret variables in your program and the execution time of your code or other code 10 | //! running on the same hardware. Attackers who are aware of this relationship can use a 11 | //! high-resolution timer to learn secret information that they would not normally be able to 12 | //! access (e.g. extract an SSL key from a web server). 13 | //! 14 | //! To prevent timing leaks in cryptography code, it is best practice to write code that is 15 | //! *constant-time*. For a full background on writing constant-time code, see [A beginner's guide 16 | //! to constant-time 17 | //! cryptography](https://www.chosenplaintext.ca/articles/beginners-guide-constant-time-cryptography.html). 18 | //! 19 | //! `rust-timing-shield` is a framework for writing code without timing leaks. 20 | //! See the [Getting Started 21 | //! page](https://www.chosenplaintext.ca/open-source/rust-timing-shield/getting-started) for more 22 | //! information. 
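//!
//! As an illustrative sketch (the `tags_match` helper below is hypothetical, not part of this
//! crate), comparing two secret byte strings in constant time might look like this, using the
//! slice/`Vec` implementations of `TpEq`:
//!
//! ```
//! use timing_shield::*;
//!
//! fn tags_match(a: &[u8], b: &[u8]) -> bool {
//!     // Protect each byte before doing anything else with it
//!     let a: Vec<TpU8> = a.iter().map(|&x| TpU8::protect(x)).collect();
//!     let b: Vec<TpU8> = b.iter().map(|&x| TpU8::protect(x)).collect();
//!
//!     // The comparison time depends only on the lengths, never on the contents
//!     a.tp_eq(&b).expose()
//! }
//!
//! assert!(tags_match(b"same", b"same"));
//! assert!(!tags_match(b"same", b"diff"));
//! ```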
23 | 24 | #![feature(min_specialization)] 25 | 26 | #[cfg(test)] 27 | extern crate quickcheck; 28 | 29 | pub mod barriers; 30 | 31 | use std::ops::Add; 32 | use std::ops::AddAssign; 33 | use std::ops::BitAnd; 34 | use std::ops::BitAndAssign; 35 | use std::ops::BitOr; 36 | use std::ops::BitOrAssign; 37 | use std::ops::BitXor; 38 | use std::ops::BitXorAssign; 39 | use std::ops::Mul; 40 | use std::ops::MulAssign; 41 | use std::ops::Neg; 42 | use std::ops::Not; 43 | use std::ops::Shl; 44 | use std::ops::ShlAssign; 45 | use std::ops::Shr; 46 | use std::ops::ShrAssign; 47 | use std::ops::Sub; 48 | use std::ops::SubAssign; 49 | 50 | use crate::barriers::optimization_barrier_u8; 51 | 52 | macro_rules! impl_unary_op { 53 | ( 54 | $trait_name:ident, $op_name:ident, 55 | $input_type:ident, $output_type:ident 56 | ) => { 57 | impl $trait_name for $input_type { 58 | type Output = $output_type; 59 | 60 | #[inline(always)] 61 | fn $op_name(self) -> $output_type { 62 | $output_type((self.0).$op_name()) 63 | } 64 | } 65 | }; 66 | } 67 | 68 | macro_rules! impl_bin_op { 69 | ( 70 | $trait_name:ident, $op_name:ident, $output_type:ident, 71 | ($lhs_var:ident : $lhs_type:ty) => $lhs_expr:expr, 72 | ($rhs_var:ident : $rhs_type:ty) => $rhs_expr:expr 73 | ) => { 74 | impl_bin_op!( 75 | $trait_name, $op_name, $op_name, $output_type, 76 | ($lhs_var: $lhs_type) => $lhs_expr, 77 | ($rhs_var: $rhs_type) => $rhs_expr, 78 | (output) => output 79 | ); 80 | }; 81 | ( 82 | $trait_name:ident, $op_name:ident, $output_type:ident, 83 | ($lhs_var:ident : $lhs_type:ty) => $lhs_expr:expr, 84 | ($rhs_var:ident : $rhs_type:ty) => $rhs_expr:expr, 85 | ($output_var:ident) => $output_expr:expr 86 | ) => { 87 | impl_bin_op!( 88 | $trait_name, $op_name, $op_name, $output_type, 89 | ($lhs_var: $lhs_type) => $lhs_expr, 90 | ($rhs_var: $rhs_type) => $rhs_expr, 91 | ($output_var) => $output_expr 92 | ); 93 | }; 94 | ( 95 | $trait_name:ident, $outer_op_name:ident, $inner_op_name:ident, $output_type:ident, 96 | ($lhs_var:ident : $lhs_type:ty) => $lhs_expr:expr, 97 | ($rhs_var:ident : $rhs_type:ty) => $rhs_expr:expr 98 | ) => { 99 | impl_bin_op!( 100 | $trait_name, $outer_op_name, $inner_op_name, $output_type, 101 | ($lhs_var: $lhs_type) => $lhs_expr, 102 | ($rhs_var: $rhs_type) => $rhs_expr, 103 | (output) => output 104 | ); 105 | }; 106 | ( 107 | $trait_name:ident, $outer_op_name:ident, $inner_op_name:ident, $output_type:ident, 108 | ($lhs_var:ident : $lhs_type:ty) => $lhs_expr:expr, 109 | ($rhs_var:ident : $rhs_type:ty) => $rhs_expr:expr, 110 | ($output_var:ident) => $output_expr:expr 111 | ) => { 112 | impl $trait_name<$rhs_type> for $lhs_type { 113 | type Output = $output_type; 114 | 115 | #[inline(always)] 116 | fn $outer_op_name(self, other: $rhs_type) -> $output_type { 117 | let lhs = { 118 | let $lhs_var = self; 119 | $lhs_expr 120 | }; 121 | let rhs = { 122 | let $rhs_var = other; 123 | $rhs_expr 124 | }; 125 | let $output_var = lhs.$inner_op_name(rhs); 126 | $output_type($output_expr) 127 | } 128 | } 129 | } 130 | } 131 | 132 | macro_rules! derive_assign_op { 133 | ( 134 | $trait_name:ident, $assign_op_name:ident, $op_name:ident, 135 | $lhs_type:ty, $rhs_type:ty 136 | ) => { 137 | impl $trait_name<$rhs_type> for $lhs_type { 138 | #[inline(always)] 139 | fn $assign_op_name(&mut self, rhs: $rhs_type) { 140 | *self = self.$op_name(rhs); 141 | } 142 | } 143 | }; 144 | } 145 | 146 | macro_rules! 
impl_as { 147 | ($tp_type:ident, $type:ident, $fn_name:ident) => { 148 | /// Casts from one number type to another, following the same conventions as Rust's `as` 149 | /// keyword. 150 | #[inline(always)] 151 | pub fn $fn_name(self) -> $tp_type { 152 | $tp_type(self.0 as $type) 153 | } 154 | }; 155 | } 156 | 157 | macro_rules! as_unsigned_type { 158 | (u8) => { 159 | u8 160 | }; 161 | (u16) => { 162 | u16 163 | }; 164 | (u32) => { 165 | u32 166 | }; 167 | (u64) => { 168 | u64 169 | }; 170 | (i8) => { 171 | u8 172 | }; 173 | (i16) => { 174 | u16 175 | }; 176 | (i32) => { 177 | u32 178 | }; 179 | (i64) => { 180 | u64 181 | }; 182 | } 183 | 184 | macro_rules! impl_tp_eq { 185 | ( 186 | $lhs_type:ty, $rhs_type:ty, 187 | ($lhs_var:ident, $rhs_var:ident) => $eq_expr:expr 188 | ) => { 189 | impl TpEq<$rhs_type> for $lhs_type { 190 | #[inline(always)] 191 | fn tp_eq(&self, other: &$rhs_type) -> TpBool { 192 | let $lhs_var = self; 193 | let $rhs_var = other; 194 | $eq_expr 195 | } 196 | 197 | #[inline(always)] 198 | fn tp_not_eq(&self, other: &$rhs_type) -> TpBool { 199 | // TODO might not be optimal 200 | !self.tp_eq(other) 201 | } 202 | } 203 | }; 204 | } 205 | 206 | macro_rules! impl_tp_eq_for_number { 207 | ( 208 | $inner_type:ident, 209 | ($lhs_var:ident : $lhs_type:ty) => $lhs_expr:expr, 210 | ($rhs_var:ident : $rhs_type:ty) => $rhs_expr:expr 211 | ) => { 212 | impl_tp_eq!($lhs_type, $rhs_type, (lhs, rhs) => { 213 | let l = { 214 | let $lhs_var = lhs; 215 | $lhs_expr 216 | }; 217 | let r = { 218 | let $rhs_var = rhs; 219 | $rhs_expr 220 | }; 221 | let bit_diff = l ^ r; 222 | let msb_iff_zero_diff = bit_diff.wrapping_sub(1) & !bit_diff; 223 | let type_bitwidth = $inner_type::count_zeros(0); 224 | let unsigned_msb_iff_zero_diff = msb_iff_zero_diff as as_unsigned_type!($inner_type); 225 | TpBool((unsigned_msb_iff_zero_diff >> (type_bitwidth - 1)) as u8) 226 | }); 227 | } 228 | } 229 | 230 | macro_rules! impl_tp_ord { 231 | ( 232 | $lhs_type:ty, $rhs_type:ty, 233 | tp_lt($lhs_var:ident, $rhs_var:ident) => $lt_expr:expr 234 | ) => { 235 | impl TpOrd<$rhs_type> for $lhs_type { 236 | #[inline(always)] 237 | fn tp_lt(&self, other: &$rhs_type) -> TpBool { 238 | let $lhs_var = self; 239 | let $rhs_var = other; 240 | $lt_expr 241 | } 242 | 243 | #[inline(always)] 244 | fn tp_gt(&self, other: &$rhs_type) -> TpBool { 245 | other.tp_lt(self) 246 | } 247 | 248 | #[inline(always)] 249 | fn tp_lt_eq(&self, other: &$rhs_type) -> TpBool { 250 | // TODO might not be optimal 251 | !self.tp_gt(other) 252 | } 253 | 254 | #[inline(always)] 255 | fn tp_gt_eq(&self, other: &$rhs_type) -> TpBool { 256 | // TODO might not be optimal 257 | !self.tp_lt(other) 258 | } 259 | } 260 | }; 261 | } 262 | 263 | macro_rules! impl_tp_cond_swap_with_xor { 264 | ($tp_type:ident, $type:ident) => { 265 | impl TpCondSwap for $tp_type { 266 | #[inline(always)] 267 | fn tp_cond_swap(condition: TpBool, a: &mut $tp_type, b: &mut $tp_type) { 268 | // Zero-extend condition to this type's width 269 | let cond_zx = $tp_type(condition.0 as $type); 270 | 271 | // Create mask of 11...11 for true or 00...00 for false 272 | let mask = !(cond_zx - 1); 273 | 274 | // swapper will be a XOR b for true or 00...00 for false 275 | let swapper = (*a ^ *b) & mask; 276 | 277 | *a ^= swapper; 278 | *b ^= swapper; 279 | } 280 | } 281 | }; 282 | } 283 | 284 | macro_rules! 
define_number_type { 285 | ( 286 | $tp_type:ident, $type:ident, 287 | tp_lt($tp_lt_lhs_var:ident, $tp_lt_rhs_var:ident) => $tp_lt_expr:expr, 288 | methods { 289 | $($methods:tt)* 290 | } 291 | ) => { 292 | /// A number type that prevents its value from being leaked to attackers through timing 293 | /// information. 294 | /// 295 | /// Use this type's `protect` method as early as possible to prevent the value from being 296 | /// used in variable-time computations. 297 | /// 298 | /// Unlike Rust's built-in number types, `rust-timing-shield` number types have no overflow 299 | /// checking, even in debug mode. In other words, they behave like Rust's 300 | /// [Wrapping](https://doc.rust-lang.org/std/num/struct.Wrapping.html) types. 301 | /// 302 | /// Additionally, all shift distances are reduced mod the bit width of the type 303 | /// (e.g. `some_i64 << 104` is equivalent to `some_i64 << 40`). 304 | /// 305 | /// ``` 306 | /// # use timing_shield::*; 307 | /// # let some_u8 = 5u8; 308 | /// # let some_other_u8 = 20u8; 309 | /// // Protect the value as early as possible to limit the risk 310 | /// let protected_value = TpU8::protect(some_u8); 311 | /// let other_protected_value = TpU8::protect(some_other_u8); 312 | /// 313 | /// // Do some computation with the protected values 314 | /// let x = (other_protected_value + protected_value) & 0x40; 315 | /// 316 | /// // If needed, remove protection using `expose` 317 | /// println!("{}", x.expose()); 318 | /// ``` 319 | #[cfg(target_arch = "x86_64")] 320 | #[derive(Clone, Copy)] 321 | pub struct $tp_type($type); 322 | 323 | impl $tp_type { 324 | /// Hide `input` behind a protective abstraction to prevent the value from being used 325 | /// in such a way that the value could leak out via a timing side channel. 326 | /// 327 | /// ``` 328 | /// # use timing_shield::*; 329 | /// # let secret_u32 = 5u32; 330 | /// let protected = TpU32::protect(secret_u32); 331 | /// 332 | /// // Use `protected` instead of `secret_u32` to avoid timing leaks 333 | /// ``` 334 | #[inline(always)] 335 | pub fn protect(input: $type) -> Self { 336 | $tp_type(input) 337 | } 338 | 339 | $($methods)* 340 | 341 | /// Shifts left by `n` bits, wrapping truncated bits around to the right side of the 342 | /// resulting value. 343 | /// 344 | /// If `n` is larger than the bitwidth of this number type, 345 | /// `n` is reduced mod that bitwidth. 346 | /// For example, rotating an `i16` with `n = 35` is equivalent to rotating with `n = 347 | /// 3`, since `35 = 3 mod 16`. 348 | #[inline(always)] 349 | pub fn rotate_left(self, n: u32) -> Self { 350 | $tp_type(self.0.rotate_left(n)) 351 | } 352 | 353 | /// Shifts right by `n` bits, wrapping truncated bits around to the left side of the 354 | /// resulting value. 355 | /// 356 | /// If `n` is larger than the bitwidth of this number type, 357 | /// `n` is reduced mod that bitwidth. 358 | /// For example, rotating an `i16` with `n = 35` is equivalent to rotating with `n = 359 | /// 3`, since `35 = 3 mod 16`. 360 | #[inline(always)] 361 | pub fn rotate_right(self, n: u32) -> Self { 362 | $tp_type(self.0.rotate_right(n)) 363 | } 364 | 365 | /// Remove the timing protection and expose the raw number value. 366 | /// Once a value is exposed, it is the library user's responsibility to prevent timing 367 | /// leaks (if necessary). 368 | /// 369 | /// Commonly, this method is used when a value is safe to make public (e.g. when an 370 | /// encryption algorithm outputs a ciphertext). 
Alternatively, this method may need to 371 | /// be used when providing a secret value to an interface that does not use 372 | /// `timing-shield`'s types (e.g. writing a secret key to a file using a file system 373 | /// API). 374 | #[inline(always)] 375 | pub fn expose(self) -> $type { 376 | self.0 377 | } 378 | } 379 | 380 | impl_unary_op!(Not, not, $tp_type, $tp_type); 381 | 382 | impl_bin_op!(Add, add, wrapping_add, $tp_type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 383 | impl_bin_op!(Add, add, wrapping_add, $tp_type, (l: $type ) => l , (r: $tp_type) => r.0); 384 | impl_bin_op!(Add, add, wrapping_add, $tp_type, (l: $tp_type) => l.0, (r: $type ) => r ); 385 | 386 | impl_bin_op!(Sub, sub, wrapping_sub, $tp_type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 387 | impl_bin_op!(Sub, sub, wrapping_sub, $tp_type, (l: $type ) => l , (r: $tp_type) => r.0); 388 | impl_bin_op!(Sub, sub, wrapping_sub, $tp_type, (l: $tp_type) => l.0, (r: $type ) => r ); 389 | 390 | impl_bin_op!(Mul, mul, wrapping_mul, $tp_type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 391 | impl_bin_op!(Mul, mul, wrapping_mul, $tp_type, (l: $type ) => l , (r: $tp_type) => r.0); 392 | impl_bin_op!(Mul, mul, wrapping_mul, $tp_type, (l: $tp_type) => l.0, (r: $type ) => r ); 393 | 394 | impl_bin_op!(BitAnd, bitand, $tp_type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 395 | impl_bin_op!(BitAnd, bitand, $tp_type, (l: $type ) => l , (r: $tp_type) => r.0); 396 | impl_bin_op!(BitAnd, bitand, $tp_type, (l: $tp_type) => l.0, (r: $type ) => r ); 397 | 398 | impl_bin_op!(BitOr, bitor, $tp_type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 399 | impl_bin_op!(BitOr, bitor, $tp_type, (l: $type ) => l , (r: $tp_type) => r.0); 400 | impl_bin_op!(BitOr, bitor, $tp_type, (l: $tp_type) => l.0, (r: $type ) => r ); 401 | 402 | impl_bin_op!(BitXor, bitxor, $tp_type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 403 | impl_bin_op!(BitXor, bitxor, $tp_type, (l: $type ) => l , (r: $tp_type) => r.0); 404 | impl_bin_op!(BitXor, bitxor, $tp_type, (l: $tp_type) => l.0, (r: $type ) => r ); 405 | 406 | impl_bin_op!(Shl, shl, wrapping_shl, $tp_type, (l: $tp_type) => l.0, (r: u32) => r); 407 | impl_bin_op!(Shr, shr, wrapping_shr, $tp_type, (l: $tp_type) => l.0, (r: u32) => r); 408 | 409 | derive_assign_op!(AddAssign, add_assign, add, $tp_type, $tp_type); 410 | derive_assign_op!(AddAssign, add_assign, add, $tp_type, $type); 411 | 412 | derive_assign_op!(SubAssign, sub_assign, sub, $tp_type, $tp_type); 413 | derive_assign_op!(SubAssign, sub_assign, sub, $tp_type, $type); 414 | 415 | derive_assign_op!(MulAssign, mul_assign, mul, $tp_type, $tp_type); 416 | derive_assign_op!(MulAssign, mul_assign, mul, $tp_type, $type); 417 | 418 | derive_assign_op!(BitAndAssign, bitand_assign, bitand, $tp_type, $tp_type); 419 | derive_assign_op!(BitAndAssign, bitand_assign, bitand, $tp_type, $type); 420 | 421 | derive_assign_op!(BitOrAssign, bitor_assign, bitor, $tp_type, $tp_type); 422 | derive_assign_op!(BitOrAssign, bitor_assign, bitor, $tp_type, $type); 423 | 424 | derive_assign_op!(BitXorAssign, bitxor_assign, bitxor, $tp_type, $tp_type); 425 | derive_assign_op!(BitXorAssign, bitxor_assign, bitxor, $tp_type, $type); 426 | 427 | derive_assign_op!(ShlAssign, shl_assign, shl, $tp_type, u32); 428 | derive_assign_op!(ShrAssign, shr_assign, shr, $tp_type, u32); 429 | 430 | impl_tp_eq_for_number!($type, (l: $tp_type) => l.0, (r: $tp_type) => r.0); 431 | impl_tp_eq_for_number!($type, (l: $type ) => l , (r: $tp_type) => r.0); 432 | impl_tp_eq_for_number!($type, (l: $tp_type) => 
l.0, (r: $type ) => r ); 433 | 434 | impl_tp_ord!($tp_type, $tp_type, tp_lt(l, r) => { 435 | let $tp_lt_lhs_var = l.0; 436 | let $tp_lt_rhs_var = r.0; 437 | $tp_lt_expr 438 | }); 439 | impl_tp_ord!($type, $tp_type, tp_lt(l, r) => { 440 | let $tp_lt_lhs_var = *l; 441 | let $tp_lt_rhs_var = r.0; 442 | $tp_lt_expr 443 | }); 444 | impl_tp_ord!($tp_type, $type, tp_lt(l, r) => { 445 | let $tp_lt_lhs_var = l.0; 446 | let $tp_lt_rhs_var = *r; 447 | $tp_lt_expr 448 | }); 449 | 450 | impl_tp_cond_swap_with_xor!($tp_type, $type); 451 | 452 | impl TpEq for [$tp_type] { 453 | #[inline(always)] 454 | fn tp_eq(&self, other: &[$tp_type]) -> TpBool { 455 | if self.len() != other.len() { 456 | return TP_FALSE; 457 | } 458 | 459 | let acc = self.iter().zip(other.iter()) 460 | .fold($tp_type(0), |prev, (&a, &b)| prev | (a ^ b)); 461 | acc.tp_eq(&0) 462 | } 463 | 464 | #[inline(always)] 465 | fn tp_not_eq(&self, other: &[$tp_type]) -> TpBool { 466 | if self.len() != other.len() { 467 | return TP_TRUE; 468 | } 469 | 470 | let acc = self.iter().zip(other.iter()) 471 | .fold($tp_type(0), |prev, (&a, &b)| prev | (a ^ b)); 472 | acc.tp_not_eq(&0) 473 | } 474 | } 475 | } 476 | } 477 | 478 | /// A trait for performing equality tests on types with timing leak protection. 479 | /// 480 | /// **Important**: implementations of this trait are only required to protect inputs that are already a 481 | /// timing-protected type. For example, `a.tp_eq(&b)` is allowed to leak `a` if `a` is a `u32`, 482 | /// instead of a timing-protected type like `TpU32`. 483 | /// 484 | /// Ideally, this trait will be removed in the future if/when Rust allows overloading of the `==` 485 | /// and `!=` operators. 486 | pub trait TpEq 487 | where 488 | Rhs: ?Sized, 489 | { 490 | /// Compare `self` with `other` for equality without leaking the result. 491 | /// **Important**: if either input is not a timing-protected type, this operation might leak the 492 | /// value of that type. To prevent timing leaks, protect values before performing any operations 493 | /// on them. 494 | /// 495 | /// Equivalent to `!a.tp_not_eq(&other)` 496 | fn tp_eq(&self, other: &Rhs) -> TpBool; 497 | 498 | /// Compare `self` with `other` for inequality without leaking the result. 499 | /// **Important**: if either input is not a timing-protected type, this operation might leak the 500 | /// value of that type. To prevent timing leaks, protect values before performing any operations 501 | /// on them. 502 | /// 503 | /// Equivalent to `!a.tp_eq(&other)` 504 | fn tp_not_eq(&self, other: &Rhs) -> TpBool; 505 | } 506 | 507 | /// A trait for performing comparisons on types with timing leak protection. 508 | /// 509 | /// **Important**: implementations of this trait are only required to protect inputs that are already a 510 | /// timing-protected type. For example, `a.tp_lt(&b)` is allowed to leak `a` if `a` is a `u32`, 511 | /// instead of a timing-protected type like `TpU32`. 512 | /// 513 | /// Ideally, this trait will be removed in the future if/when Rust allows overloading of the `<`, 514 | /// `>`, `<=`, and `>=` operators. 515 | pub trait TpOrd 516 | where 517 | Rhs: ?Sized, 518 | { 519 | /// Compute `self < other` without leaking the result. 520 | /// **Important**: if either input is not a timing-protected type, this operation might leak the 521 | /// value of that type. To prevent timing leaks, protect values before performing any operations 522 | /// on them. 
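///
/// A brief illustrative sketch (using the `TpU32` implementation provided by this crate):
///
/// ```
/// # use timing_shield::*;
/// let a = TpU32::protect(4);
/// let b = TpU32::protect(7);
/// assert!(a.tp_lt(&b).expose());
/// assert!(!b.tp_lt(&a).expose());
/// ```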
523 | fn tp_lt(&self, other: &Rhs) -> TpBool; 524 | 525 | /// Compute `self <= other` without leaking the result. 526 | /// **Important**: if either input is not a timing-protected type, this operation might leak the 527 | /// value of that type. To prevent timing leaks, protect values before performing any operations 528 | /// on them. 529 | fn tp_lt_eq(&self, other: &Rhs) -> TpBool; 530 | 531 | /// Compute `self > other` without leaking the result. 532 | /// **Important**: if either input is not a timing-protected type, this operation might leak the 533 | /// value of that type. To prevent timing leaks, protect values before performing any operations 534 | /// on them. 535 | fn tp_gt(&self, other: &Rhs) -> TpBool; 536 | 537 | /// Compute `self >= other` without leaking the result. 538 | /// **Important**: if either input is not a timing-protected type, this operation might leak the 539 | /// value of that type. To prevent timing leaks, protect values before performing any operations 540 | /// on them. 541 | fn tp_gt_eq(&self, other: &Rhs) -> TpBool; 542 | } 543 | 544 | /// A trait for performing conditional swaps of two values without leaking whether the swap 545 | /// occurred. 546 | /// 547 | /// For convenience, you may want to use the [`select`](struct.TpBool.html#method.select) or 548 | /// [`cond_swap`](struct.TpBool.html#method.cond_swap) methods on [`TpBool`](struct.TpBool.html) 549 | /// instead of using this trait directly: 550 | /// 551 | /// ``` 552 | /// # use timing_shield::*; 553 | /// let condition: TpBool; 554 | /// let mut a: TpU32; 555 | /// let mut b: TpU32; 556 | /// # condition = TpBool::protect(true); 557 | /// # a = TpU32::protect(5); 558 | /// # b = TpU32::protect(6); 559 | /// // ... 560 | /// condition.cond_swap(&mut a, &mut b); 561 | /// 562 | /// // OR: 563 | /// let a_if_true = condition.select(a, b); 564 | /// # assert_eq!(a_if_true.expose(), a.expose()); 565 | /// ``` 566 | /// 567 | /// This trait doesn't really make sense to implement on non-`Tp` types. 568 | pub trait TpCondSwap { 569 | /// Swap `a` and `b` if and only if `condition` is true. 570 | /// 571 | /// Implementers of this trait must take care to avoid leaking whether the swap occurred. 
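///
/// As a sketch of one way to satisfy that requirement (the `Point` type here is hypothetical),
/// a struct can delegate to the constant-time swap of each of its fields:
///
/// ```
/// # use timing_shield::*;
/// struct Point {
///     x: TpU32,
///     y: TpU32,
/// }
///
/// impl TpCondSwap for Point {
///     fn tp_cond_swap(condition: TpBool, a: &mut Point, b: &mut Point) {
///         // Each field swap is branchless, so the whole swap is branchless
///         condition.cond_swap(&mut a.x, &mut b.x);
///         condition.cond_swap(&mut a.y, &mut b.y);
///     }
/// }
/// ```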
572 | fn tp_cond_swap(condition: TpBool, a: &mut Self, b: &mut Self); 573 | } 574 | 575 | impl TpEq for [T] 576 | where 577 | T: TpEq, 578 | { 579 | #[inline(always)] 580 | default fn tp_eq(&self, other: &[T]) -> TpBool { 581 | if self.len() != other.len() { 582 | return TP_FALSE; 583 | } 584 | 585 | self.iter() 586 | .zip(other.iter()) 587 | .fold(TP_TRUE, |prev, (a, b)| prev & a.tp_eq(b)) 588 | } 589 | 590 | #[inline(always)] 591 | default fn tp_not_eq(&self, other: &[T]) -> TpBool { 592 | if self.len() != other.len() { 593 | return TP_FALSE; 594 | } 595 | 596 | self.iter() 597 | .zip(other.iter()) 598 | .fold(TP_FALSE, |prev, (a, b)| prev | a.tp_not_eq(b)) 599 | } 600 | } 601 | 602 | impl TpEq for Vec 603 | where 604 | T: TpEq, 605 | { 606 | #[inline(always)] 607 | fn tp_eq(&self, other: &Vec) -> TpBool { 608 | self[..].tp_eq(&other[..]) 609 | } 610 | 611 | #[inline(always)] 612 | fn tp_not_eq(&self, other: &Vec) -> TpBool { 613 | self[..].tp_not_eq(&other[..]) 614 | } 615 | } 616 | 617 | impl TpCondSwap for [T] 618 | where 619 | T: TpCondSwap, 620 | { 621 | #[inline(always)] 622 | fn tp_cond_swap(condition: TpBool, a: &mut Self, b: &mut Self) { 623 | if a.len() != b.len() { 624 | panic!("cannot swap values of slices of unequal length"); 625 | } 626 | 627 | for (a_elem, b_elem) in a.iter_mut().zip(b.iter_mut()) { 628 | condition.cond_swap(a_elem, b_elem); 629 | } 630 | } 631 | } 632 | 633 | impl TpCondSwap for Vec 634 | where 635 | T: TpCondSwap, 636 | { 637 | #[inline(always)] 638 | fn tp_cond_swap(condition: TpBool, a: &mut Self, b: &mut Self) { 639 | condition.cond_swap(a.as_mut_slice(), b.as_mut_slice()); 640 | } 641 | } 642 | 643 | define_number_type!(TpU8, u8, tp_lt(lhs, rhs) => { 644 | let overflowing_iff_lt = (lhs as u32).wrapping_sub(rhs as u32); 645 | TpBool((overflowing_iff_lt >> 31) as u8) 646 | }, methods { 647 | impl_as!(TpU16, u16, as_u16); 648 | impl_as!(TpU32, u32, as_u32); 649 | impl_as!(TpU64, u64, as_u64); 650 | impl_as!(TpI8, i8, as_i8); 651 | impl_as!(TpI16, i16, as_i16); 652 | impl_as!(TpI32, i32, as_i32); 653 | impl_as!(TpI64, i64, as_i64); 654 | }); 655 | 656 | define_number_type!(TpU16, u16, tp_lt(lhs, rhs) => { 657 | let overflowing_iff_lt = (lhs as u32).wrapping_sub(rhs as u32); 658 | TpBool((overflowing_iff_lt >> 31) as u8) 659 | }, methods { 660 | impl_as!(TpU8, u8, as_u8); 661 | impl_as!(TpU32, u32, as_u32); 662 | impl_as!(TpU64, u64, as_u64); 663 | impl_as!(TpI8, i8, as_i8); 664 | impl_as!(TpI16, i16, as_i16); 665 | impl_as!(TpI32, i32, as_i32); 666 | impl_as!(TpI64, i64, as_i64); 667 | }); 668 | 669 | define_number_type!(TpU32, u32, tp_lt(lhs, rhs) => { 670 | let overflowing_iff_lt = (lhs as u64).wrapping_sub(rhs as u64); 671 | TpBool((overflowing_iff_lt >> 63) as u8) 672 | }, methods { 673 | impl_as!(TpU8, u8, as_u8); 674 | impl_as!(TpU16, u16, as_u16); 675 | impl_as!(TpU64, u64, as_u64); 676 | impl_as!(TpI8, i8, as_i8); 677 | impl_as!(TpI16, i16, as_i16); 678 | impl_as!(TpI32, i32, as_i32); 679 | impl_as!(TpI64, i64, as_i64); 680 | }); 681 | 682 | define_number_type!(TpU64, u64, tp_lt(lhs, rhs) => { 683 | let overflowing_iff_lt = (lhs as u128).wrapping_sub(rhs as u128); 684 | TpBool((overflowing_iff_lt >> 127) as u8) 685 | }, methods { 686 | impl_as!(TpU8, u8, as_u8); 687 | impl_as!(TpU16, u16, as_u16); 688 | impl_as!(TpU32, u32, as_u32); 689 | impl_as!(TpI8, i8, as_i8); 690 | impl_as!(TpI16, i16, as_i16); 691 | impl_as!(TpI32, i32, as_i32); 692 | impl_as!(TpI64, i64, as_i64); 693 | }); 694 | 695 | define_number_type!(TpI8, i8, tp_lt(lhs, rhs) => { 696 
| let overflowing_iff_lt = ((lhs as i32).wrapping_sub(rhs as i32)) as u32; 697 | TpBool((overflowing_iff_lt >> 31) as u8) 698 | }, methods { 699 | impl_as!(TpU8, u8, as_u8); 700 | impl_as!(TpU16, u16, as_u16); 701 | impl_as!(TpU32, u32, as_u32); 702 | impl_as!(TpU64, u64, as_u64); 703 | impl_as!(TpI16, i16, as_i16); 704 | impl_as!(TpI32, i32, as_i32); 705 | impl_as!(TpI64, i64, as_i64); 706 | }); 707 | impl_unary_op!(Neg, neg, TpI8, TpI8); 708 | 709 | define_number_type!(TpI16, i16, tp_lt(lhs, rhs) => { 710 | let overflowing_iff_lt = ((lhs as i32).wrapping_sub(rhs as i32)) as u32; 711 | TpBool((overflowing_iff_lt >> 31) as u8) 712 | }, methods { 713 | impl_as!(TpU8, u8, as_u8); 714 | impl_as!(TpU16, u16, as_u16); 715 | impl_as!(TpU32, u32, as_u32); 716 | impl_as!(TpU64, u64, as_u64); 717 | impl_as!(TpI8, i8, as_i8); 718 | impl_as!(TpI32, i32, as_i32); 719 | impl_as!(TpI64, i64, as_i64); 720 | }); 721 | impl_unary_op!(Neg, neg, TpI16, TpI16); 722 | 723 | define_number_type!(TpI32, i32, tp_lt(lhs, rhs) => { 724 | let overflowing_iff_lt = ((lhs as i64).wrapping_sub(rhs as i64)) as u64; 725 | TpBool((overflowing_iff_lt >> 63) as u8) 726 | }, methods { 727 | impl_as!(TpU8, u8, as_u8); 728 | impl_as!(TpU16, u16, as_u16); 729 | impl_as!(TpU32, u32, as_u32); 730 | impl_as!(TpU64, u64, as_u64); 731 | impl_as!(TpI8, i8, as_i8); 732 | impl_as!(TpI16, i16, as_i16); 733 | impl_as!(TpI64, i64, as_i64); 734 | }); 735 | impl_unary_op!(Neg, neg, TpI32, TpI32); 736 | 737 | define_number_type!(TpI64, i64, tp_lt(lhs, rhs) => { 738 | let overflowing_iff_lt = ((lhs as i128).wrapping_sub(rhs as i128)) as u128; 739 | TpBool((overflowing_iff_lt >> 127) as u8) 740 | }, methods { 741 | impl_as!(TpU8, u8, as_u8); 742 | impl_as!(TpU16, u16, as_u16); 743 | impl_as!(TpU32, u32, as_u32); 744 | impl_as!(TpU64, u64, as_u64); 745 | impl_as!(TpI8, i8, as_i8); 746 | impl_as!(TpI16, i16, as_i16); 747 | impl_as!(TpI32, i32, as_i32); 748 | }); 749 | impl_unary_op!(Neg, neg, TpI64, TpI64); 750 | 751 | /// A boolean type that prevents its value from being leaked to attackers through timing 752 | /// information. 753 | /// 754 | /// ``` 755 | /// # use timing_shield::*; 756 | /// # let some_boolean = true; 757 | /// let protected = TpBool::protect(some_boolean); 758 | /// 759 | /// // Use `protected` from now on instead of `some_boolean` 760 | /// ``` 761 | /// 762 | /// Use the `protect` method as early as possible in the computation for maximum protection: 763 | /// 764 | /// ``` 765 | /// # use timing_shield::*; 766 | /// # let some_boolean = true; 767 | /// // DANGEROUS: 768 | /// let badly_protected_boolean = TpU8::protect(some_boolean as u8); 769 | /// 770 | /// // Safe: 771 | /// let protected = TpBool::protect(some_boolean).as_u8(); 772 | /// # assert_eq!(protected.expose(), 1u8); 773 | /// 774 | /// // DANGEROUS: 775 | /// # let byte1 = 1u8; 776 | /// # let byte2 = 2u8; 777 | /// let badly_protected_value = TpBool::protect(byte1 == byte2); 778 | /// # assert_eq!(badly_protected_value.expose(), false); 779 | /// 780 | /// // Safe: 781 | /// let protected_bool = TpU8::protect(byte1).tp_eq(&TpU8::protect(byte2)); 782 | /// # assert_eq!(protected_bool.expose(), false); 783 | /// ``` 784 | /// 785 | /// Note that `&` and `|` are provided instead of `&&` and `||` because the usual boolean 786 | /// short-circuiting behaviour leaks information about the values of the booleans. 
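///
/// A short sketch of combining protected booleans without short-circuiting:
///
/// ```
/// # use timing_shield::*;
/// let a = TpBool::protect(true);
/// let b = TpBool::protect(false);
///
/// // Both operands are always evaluated; no branch depends on the secret values
/// let both = a & b;
/// let either = a | b;
/// # assert_eq!(both.expose(), false);
/// # assert_eq!(either.expose(), true);
/// ```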
787 | #[cfg(target_arch = "x86_64")] 788 | #[derive(Clone, Copy)] 789 | pub struct TpBool(u8); 790 | 791 | static TP_FALSE: TpBool = TpBool(0); 792 | static TP_TRUE: TpBool = TpBool(1); 793 | 794 | impl TpBool { 795 | /// Hide `input` behind a protective abstraction to prevent the value from being used 796 | /// in such a way that the value could leak out via a timing side channel. 797 | /// 798 | /// ``` 799 | /// # use timing_shield::*; 800 | /// # let some_secret_bool = true; 801 | /// let protected_bool = TpBool::protect(some_secret_bool); 802 | /// 803 | /// // Use `protected_bool` instead of `some_secret_bool` to avoid timing leaks 804 | /// ``` 805 | #[inline(always)] 806 | pub fn protect(input: bool) -> Self { 807 | // `as u8` ensures value is 0 or 1 808 | // LLVM IR: input_u8 = zext i1 input to i8 809 | let input_u8 = input as u8; 810 | 811 | // Place an optimization barrier to hide that the u8 was originally a bool 812 | let input_u8 = optimization_barrier_u8(input_u8); 813 | 814 | TpBool(input_u8) 815 | } 816 | 817 | impl_as!(TpU8, u8, as_u8); 818 | impl_as!(TpU16, u16, as_u16); 819 | impl_as!(TpU32, u32, as_u32); 820 | impl_as!(TpU64, u64, as_u64); 821 | impl_as!(TpI8, i8, as_i8); 822 | impl_as!(TpI16, i16, as_i16); 823 | impl_as!(TpI32, i32, as_i32); 824 | impl_as!(TpI64, i64, as_i64); 825 | 826 | /// Remove the timing protection and expose the raw boolean value. 827 | /// Once the boolean is exposed, it is the library user's responsibility to prevent timing 828 | /// leaks (if necessary). Note: this can be very difficult to do correctly with boolean values. 829 | /// 830 | /// Commonly, this method is used when a value is safe to make public (e.g. the result of a 831 | /// signature verification). 832 | #[inline(always)] 833 | pub fn expose(self) -> bool { 834 | let bool_as_u8: u8 = optimization_barrier_u8(self.0); 835 | 836 | unsafe { 837 | // Safe as long as TpBool correctly maintains the invariant that self.0 is 0 or 1 838 | std::mem::transmute::(bool_as_u8) 839 | } 840 | } 841 | 842 | /// Constant-time conditional swap. Swaps `a` and `b` if this boolean is true, otherwise has no 843 | /// effect. This operation is implemented without branching on the boolean value, and it will 844 | /// not leak information about whether the values were swapped. 845 | #[inline(always)] 846 | pub fn cond_swap(self, a: &mut T, b: &mut T) 847 | where 848 | T: TpCondSwap + ?Sized, 849 | { 850 | T::tp_cond_swap(self, a, b); 851 | } 852 | 853 | /// Returns one of the arguments, depending on the value of this boolean. 854 | /// The return value is selected without branching on the boolean value, and no information 855 | /// about which value was selected will be leaked. 856 | #[inline(always)] 857 | pub fn select(self, when_true: T, when_false: T) -> T 858 | where 859 | T: TpCondSwap, 860 | { 861 | // TODO is this optimal? 
862 | // seems to compile to use NEG instead of DEC 863 | // NEG clobbers the carry flag, so arguably DEC could be better 864 | 865 | let mut result = when_false; 866 | let mut replace_with = when_true; 867 | self.cond_swap(&mut result, &mut replace_with); 868 | result 869 | } 870 | } 871 | 872 | impl Not for TpBool { 873 | type Output = TpBool; 874 | 875 | #[inline(always)] 876 | fn not(self) -> TpBool { 877 | TpBool(self.0 ^ 0x01) 878 | } 879 | } 880 | 881 | impl_bin_op!(BitAnd, bitand, TpBool, (l: TpBool) => l.0 , (r: TpBool) => r.0 ); 882 | impl_bin_op!(BitAnd, bitand, TpBool, (l: bool) => l as u8, (r: TpBool) => r.0 ); 883 | impl_bin_op!(BitAnd, bitand, TpBool, (l: TpBool) => l.0 , (r: bool) => r as u8); 884 | 885 | impl_bin_op!(BitOr, bitor, TpBool, (l: TpBool) => l.0 , (r: TpBool) => r.0 ); 886 | impl_bin_op!(BitOr, bitor, TpBool, (l: bool) => l as u8, (r: TpBool) => r.0 ); 887 | impl_bin_op!(BitOr, bitor, TpBool, (l: TpBool) => l.0 , (r: bool) => r as u8); 888 | 889 | impl_bin_op!(BitXor, bitxor, TpBool, (l: TpBool) => l.0 , (r: TpBool) => r.0 ); 890 | impl_bin_op!(BitXor, bitxor, TpBool, (l: bool) => l as u8, (r: TpBool) => r.0 ); 891 | impl_bin_op!(BitXor, bitxor, TpBool, (l: TpBool) => l.0 , (r: bool) => r as u8); 892 | 893 | derive_assign_op!(BitAndAssign, bitand_assign, bitand, TpBool, TpBool); 894 | derive_assign_op!(BitAndAssign, bitand_assign, bitand, TpBool, bool); 895 | 896 | derive_assign_op!(BitOrAssign, bitor_assign, bitor, TpBool, TpBool); 897 | derive_assign_op!(BitOrAssign, bitor_assign, bitor, TpBool, bool); 898 | 899 | derive_assign_op!(BitXorAssign, bitxor_assign, bitxor, TpBool, TpBool); 900 | derive_assign_op!(BitXorAssign, bitxor_assign, bitxor, TpBool, bool); 901 | 902 | impl_tp_eq!(TpBool, TpBool, (l, r) => { 903 | l.bitxor(*r).not() 904 | }); 905 | impl_tp_eq!(bool, TpBool, (l, r) => { 906 | TpBool((*l as u8) ^ r.0).not() 907 | }); 908 | impl_tp_eq!(TpBool, bool, (l, r) => { 909 | TpBool(l.0 ^ (*r as u8)).not() 910 | }); 911 | 912 | impl TpCondSwap for TpBool { 913 | #[inline(always)] 914 | fn tp_cond_swap(condition: TpBool, a: &mut TpBool, b: &mut TpBool) { 915 | let swapper = (*a ^ *b) & condition; 916 | *a ^= swapper; 917 | *b ^= swapper; 918 | } 919 | } 920 | 921 | #[cfg(test)] 922 | mod tests { 923 | use super::*; 924 | use quickcheck::quickcheck; 925 | 926 | // The separate modules in the tests below are to work around limitations of Rust macros 927 | // (concat_idents does not work in function definitions) 928 | 929 | macro_rules! test_tp_eq { 930 | ( 931 | $test_name:ident, 932 | ($lhs_var:ident : $lhs_type:ty) => $lhs_expr:expr, 933 | ($rhs_var:ident : $rhs_type:ty) => $rhs_expr:expr 934 | ) => { 935 | quickcheck! { 936 | fn $test_name(lhs: $lhs_type, rhs: $rhs_type) -> bool { 937 | let lhs_tp = { 938 | let $lhs_var = lhs.clone(); 939 | $lhs_expr 940 | }; 941 | let rhs_tp = { 942 | let $rhs_var = rhs.clone(); 943 | $rhs_expr 944 | }; 945 | ((lhs == rhs) == (lhs_tp.tp_eq(&rhs_tp).expose())) 946 | && ((lhs != rhs) == (lhs_tp.tp_not_eq(&rhs_tp).expose())) 947 | } 948 | } 949 | }; 950 | } 951 | 952 | macro_rules! test_tp_ord { 953 | ( 954 | $test_name:ident, 955 | ($lhs_var:ident : $lhs_type:ident) => $lhs_expr:expr, 956 | ($rhs_var:ident : $rhs_type:ident) => $rhs_expr:expr 957 | ) => { 958 | mod $test_name { 959 | use super::*; 960 | quickcheck! 
{ 961 | fn test_tp_lt(lhs: $lhs_type, rhs: $rhs_type) -> bool { 962 | let lhs_tp = { 963 | let $lhs_var = lhs; 964 | $lhs_expr 965 | }; 966 | let rhs_tp = { 967 | let $rhs_var = rhs; 968 | $rhs_expr 969 | }; 970 | (lhs < rhs) == (lhs_tp.tp_lt(&rhs_tp).expose()) 971 | } 972 | 973 | fn test_tp_gt(lhs: $lhs_type, rhs: $rhs_type) -> bool { 974 | let lhs_tp = { 975 | let $lhs_var = lhs; 976 | $lhs_expr 977 | }; 978 | let rhs_tp = { 979 | let $rhs_var = rhs; 980 | $rhs_expr 981 | }; 982 | (lhs > rhs) == (lhs_tp.tp_gt(&rhs_tp).expose()) 983 | } 984 | 985 | fn test_tp_lt_eq(lhs: $lhs_type, rhs: $rhs_type) -> bool { 986 | let lhs_tp = { 987 | let $lhs_var = lhs; 988 | $lhs_expr 989 | }; 990 | let rhs_tp = { 991 | let $rhs_var = rhs; 992 | $rhs_expr 993 | }; 994 | (lhs <= rhs) == (lhs_tp.tp_lt_eq(&rhs_tp).expose()) 995 | } 996 | 997 | fn test_tp_gt_eq(lhs: $lhs_type, rhs: $rhs_type) -> bool { 998 | let lhs_tp = { 999 | let $lhs_var = lhs; 1000 | $lhs_expr 1001 | }; 1002 | let rhs_tp = { 1003 | let $rhs_var = rhs; 1004 | $rhs_expr 1005 | }; 1006 | (lhs >= rhs) == (lhs_tp.tp_gt_eq(&rhs_tp).expose()) 1007 | } 1008 | } 1009 | } 1010 | }; 1011 | } 1012 | macro_rules! test_number_type { 1013 | ($tp_type:ident, $type:ident, $test_mod:ident) => { 1014 | mod $test_mod { 1015 | use super::*; 1016 | 1017 | mod ops { 1018 | use super::*; 1019 | 1020 | fn protect(x: $type) -> $tp_type { 1021 | $tp_type::protect(x) 1022 | } 1023 | 1024 | quickcheck! { 1025 | fn not(x: $type) -> bool { 1026 | (!x) == (!protect(x)).expose() 1027 | } 1028 | 1029 | fn add_no_leak(l: $type, r: $type) -> bool { 1030 | (l.wrapping_add(r)) == (protect(l) + protect(r)).expose() 1031 | } 1032 | fn add_leak_lhs(l: $type, r: $type) -> bool { 1033 | (l.wrapping_add(r)) == (l + protect(r)).expose() 1034 | } 1035 | fn add_leak_rhs(l: $type, r: $type) -> bool { 1036 | (l.wrapping_add(r)) == (protect(l) + r).expose() 1037 | } 1038 | 1039 | fn sub_no_leak(l: $type, r: $type) -> bool { 1040 | (l.wrapping_sub(r)) == (protect(l) - protect(r)).expose() 1041 | } 1042 | fn sub_leak_lhs(l: $type, r: $type) -> bool { 1043 | (l.wrapping_sub(r)) == (l - protect(r)).expose() 1044 | } 1045 | fn sub_leak_rhs(l: $type, r: $type) -> bool { 1046 | (l.wrapping_sub(r)) == (protect(l) - r).expose() 1047 | } 1048 | 1049 | fn mul_no_leak(l: $type, r: $type) -> bool { 1050 | (l.wrapping_mul(r)) == (protect(l) * protect(r)).expose() 1051 | } 1052 | fn mul_leak_lhs(l: $type, r: $type) -> bool { 1053 | (l.wrapping_mul(r)) == (l * protect(r)).expose() 1054 | } 1055 | fn mul_leak_rhs(l: $type, r: $type) -> bool { 1056 | (l.wrapping_mul(r)) == (protect(l) * r).expose() 1057 | } 1058 | 1059 | fn bitand_no_leak(l: $type, r: $type) -> bool { 1060 | (l & r) == (protect(l) & protect(r)).expose() 1061 | } 1062 | fn bitand_leak_lhs(l: $type, r: $type) -> bool { 1063 | (l & r) == (l & protect(r)).expose() 1064 | } 1065 | fn bitand_leak_rhs(l: $type, r: $type) -> bool { 1066 | (l & r) == (protect(l) & r).expose() 1067 | } 1068 | 1069 | fn bitor_no_leak(l: $type, r: $type) -> bool { 1070 | (l | r) == (protect(l) | protect(r)).expose() 1071 | } 1072 | fn bitor_leak_lhs(l: $type, r: $type) -> bool { 1073 | (l | r) == (l | protect(r)).expose() 1074 | } 1075 | fn bitor_leak_rhs(l: $type, r: $type) -> bool { 1076 | (l | r) == (protect(l) | r).expose() 1077 | } 1078 | 1079 | fn bitxor_no_leak(l: $type, r: $type) -> bool { 1080 | (l ^ r) == (protect(l) ^ protect(r)).expose() 1081 | } 1082 | fn bitxor_leak_lhs(l: $type, r: $type) -> bool { 1083 | (l ^ r) == (l ^ protect(r)).expose() 1084 | } 
1085 | fn bitxor_leak_rhs(l: $type, r: $type) -> bool { 1086 | (l ^ r) == (protect(l) ^ r).expose() 1087 | } 1088 | 1089 | fn shl_leak_rhs(l: $type, r: u32) -> bool { 1090 | let bits = $type::count_zeros(0); 1091 | (l << (r % bits)) == (protect(l) << r).expose() 1092 | } 1093 | 1094 | fn shr_leak_rhs(l: $type, r: u32) -> bool { 1095 | let bits = $type::count_zeros(0); 1096 | (l >> (r % bits)) == (protect(l) >> r).expose() 1097 | } 1098 | 1099 | fn rotate_left_leak_rhs(l: $type, r: u32) -> bool { 1100 | let bits = $type::count_zeros(0); 1101 | (l.rotate_left(r % bits)) == protect(l).rotate_left(r).expose() 1102 | } 1103 | 1104 | fn rotate_right_leak_rhs(l: $type, r: u32) -> bool { 1105 | let bits = $type::count_zeros(0); 1106 | (l.rotate_right(r % bits)) == protect(l).rotate_right(r).expose() 1107 | } 1108 | } 1109 | } 1110 | 1111 | mod tp_eq { 1112 | use super::*; 1113 | 1114 | test_tp_eq!( 1115 | no_leak, 1116 | (l: $type) => $tp_type::protect(l), 1117 | (r: $type) => $tp_type::protect(r) 1118 | ); 1119 | test_tp_eq!( 1120 | leak_lhs, 1121 | (l: $type) => l, 1122 | (r: $type) => $tp_type::protect(r) 1123 | ); 1124 | test_tp_eq!( 1125 | leak_rhs, 1126 | (l: $type) => $tp_type::protect(l), 1127 | (r: $type) => r 1128 | ); 1129 | 1130 | } 1131 | 1132 | // Numeric types have a specialized implementation of TpEq for slices, so we'll 1133 | // test that separately. 1134 | mod slice_tp_eq { 1135 | use super::*; 1136 | 1137 | quickcheck! { 1138 | fn no_leak(l: Vec<$type>, r: Vec<$type>) -> bool { 1139 | let lhs = l.clone() 1140 | .into_iter() 1141 | .map(|n| $tp_type::protect(n)) 1142 | .collect::>(); 1143 | let rhs = r.clone() 1144 | .into_iter() 1145 | .map(|n| $tp_type::protect(n)) 1146 | .collect::>(); 1147 | let lhs_slice: &[_] = &lhs; 1148 | let rhs_slice: &[_] = &rhs; 1149 | 1150 | ((l == r) == (lhs_slice.tp_eq(&rhs_slice).expose())) 1151 | && ((l != r) == (lhs_slice.tp_not_eq(&rhs_slice).expose())) 1152 | } 1153 | } 1154 | } 1155 | 1156 | mod tp_ord { 1157 | use super::*; 1158 | 1159 | test_tp_ord!( 1160 | no_leak, 1161 | (l: $type) => $tp_type::protect(l), 1162 | (r: $type) => $tp_type::protect(r) 1163 | ); 1164 | test_tp_ord!( 1165 | leak_lhs, 1166 | (l: $type) => l, 1167 | (r: $type) => $tp_type::protect(r) 1168 | ); 1169 | test_tp_ord!( 1170 | leak_rhs, 1171 | (l: $type) => $tp_type::protect(l), 1172 | (r: $type) => r 1173 | ); 1174 | } 1175 | 1176 | mod tp_cond_swap { 1177 | use super::*; 1178 | 1179 | quickcheck! { 1180 | fn test(condition: bool, a: $type, b: $type) -> bool { 1181 | let mut swap1 = $tp_type::protect(a); 1182 | let mut swap2 = $tp_type::protect(b); 1183 | TpBool::protect(condition).cond_swap(&mut swap1, &mut swap2); 1184 | if condition { 1185 | (swap1.expose() == b) && (swap2.expose() == a) 1186 | } else { 1187 | (swap1.expose() == a) && (swap2.expose() == b) 1188 | } 1189 | } 1190 | } 1191 | } 1192 | } 1193 | } 1194 | } 1195 | 1196 | test_number_type!(TpU8, u8, u8_tests); 1197 | test_number_type!(TpU16, u16, u16_tests); 1198 | test_number_type!(TpU32, u32, u32_tests); 1199 | test_number_type!(TpU64, u64, u64_tests); 1200 | test_number_type!(TpI8, i8, i8_tests); 1201 | test_number_type!(TpI16, i16, i16_tests); 1202 | test_number_type!(TpI32, i32, i32_tests); 1203 | test_number_type!(TpI64, i64, i64_tests); 1204 | 1205 | // negation tests are separate because unsigned types don't impl Neg 1206 | quickcheck! 
{ 1207 | fn i8_neg(x: i8) -> bool { 1208 | (-x) == (-TpI8::protect(x)).expose() 1209 | } 1210 | fn i16_neg(x: i16) -> bool { 1211 | (-x) == (-TpI16::protect(x)).expose() 1212 | } 1213 | fn i32_neg(x: i32) -> bool { 1214 | (-x) == (-TpI32::protect(x)).expose() 1215 | } 1216 | fn i64_neg(x: i64) -> bool { 1217 | (-x) == (-TpI64::protect(x)).expose() 1218 | } 1219 | } 1220 | 1221 | mod tp_bool { 1222 | use super::*; 1223 | 1224 | #[test] 1225 | fn test_values() { 1226 | assert_eq!(TP_FALSE.0, 0); 1227 | assert_eq!(TP_TRUE.0, 1); 1228 | assert_eq!(TpBool::protect(false).0, 0); 1229 | assert_eq!(TpBool::protect(true).0, 1); 1230 | assert_eq!(TP_FALSE.expose(), false); 1231 | assert_eq!(TP_TRUE.expose(), true); 1232 | } 1233 | 1234 | quickcheck! { 1235 | fn tpbool_select(c: bool, a: u8, b: u8) -> bool { 1236 | let tp_a = TpU8::protect(a); 1237 | let tp_b = TpU8::protect(b); 1238 | let result = TpBool::protect(c).select(tp_a, tp_b).expose(); 1239 | if c { 1240 | result == a 1241 | } else { 1242 | result == b 1243 | } 1244 | } 1245 | } 1246 | 1247 | #[test] 1248 | fn test_not() { 1249 | assert_eq!((!TP_FALSE).0, 1u8); 1250 | assert_eq!((!TP_TRUE).0, 0u8); 1251 | } 1252 | 1253 | fn protect(x: bool) -> TpBool { 1254 | TpBool::protect(x) 1255 | } 1256 | 1257 | quickcheck! { 1258 | fn bitand_no_leak(l: bool, r: bool) -> bool { 1259 | (l && r) == (protect(l) & protect(r)).expose() 1260 | } 1261 | fn bitand_leak_lhs(l: bool, r: bool) -> bool { 1262 | (l && r) == (l & protect(r)).expose() 1263 | } 1264 | fn bitand_leak_rhs(l: bool, r: bool) -> bool { 1265 | (l && r) == (protect(l) & r).expose() 1266 | } 1267 | 1268 | fn bitor_no_leak(l: bool, r: bool) -> bool { 1269 | (l || r) == (protect(l) | protect(r)).expose() 1270 | } 1271 | fn bitor_leak_lhs(l: bool, r: bool) -> bool { 1272 | (l || r) == (l | protect(r)).expose() 1273 | } 1274 | fn bitor_leak_rhs(l: bool, r: bool) -> bool { 1275 | (l || r) == (protect(l) | r).expose() 1276 | } 1277 | 1278 | fn bitxor_no_leak(l: bool, r: bool) -> bool { 1279 | (l ^ r) == (protect(l) ^ protect(r)).expose() 1280 | } 1281 | fn bitxor_leak_lhs(l: bool, r: bool) -> bool { 1282 | (l ^ r) == (l ^ protect(r)).expose() 1283 | } 1284 | fn bitxor_leak_rhs(l: bool, r: bool) -> bool { 1285 | (l ^ r) == (protect(l) ^ r).expose() 1286 | } 1287 | } 1288 | 1289 | quickcheck! { 1290 | fn tp_eq_no_leak(a: bool, b: bool) -> bool { 1291 | let tp_a = protect(a); 1292 | let tp_b = protect(b); 1293 | (a == b) == (tp_a.tp_eq(&tp_b).expose()) 1294 | } 1295 | fn tp_eq_leak_lhs(a: bool, b: bool) -> bool { 1296 | let tp_b = protect(b); 1297 | (a == b) == (a.tp_eq(&tp_b).expose()) 1298 | } 1299 | fn tp_eq_leak_rhs(a: bool, b: bool) -> bool { 1300 | let tp_a = protect(a); 1301 | (a == b) == (tp_a.tp_eq(&b).expose()) 1302 | } 1303 | } 1304 | 1305 | quickcheck! { 1306 | fn tp_cond_swap(swap: bool, a: bool, b: bool) -> bool { 1307 | let mut swap1 = protect(a); 1308 | let mut swap2 = protect(b); 1309 | protect(swap).cond_swap(&mut swap1, &mut swap2); 1310 | if swap { 1311 | (swap1.expose() == b) && (swap2.expose() == a) 1312 | } else { 1313 | (swap1.expose() == a) && (swap2.expose() == b) 1314 | } 1315 | } 1316 | } 1317 | } 1318 | 1319 | quickcheck! 
{ 1320 | fn tp_cond_swap_slices(swap: bool, a: Vec, b: Vec) -> quickcheck::TestResult { 1321 | if a.len() != b.len() { 1322 | return quickcheck::TestResult::discard(); 1323 | } 1324 | 1325 | let mut swap1 = a.iter().map(|&x| TpU8::protect(x)).collect::>(); 1326 | let mut swap2 = b.iter().map(|&x| TpU8::protect(x)).collect::>(); 1327 | { 1328 | let slice_ref1: &mut [TpU8] = &mut *swap1; 1329 | let slice_ref2: &mut [TpU8] = &mut *swap2; 1330 | TpBool::protect(swap).cond_swap(slice_ref1, slice_ref2); 1331 | } 1332 | let res1: Vec<_> = swap1.iter().map(|x| x.expose()).collect(); 1333 | let res2: Vec<_> = swap2.iter().map(|x| x.expose()).collect(); 1334 | quickcheck::TestResult::from_bool( 1335 | if swap { 1336 | (res1 == b) && (res2 == a) 1337 | } else { 1338 | (res1 == a) && (res2 == b) 1339 | } 1340 | ) 1341 | } 1342 | 1343 | fn tp_cond_swap_vecs(swap: bool, a: Vec, b: Vec) -> quickcheck::TestResult { 1344 | if a.len() != b.len() { 1345 | return quickcheck::TestResult::discard(); 1346 | } 1347 | 1348 | let mut swap1 = a.iter().map(|&x| TpU8::protect(x)).collect::>(); 1349 | let mut swap2 = b.iter().map(|&x| TpU8::protect(x)).collect::>(); 1350 | { 1351 | let vec_ref1: &mut Vec = &mut swap1; 1352 | let vec_ref2: &mut Vec = &mut swap2; 1353 | TpBool::protect(swap).cond_swap(vec_ref1, vec_ref2); 1354 | } 1355 | let res1: Vec<_> = swap1.iter().map(|x| x.expose()).collect(); 1356 | let res2: Vec<_> = swap2.iter().map(|x| x.expose()).collect(); 1357 | quickcheck::TestResult::from_bool( 1358 | if swap { 1359 | (res1 == b) && (res2 == a) 1360 | } else { 1361 | (res1 == a) && (res2 == b) 1362 | } 1363 | ) 1364 | } 1365 | } 1366 | } 1367 | 1368 | // TODO assume barrel shifter on x86? 1369 | // TODO impl TpCondSwap for tuples 1370 | // TODO explain downsides (e.g. secret constants will get leaked through constant 1371 | // folding/propagation) 1372 | --------------------------------------------------------------------------------