├── .github
│   └── workflows
│       └── CI.yml
├── .gitignore
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
└── src
    ├── bench.rs
    ├── cli.rs
    ├── console.rs
    ├── event.rs
    ├── formatters
    │   ├── json.rs
    │   ├── mod.rs
    │   ├── pretty.rs
    │   └── terse.rs
    ├── helpers
    │   ├── concurrency.rs
    │   ├── exit_code.rs
    │   ├── isatty.rs
    │   ├── metrics.rs
    │   └── mod.rs
    ├── lib.rs
    ├── options.rs
    ├── stats.rs
    ├── stats
    │   └── tests.rs
    ├── test_result.rs
    ├── tests.rs
    ├── time.rs
    └── types.rs

/.github/workflows/CI.yml:
--------------------------------------------------------------------------------
1 | on: [push, pull_request]
2 |
3 | name: CI
4 |
5 | jobs:
6 |   check:
7 |     name: Check
8 |     runs-on: ubuntu-latest
9 |     steps:
10 |       - uses: actions/checkout@v2
11 |       - uses: actions-rs/toolchain@v1
12 |         with:
13 |           profile: minimal
14 |           toolchain: stable
15 |           override: true
16 |       - uses: actions-rs/cargo@v1
17 |         with:
18 |           command: check
19 |           args: --all
20 |
21 |   test:
22 |     name: Test Suite
23 |     runs-on: ${{ matrix.os }}
24 |     strategy:
25 |       matrix:
26 |         os: [ubuntu-latest, macos-latest]
27 |         toolchain: [stable, nightly]
28 |     steps:
29 |       - uses: actions/checkout@v2
30 |       - uses: actions-rs/toolchain@v1
31 |         with:
32 |           profile: minimal
33 |           toolchain: ${{ matrix.toolchain }}
34 |           override: true
35 |       - uses: actions-rs/cargo@v1
36 |         if: matrix.toolchain == 'stable'
37 |         with:
38 |           command: test
39 |       - uses: actions-rs/cargo@v1
40 |         if: matrix.toolchain == 'nightly'
41 |         with:
42 |           command: test
43 |           args: --all-features
44 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target
2 | Cargo.lock
3 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "tester"
3 | version = "0.9.1"
4 | authors = ["The Rust Project Developers"]
5 | license = "MIT OR Apache-2.0"
6 | description = "A fork of Rust’s `test` crate that doesn’t require unstable language features."
7 | repository = "https://github.com/messense/rustc-test"
8 | edition = "2018"
9 |
10 | [features]
11 | asm_black_box = []
12 | capture = []
13 |
14 | [dependencies]
15 | getopts = "0.2"
16 | term = "0.7"
17 | num_cpus = "1.13.0"
18 | cfg-if = "1.0.0"
19 |
20 | [target.'cfg(unix)'.dependencies]
21 | libc = { version = "0.2", default-features = false }
22 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Permission is hereby granted, free of charge, to any
2 | person obtaining a copy of this software and associated
3 | documentation files (the "Software"), to deal in the
4 | Software without restriction, including without
5 | limitation the rights to use, copy, modify, merge,
6 | publish, distribute, sublicense, and/or sell copies of
7 | the Software, and to permit persons to whom the Software
8 | is furnished to do so, subject to the following
9 | conditions:
10 |
11 | The above copyright notice and this permission notice
12 | shall be included in all copies or substantial portions
13 | of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 | DEALINGS IN THE SOFTWARE.
24 |
--------------------------------------------------------------------------------
/src/bench.rs:
--------------------------------------------------------------------------------
1 | //! Benchmarking module.
2 |
3 | use super::{
4 |     event::CompletedTest, options::BenchMode, test_result::TestResult, types::TestDesc, Sender,
5 | };
6 |
7 | use crate::stats;
8 | use std::cmp;
9 | #[cfg(feature = "capture")]
10 | use std::io;
11 | use std::panic::{catch_unwind, AssertUnwindSafe};
12 | use std::sync::{Arc, Mutex};
13 | use std::time::{Duration, Instant};
14 |
15 | #[cfg(feature = "asm_black_box")]
16 | pub use std::hint::black_box;
17 |
18 | #[cfg(not(feature = "asm_black_box"))]
19 | #[inline(never)]
20 | pub fn black_box<T>(dummy: T) -> T {
21 |     dummy
22 | }
23 |
24 | /// Manager of the benchmarking runs.
25 | ///
26 | /// This is fed into functions marked with `#[bench]` to allow for
27 | /// set-up & tear-down before running a piece of code repeatedly via a
28 | /// call to `iter`.
29 | #[derive(Clone)]
30 | pub struct Bencher {
31 |     mode: BenchMode,
32 |     summary: Option<stats::Summary>,
33 |     pub bytes: u64,
34 | }
35 |
36 | impl Bencher {
37 |     /// Callback for benchmark functions to run in their body.
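    ///
    /// # Example
    ///
    /// An illustrative sketch of a `#[bench]`-style function (editorial
    /// addition; `sum_to_n` is a hypothetical helper, and the imports assume
    /// the crate re-exports `Bencher` and `black_box` like upstream libtest):
    ///
    /// ```ignore
    /// use tester::{black_box, Bencher};
    ///
    /// fn sum_to_n(n: u64) -> u64 {
    ///     (1..=n).sum()
    /// }
    ///
    /// fn bench_sum(b: &mut Bencher) {
    ///     // `iter` runs the closure repeatedly and records per-iteration
    ///     // timings; `black_box` keeps the work from being optimized away.
    ///     b.iter(|| black_box(sum_to_n(black_box(1_000))));
    /// }
    /// ```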
38 |     pub fn iter<T, F>(&mut self, mut inner: F)
39 |     where
40 |         F: FnMut() -> T,
41 |     {
42 |         if self.mode == BenchMode::Single {
43 |             ns_iter_inner(&mut inner, 1);
44 |             return;
45 |         }
46 |
47 |         self.summary = Some(iter(&mut inner));
48 |     }
49 |
50 |     pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
51 |     where
52 |         F: FnMut(&mut Bencher),
53 |     {
54 |         f(self);
55 |         self.summary
56 |     }
57 | }
58 |
59 | #[derive(Debug, Clone, PartialEq)]
60 | pub struct BenchSamples {
61 |     pub ns_iter_summ: stats::Summary,
62 |     pub mb_s: usize,
63 | }
64 |
65 | pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
66 |     use std::fmt::Write;
67 |     let mut output = String::new();
68 |
69 |     let median = bs.ns_iter_summ.median as usize;
70 |     let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
71 |
72 |     write!(
73 |         output,
74 |         "{:>11} ns/iter (+/- {})",
75 |         fmt_thousands_sep(median, ','),
76 |         fmt_thousands_sep(deviation, ',')
77 |     )
78 |     .unwrap();
79 |     if bs.mb_s != 0 {
80 |         write!(output, " = {} MB/s", bs.mb_s).unwrap();
81 |     }
82 |     output
83 | }
84 |
85 | // Format a number with thousands separators
86 | fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
87 |     use std::fmt::Write;
88 |     let mut output = String::new();
89 |     let mut trailing = false;
90 |     for &pow in &[9, 6, 3, 0] {
91 |         let base = 10_usize.pow(pow);
92 |         if pow == 0 || trailing || n / base != 0 {
93 |             if !trailing {
94 |                 write!(output, "{}", n / base).unwrap();
95 |             } else {
96 |                 write!(output, "{:03}", n / base).unwrap();
97 |             }
98 |             if pow != 0 {
99 |                 output.push(sep);
100 |             }
101 |             trailing = true;
102 |         }
103 |         n %= base;
104 |     }
105 |
106 |     output
107 | }
108 |
109 | fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
110 | where
111 |     F: FnMut() -> T,
112 | {
113 |     let start = Instant::now();
114 |     for _ in 0..k {
115 |         black_box(inner());
116 |     }
117 |     start.elapsed().as_nanos() as u64
118 | }
119 |
120 | pub fn iter<T, F>(inner: &mut F) -> stats::Summary
121 | where
122 |     F: FnMut() -> T,
123 | {
124 |     // Initial bench run to get ballpark figure.
125 |     let ns_single = ns_iter_inner(inner, 1);
126 |
127 |     // Try to estimate iter count for 1ms falling back to 1m
128 |     // iterations if first run took < 1ns.
129 |     let ns_target_total = 1_000_000; // 1ms
130 |     let mut n = ns_target_total / cmp::max(1, ns_single);
131 |
132 |     // if the first run took more than 1ms we don't want to just
133 |     // be left doing 0 iterations on every loop. The unfortunate
134 |     // side effect of not being able to do as many runs is
135 |     // automatically handled by the statistical analysis below
136 |     // (i.e., larger error bars).
137 |     n = cmp::max(1, n);
138 |
139 |     let mut total_run = Duration::new(0, 0);
140 |     let samples: &mut [f64] = &mut [0.0_f64; 50];
141 |     loop {
142 |         let loop_start = Instant::now();
143 |
144 |         for p in &mut *samples {
145 |             *p = ns_iter_inner(inner, n) as f64 / n as f64;
146 |         }
147 |
148 |         stats::winsorize(samples, 5.0);
149 |         let summ = stats::Summary::new(samples);
150 |
151 |         for p in &mut *samples {
152 |             let ns = ns_iter_inner(inner, 5 * n);
153 |             *p = ns as f64 / (5 * n) as f64;
154 |         }
155 |
156 |         stats::winsorize(samples, 5.0);
157 |         let summ5 = stats::Summary::new(samples);
158 |
159 |         let loop_run = loop_start.elapsed();
160 |
161 |         // If we've run for 100ms and seem to have converged to a
162 |         // stable median.
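        // (Editorial illustration: with a 1x-batch median of 1_000 ns, a
        // median-absolute-deviation of 0.8% and a 5x-batch median of
        // 1_004 ns with a MAD of 10 ns, all three checks below pass and
        // `summ5` is returned.)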
163 |         if loop_run > Duration::from_millis(100)
164 |             && summ.median_abs_dev_pct < 1.0
165 |             && summ.median - summ5.median < summ5.median_abs_dev
166 |         {
167 |             return summ5;
168 |         }
169 |
170 |         total_run += loop_run;
171 |         // Longest we ever run for is 3s.
172 |         if total_run > Duration::from_secs(3) {
173 |             return summ5;
174 |         }
175 |
176 |         // If we overflow here just return the results so far. We check a
177 |         // multiplier of 10 because we're about to multiply by 2 and the
178 |         // next iteration of the loop will also multiply by 5 (to calculate
179 |         // the summ5 result)
180 |         n = match n.checked_mul(10) {
181 |             Some(_) => n * 2,
182 |             None => {
183 |                 return summ5;
184 |             }
185 |         };
186 |     }
187 | }
188 |
189 | pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<CompletedTest>, nocapture: bool, f: F)
190 | where
191 |     F: FnMut(&mut Bencher),
192 | {
193 |     let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 };
194 |
195 |     let data = Arc::new(Mutex::new(Vec::new()));
196 |
197 |     if !nocapture {
198 |         #[cfg(feature = "capture")]
199 |         io::set_output_capture(Some(data.clone()));
200 |     }
201 |
202 |     let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
203 |
204 |     #[cfg(feature = "capture")]
205 |     io::set_output_capture(None);
206 |
207 |     let test_result = match result {
208 |         //bs.bench(f) {
209 |         Ok(Some(ns_iter_summ)) => {
210 |             let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
211 |             let mb_s = bs.bytes * 1000 / ns_iter;
212 |
213 |             let bs = BenchSamples { ns_iter_summ, mb_s: mb_s as usize };
214 |             TestResult::TrBench(bs)
215 |         }
216 |         Ok(None) => {
217 |             // iter not called, so no data.
218 |             // FIXME: error in this case?
219 |             let samples: &mut [f64] = &mut [0.0_f64; 1];
220 |             let bs = BenchSamples { ns_iter_summ: stats::Summary::new(samples), mb_s: 0 };
221 |             TestResult::TrBench(bs)
222 |         }
223 |         Err(_) => TestResult::TrFailed,
224 |     };
225 |
226 |     let stdout = data.lock().unwrap().to_vec();
227 |     let message = CompletedTest::new(desc, test_result, None, stdout);
228 |     monitor_ch.send(message).unwrap();
229 | }
230 |
231 | pub fn run_once<F>(f: F)
232 | where
233 |     F: FnMut(&mut Bencher),
234 | {
235 |     let mut bs = Bencher { mode: BenchMode::Single, summary: None, bytes: 0 };
236 |     bs.bench(f);
237 | }
--------------------------------------------------------------------------------
/src/cli.rs:
--------------------------------------------------------------------------------
1 | //! Module converting command-line arguments into test configuration.
2 |
3 | use std::env;
4 | use std::path::PathBuf;
5 |
6 | use super::helpers::isatty;
7 | use super::options::{ColorConfig, Options, OutputFormat, RunIgnored};
8 | use super::time::TestTimeOptions;
9 |
10 | #[derive(Debug)]
11 | pub struct TestOpts {
12 |     pub list: bool,
13 |     pub filters: Vec<String>,
14 |     pub filter_exact: bool,
15 |     pub force_run_in_process: bool,
16 |     pub exclude_should_panic: bool,
17 |     pub run_ignored: RunIgnored,
18 |     pub run_tests: bool,
19 |     pub bench_benchmarks: bool,
20 |     pub logfile: Option<PathBuf>,
21 |     pub nocapture: bool,
22 |     pub color: ColorConfig,
23 |     pub format: OutputFormat,
24 |     pub test_threads: Option<usize>,
25 |     pub skip: Vec<String>,
26 |     pub time_options: Option<TestTimeOptions>,
27 |     pub options: Options,
28 | }
29 |
30 | impl TestOpts {
31 |     pub fn use_color(&self) -> bool {
32 |         match self.color {
33 |             ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(),
34 |             ColorConfig::AlwaysColor => true,
35 |             ColorConfig::NeverColor => false,
36 |         }
37 |     }
38 | }
39 |
40 | /// Result of parsing the options.
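/// `Ok` carries the fully-parsed `TestOpts`; `Err` carries a human-readable
/// description of what went wrong.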
41 | pub type OptRes = Result<TestOpts, String>;
42 | /// Result of parsing the option part.
43 | type OptPartRes<T> = Result<T, String>;
44 |
45 | fn optgroups() -> getopts::Options {
46 |     let mut opts = getopts::Options::new();
47 |     opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
48 |         .optflag("", "ignored", "Run only ignored tests")
49 |         .optflag("", "force-run-in-process", "Forces tests to run in-process when panic=abort")
50 |         .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
51 |         .optflag("", "test", "Run tests and not benchmarks")
52 |         .optflag("", "bench", "Run benchmarks instead of tests")
53 |         .optflag("", "list", "List all tests and benchmarks")
54 |         .optflag("h", "help", "Display this message (longer with --help)")
55 |         .optopt(
56 |             "",
57 |             "logfile",
58 |             "Write logs to the specified file instead \
59 |              of stdout",
60 |             "PATH",
61 |         )
62 |         .optflag(
63 |             "",
64 |             "nocapture",
65 |             "don't capture stdout/stderr of each \
66 |              task, allow printing directly",
67 |         )
68 |         .optopt(
69 |             "",
70 |             "test-threads",
71 |             "Number of threads used for running tests \
72 |              in parallel",
73 |             "n_threads",
74 |         )
75 |         .optmulti(
76 |             "",
77 |             "skip",
78 |             "Skip tests whose names contain FILTER (this flag can \
79 |              be used multiple times)",
80 |             "FILTER",
81 |         )
82 |         .optflag(
83 |             "q",
84 |             "quiet",
85 |             "Display one character per test instead of one line. \
86 |              Alias to --format=terse",
87 |         )
88 |         .optflag("", "exact", "Exactly match filters rather than by substring")
89 |         .optopt(
90 |             "",
91 |             "color",
92 |             "Configure coloring of output:
93 |             auto = colorize if stdout is a tty and tests are run serially (default);
94 |             always = always colorize output;
95 |             never = never colorize output;",
96 |             "auto|always|never",
97 |         )
98 |         .optopt(
99 |             "",
100 |             "format",
101 |             "Configure formatting of output:
102 |             pretty = Print verbose output;
103 |             terse = Display one character per test;
104 |             json = Output a json document",
105 |             "pretty|terse|json",
106 |         )
107 |         .optflag("", "show-output", "Show captured stdout of successful tests")
108 |         .optopt(
109 |             "Z",
110 |             "",
111 |             "Enable nightly-only flags:
112 |             unstable-options = Allow use of experimental features",
113 |             "unstable-options",
114 |         )
115 |         .optflagopt(
116 |             "",
117 |             "report-time",
118 |             "Show execution time of each test. Available values:
119 |             plain = do not colorize the execution time (default);
120 |             colored = colorize output according to the `color` parameter value;
121 |
122 |             Threshold values for colorized output can be configured via
123 |             `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
124 |             `RUST_TEST_TIME_DOCTEST` environment variables.
125 |
126 |             Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
127 |             Durations must be specified in milliseconds, e.g. `500,2000` means that the warn time
128 |             is 0.5 seconds, and the critical time is 2 seconds.
129 |
130 |             Not available for --format=terse",
131 |             "plain|colored",
132 |         )
133 |         .optflag(
134 |             "",
135 |             "ensure-time",
136 |             "Treat excess of the test execution time limit as error.
137 |
138 |             Threshold values for this option can be configured via
139 |             `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
140 |             `RUST_TEST_TIME_DOCTEST` environment variables.
141 |
142 |             Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`.
143 |
144 |             `CRITICAL_TIME` here means the limit that should not be exceeded by the test.
145 | ", 146 | ); 147 | opts 148 | } 149 | 150 | fn usage(binary: &str, options: &getopts::Options) { 151 | let message = format!("Usage: {} [OPTIONS] [FILTERS...]", binary); 152 | println!( 153 | r#"{usage} 154 | 155 | The FILTERS string is tested against the name of all tests, and only those 156 | tests whose names contain the filter are run. Multiple filter strings may 157 | be passed, which will run all tests matching any of the filters. 158 | 159 | By default, all tests are run in parallel. This can be altered with the 160 | --test-threads flag or the RUST_TEST_THREADS environment variable when running 161 | tests (set it to 1). 162 | 163 | All tests have their standard output and standard error captured by default. 164 | This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE 165 | environment variable to a value other than "0". Logging is not captured by default. 166 | 167 | Test Attributes: 168 | 169 | `#[test]` - Indicates a function is a test to be run. This function 170 | takes no arguments. 171 | `#[bench]` - Indicates a function is a benchmark to be run. This 172 | function takes one argument (test::Bencher). 173 | `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if 174 | the code causes a panic (an assertion failure or panic!) 175 | A message may be provided, which the failure string must 176 | contain: #[should_panic(expected = "foo")]. 177 | `#[ignore]` - When applied to a function which is already attributed as a 178 | test, then the test runner will ignore these tests during 179 | normal test runs. Running with --ignored or --include-ignored will run 180 | these tests."#, 181 | usage = options.usage(&message) 182 | ); 183 | } 184 | 185 | /// Parses command line arguments into test options. 186 | /// Returns `None` if help was requested (since we only show help message and don't run tests), 187 | /// returns `Some(Err(..))` if provided arguments are incorrect, 188 | /// otherwise creates a `TestOpts` object and returns it. 189 | pub fn parse_opts(args: &[String]) -> Option { 190 | // Parse matches. 191 | let opts = optgroups(); 192 | let args = args.get(1..).unwrap_or(args); 193 | let matches = match opts.parse(args) { 194 | Ok(m) => m, 195 | Err(f) => return Some(Err(f.to_string())), 196 | }; 197 | 198 | // Check if help was requested. 199 | if matches.opt_present("h") { 200 | // Show help and do nothing more. 201 | usage(&args[0], &opts); 202 | return None; 203 | } 204 | 205 | // Actually parse the opts. 206 | let opts_result = parse_opts_impl(matches); 207 | 208 | Some(opts_result) 209 | } 210 | 211 | // Gets the option value and checks if unstable features are enabled. 212 | macro_rules! unstable_optflag { 213 | ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{ 214 | let opt = $matches.opt_present($option_name); 215 | if !$allow_unstable && opt { 216 | return Err(format!( 217 | "The \"{}\" flag is only accepted on the nightly compiler with -Z unstable-options", 218 | $option_name 219 | )); 220 | } 221 | 222 | opt 223 | }}; 224 | } 225 | 226 | // Implementation of `parse_opts` that doesn't care about help message 227 | // and returns a `Result`. 
228 | fn parse_opts_impl(matches: getopts::Matches) -> OptRes {
229 |     let allow_unstable = get_allow_unstable(&matches)?;
230 |
231 |     // Unstable flags
232 |     let force_run_in_process = unstable_optflag!(matches, allow_unstable, "force-run-in-process");
233 |     let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");
234 |     let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");
235 |     let time_options = get_time_options(&matches, allow_unstable)?;
236 |
237 |     let quiet = matches.opt_present("quiet");
238 |     let exact = matches.opt_present("exact");
239 |     let list = matches.opt_present("list");
240 |     let skip = matches.opt_strs("skip");
241 |
242 |     let bench_benchmarks = matches.opt_present("bench");
243 |     let run_tests = !bench_benchmarks || matches.opt_present("test");
244 |
245 |     let logfile = get_log_file(&matches)?;
246 |     let run_ignored = get_run_ignored(&matches, include_ignored)?;
247 |     let filters = matches.free.clone();
248 |     let nocapture = get_nocapture(&matches)?;
249 |     let test_threads = get_test_threads(&matches)?;
250 |     let color = get_color_config(&matches)?;
251 |     let format = get_format(&matches, quiet, allow_unstable)?;
252 |
253 |     let options = Options::new().display_output(matches.opt_present("show-output"));
254 |
255 |     let test_opts = TestOpts {
256 |         list,
257 |         filters,
258 |         filter_exact: exact,
259 |         force_run_in_process,
260 |         exclude_should_panic,
261 |         run_ignored,
262 |         run_tests,
263 |         bench_benchmarks,
264 |         logfile,
265 |         nocapture,
266 |         color,
267 |         format,
268 |         test_threads,
269 |         skip,
270 |         time_options,
271 |         options,
272 |     };
273 |
274 |     Ok(test_opts)
275 | }
276 |
277 | // FIXME: Copied from librustc_ast until linkage errors are resolved. Issue #47566
278 | fn is_nightly() -> bool {
279 |     // Whether this is a feature-staged build, i.e., on the beta or stable channel
280 |     let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
281 |     // Whether we should enable unstable features for bootstrapping
282 |     let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
283 |
284 |     bootstrap || !disable_unstable_features
285 | }
286 |
287 | // Gets the CLI options associated with `report-time` feature.
288 | fn get_time_options(
289 |     matches: &getopts::Matches,
290 |     allow_unstable: bool,
291 | ) -> OptPartRes<Option<TestTimeOptions>> {
292 |     let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
293 |     let colored_opt_str = matches.opt_str("report-time");
294 |     let mut report_time_colored = report_time && colored_opt_str == Some("colored".into());
295 |     let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time");
296 |
297 |     // If the `ensure-time` option is provided, time output is enforced,
298 |     // so the user won't be confused if any of the tests silently fail.
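    //
    // Editorial illustration of the resulting combinations:
    //   `--report-time`          -> plain time report, no enforcement
    //   `--report-time=colored`  -> colored time report, no enforcement
    //   `--ensure-time` (alone)  -> colored time report + limits enforced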
299 |     let options = if report_time || ensure_test_time {
300 |         if ensure_test_time && !report_time {
301 |             report_time_colored = true;
302 |         }
303 |         Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
304 |     } else {
305 |         None
306 |     };
307 |
308 |     Ok(options)
309 | }
310 |
311 | fn get_test_threads(matches: &getopts::Matches) -> OptPartRes<Option<usize>> {
312 |     let test_threads = match matches.opt_str("test-threads") {
313 |         Some(n_str) => match n_str.parse::<usize>() {
314 |             Ok(0) => return Err("argument for --test-threads must not be 0".to_string()),
315 |             Ok(n) => Some(n),
316 |             Err(e) => {
317 |                 return Err(format!(
318 |                     "argument for --test-threads must be a number > 0 \
319 |                      (error: {})",
320 |                     e
321 |                 ));
322 |             }
323 |         },
324 |         None => None,
325 |     };
326 |
327 |     Ok(test_threads)
328 | }
329 |
330 | fn get_format(
331 |     matches: &getopts::Matches,
332 |     quiet: bool,
333 |     allow_unstable: bool,
334 | ) -> OptPartRes<OutputFormat> {
335 |     let format = match matches.opt_str("format").as_deref() {
336 |         None if quiet => OutputFormat::Terse,
337 |         Some("pretty") | None => OutputFormat::Pretty,
338 |         Some("terse") => OutputFormat::Terse,
339 |         Some("json") => {
340 |             if !allow_unstable {
341 |                 return Err("The \"json\" format is only accepted on the nightly compiler".into());
342 |             }
343 |             OutputFormat::Json
344 |         }
345 |
346 |         Some(v) => {
347 |             return Err(format!(
348 |                 "argument for --format must be pretty, terse, or json (was \
349 |                  {})",
350 |                 v
351 |             ));
352 |         }
353 |     };
354 |
355 |     Ok(format)
356 | }
357 |
358 | fn get_color_config(matches: &getopts::Matches) -> OptPartRes<ColorConfig> {
359 |     let color = match matches.opt_str("color").as_deref() {
360 |         Some("auto") | None => ColorConfig::AutoColor,
361 |         Some("always") => ColorConfig::AlwaysColor,
362 |         Some("never") => ColorConfig::NeverColor,
363 |
364 |         Some(v) => {
365 |             return Err(format!(
366 |                 "argument for --color must be auto, always, or never (was \
367 |                  {})",
368 |                 v
369 |             ));
370 |         }
371 |     };
372 |
373 |     Ok(color)
374 | }
375 |
376 | fn get_nocapture(matches: &getopts::Matches) -> OptPartRes<bool> {
377 |     let mut nocapture = matches.opt_present("nocapture");
378 |     if !nocapture {
379 |         nocapture = match env::var("RUST_TEST_NOCAPTURE") {
380 |             Ok(val) => &val != "0",
381 |             Err(_) => false,
382 |         };
383 |     }
384 |
385 |     Ok(nocapture)
386 | }
387 |
388 | fn get_run_ignored(matches: &getopts::Matches, include_ignored: bool) -> OptPartRes<RunIgnored> {
389 |     let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
390 |         (true, true) => {
391 |             return Err("the options --include-ignored and --ignored are mutually exclusive".into());
392 |         }
393 |         (true, false) => RunIgnored::Yes,
394 |         (false, true) => RunIgnored::Only,
395 |         (false, false) => RunIgnored::No,
396 |     };
397 |
398 |     Ok(run_ignored)
399 | }
400 |
401 | fn get_allow_unstable(matches: &getopts::Matches) -> OptPartRes<bool> {
402 |     let mut allow_unstable = false;
403 |
404 |     if let Some(opt) = matches.opt_str("Z") {
405 |         if !is_nightly() {
406 |             return Err("the option `Z` is only accepted on the nightly compiler".into());
407 |         }
408 |
409 |         match &*opt {
410 |             "unstable-options" => {
411 |                 allow_unstable = true;
412 |             }
413 |             _ => {
414 |                 return Err("Unrecognized option to `Z`".into());
415 |             }
416 |         }
417 |     };
418 |
419 |     Ok(allow_unstable)
420 | }
421 |
422 | fn get_log_file(matches: &getopts::Matches) -> OptPartRes<Option<PathBuf>> {
423 |     let logfile = matches.opt_str("logfile").map(|s| PathBuf::from(&s));
424 |
425 |     Ok(logfile)
426 | }
427 |
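// Illustrative sketch (editorial, not part of the crate): a hand-rolled
// harness built on this module, with `parse_opts` (above) and
// `run_tests_console` (from `console.rs`) in scope; `all_tests()` is a
// hypothetical function producing the `Vec<TestDescAndFn>` to run.
//
//     fn main() {
//         let args: Vec<String> = std::env::args().collect();
//         let opts = match parse_opts(&args) {
//             Some(Ok(o)) => o,                    // parsed successfully
//             Some(Err(msg)) => panic!("{}", msg), // invalid arguments
//             None => return,                      // `--help` was printed
//         };
//         run_tests_console(&opts, all_tests()).unwrap();
//     }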
--------------------------------------------------------------------------------
/src/console.rs:
--------------------------------------------------------------------------------
1 | //! Module providing an interface for running tests in the console.
2 |
3 | use std::fs::File;
4 | use std::io;
5 | use std::io::prelude::Write;
6 | use std::time::Instant;
7 |
8 | use super::{
9 |     bench::fmt_bench_samples,
10 |     cli::TestOpts,
11 |     event::{CompletedTest, TestEvent},
12 |     filter_tests,
13 |     formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter},
14 |     helpers::{concurrency::get_concurrency, metrics::MetricMap},
15 |     options::{Options, OutputFormat},
16 |     run_tests,
17 |     test_result::TestResult,
18 |     time::{TestExecTime, TestSuiteExecTime},
19 |     types::{NamePadding, TestDesc, TestDescAndFn},
20 | };
21 |
22 | /// Generic wrapper over stdout.
23 | pub enum OutputLocation<T> {
24 |     Pretty(Box<term::StdoutTerminal>),
25 |     Raw(T),
26 | }
27 |
28 | impl<T: Write> Write for OutputLocation<T> {
29 |     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
30 |         match *self {
31 |             OutputLocation::Pretty(ref mut term) => term.write(buf),
32 |             OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
33 |         }
34 |     }
35 |
36 |     fn flush(&mut self) -> io::Result<()> {
37 |         match *self {
38 |             OutputLocation::Pretty(ref mut term) => term.flush(),
39 |             OutputLocation::Raw(ref mut stdout) => stdout.flush(),
40 |         }
41 |     }
42 | }
43 |
44 | pub struct ConsoleTestState {
45 |     pub log_out: Option<File>,
46 |     pub total: usize,
47 |     pub passed: usize,
48 |     pub failed: usize,
49 |     pub ignored: usize,
50 |     pub allowed_fail: usize,
51 |     pub filtered_out: usize,
52 |     pub measured: usize,
53 |     pub exec_time: Option<TestSuiteExecTime>,
54 |     pub metrics: MetricMap,
55 |     pub failures: Vec<(TestDesc, Vec<u8>)>,
56 |     pub not_failures: Vec<(TestDesc, Vec<u8>)>,
57 |     pub time_failures: Vec<(TestDesc, Vec<u8>)>,
58 |     pub options: Options,
59 | }
60 |
61 | impl ConsoleTestState {
62 |     pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
63 |         let log_out = match opts.logfile {
64 |             Some(ref path) => Some(File::create(path)?),
65 |             None => None,
66 |         };
67 |
68 |         Ok(ConsoleTestState {
69 |             log_out,
70 |             total: 0,
71 |             passed: 0,
72 |             failed: 0,
73 |             ignored: 0,
74 |             allowed_fail: 0,
75 |             filtered_out: 0,
76 |             measured: 0,
77 |             exec_time: None,
78 |             metrics: MetricMap::new(),
79 |             failures: Vec::new(),
80 |             not_failures: Vec::new(),
81 |             time_failures: Vec::new(),
82 |             options: opts.options,
83 |         })
84 |     }
85 |
86 |     pub fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
87 |     where
88 |         S: AsRef<str>,
89 |         F: FnOnce() -> S,
90 |     {
91 |         match self.log_out {
92 |             None => Ok(()),
93 |             Some(ref mut o) => {
94 |                 let msg = msg();
95 |                 let msg = msg.as_ref();
96 |                 o.write_all(msg.as_bytes())
97 |             }
98 |         }
99 |     }
100 |
101 |     pub fn write_log_result(
102 |         &mut self,
103 |         test: &TestDesc,
104 |         result: &TestResult,
105 |         exec_time: Option<&TestExecTime>,
106 |     ) -> io::Result<()> {
107 |         self.write_log(|| {
108 |             format!(
109 |                 "{} {}",
110 |                 match *result {
111 |                     TestResult::TrOk => "ok".to_owned(),
112 |                     TestResult::TrFailed => "failed".to_owned(),
113 |                     TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg),
114 |                     TestResult::TrIgnored => "ignored".to_owned(),
115 |                     TestResult::TrAllowedFail => "failed (allowed)".to_owned(),
116 |                     TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
117 |                     TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(),
118 |                 },
119 |                 test.name,
120 |             )
121 |         })?;
122 |         if let Some(exec_time) = exec_time {
123 |             self.write_log(|| format!(" <{}>", exec_time))?;
124 |         }
125 |         self.write_log(|| "\n")
126 |     }
127 |
128 |     fn current_test_count(&self) -> usize {
129 |         self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
130 |     }
131 | }
132 |
133 | // List the tests to console, and optionally to logfile. Filters are honored.
134 | pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
135 |     let mut output = match term::stdout() {
136 |         None => OutputLocation::Raw(io::stdout()),
137 |         Some(t) => OutputLocation::Pretty(t),
138 |     };
139 |
140 |     let quiet = opts.format == OutputFormat::Terse;
141 |     let mut st = ConsoleTestState::new(opts)?;
142 |
143 |     let mut ntest = 0;
144 |     let mut nbench = 0;
145 |
146 |     for test in filter_tests(&opts, tests) {
147 |         use crate::TestFn::*;
148 |
149 |         let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
150 |
151 |         let fntype = match testfn {
152 |             StaticTestFn(..) | DynTestFn(..) => {
153 |                 ntest += 1;
154 |                 "test"
155 |             }
156 |             StaticBenchFn(..) | DynBenchFn(..) => {
157 |                 nbench += 1;
158 |                 "benchmark"
159 |             }
160 |         };
161 |
162 |         writeln!(output, "{}: {}", name, fntype)?;
163 |         st.write_log(|| format!("{} {}\n", fntype, name))?;
164 |     }
165 |
166 |     fn plural(count: u32, s: &str) -> String {
167 |         match count {
168 |             1 => format!("{} {}", 1, s),
169 |             n => format!("{} {}s", n, s),
170 |         }
171 |     }
172 |
173 |     if !quiet {
174 |         if ntest != 0 || nbench != 0 {
175 |             writeln!(output)?;
176 |         }
177 |
178 |         writeln!(output, "{}, {}", plural(ntest, "test"), plural(nbench, "benchmark"))?;
179 |     }
180 |
181 |     Ok(())
182 | }
183 |
184 | // Updates `ConsoleTestState` depending on the result of the test execution.
185 | fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) {
186 |     let test = completed_test.desc;
187 |     let stdout = completed_test.stdout;
188 |     match completed_test.result {
189 |         TestResult::TrOk => {
190 |             st.passed += 1;
191 |             st.not_failures.push((test, stdout));
192 |         }
193 |         TestResult::TrIgnored => st.ignored += 1,
194 |         TestResult::TrAllowedFail => st.allowed_fail += 1,
195 |         TestResult::TrBench(bs) => {
196 |             st.metrics.insert_metric(
197 |                 test.name.as_slice(),
198 |                 bs.ns_iter_summ.median,
199 |                 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
200 |             );
201 |             st.measured += 1
202 |         }
203 |         TestResult::TrFailed => {
204 |             st.failed += 1;
205 |             st.failures.push((test, stdout));
206 |         }
207 |         TestResult::TrFailedMsg(msg) => {
208 |             st.failed += 1;
209 |             let mut stdout = stdout;
210 |             stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
211 |             st.failures.push((test, stdout));
212 |         }
213 |         TestResult::TrTimedFail => {
214 |             st.failed += 1;
215 |             st.time_failures.push((test, stdout));
216 |         }
217 |     }
218 | }
219 |
220 | // Handler for events that occur during test execution.
221 | // It is provided as a callback to the `run_tests` function.
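//
// Editorial illustration: a two-test serial run typically delivers
//   TeFiltered([a, b]), TeFilteredOut(n),
//   TeWait(a), TeResult(a), TeWait(b), TeResult(b),
// with `TeTimeout` interleaved for tests that exceed the warn timeout
// (the exact ordering of the bookkeeping events may differ).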
222 | fn on_test_event(
223 |     event: &TestEvent,
224 |     st: &mut ConsoleTestState,
225 |     out: &mut dyn OutputFormatter,
226 | ) -> io::Result<()> {
227 |     match (*event).clone() {
228 |         TestEvent::TeFiltered(ref filtered_tests) => {
229 |             st.total = filtered_tests.len();
230 |             out.write_run_start(filtered_tests.len())?;
231 |         }
232 |         TestEvent::TeFilteredOut(filtered_out) => {
233 |             st.filtered_out = filtered_out;
234 |         }
235 |         TestEvent::TeWait(ref test) => out.write_test_start(test)?,
236 |         TestEvent::TeTimeout(ref test) => out.write_timeout(test)?,
237 |         TestEvent::TeResult(completed_test) => {
238 |             let test = &completed_test.desc;
239 |             let result = &completed_test.result;
240 |             let exec_time = &completed_test.exec_time;
241 |             let stdout = &completed_test.stdout;
242 |
243 |             st.write_log_result(test, result, exec_time.as_ref())?;
244 |             out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?;
245 |             handle_test_result(st, completed_test);
246 |         }
247 |     }
248 |
249 |     Ok(())
250 | }
251 |
252 | /// A simple console test runner.
253 | /// Runs the provided tests, reporting progress and results to stdout.
254 | pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
255 |     let output = match term::stdout() {
256 |         None => OutputLocation::Raw(io::stdout()),
257 |         Some(t) => OutputLocation::Pretty(t),
258 |     };
259 |
260 |     let max_name_len = tests
261 |         .iter()
262 |         .max_by_key(|t| len_if_padded(*t))
263 |         .map(|t| t.desc.name.as_slice().len())
264 |         .unwrap_or(0);
265 |
266 |     let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
267 |
268 |     let mut out: Box<dyn OutputFormatter> = match opts.format {
269 |         OutputFormat::Pretty => Box::new(PrettyFormatter::new(
270 |             output,
271 |             opts.use_color(),
272 |             max_name_len,
273 |             is_multithreaded,
274 |             opts.time_options,
275 |         )),
276 |         OutputFormat::Terse => {
277 |             Box::new(TerseFormatter::new(output, opts.use_color(), max_name_len, is_multithreaded))
278 |         }
279 |         OutputFormat::Json => Box::new(JsonFormatter::new(output)),
280 |     };
281 |     let mut st = ConsoleTestState::new(opts)?;
282 |
283 |     // Prevent the usage of `Instant` in some cases:
284 |     // - It's currently not supported for wasm targets.
285 |     // - We disable it for miri because it's not available when isolation is enabled.
286 |     let is_instant_supported = !cfg!(target_arch = "wasm32") && !cfg!(miri);
287 |
288 |     let start_time = if is_instant_supported { Some(Instant::now()) } else { None };
289 |     run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
290 |     st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));
291 |
292 |     assert!(st.current_test_count() == st.total);
293 |
294 |     out.write_run_finish(&st)
295 | }
296 |
297 | // Calculates padding for a given test description.
298 | fn len_if_padded(t: &TestDescAndFn) -> usize {
299 |     match t.testfn.padding() {
300 |         NamePadding::PadNone => 0,
301 |         NamePadding::PadOnRight => t.desc.name.as_slice().len(),
302 |     }
303 | }
--------------------------------------------------------------------------------
/src/event.rs:
--------------------------------------------------------------------------------
1 | //! Module containing the different events that can occur
2 | //! during the test execution process.
3 |
4 | use super::test_result::TestResult;
5 | use super::time::TestExecTime;
6 | use super::types::TestDesc;
7 |
8 | #[derive(Debug, Clone)]
9 | pub struct CompletedTest {
10 |     pub desc: TestDesc,
11 |     pub result: TestResult,
12 |     pub exec_time: Option<TestExecTime>,
13 |     pub stdout: Vec<u8>,
14 | }
15 |
16 | impl CompletedTest {
17 |     pub fn new(
18 |         desc: TestDesc,
19 |         result: TestResult,
20 |         exec_time: Option<TestExecTime>,
21 |         stdout: Vec<u8>,
22 |     ) -> Self {
23 |         Self { desc, result, exec_time, stdout }
24 |     }
25 | }
26 |
27 | unsafe impl Send for CompletedTest {}
28 |
29 | #[derive(Debug, Clone)]
30 | pub enum TestEvent {
31 |     TeFiltered(Vec<TestDesc>),
32 |     TeWait(TestDesc),
33 |     TeResult(CompletedTest),
34 |     TeTimeout(TestDesc),
35 |     TeFilteredOut(usize),
36 | }
--------------------------------------------------------------------------------
/src/formatters/json.rs:
--------------------------------------------------------------------------------
1 | use std::{borrow::Cow, io, io::prelude::Write};
2 |
3 | use super::OutputFormatter;
4 | use crate::{
5 |     console::{ConsoleTestState, OutputLocation},
6 |     test_result::TestResult,
7 |     time,
8 |     types::TestDesc,
9 | };
10 |
11 | pub(crate) struct JsonFormatter<T> {
12 |     out: OutputLocation<T>,
13 | }
14 |
15 | impl<T: Write> JsonFormatter<T> {
16 |     pub fn new(out: OutputLocation<T>) -> Self {
17 |         Self { out }
18 |     }
19 |
20 |     fn writeln_message(&mut self, s: &str) -> io::Result<()> {
21 |         assert!(!s.contains('\n'));
22 |
23 |         self.out.write_all(s.as_ref())?;
24 |         self.out.write_all(b"\n")
25 |     }
26 |
27 |     fn write_message(&mut self, s: &str) -> io::Result<()> {
28 |         assert!(!s.contains('\n'));
29 |
30 |         self.out.write_all(s.as_ref())
31 |     }
32 |
33 |     fn write_event(
34 |         &mut self,
35 |         ty: &str,
36 |         name: &str,
37 |         evt: &str,
38 |         exec_time: Option<&time::TestExecTime>,
39 |         stdout: Option<Cow<'_, str>>,
40 |         extra: Option<&str>,
41 |     ) -> io::Result<()> {
42 |         // A doc test's name includes a filename which must be escaped for correct json.
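        // Editorial illustration -- a passing test typically serializes as:
        //   { "type": "test", "name": "tests::it_works", "event": "ok", "exec_time": 0.005 }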
43 |         self.write_message(&*format!(
44 |             r#"{{ "type": "{}", "name": "{}", "event": "{}""#,
45 |             ty,
46 |             EscapedString(name),
47 |             evt
48 |         ))?;
49 |         if let Some(exec_time) = exec_time {
50 |             self.write_message(&*format!(r#", "exec_time": {}"#, exec_time.0.as_secs_f64()))?;
51 |         }
52 |         if let Some(stdout) = stdout {
53 |             self.write_message(&*format!(r#", "stdout": "{}""#, EscapedString(stdout)))?;
54 |         }
55 |         if let Some(extra) = extra {
56 |             self.write_message(&*format!(r#", {}"#, extra))?;
57 |         }
58 |         self.writeln_message(" }")
59 |     }
60 | }
61 |
62 | impl<T: Write> OutputFormatter for JsonFormatter<T> {
63 |     fn write_run_start(&mut self, test_count: usize) -> io::Result<()> {
64 |         self.writeln_message(&*format!(
65 |             r#"{{ "type": "suite", "event": "started", "test_count": {} }}"#,
66 |             test_count
67 |         ))
68 |     }
69 |
70 |     fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
71 |         self.writeln_message(&*format!(
72 |             r#"{{ "type": "test", "event": "started", "name": "{}" }}"#,
73 |             EscapedString(desc.name.as_slice())
74 |         ))
75 |     }
76 |
77 |     fn write_result(
78 |         &mut self,
79 |         desc: &TestDesc,
80 |         result: &TestResult,
81 |         exec_time: Option<&time::TestExecTime>,
82 |         stdout: &[u8],
83 |         state: &ConsoleTestState,
84 |     ) -> io::Result<()> {
85 |         let display_stdout = state.options.display_output || *result != TestResult::TrOk;
86 |         let stdout = if display_stdout && !stdout.is_empty() {
87 |             Some(String::from_utf8_lossy(stdout))
88 |         } else {
89 |             None
90 |         };
91 |         match *result {
92 |             TestResult::TrOk => {
93 |                 self.write_event("test", desc.name.as_slice(), "ok", exec_time, stdout, None)
94 |             }
95 |
96 |             TestResult::TrFailed => {
97 |                 self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None)
98 |             }
99 |
100 |             TestResult::TrTimedFail => self.write_event(
101 |                 "test",
102 |                 desc.name.as_slice(),
103 |                 "failed",
104 |                 exec_time,
105 |                 stdout,
106 |                 Some(r#""reason": "time limit exceeded""#),
107 |             ),
108 |
109 |             TestResult::TrFailedMsg(ref m) => self.write_event(
110 |                 "test",
111 |                 desc.name.as_slice(),
112 |                 "failed",
113 |                 exec_time,
114 |                 stdout,
115 |                 Some(&*format!(r#""message": "{}""#, EscapedString(m))),
116 |             ),
117 |
118 |             TestResult::TrIgnored => {
119 |                 self.write_event("test", desc.name.as_slice(), "ignored", exec_time, stdout, None)
120 |             }
121 |
122 |             TestResult::TrAllowedFail => self.write_event(
123 |                 "test",
124 |                 desc.name.as_slice(),
125 |                 "allowed_failure",
126 |                 exec_time,
127 |                 stdout,
128 |                 None,
129 |             ),
130 |
131 |             TestResult::TrBench(ref bs) => {
132 |                 let median = bs.ns_iter_summ.median as usize;
133 |                 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
134 |
135 |                 let mbps = if bs.mb_s == 0 {
136 |                     String::new()
137 |                 } else {
138 |                     format!(r#", "mib_per_second": {}"#, bs.mb_s)
139 |                 };
140 |
141 |                 let line = format!(
142 |                     "{{ \"type\": \"bench\", \
143 |                      \"name\": \"{}\", \
144 |                      \"median\": {}, \
145 |                      \"deviation\": {}{} }}",
146 |                     EscapedString(desc.name.as_slice()),
147 |                     median,
148 |                     deviation,
149 |                     mbps
150 |                 );
151 |
152 |                 self.writeln_message(&*line)
153 |             }
154 |         }
155 |     }
156 |
157 |     fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
158 |         self.writeln_message(&*format!(
159 |             r#"{{ "type": "test", "event": "timeout", "name": "{}" }}"#,
160 |             EscapedString(desc.name.as_slice())
161 |         ))
162 |     }
163 |
164 |     fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
165 |         self.write_message(&*format!(
166 |             "{{ \"type\": \"suite\", \
167 |              \"event\": \"{}\", \
168 |              \"passed\": {}, \
169 |              \"failed\": {}, \
170 |              \"allowed_fail\": {}, \
171 |              \"ignored\": {}, \
172 |              \"measured\": {}, \
173 |              \"filtered_out\": {}",
174 |             if state.failed == 0 { "ok" } else { "failed" },
175 |             state.passed,
176 |             state.failed + state.allowed_fail,
177 |             state.allowed_fail,
178 |             state.ignored,
179 |             state.measured,
180 |             state.filtered_out,
181 |         ))?;
182 |
183 |         if let Some(ref exec_time) = state.exec_time {
184 |             let time_str = format!(", \"exec_time\": {}", exec_time.0.as_secs_f64());
185 |             self.write_message(&time_str)?;
186 |         }
187 |
188 |         self.writeln_message(" }")?;
189 |
190 |         Ok(state.failed == 0)
191 |     }
192 | }
193 |
194 | /// A formatting utility used to print strings with characters in need of escaping.
195 | /// Base code taken from `libserialize::json::escape_str`.
196 | struct EscapedString<S: AsRef<str>>(S);
197 |
198 | impl<S: AsRef<str>> std::fmt::Display for EscapedString<S> {
199 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> ::std::fmt::Result {
200 |         let mut start = 0;
201 |
202 |         for (i, byte) in self.0.as_ref().bytes().enumerate() {
203 |             let escaped = match byte {
204 |                 b'"' => "\\\"",
205 |                 b'\\' => "\\\\",
206 |                 b'\x00' => "\\u0000",
207 |                 b'\x01' => "\\u0001",
208 |                 b'\x02' => "\\u0002",
209 |                 b'\x03' => "\\u0003",
210 |                 b'\x04' => "\\u0004",
211 |                 b'\x05' => "\\u0005",
212 |                 b'\x06' => "\\u0006",
213 |                 b'\x07' => "\\u0007",
214 |                 b'\x08' => "\\b",
215 |                 b'\t' => "\\t",
216 |                 b'\n' => "\\n",
217 |                 b'\x0b' => "\\u000b",
218 |                 b'\x0c' => "\\f",
219 |                 b'\r' => "\\r",
220 |                 b'\x0e' => "\\u000e",
221 |                 b'\x0f' => "\\u000f",
222 |                 b'\x10' => "\\u0010",
223 |                 b'\x11' => "\\u0011",
224 |                 b'\x12' => "\\u0012",
225 |                 b'\x13' => "\\u0013",
226 |                 b'\x14' => "\\u0014",
227 |                 b'\x15' => "\\u0015",
228 |                 b'\x16' => "\\u0016",
229 |                 b'\x17' => "\\u0017",
230 |                 b'\x18' => "\\u0018",
231 |                 b'\x19' => "\\u0019",
232 |                 b'\x1a' => "\\u001a",
233 |                 b'\x1b' => "\\u001b",
234 |                 b'\x1c' => "\\u001c",
235 |                 b'\x1d' => "\\u001d",
236 |                 b'\x1e' => "\\u001e",
237 |                 b'\x1f' => "\\u001f",
238 |                 b'\x7f' => "\\u007f",
239 |                 _ => {
240 |                     continue;
241 |                 }
242 |             };
243 |
244 |             if start < i {
245 |                 f.write_str(&self.0.as_ref()[start..i])?;
246 |             }
247 |
248 |             f.write_str(escaped)?;
249 |
250 |             start = i + 1;
251 |         }
252 |
253 |         if start != self.0.as_ref().len() {
254 |             f.write_str(&self.0.as_ref()[start..])?;
255 |         }
256 |
257 |         Ok(())
258 |     }
259 | }
--------------------------------------------------------------------------------
/src/formatters/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{io, io::prelude::Write};
2 |
3 | use crate::{
4 |     console::ConsoleTestState,
5 |     test_result::TestResult,
6 |     time,
7 |     types::{TestDesc, TestName},
8 | };
9 |
10 | mod json;
11 | mod pretty;
12 | mod terse;
13 |
14 | pub(crate) use self::json::JsonFormatter;
15 | pub(crate) use self::pretty::PrettyFormatter;
16 | pub(crate) use self::terse::TerseFormatter;
17 |
18 | pub(crate) trait OutputFormatter {
19 |     fn write_run_start(&mut self, test_count: usize) -> io::Result<()>;
20 |     fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()>;
21 |     fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()>;
22 |     fn write_result(
23 |         &mut self,
24 |         desc: &TestDesc,
25 |         result: &TestResult,
26 |         exec_time: Option<&time::TestExecTime>,
27 |         stdout: &[u8],
28 |         state: &ConsoleTestState,
29 |     ) -> io::Result<()>;
30 |     fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool>;
31 | }
32 |
33 | pub(crate) fn write_stderr_delimiter(test_output: &mut Vec<u8>, test_name: &TestName) {
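    // Ensure any existing captured output ends with a newline before the
    // "---- <name> stderr ----" header is appended.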
34 |     match test_output.last() {
35 |         Some(b'\n') => (),
36 |         Some(_) => test_output.push(b'\n'),
37 |         None => (),
38 |     }
39 |     writeln!(test_output, "---- {} stderr ----", test_name).unwrap();
40 | }
--------------------------------------------------------------------------------
/src/formatters/pretty.rs:
--------------------------------------------------------------------------------
1 | use std::{io, io::prelude::Write};
2 |
3 | use super::OutputFormatter;
4 | use crate::{
5 |     bench::fmt_bench_samples,
6 |     console::{ConsoleTestState, OutputLocation},
7 |     test_result::TestResult,
8 |     time,
9 |     types::TestDesc,
10 | };
11 |
12 | pub(crate) struct PrettyFormatter<T> {
13 |     out: OutputLocation<T>,
14 |     use_color: bool,
15 |     time_options: Option<time::TestTimeOptions>,
16 |
17 |     /// Number of columns to fill when aligning names
18 |     max_name_len: usize,
19 |
20 |     is_multithreaded: bool,
21 | }
22 |
23 | impl<T: Write> PrettyFormatter<T> {
24 |     pub fn new(
25 |         out: OutputLocation<T>,
26 |         use_color: bool,
27 |         max_name_len: usize,
28 |         is_multithreaded: bool,
29 |         time_options: Option<time::TestTimeOptions>,
30 |     ) -> Self {
31 |         PrettyFormatter { out, use_color, max_name_len, is_multithreaded, time_options }
32 |     }
33 |
34 |     #[cfg(test)]
35 |     pub fn output_location(&self) -> &OutputLocation<T> {
36 |         &self.out
37 |     }
38 |
39 |     pub fn write_ok(&mut self) -> io::Result<()> {
40 |         self.write_short_result("ok", term::color::GREEN)
41 |     }
42 |
43 |     pub fn write_failed(&mut self) -> io::Result<()> {
44 |         self.write_short_result("FAILED", term::color::RED)
45 |     }
46 |
47 |     pub fn write_ignored(&mut self) -> io::Result<()> {
48 |         self.write_short_result("ignored", term::color::YELLOW)
49 |     }
50 |
51 |     pub fn write_allowed_fail(&mut self) -> io::Result<()> {
52 |         self.write_short_result("FAILED (allowed)", term::color::YELLOW)
53 |     }
54 |
55 |     pub fn write_time_failed(&mut self) -> io::Result<()> {
56 |         self.write_short_result("FAILED (time limit exceeded)", term::color::RED)
57 |     }
58 |
59 |     pub fn write_bench(&mut self) -> io::Result<()> {
60 |         self.write_pretty("bench", term::color::CYAN)
61 |     }
62 |
63 |     pub fn write_short_result(
64 |         &mut self,
65 |         result: &str,
66 |         color: term::color::Color,
67 |     ) -> io::Result<()> {
68 |         self.write_pretty(result, color)
69 |     }
70 |
71 |     pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
72 |         match self.out {
73 |             OutputLocation::Pretty(ref mut term) => {
74 |                 if self.use_color {
75 |                     term.fg(color)?;
76 |                 }
77 |                 term.write_all(word.as_bytes())?;
78 |                 if self.use_color {
79 |                     term.reset()?;
80 |                 }
81 |                 term.flush()
82 |             }
83 |             OutputLocation::Raw(ref mut stdout) => {
84 |                 stdout.write_all(word.as_bytes())?;
85 |                 stdout.flush()
86 |             }
87 |         }
88 |     }
89 |
90 |     pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
91 |         let s = s.as_ref();
92 |         self.out.write_all(s.as_bytes())?;
93 |         self.out.flush()
94 |     }
95 |
96 |     fn write_time(
97 |         &mut self,
98 |         desc: &TestDesc,
99 |         exec_time: Option<&time::TestExecTime>,
100 |     ) -> io::Result<()> {
101 |         if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
102 |             let time_str = format!(" <{}>", time);
103 |
104 |             let color = if opts.colored {
105 |                 if opts.is_critical(desc, time) {
106 |                     Some(term::color::RED)
107 |                 } else if opts.is_warn(desc, time) {
108 |                     Some(term::color::YELLOW)
109 |                 } else {
110 |                     None
111 |                 }
112 |             } else {
113 |                 None
114 |             };
115 |
116 |             match color {
117 |                 Some(color) => self.write_pretty(&time_str, color)?,
118 |                 None => self.write_plain(&time_str)?,
119 |             }
120 |         }
121 |
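        // Editorial note: with `report-time` active a result line ends up
        // looking like `test foo ... ok <0.123s>` (assuming `TestExecTime`
        // renders seconds the way upstream libtest does).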
Ok(()) 123 | } 124 | 125 | fn write_results( 126 | &mut self, 127 | inputs: &Vec<(TestDesc, Vec)>, 128 | results_type: &str, 129 | ) -> io::Result<()> { 130 | let results_out_str = format!("\n{}:\n", results_type); 131 | 132 | self.write_plain(&results_out_str)?; 133 | 134 | let mut results = Vec::new(); 135 | let mut stdouts = String::new(); 136 | for &(ref f, ref stdout) in inputs { 137 | results.push(f.name.to_string()); 138 | if !stdout.is_empty() { 139 | stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); 140 | let output = String::from_utf8_lossy(stdout); 141 | stdouts.push_str(&output); 142 | stdouts.push('\n'); 143 | } 144 | } 145 | if !stdouts.is_empty() { 146 | self.write_plain("\n")?; 147 | self.write_plain(&stdouts)?; 148 | } 149 | 150 | self.write_plain(&results_out_str)?; 151 | results.sort(); 152 | for name in &results { 153 | self.write_plain(&format!(" {}\n", name))?; 154 | } 155 | Ok(()) 156 | } 157 | 158 | pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> { 159 | self.write_results(&state.not_failures, "successes") 160 | } 161 | 162 | pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { 163 | self.write_results(&state.failures, "failures") 164 | } 165 | 166 | pub fn write_time_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { 167 | self.write_results(&state.time_failures, "failures (time limit exceeded)") 168 | } 169 | 170 | fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { 171 | let name = desc.padded_name(self.max_name_len, desc.name.padding()); 172 | self.write_plain(&format!("test {} ... ", name))?; 173 | 174 | Ok(()) 175 | } 176 | } 177 | 178 | impl OutputFormatter for PrettyFormatter { 179 | fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { 180 | let noun = if test_count != 1 { "tests" } else { "test" }; 181 | self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) 182 | } 183 | 184 | fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { 185 | // When running tests concurrently, we should not print 186 | // the test's name as the result will be mis-aligned. 187 | // When running the tests serially, we print the name here so 188 | // that the user can see which test hangs. 
189 | if !self.is_multithreaded { 190 | self.write_test_name(desc)?; 191 | } 192 | 193 | Ok(()) 194 | } 195 | 196 | fn write_result( 197 | &mut self, 198 | desc: &TestDesc, 199 | result: &TestResult, 200 | exec_time: Option<&time::TestExecTime>, 201 | _: &[u8], 202 | _: &ConsoleTestState, 203 | ) -> io::Result<()> { 204 | if self.is_multithreaded { 205 | self.write_test_name(desc)?; 206 | } 207 | 208 | match *result { 209 | TestResult::TrOk => self.write_ok()?, 210 | TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?, 211 | TestResult::TrIgnored => self.write_ignored()?, 212 | TestResult::TrAllowedFail => self.write_allowed_fail()?, 213 | TestResult::TrBench(ref bs) => { 214 | self.write_bench()?; 215 | self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?; 216 | } 217 | TestResult::TrTimedFail => self.write_time_failed()?, 218 | } 219 | 220 | self.write_time(desc, exec_time)?; 221 | self.write_plain("\n") 222 | } 223 | 224 | fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { 225 | if self.is_multithreaded { 226 | self.write_test_name(desc)?; 227 | } 228 | 229 | self.write_plain(&format!( 230 | "test {} has been running for over {} seconds\n", 231 | desc.name, 232 | time::TEST_WARN_TIMEOUT_S 233 | )) 234 | } 235 | 236 | fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result { 237 | if state.options.display_output { 238 | self.write_successes(state)?; 239 | } 240 | let success = state.failed == 0; 241 | if !success { 242 | if !state.failures.is_empty() { 243 | self.write_failures(state)?; 244 | } 245 | 246 | if !state.time_failures.is_empty() { 247 | self.write_time_failures(state)?; 248 | } 249 | } 250 | 251 | self.write_plain("\ntest result: ")?; 252 | 253 | if success { 254 | // There's no parallelism at this point so it's safe to use color 255 | self.write_pretty("ok", term::color::GREEN)?; 256 | } else { 257 | self.write_pretty("FAILED", term::color::RED)?; 258 | } 259 | 260 | let s = if state.allowed_fail > 0 { 261 | format!( 262 | ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out", 263 | state.passed, 264 | state.failed + state.allowed_fail, 265 | state.allowed_fail, 266 | state.ignored, 267 | state.measured, 268 | state.filtered_out 269 | ) 270 | } else { 271 | format!( 272 | ". 
{} passed; {} failed; {} ignored; {} measured; {} filtered out", 273 | state.passed, state.failed, state.ignored, state.measured, state.filtered_out 274 | ) 275 | }; 276 | 277 | self.write_plain(&s)?; 278 | 279 | if let Some(ref exec_time) = state.exec_time { 280 | let time_str = format!("; finished in {}", exec_time); 281 | self.write_plain(&time_str)?; 282 | } 283 | 284 | self.write_plain("\n\n")?; 285 | 286 | Ok(success) 287 | } 288 | } 289 | -------------------------------------------------------------------------------- /src/formatters/terse.rs: -------------------------------------------------------------------------------- 1 | use std::{io, io::prelude::Write}; 2 | 3 | use super::OutputFormatter; 4 | use crate::{ 5 | bench::fmt_bench_samples, 6 | console::{ConsoleTestState, OutputLocation}, 7 | test_result::TestResult, 8 | time, 9 | types::NamePadding, 10 | types::TestDesc, 11 | }; 12 | 13 | // insert a '\n' after 100 tests in quiet mode 14 | const QUIET_MODE_MAX_COLUMN: usize = 100; 15 | 16 | pub(crate) struct TerseFormatter { 17 | out: OutputLocation, 18 | use_color: bool, 19 | is_multithreaded: bool, 20 | /// Number of columns to fill when aligning names 21 | max_name_len: usize, 22 | 23 | test_count: usize, 24 | total_test_count: usize, 25 | } 26 | 27 | impl TerseFormatter { 28 | pub fn new( 29 | out: OutputLocation, 30 | use_color: bool, 31 | max_name_len: usize, 32 | is_multithreaded: bool, 33 | ) -> Self { 34 | TerseFormatter { 35 | out, 36 | use_color, 37 | max_name_len, 38 | is_multithreaded, 39 | test_count: 0, 40 | total_test_count: 0, // initialized later, when write_run_start is called 41 | } 42 | } 43 | 44 | pub fn write_ok(&mut self) -> io::Result<()> { 45 | self.write_short_result(".", term::color::GREEN) 46 | } 47 | 48 | pub fn write_failed(&mut self) -> io::Result<()> { 49 | self.write_short_result("F", term::color::RED) 50 | } 51 | 52 | pub fn write_ignored(&mut self) -> io::Result<()> { 53 | self.write_short_result("i", term::color::YELLOW) 54 | } 55 | 56 | pub fn write_allowed_fail(&mut self) -> io::Result<()> { 57 | self.write_short_result("a", term::color::YELLOW) 58 | } 59 | 60 | pub fn write_bench(&mut self) -> io::Result<()> { 61 | self.write_pretty("bench", term::color::CYAN) 62 | } 63 | 64 | pub fn write_short_result( 65 | &mut self, 66 | result: &str, 67 | color: term::color::Color, 68 | ) -> io::Result<()> { 69 | self.write_pretty(result, color)?; 70 | if self.test_count % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 { 71 | // we insert a new line every 100 dots in order to flush the 72 | // screen when dealing with line-buffered output (e.g., piping to 73 | // `stamp` in the rust CI). 
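            // Worked example (illustrative): with 500 tests in total, the 100th
            // result arrives with test_count == 99; 99 % 100 == 99 matches, so the
            // line below appends " 100/500\n" before the next row of dots starts.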
74 | let out = format!(" {}/{}\n", self.test_count + 1, self.total_test_count); 75 | self.write_plain(&out)?; 76 | } 77 | 78 | self.test_count += 1; 79 | Ok(()) 80 | } 81 | 82 | pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> { 83 | match self.out { 84 | OutputLocation::Pretty(ref mut term) => { 85 | if self.use_color { 86 | term.fg(color)?; 87 | } 88 | term.write_all(word.as_bytes())?; 89 | if self.use_color { 90 | term.reset()?; 91 | } 92 | term.flush() 93 | } 94 | OutputLocation::Raw(ref mut stdout) => { 95 | stdout.write_all(word.as_bytes())?; 96 | stdout.flush() 97 | } 98 | } 99 | } 100 | 101 | pub fn write_plain>(&mut self, s: S) -> io::Result<()> { 102 | let s = s.as_ref(); 103 | self.out.write_all(s.as_bytes())?; 104 | self.out.flush() 105 | } 106 | 107 | pub fn write_outputs(&mut self, state: &ConsoleTestState) -> io::Result<()> { 108 | self.write_plain("\nsuccesses:\n")?; 109 | let mut successes = Vec::new(); 110 | let mut stdouts = String::new(); 111 | for &(ref f, ref stdout) in &state.not_failures { 112 | successes.push(f.name.to_string()); 113 | if !stdout.is_empty() { 114 | stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); 115 | let output = String::from_utf8_lossy(stdout); 116 | stdouts.push_str(&output); 117 | stdouts.push('\n'); 118 | } 119 | } 120 | if !stdouts.is_empty() { 121 | self.write_plain("\n")?; 122 | self.write_plain(&stdouts)?; 123 | } 124 | 125 | self.write_plain("\nsuccesses:\n")?; 126 | successes.sort(); 127 | for name in &successes { 128 | self.write_plain(&format!(" {}\n", name))?; 129 | } 130 | Ok(()) 131 | } 132 | 133 | pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { 134 | self.write_plain("\nfailures:\n")?; 135 | let mut failures = Vec::new(); 136 | let mut fail_out = String::new(); 137 | for &(ref f, ref stdout) in &state.failures { 138 | failures.push(f.name.to_string()); 139 | if !stdout.is_empty() { 140 | fail_out.push_str(&format!("---- {} stdout ----\n", f.name)); 141 | let output = String::from_utf8_lossy(stdout); 142 | fail_out.push_str(&output); 143 | fail_out.push('\n'); 144 | } 145 | } 146 | if !fail_out.is_empty() { 147 | self.write_plain("\n")?; 148 | self.write_plain(&fail_out)?; 149 | } 150 | 151 | self.write_plain("\nfailures:\n")?; 152 | failures.sort(); 153 | for name in &failures { 154 | self.write_plain(&format!(" {}\n", name))?; 155 | } 156 | Ok(()) 157 | } 158 | 159 | fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { 160 | let name = desc.padded_name(self.max_name_len, desc.name.padding()); 161 | self.write_plain(&format!("test {} ... ", name))?; 162 | 163 | Ok(()) 164 | } 165 | } 166 | 167 | impl OutputFormatter for TerseFormatter { 168 | fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { 169 | self.total_test_count = test_count; 170 | let noun = if test_count != 1 { "tests" } else { "test" }; 171 | self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) 172 | } 173 | 174 | fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { 175 | // Remnants from old libtest code that used the padding value 176 | // in order to indicate benchmarks. 177 | // When running benchmarks, terse-mode should still print their name as if 178 | // it is the Pretty formatter. 
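        // Hedged note: upstream libtest pads only benchmark names on the right
        // (NamePadding::PadOnRight), so in practice this branch prints names for
        // benches while ordinary tests remain bare dots.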
179 | if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight { 180 | self.write_test_name(desc)?; 181 | } 182 | 183 | Ok(()) 184 | } 185 | 186 | fn write_result( 187 | &mut self, 188 | desc: &TestDesc, 189 | result: &TestResult, 190 | _: Option<&time::TestExecTime>, 191 | _: &[u8], 192 | _: &ConsoleTestState, 193 | ) -> io::Result<()> { 194 | match *result { 195 | TestResult::TrOk => self.write_ok(), 196 | TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => { 197 | self.write_failed() 198 | } 199 | TestResult::TrIgnored => self.write_ignored(), 200 | TestResult::TrAllowedFail => self.write_allowed_fail(), 201 | TestResult::TrBench(ref bs) => { 202 | if self.is_multithreaded { 203 | self.write_test_name(desc)?; 204 | } 205 | self.write_bench()?; 206 | self.write_plain(&format!(": {}\n", fmt_bench_samples(bs))) 207 | } 208 | } 209 | } 210 | 211 | fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { 212 | self.write_plain(&format!( 213 | "test {} has been running for over {} seconds\n", 214 | desc.name, 215 | time::TEST_WARN_TIMEOUT_S 216 | )) 217 | } 218 | 219 | fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result { 220 | if state.options.display_output { 221 | self.write_outputs(state)?; 222 | } 223 | let success = state.failed == 0; 224 | if !success { 225 | self.write_failures(state)?; 226 | } 227 | 228 | self.write_plain("\ntest result: ")?; 229 | 230 | if success { 231 | // There's no parallelism at this point so it's safe to use color 232 | self.write_pretty("ok", term::color::GREEN)?; 233 | } else { 234 | self.write_pretty("FAILED", term::color::RED)?; 235 | } 236 | 237 | let s = if state.allowed_fail > 0 { 238 | format!( 239 | ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out", 240 | state.passed, 241 | state.failed + state.allowed_fail, 242 | state.allowed_fail, 243 | state.ignored, 244 | state.measured, 245 | state.filtered_out 246 | ) 247 | } else { 248 | format!( 249 | ". {} passed; {} failed; {} ignored; {} measured; {} filtered out", 250 | state.passed, state.failed, state.ignored, state.measured, state.filtered_out 251 | ) 252 | }; 253 | 254 | self.write_plain(&s)?; 255 | 256 | if let Some(ref exec_time) = state.exec_time { 257 | let time_str = format!("; finished in {}", exec_time); 258 | self.write_plain(&time_str)?; 259 | } 260 | 261 | self.write_plain("\n\n")?; 262 | 263 | Ok(success) 264 | } 265 | } 266 | -------------------------------------------------------------------------------- /src/helpers/concurrency.rs: -------------------------------------------------------------------------------- 1 | //! Helper module which helps to determine amount of threads to be used 2 | //! during tests execution. 3 | use std::{env, num::NonZeroUsize}; 4 | 5 | pub fn get_concurrency() -> usize { 6 | if let Ok(value) = env::var("RUST_TEST_THREADS") { 7 | match value.parse::().ok() { 8 | Some(n) => n.get(), 9 | _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", value), 10 | } 11 | } else { 12 | num_cpus::get_physical() 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/helpers/exit_code.rs: -------------------------------------------------------------------------------- 1 | //! Helper module to detect subprocess exit code. 
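//!
//! A minimal usage sketch (illustrative; assumes a Unix-like `true` command
//! is available to spawn):
//!
//! ```ignore
//! use std::process::Command;
//!
//! let status = Command::new("true").status().expect("failed to spawn");
//! match get_exit_code(status) {
//!     Ok(code) => assert_eq!(code, 0),
//!     Err(msg) => eprintln!("no exit code: {}", msg),
//! }
//! ```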
 2 | 
 3 | use std::process::ExitStatus;
 4 | 
 5 | #[cfg(not(unix))]
 6 | pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
 7 |     status.code().ok_or("received no exit code from child process".into())
 8 | }
 9 | 
10 | #[cfg(unix)]
11 | pub fn get_exit_code(status: ExitStatus) -> Result<i32, String> {
12 |     use std::os::unix::process::ExitStatusExt;
13 |     match status.code() {
14 |         Some(code) => Ok(code),
15 |         None => match status.signal() {
16 |             Some(signal) => Err(format!("child process exited with signal {}", signal)),
17 |             None => Err("child process exited with unknown signal".into()),
18 |         },
19 |     }
20 | }
--------------------------------------------------------------------------------
/src/helpers/isatty.rs:
--------------------------------------------------------------------------------
 1 | //! Helper module which provides a function to test
 2 | //! if stdout is a tty.
 3 | 
 4 | cfg_if::cfg_if! {
 5 |     if #[cfg(unix)] {
 6 |         pub fn stdout_isatty() -> bool {
 7 |             unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
 8 |         }
 9 |     } else if #[cfg(windows)] {
10 |         pub fn stdout_isatty() -> bool {
11 |             type DWORD = u32;
12 |             type BOOL = i32;
13 |             type HANDLE = *mut u8;
14 |             type LPDWORD = *mut u32;
15 |             const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
16 |             extern "system" {
17 |                 fn GetStdHandle(which: DWORD) -> HANDLE;
18 |                 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
19 |             }
20 |             unsafe {
21 |                 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
22 |                 let mut out = 0;
23 |                 GetConsoleMode(handle, &mut out) != 0
24 |             }
25 |         }
26 |     } else {
27 |         // FIXME: Implement isatty on SGX
28 |         pub fn stdout_isatty() -> bool {
29 |             false
30 |         }
31 |     }
32 | }
--------------------------------------------------------------------------------
/src/helpers/metrics.rs:
--------------------------------------------------------------------------------
 1 | //! Benchmark metrics.
 2 | use std::collections::BTreeMap;
 3 | 
 4 | #[derive(Clone, PartialEq, Debug, Copy)]
 5 | pub struct Metric {
 6 |     value: f64,
 7 |     noise: f64,
 8 | }
 9 | 
10 | impl Metric {
11 |     pub fn new(value: f64, noise: f64) -> Metric {
12 |         Metric { value, noise }
13 |     }
14 | }
15 | 
16 | #[derive(Clone, PartialEq)]
17 | pub struct MetricMap(BTreeMap<String, Metric>);
18 | 
19 | impl MetricMap {
20 |     pub fn new() -> MetricMap {
21 |         MetricMap(BTreeMap::new())
22 |     }
23 | 
24 |     /// Insert a named `value` (+/- `noise`) metric into the map. The value
25 |     /// must be non-negative. The `noise` indicates the uncertainty of the
26 |     /// metric, which doubles as the "noise range" of acceptable
27 |     /// pairwise-regressions on this named value, when comparing from one
28 |     /// metric to the next using `compare_to_old`.
29 |     ///
30 |     /// If `noise` is positive, then it means this metric is of a value
31 |     /// you want to see grow smaller, so a change larger than `noise` in the
32 |     /// positive direction represents a regression.
33 |     ///
34 |     /// If `noise` is negative, then it means this metric is of a value
35 |     /// you want to see grow larger, so a change larger than `noise` in the
36 |     /// negative direction represents a regression.
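    ///
    /// A small illustrative example (the `tester` crate name and the `test`
    /// re-export path are taken from this crate's own manifest and `lib.rs`):
    ///
    /// ```
    /// use tester::test::MetricMap;
    ///
    /// let mut mm = MetricMap::new();
    /// mm.insert_metric("alloc-bytes", 1024.0, 64.0); // smaller is better
    /// mm.insert_metric("throughput", 500.0, -25.0); // larger is better
    /// assert_eq!(mm.fmt_metrics(), "alloc-bytes: 1024 (+/- 64), throughput: 500 (+/- -25)");
    /// ```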
37 | pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) { 38 | let m = Metric { value, noise }; 39 | self.0.insert(name.to_owned(), m); 40 | } 41 | 42 | pub fn fmt_metrics(&self) -> String { 43 | let v = self 44 | .0 45 | .iter() 46 | .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise)) 47 | .collect::>(); 48 | v.join(", ") 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /src/helpers/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module with common helpers not directly related to tests 2 | //! but used in `libtest`. 3 | 4 | pub mod concurrency; 5 | pub mod exit_code; 6 | pub mod isatty; 7 | pub mod metrics; 8 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! Support code for rustc's built in unit-test and micro-benchmarking 2 | //! framework. 3 | //! 4 | //! Almost all user code will only be interested in `Bencher` and 5 | //! `black_box`. All other interactions (such as writing tests and 6 | //! benchmarks themselves) should be done via the `#[test]` and 7 | //! `#[bench]` attributes. 8 | //! 9 | //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details. 10 | 11 | // Currently, not much of this is meant for users. It is intended to 12 | // support the simplest interface possible for representing and 13 | // running tests while providing a base that other test frameworks may 14 | // build off of. 15 | 16 | // N.B., this is also specified in this crate's Cargo.toml, but librustc_ast contains logic specific to 17 | // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by 18 | // cargo) to detect this crate. 
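// Editor's note (hedged): both feature gates below require a nightly
// toolchain (e.g. `cargo test --all-features` on nightly, as in CI);
// stable builds must leave these Cargo features disabled.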
19 | #![cfg_attr(feature = "asm_black_box", feature(test))] 20 | #![cfg_attr(feature = "capture", feature(internal_output_capture))] 21 | 22 | // Public reexports 23 | pub use self::bench::{black_box, Bencher}; 24 | pub use self::console::run_tests_console; 25 | pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic}; 26 | pub use self::types::TestName::*; 27 | pub use self::types::*; 28 | pub use self::ColorConfig::*; 29 | pub use cli::TestOpts; 30 | 31 | // Module to be used by rustc to compile tests in libtest 32 | pub mod test { 33 | pub use crate::{ 34 | assert_test_result, 35 | bench::Bencher, 36 | cli::{parse_opts, TestOpts}, 37 | filter_tests, 38 | helpers::metrics::{Metric, MetricMap}, 39 | options::{Options, RunIgnored, RunStrategy, ShouldPanic}, 40 | run_test, test_main, test_main_static, 41 | test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk}, 42 | time::{TestExecTime, TestTimeOptions}, 43 | types::{ 44 | DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, 45 | TestDescAndFn, TestName, TestType, 46 | }, 47 | }; 48 | } 49 | 50 | use std::{ 51 | env, io, 52 | io::prelude::Write, 53 | panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}, 54 | process::{self, Command}, 55 | sync::mpsc::{channel, Sender}, 56 | sync::{Arc, Mutex}, 57 | thread, 58 | time::{Duration, Instant}, 59 | }; 60 | 61 | pub mod bench; 62 | mod cli; 63 | mod console; 64 | mod event; 65 | mod formatters; 66 | mod helpers; 67 | mod options; 68 | pub mod stats; 69 | mod test_result; 70 | mod time; 71 | mod types; 72 | 73 | #[cfg(test)] 74 | mod tests; 75 | 76 | use event::{CompletedTest, TestEvent}; 77 | use helpers::concurrency::get_concurrency; 78 | use helpers::exit_code::get_exit_code; 79 | use options::{Concurrent, RunStrategy}; 80 | use test_result::*; 81 | use time::TestExecTime; 82 | 83 | // Process exit code to be used to indicate test failures. 84 | const ERROR_EXIT_CODE: i32 = 101; 85 | 86 | const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE"; 87 | 88 | // The default console test runner. It accepts the command line 89 | // arguments and a vector of test_descs. 90 | pub fn test_main(args: &[String], tests: Vec, options: Option) { 91 | let mut opts = match cli::parse_opts(args) { 92 | Some(Ok(o)) => o, 93 | Some(Err(msg)) => { 94 | eprintln!("error: {}", msg); 95 | process::exit(ERROR_EXIT_CODE); 96 | } 97 | None => return, 98 | }; 99 | if let Some(options) = options { 100 | opts.options = options; 101 | } 102 | if opts.list { 103 | if let Err(e) = console::list_tests_console(&opts, tests) { 104 | eprintln!("error: io error when listing tests: {:?}", e); 105 | process::exit(ERROR_EXIT_CODE); 106 | } 107 | } else { 108 | match console::run_tests_console(&opts, tests) { 109 | Ok(true) => {} 110 | Ok(false) => process::exit(ERROR_EXIT_CODE), 111 | Err(e) => { 112 | eprintln!("error: io error when listing tests: {:?}", e); 113 | process::exit(ERROR_EXIT_CODE); 114 | } 115 | } 116 | } 117 | } 118 | 119 | /// A variant optimized for invocation with a static test vector. 120 | /// This will panic (intentionally) when fed any dynamic tests. 121 | /// 122 | /// This is the entry point for the main function generated by `rustc --test` 123 | /// when panic=unwind. 
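///
/// A hedged sketch of a hand-rolled entry point (`TESTS` is a placeholder for
/// the `&[&TestDescAndFn]` slice that `rustc --test` would generate):
///
/// ```ignore
/// fn main() {
///     tester::test_main_static(&TESTS);
/// }
/// ```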
124 | pub fn test_main_static(tests: &[&TestDescAndFn]) {
125 |     let args = env::args().collect::<Vec<String>>();
126 |     let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
127 |     test_main(&args, owned_tests, None)
128 | }
129 | 
130 | /// A variant optimized for invocation with a static test vector.
131 | /// This will panic (intentionally) when fed any dynamic tests.
132 | ///
133 | /// Runs tests in panic=abort mode, which involves spawning subprocesses for
134 | /// tests.
135 | ///
136 | /// This is the entry point for the main function generated by `rustc --test`
137 | /// when panic=abort.
138 | pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
139 |     // If we're being run in SpawnedSecondary mode, run the test here. run_test
140 |     // will then exit the process.
141 |     if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
142 |         env::remove_var(SECONDARY_TEST_INVOKER_VAR);
143 |         let test = tests
144 |             .iter()
145 |             .filter(|test| test.desc.name.as_slice() == name)
146 |             .map(make_owned_test)
147 |             .next()
148 |             .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
149 |         let TestDescAndFn { desc, testfn } = test;
150 |         let testfn = match testfn {
151 |             StaticTestFn(f) => f,
152 |             _ => panic!("only static tests are supported"),
153 |         };
154 |         run_test_in_spawned_subprocess(desc, Box::new(testfn));
155 |     }
156 | 
157 |     let args = env::args().collect::<Vec<String>>();
158 |     let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
159 |     test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
160 | }
161 | 
162 | /// Clones static values for putting into a dynamic vector, which `test_main()`
163 | /// needs in order to hand out ownership of tests to parallel test runners.
164 | ///
165 | /// This will panic when fed any dynamic tests, because they cannot be cloned.
166 | fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
167 |     match test.testfn {
168 |         StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
169 |         StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
170 |         _ => panic!("non-static tests passed to test::test_main_static"),
171 |     }
172 | }
173 | 
174 | /// Invoked when unit tests terminate. Should panic if the unit
175 | /// test is considered a failure. By default, invokes `report()`
176 | /// and checks for a `0` result.
177 | pub trait Termination {
178 |     fn report(self) -> i32;
179 | }
180 | 
181 | impl Termination for () {
182 |     fn report(self) -> i32 {
183 |         0
184 |     }
185 | }
186 | 
187 | /// Invoked when unit tests terminate. Should panic if the unit
188 | /// test is considered a failure. By default, invokes `report()`
189 | /// and checks for a `0` result.
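///
/// A minimal illustrative example: `()` implements `Termination` with a `0`
/// report, so the assertion below passes.
///
/// ```
/// tester::assert_test_result(());
/// ```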
190 | pub fn assert_test_result(result: T) { 191 | let code = result.report(); 192 | assert_eq!( 193 | code, 0, 194 | "the test returned a termination value with a non-zero status code ({}) \ 195 | which indicates a failure", 196 | code 197 | ); 198 | } 199 | 200 | pub fn run_tests( 201 | opts: &TestOpts, 202 | tests: Vec, 203 | mut notify_about_test_event: F, 204 | ) -> io::Result<()> 205 | where 206 | F: FnMut(TestEvent) -> io::Result<()>, 207 | { 208 | use std::collections::{self, HashMap}; 209 | use std::hash::BuildHasherDefault; 210 | use std::sync::mpsc::RecvTimeoutError; 211 | // Use a deterministic hasher 212 | type TestMap = 213 | HashMap>; 214 | 215 | let tests_len = tests.len(); 216 | 217 | let mut filtered_tests = filter_tests(opts, tests); 218 | if !opts.bench_benchmarks { 219 | filtered_tests = convert_benchmarks_to_tests(filtered_tests); 220 | } 221 | 222 | let filtered_tests = { 223 | let mut filtered_tests = filtered_tests; 224 | for test in filtered_tests.iter_mut() { 225 | test.desc.name = test.desc.name.with_padding(test.testfn.padding()); 226 | } 227 | 228 | filtered_tests 229 | }; 230 | 231 | let filtered_out = tests_len - filtered_tests.len(); 232 | let event = TestEvent::TeFilteredOut(filtered_out); 233 | notify_about_test_event(event)?; 234 | 235 | let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect(); 236 | 237 | let event = TestEvent::TeFiltered(filtered_descs); 238 | notify_about_test_event(event)?; 239 | 240 | let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests 241 | .into_iter() 242 | .partition(|e| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_))); 243 | 244 | let concurrency = opts.test_threads.unwrap_or_else(get_concurrency); 245 | 246 | let mut remaining = filtered_tests; 247 | remaining.reverse(); 248 | let mut pending = 0; 249 | 250 | let (tx, rx) = channel::(); 251 | let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process { 252 | RunStrategy::SpawnPrimary 253 | } else { 254 | RunStrategy::InProcess 255 | }; 256 | 257 | let mut running_tests: TestMap = HashMap::default(); 258 | 259 | fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec { 260 | let now = Instant::now(); 261 | let timed_out = running_tests 262 | .iter() 263 | .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) } else { None }) 264 | .collect(); 265 | for test in &timed_out { 266 | running_tests.remove(test); 267 | } 268 | timed_out 269 | } 270 | 271 | fn calc_timeout(running_tests: &TestMap) -> Option { 272 | running_tests.values().min().map(|next_timeout| { 273 | let now = Instant::now(); 274 | if *next_timeout >= now { *next_timeout - now } else { Duration::new(0, 0) } 275 | }) 276 | } 277 | 278 | if concurrency == 1 { 279 | while !remaining.is_empty() { 280 | let test = remaining.pop().unwrap(); 281 | let event = TestEvent::TeWait(test.desc.clone()); 282 | notify_about_test_event(event)?; 283 | run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No); 284 | let completed_test = rx.recv().unwrap(); 285 | 286 | let event = TestEvent::TeResult(completed_test); 287 | notify_about_test_event(event)?; 288 | } 289 | } else { 290 | while pending > 0 || !remaining.is_empty() { 291 | while pending < concurrency && !remaining.is_empty() { 292 | let test = remaining.pop().unwrap(); 293 | let timeout = time::get_default_test_timeout(); 294 | running_tests.insert(test.desc.clone(), timeout); 295 | 296 | let event = TestEvent::TeWait(test.desc.clone()); 297 | notify_about_test_event(event)?; 
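                // Editor's note (illustrative): with, say, concurrency == 4 and ten
                // queued tests, this inner loop tops the pool back up to four
                // in-flight tests after each completion handled further below.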
298 |                 run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes);
299 |                 pending += 1;
300 |             }
301 | 
302 |             let mut res;
303 |             loop {
304 |                 if let Some(timeout) = calc_timeout(&running_tests) {
305 |                     res = rx.recv_timeout(timeout);
306 |                     for test in get_timed_out_tests(&mut running_tests) {
307 |                         let event = TestEvent::TeTimeout(test);
308 |                         notify_about_test_event(event)?;
309 |                     }
310 | 
311 |                     match res {
312 |                         Err(RecvTimeoutError::Timeout) => {
313 |                             // Result is not yet ready, continue waiting.
314 |                         }
315 |                         _ => {
316 |                             // We've got a result, stop the loop.
317 |                             break;
318 |                         }
319 |                     }
320 |                 } else {
321 |                     res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
322 |                     break;
323 |                 }
324 |             }
325 | 
326 |             let completed_test = res.unwrap();
327 |             running_tests.remove(&completed_test.desc);
328 | 
329 |             let event = TestEvent::TeResult(completed_test);
330 |             notify_about_test_event(event)?;
331 |             pending -= 1;
332 |         }
333 |     }
334 | 
335 |     if opts.bench_benchmarks {
336 |         // All benchmarks run at the end, in serial.
337 |         for b in filtered_benchs {
338 |             let event = TestEvent::TeWait(b.desc.clone());
339 |             notify_about_test_event(event)?;
340 |             run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
341 |             let completed_test = rx.recv().unwrap();
342 | 
343 |             let event = TestEvent::TeResult(completed_test);
344 |             notify_about_test_event(event)?;
345 |         }
346 |     }
347 |     Ok(())
348 | }
349 | 
350 | pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
351 |     let mut filtered = tests;
352 |     let matches_filter = |test: &TestDescAndFn, filter: &str| {
353 |         let test_name = test.desc.name.as_slice();
354 | 
355 |         match opts.filter_exact {
356 |             true => test_name == filter,
357 |             false => test_name.contains(filter),
358 |         }
359 |     };
360 | 
361 |     // Remove tests that don't match the test filter
362 |     if !opts.filters.is_empty() {
363 |         filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
364 |     }
365 | 
366 |     // Skip tests that match any of the skip filters
367 |     filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
368 | 
369 |     // Exclude #[should_panic] tests
370 |     if opts.exclude_should_panic {
371 |         filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
372 |     }
373 | 
374 |     // Maybe unignore tests
375 |     match opts.run_ignored {
376 |         RunIgnored::Yes => {
377 |             filtered.iter_mut().for_each(|test| test.desc.ignore = false);
378 |         }
379 |         RunIgnored::Only => {
380 |             filtered.retain(|test| test.desc.ignore);
381 |             filtered.iter_mut().for_each(|test| test.desc.ignore = false);
382 |         }
383 |         RunIgnored::No => {}
384 |     }
385 | 
386 |     // Sort the tests alphabetically
387 |     filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
388 | 
389 |     filtered
390 | }
391 | 
392 | pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
393 |     // Convert benchmarks to tests, if we're not benchmarking them
394 |     tests
395 |         .into_iter()
396 |         .map(|x| {
397 |             let testfn = match x.testfn {
398 |                 DynBenchFn(bench) => DynTestFn(Box::new(move || {
399 |                     bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
400 |                 })),
401 |                 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
402 |                     bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
403 |                 })),
404 |                 f => f,
405 |             };
406 |             TestDescAndFn { desc: x.desc, testfn }
407 |         })
408 |         .collect()
409 | }
410 | 
411 | pub fn run_test(
412 |     opts: &TestOpts,
413 |     force_ignore: bool,
414 |     test: TestDescAndFn,
415 |     strategy:
RunStrategy, 416 | monitor_ch: Sender, 417 | concurrency: Concurrent, 418 | ) { 419 | let TestDescAndFn { desc, testfn } = test; 420 | 421 | // Emscripten can catch panics but other wasm targets cannot 422 | let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No 423 | && cfg!(target_arch = "wasm32") 424 | && !cfg!(target_os = "emscripten"); 425 | 426 | if force_ignore || desc.ignore || ignore_because_no_process_support { 427 | let message = CompletedTest::new(desc, TrIgnored, None, Vec::new()); 428 | monitor_ch.send(message).unwrap(); 429 | return; 430 | } 431 | 432 | struct TestRunOpts { 433 | pub strategy: RunStrategy, 434 | pub nocapture: bool, 435 | pub concurrency: Concurrent, 436 | pub time: Option, 437 | } 438 | 439 | fn run_test_inner( 440 | desc: TestDesc, 441 | monitor_ch: Sender, 442 | testfn: Box, 443 | opts: TestRunOpts, 444 | ) { 445 | let concurrency = opts.concurrency; 446 | let name = desc.name.clone(); 447 | 448 | let runtest = move || match opts.strategy { 449 | RunStrategy::InProcess => run_test_in_process( 450 | desc, 451 | opts.nocapture, 452 | opts.time.is_some(), 453 | testfn, 454 | monitor_ch, 455 | opts.time, 456 | ), 457 | RunStrategy::SpawnPrimary => spawn_test_subprocess( 458 | desc, 459 | opts.nocapture, 460 | opts.time.is_some(), 461 | monitor_ch, 462 | opts.time, 463 | ), 464 | }; 465 | 466 | // If the platform is single-threaded we're just going to run 467 | // the test synchronously, regardless of the concurrency 468 | // level. 469 | let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32"); 470 | if concurrency == Concurrent::Yes && supports_threads { 471 | let cfg = thread::Builder::new().name(name.as_slice().to_owned()); 472 | cfg.spawn(runtest).unwrap(); 473 | } else { 474 | runtest(); 475 | } 476 | } 477 | 478 | let test_run_opts = 479 | TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options }; 480 | 481 | match testfn { 482 | DynBenchFn(bencher) => { 483 | // Benchmarks aren't expected to panic, so we run them all in-process. 484 | crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| { 485 | bencher.run(harness) 486 | }); 487 | } 488 | StaticBenchFn(benchfn) => { 489 | // Benchmarks aren't expected to panic, so we run them all in-process. 490 | crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn); 491 | } 492 | DynTestFn(f) => { 493 | match strategy { 494 | RunStrategy::InProcess => (), 495 | _ => panic!("Cannot run dynamic test fn out-of-process"), 496 | }; 497 | run_test_inner( 498 | desc, 499 | monitor_ch, 500 | Box::new(move || __rust_begin_short_backtrace(f)), 501 | test_run_opts, 502 | ); 503 | } 504 | StaticTestFn(f) => run_test_inner( 505 | desc, 506 | monitor_ch, 507 | Box::new(move || __rust_begin_short_backtrace(f)), 508 | test_run_opts, 509 | ), 510 | } 511 | } 512 | 513 | /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. 
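///
/// Illustrative stand-alone analogue of the pattern (hypothetical names, not
/// part of this crate):
///
/// ```ignore
/// #[inline(never)]
/// fn backtrace_anchor<F: FnOnce()>(f: F) {
///     f();
///     std::hint::black_box(()); // keep this frame out of tail position
/// }
///
/// backtrace_anchor(|| println!("test body runs here"));
/// ```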
514 | #[inline(never)] 515 | fn __rust_begin_short_backtrace(f: F) { 516 | f(); 517 | 518 | // prevent this frame from being tail-call optimised away 519 | black_box(()); 520 | } 521 | 522 | fn run_test_in_process( 523 | desc: TestDesc, 524 | nocapture: bool, 525 | report_time: bool, 526 | testfn: Box, 527 | monitor_ch: Sender, 528 | time_opts: Option, 529 | ) { 530 | // Buffer for capturing standard I/O 531 | let data = Arc::new(Mutex::new(Vec::new())); 532 | 533 | if !nocapture { 534 | #[cfg(feature = "capture")] 535 | io::set_output_capture(Some(data.clone())); 536 | } 537 | 538 | let start = if report_time { Some(Instant::now()) } else { None }; 539 | let result = catch_unwind(AssertUnwindSafe(testfn)); 540 | let exec_time = start.map(|start| { 541 | let duration = start.elapsed(); 542 | TestExecTime(duration) 543 | }); 544 | 545 | #[cfg(feature = "capture")] 546 | io::set_output_capture(None); 547 | 548 | let test_result = match result { 549 | Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time), 550 | Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time), 551 | }; 552 | let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec(); 553 | let message = CompletedTest::new(desc, test_result, exec_time, stdout); 554 | monitor_ch.send(message).unwrap(); 555 | } 556 | 557 | fn spawn_test_subprocess( 558 | desc: TestDesc, 559 | nocapture: bool, 560 | report_time: bool, 561 | monitor_ch: Sender, 562 | time_opts: Option, 563 | ) { 564 | let (result, test_output, exec_time) = (|| { 565 | let args = env::args().collect::>(); 566 | let current_exe = &args[0]; 567 | 568 | let mut command = Command::new(current_exe); 569 | command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice()); 570 | if nocapture { 571 | command.stdout(process::Stdio::inherit()); 572 | command.stderr(process::Stdio::inherit()); 573 | } 574 | 575 | let start = if report_time { Some(Instant::now()) } else { None }; 576 | let output = match command.output() { 577 | Ok(out) => out, 578 | Err(e) => { 579 | let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e); 580 | return (TrFailed, err.into_bytes(), None); 581 | } 582 | }; 583 | let exec_time = start.map(|start| { 584 | let duration = start.elapsed(); 585 | TestExecTime(duration) 586 | }); 587 | 588 | let std::process::Output { stdout, stderr, status } = output; 589 | let mut test_output = stdout; 590 | formatters::write_stderr_delimiter(&mut test_output, &desc.name); 591 | test_output.extend_from_slice(&stderr); 592 | 593 | let result = match (|| -> Result { 594 | let exit_code = get_exit_code(status)?; 595 | Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time)) 596 | })() { 597 | Ok(r) => r, 598 | Err(e) => { 599 | write!(&mut test_output, "Unexpected error: {}", e).unwrap(); 600 | TrFailed 601 | } 602 | }; 603 | 604 | (result, test_output, exec_time) 605 | })(); 606 | 607 | let message = CompletedTest::new(desc, result, exec_time, test_output); 608 | monitor_ch.send(message).unwrap(); 609 | } 610 | 611 | fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box) -> ! { 612 | let builtin_panic_hook = panic::take_hook(); 613 | let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| { 614 | let test_result = match panic_info { 615 | Some(info) => calc_result(&desc, Err(info.payload()), &None, &None), 616 | None => calc_result(&desc, Ok(()), &None, &None), 617 | }; 618 | 619 | // We don't support serializing TrFailedMsg, so just 620 | // print the message out to stderr. 
621 |         if let TrFailedMsg(msg) = &test_result {
622 |             eprintln!("{}", msg);
623 |         }
624 | 
625 |         if let Some(info) = panic_info {
626 |             builtin_panic_hook(info);
627 |         }
628 | 
629 |         if let TrOk = test_result {
630 |             process::exit(test_result::TR_OK);
631 |         } else {
632 |             process::exit(test_result::TR_FAILED);
633 |         }
634 |     });
635 |     let record_result2 = record_result.clone();
636 |     panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
637 |     testfn();
638 |     record_result(None);
639 |     unreachable!("panic=abort callback should have exited the process")
640 | }
641 | 
--------------------------------------------------------------------------------
/src/options.rs:
--------------------------------------------------------------------------------
 1 | //! Enums denoting options for test execution.
 2 | 
 3 | /// Whether to execute tests concurrently or not
 4 | #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 5 | pub enum Concurrent {
 6 |     Yes,
 7 |     No,
 8 | }
 9 | 
10 | /// Number of times to run a benchmarked function
11 | #[derive(Clone, PartialEq, Eq)]
12 | pub enum BenchMode {
13 |     Auto,
14 |     Single,
15 | }
16 | 
17 | /// Whether a test is expected to panic or not
18 | #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
19 | pub enum ShouldPanic {
20 |     No,
21 |     Yes,
22 |     YesWithMessage(&'static str),
23 | }
24 | 
25 | /// Whether console output should be colored or not
26 | #[derive(Copy, Clone, Debug)]
27 | pub enum ColorConfig {
28 |     AutoColor,
29 |     AlwaysColor,
30 |     NeverColor,
31 | }
32 | 
33 | /// Format of the test results output
34 | #[derive(Copy, Clone, Debug, PartialEq, Eq)]
35 | pub enum OutputFormat {
36 |     /// Verbose output
37 |     Pretty,
38 |     /// Quiet output
39 |     Terse,
40 |     /// JSON output
41 |     Json,
42 | }
43 | 
44 | /// Whether ignored tests should be run or not
45 | #[derive(Copy, Clone, Debug, PartialEq, Eq)]
46 | pub enum RunIgnored {
47 |     Yes,
48 |     No,
49 |     /// Run only ignored tests
50 |     Only,
51 | }
52 | 
53 | #[derive(Clone, Copy)]
54 | pub enum RunStrategy {
55 |     /// Runs the test in the current process, and sends the result back over the
56 |     /// supplied channel.
57 |     InProcess,
58 | 
59 |     /// Spawns a subprocess to run the test, and sends the result back over the
60 |     /// supplied channel. Requires `argv[0]` to exist and point to the binary
61 |     /// that's currently running.
62 |     SpawnPrimary,
63 | }
64 | 
65 | /// Options for the test run defined by the caller (instead of CLI arguments).
66 | /// If other options are needed later, add them to this struct.
67 | #[derive(Copy, Clone, Debug)]
68 | pub struct Options {
69 |     pub display_output: bool,
70 |     pub panic_abort: bool,
71 | }
72 | 
73 | impl Options {
74 |     pub fn new() -> Options {
75 |         Options { display_output: false, panic_abort: false }
76 |     }
77 | 
78 |     pub fn display_output(mut self, display_output: bool) -> Options {
79 |         self.display_output = display_output;
80 |         self
81 |     }
82 | 
83 |     pub fn panic_abort(mut self, panic_abort: bool) -> Options {
84 |         self.panic_abort = panic_abort;
85 |         self
86 |     }
87 | }
--------------------------------------------------------------------------------
/src/stats.rs:
--------------------------------------------------------------------------------
 1 | #![allow(missing_docs)]
 2 | #![allow(deprecated)] // Float
 3 | 
 4 | use std::cmp::Ordering::{self, Equal, Greater, Less};
 5 | use std::mem;
 6 | 
 7 | #[cfg(test)]
 8 | mod tests;
 9 | 
10 | fn local_cmp(x: f64, y: f64) -> Ordering {
11 |     // arbitrarily decide that NaNs are larger than everything.
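    // Illustrative consequences: local_cmp(1.0, f64::NAN) == Less and
    // local_cmp(f64::NAN, 1.0) == Greater, so local_sort moves any NaNs
    // to the tail of the slice.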
12 | if y.is_nan() { 13 | Less 14 | } else if x.is_nan() { 15 | Greater 16 | } else if x < y { 17 | Less 18 | } else if x == y { 19 | Equal 20 | } else { 21 | Greater 22 | } 23 | } 24 | 25 | fn local_sort(v: &mut [f64]) { 26 | v.sort_by(|x: &f64, y: &f64| local_cmp(*x, *y)); 27 | } 28 | 29 | /// Trait that provides simple descriptive statistics on a univariate set of numeric samples. 30 | pub trait Stats { 31 | /// Sum of the samples. 32 | /// 33 | /// Note: this method sacrifices performance at the altar of accuracy 34 | /// Depends on IEEE-754 arithmetic guarantees. See proof of correctness at: 35 | /// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric 36 | /// Predicates"][paper] 37 | /// 38 | /// [paper]: http://www.cs.cmu.edu/~quake-papers/robust-arithmetic.ps 39 | fn sum(&self) -> f64; 40 | 41 | /// Minimum value of the samples. 42 | fn min(&self) -> f64; 43 | 44 | /// Maximum value of the samples. 45 | fn max(&self) -> f64; 46 | 47 | /// Arithmetic mean (average) of the samples: sum divided by sample-count. 48 | /// 49 | /// See: 50 | fn mean(&self) -> f64; 51 | 52 | /// Median of the samples: value separating the lower half of the samples from the higher half. 53 | /// Equal to `self.percentile(50.0)`. 54 | /// 55 | /// See: 56 | fn median(&self) -> f64; 57 | 58 | /// Variance of the samples: bias-corrected mean of the squares of the differences of each 59 | /// sample from the sample mean. Note that this calculates the _sample variance_ rather than the 60 | /// population variance, which is assumed to be unknown. It therefore corrects the `(n-1)/n` 61 | /// bias that would appear if we calculated a population variance, by dividing by `(n-1)` rather 62 | /// than `n`. 63 | /// 64 | /// See: 65 | fn var(&self) -> f64; 66 | 67 | /// Standard deviation: the square root of the sample variance. 68 | /// 69 | /// Note: this is not a robust statistic for non-normal distributions. Prefer the 70 | /// `median_abs_dev` for unknown distributions. 71 | /// 72 | /// See: 73 | fn std_dev(&self) -> f64; 74 | 75 | /// Standard deviation as a percent of the mean value. See `std_dev` and `mean`. 76 | /// 77 | /// Note: this is not a robust statistic for non-normal distributions. Prefer the 78 | /// `median_abs_dev_pct` for unknown distributions. 79 | fn std_dev_pct(&self) -> f64; 80 | 81 | /// Scaled median of the absolute deviations of each sample from the sample median. This is a 82 | /// robust (distribution-agnostic) estimator of sample variability. Use this in preference to 83 | /// `std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled 84 | /// by the constant `1.4826` to allow its use as a consistent estimator for the standard 85 | /// deviation. 86 | /// 87 | /// See: 88 | fn median_abs_dev(&self) -> f64; 89 | 90 | /// Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`. 91 | fn median_abs_dev_pct(&self) -> f64; 92 | 93 | /// Percentile: the value below which `pct` percent of the values in `self` fall. For example, 94 | /// percentile(95.0) will return the value `v` such that 95% of the samples `s` in `self` 95 | /// satisfy `s <= v`. 96 | /// 97 | /// Calculated by linear interpolation between closest ranks. 98 | /// 99 | /// See: 100 | fn percentile(&self, pct: f64) -> f64; 101 | 102 | /// Quartiles of the sample: three values that divide the sample into four equal groups, each 103 | /// with 1/4 of the data. The middle value is the median. See `median` and `percentile`. 
This 104 | /// function may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but 105 | /// is otherwise equivalent. 106 | /// 107 | /// See also: 108 | fn quartiles(&self) -> (f64, f64, f64); 109 | 110 | /// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th 111 | /// percentile (3rd quartile). See `quartiles`. 112 | /// 113 | /// See also: 114 | fn iqr(&self) -> f64; 115 | } 116 | 117 | /// Extracted collection of all the summary statistics of a sample set. 118 | #[derive(Debug, Clone, PartialEq, Copy)] 119 | #[allow(missing_docs)] 120 | pub struct Summary { 121 | pub sum: f64, 122 | pub min: f64, 123 | pub max: f64, 124 | pub mean: f64, 125 | pub median: f64, 126 | pub var: f64, 127 | pub std_dev: f64, 128 | pub std_dev_pct: f64, 129 | pub median_abs_dev: f64, 130 | pub median_abs_dev_pct: f64, 131 | pub quartiles: (f64, f64, f64), 132 | pub iqr: f64, 133 | } 134 | 135 | impl Summary { 136 | /// Construct a new summary of a sample set. 137 | pub fn new(samples: &[f64]) -> Summary { 138 | Summary { 139 | sum: samples.sum(), 140 | min: samples.min(), 141 | max: samples.max(), 142 | mean: samples.mean(), 143 | median: samples.median(), 144 | var: samples.var(), 145 | std_dev: samples.std_dev(), 146 | std_dev_pct: samples.std_dev_pct(), 147 | median_abs_dev: samples.median_abs_dev(), 148 | median_abs_dev_pct: samples.median_abs_dev_pct(), 149 | quartiles: samples.quartiles(), 150 | iqr: samples.iqr(), 151 | } 152 | } 153 | } 154 | 155 | impl Stats for [f64] { 156 | // FIXME #11059 handle NaN, inf and overflow 157 | fn sum(&self) -> f64 { 158 | let mut partials = vec![]; 159 | 160 | for &x in self { 161 | let mut x = x; 162 | let mut j = 0; 163 | // This inner loop applies `hi`/`lo` summation to each 164 | // partial so that the list of partial sums remains exact. 165 | for i in 0..partials.len() { 166 | let mut y: f64 = partials[i]; 167 | if x.abs() < y.abs() { 168 | mem::swap(&mut x, &mut y); 169 | } 170 | // Rounded `x+y` is stored in `hi` with round-off stored in 171 | // `lo`. Together `hi+lo` are exactly equal to `x+y`. 172 | let hi = x + y; 173 | let lo = y - (hi - x); 174 | if lo != 0.0 { 175 | partials[j] = lo; 176 | j += 1; 177 | } 178 | x = hi; 179 | } 180 | if j >= partials.len() { 181 | partials.push(x); 182 | } else { 183 | partials[j] = x; 184 | partials.truncate(j + 1); 185 | } 186 | } 187 | let zero: f64 = 0.0; 188 | partials.iter().fold(zero, |p, q| p + *q) 189 | } 190 | 191 | fn min(&self) -> f64 { 192 | assert!(!self.is_empty()); 193 | self.iter().fold(self[0], |p, q| p.min(*q)) 194 | } 195 | 196 | fn max(&self) -> f64 { 197 | assert!(!self.is_empty()); 198 | self.iter().fold(self[0], |p, q| p.max(*q)) 199 | } 200 | 201 | fn mean(&self) -> f64 { 202 | assert!(!self.is_empty()); 203 | self.sum() / (self.len() as f64) 204 | } 205 | 206 | fn median(&self) -> f64 { 207 | self.percentile(50_f64) 208 | } 209 | 210 | fn var(&self) -> f64 { 211 | if self.len() < 2 { 212 | 0.0 213 | } else { 214 | let mean = self.mean(); 215 | let mut v: f64 = 0.0; 216 | for s in self { 217 | let x = *s - mean; 218 | v += x * x; 219 | } 220 | // N.B., this is _supposed to be_ len-1, not len. If you 221 | // change it back to len, you will be calculating a 222 | // population variance, not a sample variance. 
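            // Worked example: [1.0, 2.0, 3.0] has mean 2.0 and summed squared
            // deviations 2.0, so the sample variance is 2.0 / (3 - 1) = 1.0,
            // whereas dividing by len would give the population variance 2/3.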
223 | let denom = (self.len() - 1) as f64; 224 | v / denom 225 | } 226 | } 227 | 228 | fn std_dev(&self) -> f64 { 229 | self.var().sqrt() 230 | } 231 | 232 | fn std_dev_pct(&self) -> f64 { 233 | let hundred = 100_f64; 234 | (self.std_dev() / self.mean()) * hundred 235 | } 236 | 237 | fn median_abs_dev(&self) -> f64 { 238 | let med = self.median(); 239 | let abs_devs: Vec = self.iter().map(|&v| (med - v).abs()).collect(); 240 | // This constant is derived by smarter statistics brains than me, but it is 241 | // consistent with how R and other packages treat the MAD. 242 | let number = 1.4826; 243 | abs_devs.median() * number 244 | } 245 | 246 | fn median_abs_dev_pct(&self) -> f64 { 247 | let hundred = 100_f64; 248 | (self.median_abs_dev() / self.median()) * hundred 249 | } 250 | 251 | fn percentile(&self, pct: f64) -> f64 { 252 | let mut tmp = self.to_vec(); 253 | local_sort(&mut tmp); 254 | percentile_of_sorted(&tmp, pct) 255 | } 256 | 257 | fn quartiles(&self) -> (f64, f64, f64) { 258 | let mut tmp = self.to_vec(); 259 | local_sort(&mut tmp); 260 | let first = 25_f64; 261 | let a = percentile_of_sorted(&tmp, first); 262 | let second = 50_f64; 263 | let b = percentile_of_sorted(&tmp, second); 264 | let third = 75_f64; 265 | let c = percentile_of_sorted(&tmp, third); 266 | (a, b, c) 267 | } 268 | 269 | fn iqr(&self) -> f64 { 270 | let (a, _, c) = self.quartiles(); 271 | c - a 272 | } 273 | } 274 | 275 | // Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using 276 | // linear interpolation. If samples are not sorted, return nonsensical value. 277 | fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 { 278 | assert!(!sorted_samples.is_empty()); 279 | if sorted_samples.len() == 1 { 280 | return sorted_samples[0]; 281 | } 282 | let zero: f64 = 0.0; 283 | assert!(zero <= pct); 284 | let hundred = 100_f64; 285 | assert!(pct <= hundred); 286 | if pct == hundred { 287 | return sorted_samples[sorted_samples.len() - 1]; 288 | } 289 | let length = (sorted_samples.len() - 1) as f64; 290 | let rank = (pct / hundred) * length; 291 | let lrank = rank.floor(); 292 | let d = rank - lrank; 293 | let n = lrank as usize; 294 | let lo = sorted_samples[n]; 295 | let hi = sorted_samples[n + 1]; 296 | lo + (hi - lo) * d 297 | } 298 | 299 | /// Winsorize a set of samples, replacing values above the `100-pct` percentile 300 | /// and below the `pct` percentile with those percentiles themselves. This is a 301 | /// way of minimizing the effect of outliers, at the cost of biasing the sample. 302 | /// It differs from trimming in that it does not change the number of samples, 303 | /// just changes the values of those that are outliers. 304 | /// 305 | /// See: 306 | pub fn winsorize(samples: &mut [f64], pct: f64) { 307 | let mut tmp = samples.to_vec(); 308 | local_sort(&mut tmp); 309 | let lo = percentile_of_sorted(&tmp, pct); 310 | let hundred = 100_f64; 311 | let hi = percentile_of_sorted(&tmp, hundred - pct); 312 | for samp in samples { 313 | if *samp > hi { 314 | *samp = hi 315 | } else if *samp < lo { 316 | *samp = lo 317 | } 318 | } 319 | } 320 | -------------------------------------------------------------------------------- /src/stats/tests.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | use std::io; 4 | use std::io::prelude::*; 5 | 6 | // Test vectors generated from R, using the script src/etc/stat-test-vectors.r. 7 | 8 | macro_rules! 
assert_approx_eq { 9 | ($a: expr, $b: expr) => {{ 10 | let (a, b) = (&$a, &$b); 11 | assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b); 12 | }}; 13 | } 14 | 15 | fn check(samples: &[f64], summ: &Summary) { 16 | let summ2 = Summary::new(samples); 17 | 18 | let mut w = io::sink(); 19 | let w = &mut w; 20 | (write!(w, "\n")).unwrap(); 21 | 22 | assert_eq!(summ.sum, summ2.sum); 23 | assert_eq!(summ.min, summ2.min); 24 | assert_eq!(summ.max, summ2.max); 25 | assert_eq!(summ.mean, summ2.mean); 26 | assert_eq!(summ.median, summ2.median); 27 | 28 | // We needed a few more digits to get exact equality on these 29 | // but they're within float epsilon, which is 1.0e-6. 30 | assert_approx_eq!(summ.var, summ2.var); 31 | assert_approx_eq!(summ.std_dev, summ2.std_dev); 32 | assert_approx_eq!(summ.std_dev_pct, summ2.std_dev_pct); 33 | assert_approx_eq!(summ.median_abs_dev, summ2.median_abs_dev); 34 | assert_approx_eq!(summ.median_abs_dev_pct, summ2.median_abs_dev_pct); 35 | 36 | assert_eq!(summ.quartiles, summ2.quartiles); 37 | assert_eq!(summ.iqr, summ2.iqr); 38 | } 39 | 40 | #[test] 41 | fn test_min_max_nan() { 42 | let xs = &[1.0, 2.0, f64::NAN, 3.0, 4.0]; 43 | let summary = Summary::new(xs); 44 | assert_eq!(summary.min, 1.0); 45 | assert_eq!(summary.max, 4.0); 46 | } 47 | 48 | #[test] 49 | fn test_norm2() { 50 | let val = &[958.0000000000, 924.0000000000]; 51 | let summ = &Summary { 52 | sum: 1882.0000000000, 53 | min: 924.0000000000, 54 | max: 958.0000000000, 55 | mean: 941.0000000000, 56 | median: 941.0000000000, 57 | var: 578.0000000000, 58 | std_dev: 24.0416305603, 59 | std_dev_pct: 2.5549022912, 60 | median_abs_dev: 25.2042000000, 61 | median_abs_dev_pct: 2.6784484591, 62 | quartiles: (932.5000000000, 941.0000000000, 949.5000000000), 63 | iqr: 17.0000000000, 64 | }; 65 | check(val, summ); 66 | } 67 | #[test] 68 | fn test_norm10narrow() { 69 | let val = &[ 70 | 966.0000000000, 71 | 985.0000000000, 72 | 1110.0000000000, 73 | 848.0000000000, 74 | 821.0000000000, 75 | 975.0000000000, 76 | 962.0000000000, 77 | 1157.0000000000, 78 | 1217.0000000000, 79 | 955.0000000000, 80 | ]; 81 | let summ = &Summary { 82 | sum: 9996.0000000000, 83 | min: 821.0000000000, 84 | max: 1217.0000000000, 85 | mean: 999.6000000000, 86 | median: 970.5000000000, 87 | var: 16050.7111111111, 88 | std_dev: 126.6914010938, 89 | std_dev_pct: 12.6742097933, 90 | median_abs_dev: 102.2994000000, 91 | median_abs_dev_pct: 10.5408964451, 92 | quartiles: (956.7500000000, 970.5000000000, 1078.7500000000), 93 | iqr: 122.0000000000, 94 | }; 95 | check(val, summ); 96 | } 97 | #[test] 98 | fn test_norm10medium() { 99 | let val = &[ 100 | 954.0000000000, 101 | 1064.0000000000, 102 | 855.0000000000, 103 | 1000.0000000000, 104 | 743.0000000000, 105 | 1084.0000000000, 106 | 704.0000000000, 107 | 1023.0000000000, 108 | 357.0000000000, 109 | 869.0000000000, 110 | ]; 111 | let summ = &Summary { 112 | sum: 8653.0000000000, 113 | min: 357.0000000000, 114 | max: 1084.0000000000, 115 | mean: 865.3000000000, 116 | median: 911.5000000000, 117 | var: 48628.4555555556, 118 | std_dev: 220.5186059170, 119 | std_dev_pct: 25.4846418487, 120 | median_abs_dev: 195.7032000000, 121 | median_abs_dev_pct: 21.4704552935, 122 | quartiles: (771.0000000000, 911.5000000000, 1017.2500000000), 123 | iqr: 246.2500000000, 124 | }; 125 | check(val, summ); 126 | } 127 | #[test] 128 | fn test_norm10wide() { 129 | let val = &[ 130 | 505.0000000000, 131 | 497.0000000000, 132 | 1591.0000000000, 133 | 887.0000000000, 134 | 1026.0000000000, 135 | 
136.0000000000, 136 | 1580.0000000000, 137 | 940.0000000000, 138 | 754.0000000000, 139 | 1433.0000000000, 140 | ]; 141 | let summ = &Summary { 142 | sum: 9349.0000000000, 143 | min: 136.0000000000, 144 | max: 1591.0000000000, 145 | mean: 934.9000000000, 146 | median: 913.5000000000, 147 | var: 239208.9888888889, 148 | std_dev: 489.0899599142, 149 | std_dev_pct: 52.3146817750, 150 | median_abs_dev: 611.5725000000, 151 | median_abs_dev_pct: 66.9482758621, 152 | quartiles: (567.2500000000, 913.5000000000, 1331.2500000000), 153 | iqr: 764.0000000000, 154 | }; 155 | check(val, summ); 156 | } 157 | #[test] 158 | fn test_norm25verynarrow() { 159 | let val = &[ 160 | 991.0000000000, 161 | 1018.0000000000, 162 | 998.0000000000, 163 | 1013.0000000000, 164 | 974.0000000000, 165 | 1007.0000000000, 166 | 1014.0000000000, 167 | 999.0000000000, 168 | 1011.0000000000, 169 | 978.0000000000, 170 | 985.0000000000, 171 | 999.0000000000, 172 | 983.0000000000, 173 | 982.0000000000, 174 | 1015.0000000000, 175 | 1002.0000000000, 176 | 977.0000000000, 177 | 948.0000000000, 178 | 1040.0000000000, 179 | 974.0000000000, 180 | 996.0000000000, 181 | 989.0000000000, 182 | 1015.0000000000, 183 | 994.0000000000, 184 | 1024.0000000000, 185 | ]; 186 | let summ = &Summary { 187 | sum: 24926.0000000000, 188 | min: 948.0000000000, 189 | max: 1040.0000000000, 190 | mean: 997.0400000000, 191 | median: 998.0000000000, 192 | var: 393.2066666667, 193 | std_dev: 19.8294393937, 194 | std_dev_pct: 1.9888308788, 195 | median_abs_dev: 22.2390000000, 196 | median_abs_dev_pct: 2.2283567134, 197 | quartiles: (983.0000000000, 998.0000000000, 1013.0000000000), 198 | iqr: 30.0000000000, 199 | }; 200 | check(val, summ); 201 | } 202 | #[test] 203 | fn test_exp10a() { 204 | let val = &[ 205 | 23.0000000000, 206 | 11.0000000000, 207 | 2.0000000000, 208 | 57.0000000000, 209 | 4.0000000000, 210 | 12.0000000000, 211 | 5.0000000000, 212 | 29.0000000000, 213 | 3.0000000000, 214 | 21.0000000000, 215 | ]; 216 | let summ = &Summary { 217 | sum: 167.0000000000, 218 | min: 2.0000000000, 219 | max: 57.0000000000, 220 | mean: 16.7000000000, 221 | median: 11.5000000000, 222 | var: 287.7888888889, 223 | std_dev: 16.9643416875, 224 | std_dev_pct: 101.5828843560, 225 | median_abs_dev: 13.3434000000, 226 | median_abs_dev_pct: 116.0295652174, 227 | quartiles: (4.2500000000, 11.5000000000, 22.5000000000), 228 | iqr: 18.2500000000, 229 | }; 230 | check(val, summ); 231 | } 232 | #[test] 233 | fn test_exp10b() { 234 | let val = &[ 235 | 24.0000000000, 236 | 17.0000000000, 237 | 6.0000000000, 238 | 38.0000000000, 239 | 25.0000000000, 240 | 7.0000000000, 241 | 51.0000000000, 242 | 2.0000000000, 243 | 61.0000000000, 244 | 32.0000000000, 245 | ]; 246 | let summ = &Summary { 247 | sum: 263.0000000000, 248 | min: 2.0000000000, 249 | max: 61.0000000000, 250 | mean: 26.3000000000, 251 | median: 24.5000000000, 252 | var: 383.5666666667, 253 | std_dev: 19.5848580967, 254 | std_dev_pct: 74.4671410520, 255 | median_abs_dev: 22.9803000000, 256 | median_abs_dev_pct: 93.7971428571, 257 | quartiles: (9.5000000000, 24.5000000000, 36.5000000000), 258 | iqr: 27.0000000000, 259 | }; 260 | check(val, summ); 261 | } 262 | #[test] 263 | fn test_exp10c() { 264 | let val = &[ 265 | 71.0000000000, 266 | 2.0000000000, 267 | 32.0000000000, 268 | 1.0000000000, 269 | 6.0000000000, 270 | 28.0000000000, 271 | 13.0000000000, 272 | 37.0000000000, 273 | 16.0000000000, 274 | 36.0000000000, 275 | ]; 276 | let summ = &Summary { 277 | sum: 242.0000000000, 278 | min: 1.0000000000, 279 | max: 71.0000000000, 
280 | mean: 24.2000000000, 281 | median: 22.0000000000, 282 | var: 458.1777777778, 283 | std_dev: 21.4050876611, 284 | std_dev_pct: 88.4507754589, 285 | median_abs_dev: 21.4977000000, 286 | median_abs_dev_pct: 97.7168181818, 287 | quartiles: (7.7500000000, 22.0000000000, 35.0000000000), 288 | iqr: 27.2500000000, 289 | }; 290 | check(val, summ); 291 | } 292 | #[test] 293 | fn test_exp25() { 294 | let val = &[ 295 | 3.0000000000, 296 | 24.0000000000, 297 | 1.0000000000, 298 | 19.0000000000, 299 | 7.0000000000, 300 | 5.0000000000, 301 | 30.0000000000, 302 | 39.0000000000, 303 | 31.0000000000, 304 | 13.0000000000, 305 | 25.0000000000, 306 | 48.0000000000, 307 | 1.0000000000, 308 | 6.0000000000, 309 | 42.0000000000, 310 | 63.0000000000, 311 | 2.0000000000, 312 | 12.0000000000, 313 | 108.0000000000, 314 | 26.0000000000, 315 | 1.0000000000, 316 | 7.0000000000, 317 | 44.0000000000, 318 | 25.0000000000, 319 | 11.0000000000, 320 | ]; 321 | let summ = &Summary { 322 | sum: 593.0000000000, 323 | min: 1.0000000000, 324 | max: 108.0000000000, 325 | mean: 23.7200000000, 326 | median: 19.0000000000, 327 | var: 601.0433333333, 328 | std_dev: 24.5161851301, 329 | std_dev_pct: 103.3565983562, 330 | median_abs_dev: 19.2738000000, 331 | median_abs_dev_pct: 101.4410526316, 332 | quartiles: (6.0000000000, 19.0000000000, 31.0000000000), 333 | iqr: 25.0000000000, 334 | }; 335 | check(val, summ); 336 | } 337 | #[test] 338 | fn test_binom25() { 339 | let val = &[ 340 | 18.0000000000, 341 | 17.0000000000, 342 | 27.0000000000, 343 | 15.0000000000, 344 | 21.0000000000, 345 | 25.0000000000, 346 | 17.0000000000, 347 | 24.0000000000, 348 | 25.0000000000, 349 | 24.0000000000, 350 | 26.0000000000, 351 | 26.0000000000, 352 | 23.0000000000, 353 | 15.0000000000, 354 | 23.0000000000, 355 | 17.0000000000, 356 | 18.0000000000, 357 | 18.0000000000, 358 | 21.0000000000, 359 | 16.0000000000, 360 | 15.0000000000, 361 | 31.0000000000, 362 | 20.0000000000, 363 | 17.0000000000, 364 | 15.0000000000, 365 | ]; 366 | let summ = &Summary { 367 | sum: 514.0000000000, 368 | min: 15.0000000000, 369 | max: 31.0000000000, 370 | mean: 20.5600000000, 371 | median: 20.0000000000, 372 | var: 20.8400000000, 373 | std_dev: 4.5650848842, 374 | std_dev_pct: 22.2037202539, 375 | median_abs_dev: 5.9304000000, 376 | median_abs_dev_pct: 29.6520000000, 377 | quartiles: (17.0000000000, 20.0000000000, 24.0000000000), 378 | iqr: 7.0000000000, 379 | }; 380 | check(val, summ); 381 | } 382 | #[test] 383 | fn test_pois25lambda30() { 384 | let val = &[ 385 | 27.0000000000, 386 | 33.0000000000, 387 | 34.0000000000, 388 | 34.0000000000, 389 | 24.0000000000, 390 | 39.0000000000, 391 | 28.0000000000, 392 | 27.0000000000, 393 | 31.0000000000, 394 | 28.0000000000, 395 | 38.0000000000, 396 | 21.0000000000, 397 | 33.0000000000, 398 | 36.0000000000, 399 | 29.0000000000, 400 | 37.0000000000, 401 | 32.0000000000, 402 | 34.0000000000, 403 | 31.0000000000, 404 | 39.0000000000, 405 | 25.0000000000, 406 | 31.0000000000, 407 | 32.0000000000, 408 | 40.0000000000, 409 | 24.0000000000, 410 | ]; 411 | let summ = &Summary { 412 | sum: 787.0000000000, 413 | min: 21.0000000000, 414 | max: 40.0000000000, 415 | mean: 31.4800000000, 416 | median: 32.0000000000, 417 | var: 26.5933333333, 418 | std_dev: 5.1568724372, 419 | std_dev_pct: 16.3814245145, 420 | median_abs_dev: 5.9304000000, 421 | median_abs_dev_pct: 18.5325000000, 422 | quartiles: (28.0000000000, 32.0000000000, 34.0000000000), 423 | iqr: 6.0000000000, 424 | }; 425 | check(val, summ); 426 | } 427 | #[test] 428 | fn 
test_pois25lambda40() { 429 | let val = &[ 430 | 42.0000000000, 431 | 50.0000000000, 432 | 42.0000000000, 433 | 46.0000000000, 434 | 34.0000000000, 435 | 45.0000000000, 436 | 34.0000000000, 437 | 49.0000000000, 438 | 39.0000000000, 439 | 28.0000000000, 440 | 40.0000000000, 441 | 35.0000000000, 442 | 37.0000000000, 443 | 39.0000000000, 444 | 46.0000000000, 445 | 44.0000000000, 446 | 32.0000000000, 447 | 45.0000000000, 448 | 42.0000000000, 449 | 37.0000000000, 450 | 48.0000000000, 451 | 42.0000000000, 452 | 33.0000000000, 453 | 42.0000000000, 454 | 48.0000000000, 455 | ]; 456 | let summ = &Summary { 457 | sum: 1019.0000000000, 458 | min: 28.0000000000, 459 | max: 50.0000000000, 460 | mean: 40.7600000000, 461 | median: 42.0000000000, 462 | var: 34.4400000000, 463 | std_dev: 5.8685603004, 464 | std_dev_pct: 14.3978417577, 465 | median_abs_dev: 5.9304000000, 466 | median_abs_dev_pct: 14.1200000000, 467 | quartiles: (37.0000000000, 42.0000000000, 45.0000000000), 468 | iqr: 8.0000000000, 469 | }; 470 | check(val, summ); 471 | } 472 | #[test] 473 | fn test_pois25lambda50() { 474 | let val = &[ 475 | 45.0000000000, 476 | 43.0000000000, 477 | 44.0000000000, 478 | 61.0000000000, 479 | 51.0000000000, 480 | 53.0000000000, 481 | 59.0000000000, 482 | 52.0000000000, 483 | 49.0000000000, 484 | 51.0000000000, 485 | 51.0000000000, 486 | 50.0000000000, 487 | 49.0000000000, 488 | 56.0000000000, 489 | 42.0000000000, 490 | 52.0000000000, 491 | 51.0000000000, 492 | 43.0000000000, 493 | 48.0000000000, 494 | 48.0000000000, 495 | 50.0000000000, 496 | 42.0000000000, 497 | 43.0000000000, 498 | 42.0000000000, 499 | 60.0000000000, 500 | ]; 501 | let summ = &Summary { 502 | sum: 1235.0000000000, 503 | min: 42.0000000000, 504 | max: 61.0000000000, 505 | mean: 49.4000000000, 506 | median: 50.0000000000, 507 | var: 31.6666666667, 508 | std_dev: 5.6273143387, 509 | std_dev_pct: 11.3913245723, 510 | median_abs_dev: 4.4478000000, 511 | median_abs_dev_pct: 8.8956000000, 512 | quartiles: (44.0000000000, 50.0000000000, 52.0000000000), 513 | iqr: 8.0000000000, 514 | }; 515 | check(val, summ); 516 | } 517 | #[test] 518 | fn test_unif25() { 519 | let val = &[ 520 | 99.0000000000, 521 | 55.0000000000, 522 | 92.0000000000, 523 | 79.0000000000, 524 | 14.0000000000, 525 | 2.0000000000, 526 | 33.0000000000, 527 | 49.0000000000, 528 | 3.0000000000, 529 | 32.0000000000, 530 | 84.0000000000, 531 | 59.0000000000, 532 | 22.0000000000, 533 | 86.0000000000, 534 | 76.0000000000, 535 | 31.0000000000, 536 | 29.0000000000, 537 | 11.0000000000, 538 | 41.0000000000, 539 | 53.0000000000, 540 | 45.0000000000, 541 | 44.0000000000, 542 | 98.0000000000, 543 | 98.0000000000, 544 | 7.0000000000, 545 | ]; 546 | let summ = &Summary { 547 | sum: 1242.0000000000, 548 | min: 2.0000000000, 549 | max: 99.0000000000, 550 | mean: 49.6800000000, 551 | median: 45.0000000000, 552 | var: 1015.6433333333, 553 | std_dev: 31.8691595957, 554 | std_dev_pct: 64.1488719719, 555 | median_abs_dev: 45.9606000000, 556 | median_abs_dev_pct: 102.1346666667, 557 | quartiles: (29.0000000000, 45.0000000000, 79.0000000000), 558 | iqr: 50.0000000000, 559 | }; 560 | check(val, summ); 561 | } 562 | 563 | #[test] 564 | fn test_sum_f64s() { 565 | assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999); 566 | } 567 | #[test] 568 | fn test_sum_f64_between_ints_that_sum_to_0() { 569 | assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2); 570 | } 571 | 572 | /* 573 | #[bench] 574 | pub fn sum_three_items(b: &mut Bencher) { 575 | b.iter(|| { 576 | [1e20f64, 1.5f64, -1e20f64].sum(); 577 | }) 578 | 
}
579 | #[bench]
580 | pub fn sum_many_f64(b: &mut Bencher) {
581 | let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
582 | let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();
583 | 
584 | b.iter(|| {
585 | v.sum();
586 | })
587 | }
588 | 
589 | #[bench]
590 | pub fn no_iter(_: &mut Bencher) {}
591 | */
592 | 
--------------------------------------------------------------------------------
/src/test_result.rs:
--------------------------------------------------------------------------------
1 | use std::any::Any;
2 | 
3 | use super::bench::BenchSamples;
4 | use super::options::ShouldPanic;
5 | use super::time;
6 | use super::types::TestDesc;
7 | 
8 | pub use self::TestResult::*;
9 | 
10 | // Return codes for the secondary process.
11 | // Start somewhere other than 0 so we know the return code means what we think
12 | // it means.
13 | pub const TR_OK: i32 = 50;
14 | pub const TR_FAILED: i32 = 51;
15 | 
16 | #[derive(Debug, Clone, PartialEq)]
17 | pub enum TestResult {
18 | TrOk,
19 | TrFailed,
20 | TrFailedMsg(String),
21 | TrIgnored,
22 | TrAllowedFail,
23 | TrBench(BenchSamples),
24 | TrTimedFail,
25 | }
26 | 
27 | unsafe impl Send for TestResult {}
28 | 
29 | /// Creates a `TestResult` depending on the raw result of test execution
30 | /// and associated data.
31 | pub fn calc_result<'a>(
32 | desc: &TestDesc,
33 | task_result: Result<(), &'a dyn Any>,
34 | time_opts: &Option<time::TestTimeOptions>,
35 | exec_time: &Option<time::TestExecTime>,
36 | ) -> TestResult {
37 | let result = match (&desc.should_panic, task_result) {
38 | (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk,
39 | (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
40 | let maybe_panic_str = err
41 | .downcast_ref::<String>()
42 | .map(|e| &**e)
43 | .or_else(|| err.downcast_ref::<&'static str>().copied());
44 | 
45 | if maybe_panic_str.map(|e| e.contains(msg)).unwrap_or(false) {
46 | TestResult::TrOk
47 | } else if desc.allow_fail {
48 | TestResult::TrAllowedFail
49 | } else if let Some(panic_str) = maybe_panic_str {
50 | TestResult::TrFailedMsg(format!(
51 | r#"panic did not contain expected string
52 | panic message: `{:?}`,
53 | expected substring: `{:?}`"#,
54 | panic_str, msg
55 | ))
56 | } else {
57 | TestResult::TrFailedMsg(format!(
58 | r#"expected panic with string value,
59 | found non-string value: `{:?}`
60 | expected substring: `{:?}`"#,
61 | (**err).type_id(),
62 | msg
63 | ))
64 | }
65 | }
66 | (&ShouldPanic::Yes, Ok(())) => {
67 | TestResult::TrFailedMsg("test did not panic as expected".to_string())
68 | }
69 | _ if desc.allow_fail => TestResult::TrAllowedFail,
70 | _ => TestResult::TrFailed,
71 | };
72 | 
73 | // If the test has already failed (or is allowed to fail), do not change the result.
74 | if result != TestResult::TrOk {
75 | return result;
76 | }
77 | 
78 | // Check whether the test failed due to a timeout.
79 | if let (Some(opts), Some(time)) = (time_opts, exec_time) {
80 | if opts.error_on_excess && opts.is_critical(desc, time) {
81 | return TestResult::TrTimedFail;
82 | }
83 | }
84 | 
85 | result
86 | }
87 | 
88 | /// Creates a `TestResult` depending on the exit code of the test subprocess.
89 | pub fn get_result_from_exit_code(
90 | desc: &TestDesc,
91 | code: i32,
92 | time_opts: &Option<time::TestTimeOptions>,
93 | exec_time: &Option<time::TestExecTime>,
94 | ) -> TestResult {
95 | let result = match (desc.allow_fail, code) {
96 | (_, TR_OK) => TestResult::TrOk,
97 | (true, TR_FAILED) => TestResult::TrAllowedFail,
98 | (false, TR_FAILED) => TestResult::TrFailed,
99 | (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)),
100 | };
101 | 
102 | // If the test has already failed (or is allowed to fail), do not change the result.
103 | if result != TestResult::TrOk {
104 | return result;
105 | }
106 | 
107 | // Check whether the test failed due to a timeout.
108 | if let (Some(opts), Some(time)) = (time_opts, exec_time) {
109 | if opts.error_on_excess && opts.is_critical(desc, time) {
110 | return TestResult::TrTimedFail;
111 | }
112 | }
113 | 
114 | result
115 | }
116 | 
--------------------------------------------------------------------------------
/src/tests.rs:
--------------------------------------------------------------------------------
1 | use super::*;
2 | 
3 | use crate::{
4 | bench::Bencher,
5 | console::OutputLocation,
6 | formatters::PrettyFormatter,
7 | options::OutputFormat,
8 | test::{
9 | filter_tests,
10 | parse_opts,
11 | run_test,
12 | DynTestFn,
13 | DynTestName,
14 | MetricMap,
15 | RunIgnored,
16 | RunStrategy,
17 | ShouldPanic,
18 | StaticTestName,
19 | TestDesc,
20 | TestDescAndFn,
21 | TestOpts,
22 | TrIgnored,
23 | TrOk,
24 | // FIXME (introduced by #65251)
25 | // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
26 | // TestType, TrFailedMsg, TrIgnored, TrOk,
27 | },
28 | time::{TestTimeOptions, TimeThreshold},
29 | };
30 | use std::any::TypeId;
31 | use std::sync::mpsc::channel;
32 | use std::time::Duration;
33 | 
34 | impl TestOpts {
35 | fn new() -> TestOpts {
36 | TestOpts {
37 | list: false,
38 | filters: vec![],
39 | filter_exact: false,
40 | force_run_in_process: false,
41 | exclude_should_panic: false,
42 | run_ignored: RunIgnored::No,
43 | run_tests: false,
44 | bench_benchmarks: false,
45 | logfile: None,
46 | nocapture: false,
47 | color: AutoColor,
48 | format: OutputFormat::Pretty,
49 | test_threads: None,
50 | skip: vec![],
51 | time_options: None,
52 | options: Options::new(),
53 | }
54 | }
55 | }
56 | 
57 | fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
58 | vec![
59 | TestDescAndFn {
60 | desc: TestDesc {
61 | name: StaticTestName("1"),
62 | ignore: true,
63 | should_panic: ShouldPanic::No,
64 | allow_fail: false,
65 | test_type: TestType::Unknown,
66 | },
67 | testfn: DynTestFn(Box::new(move || {})),
68 | },
69 | TestDescAndFn {
70 | desc: TestDesc {
71 | name: StaticTestName("2"),
72 | ignore: false,
73 | should_panic: ShouldPanic::No,
74 | allow_fail: false,
75 | test_type: TestType::Unknown,
76 | },
77 | testfn: DynTestFn(Box::new(move || {})),
78 | },
79 | ]
80 | }
81 | 
82 | #[test]
83 | pub fn do_not_run_ignored_tests() {
84 | fn f() {
85 | panic!();
86 | }
87 | let desc = TestDescAndFn {
88 | desc: TestDesc {
89 | name: StaticTestName("whatever"),
90 | ignore: true,
91 | should_panic: ShouldPanic::No,
92 | allow_fail: false,
93 | test_type: TestType::Unknown,
94 | },
95 | testfn: DynTestFn(Box::new(f)),
96 | };
97 | let (tx, rx) = channel();
98 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
99 | let result = rx.recv().unwrap().result;
100 | assert_ne!(result, TrOk);
101 | }
102 | 
103 | #[test]
104 | pub fn ignored_tests_result_in_ignored() {
105 | fn f() {}
106 | let desc = TestDescAndFn {
107 | desc: TestDesc {
108 | name: StaticTestName("whatever"),
109 | ignore: true,
110 | should_panic: ShouldPanic::No,
111 | allow_fail: false,
112 | test_type: TestType::Unknown,
113 | },
114 | testfn: DynTestFn(Box::new(f)),
115 | };
116 | let (tx, rx) = channel();
117 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
118 | let result = rx.recv().unwrap().result;
119 | assert_eq!(result, TrIgnored);
120 | }
121 | 
122 | // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
123 | #[test]
124 | #[cfg(not(target_os = "emscripten"))]
125 | fn test_should_panic() {
126 | fn f() {
127 | panic!();
128 | }
129 | let desc = TestDescAndFn {
130 | desc: TestDesc {
131 | name: StaticTestName("whatever"),
132 | ignore: false,
133 | should_panic: ShouldPanic::Yes,
134 | allow_fail: false,
135 | test_type: TestType::Unknown,
136 | },
137 | testfn: DynTestFn(Box::new(f)),
138 | };
139 | let (tx, rx) = channel();
140 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
141 | let result = rx.recv().unwrap().result;
142 | assert_eq!(result, TrOk);
143 | }
144 | 
145 | // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
146 | #[test]
147 | #[cfg(not(target_os = "emscripten"))]
148 | fn test_should_panic_good_message() {
149 | fn f() {
150 | panic!("an error message");
151 | }
152 | let desc = TestDescAndFn {
153 | desc: TestDesc {
154 | name: StaticTestName("whatever"),
155 | ignore: false,
156 | should_panic: ShouldPanic::YesWithMessage("error message"),
157 | allow_fail: false,
158 | test_type: TestType::Unknown,
159 | },
160 | testfn: DynTestFn(Box::new(f)),
161 | };
162 | let (tx, rx) = channel();
163 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
164 | let result = rx.recv().unwrap().result;
165 | assert_eq!(result, TrOk);
166 | }
167 | 
168 | // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
169 | #[test]
170 | #[cfg(not(target_os = "emscripten"))]
171 | fn test_should_panic_bad_message() {
172 | use crate::tests::TrFailedMsg;
173 | fn f() {
174 | panic!("an error message");
175 | }
176 | let expected = "foobar";
177 | let failed_msg = r#"panic did not contain expected string
178 | panic message: `"an error message"`,
179 | expected substring: `"foobar"`"#;
180 | let desc = TestDescAndFn {
181 | desc: TestDesc {
182 | name: StaticTestName("whatever"),
183 | ignore: false,
184 | should_panic: ShouldPanic::YesWithMessage(expected),
185 | allow_fail: false,
186 | test_type: TestType::Unknown,
187 | },
188 | testfn: DynTestFn(Box::new(f)),
189 | };
190 | let (tx, rx) = channel();
191 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
192 | let result = rx.recv().unwrap().result;
193 | assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
194 | }
195 | 
196 | // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
197 | #[test]
198 | #[cfg(not(target_os = "emscripten"))]
199 | fn test_should_panic_non_string_message_type() {
200 | use crate::tests::TrFailedMsg;
201 | fn f() {
202 | panic!(1i32);
203 | }
204 | let expected = "foobar";
205 | let failed_msg = format!(
206 | r#"expected panic with string value,
207 | found non-string value: `{:?}`
208 | expected substring: `"foobar"`"#,
209 | TypeId::of::<i32>()
210 | );
211 | let desc = TestDescAndFn {
212 | desc: TestDesc {
213 | name: StaticTestName("whatever"),
214 | ignore: false,
215 | 
should_panic: ShouldPanic::YesWithMessage(expected),
216 | allow_fail: false,
217 | test_type: TestType::Unknown,
218 | },
219 | testfn: DynTestFn(Box::new(f)),
220 | };
221 | let (tx, rx) = channel();
222 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
223 | let result = rx.recv().unwrap().result;
224 | assert_eq!(result, TrFailedMsg(failed_msg));
225 | }
226 | 
227 | // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251)
228 | #[test]
229 | #[cfg(not(target_os = "emscripten"))]
230 | fn test_should_panic_but_succeeds() {
231 | fn f() {}
232 | let desc = TestDescAndFn {
233 | desc: TestDesc {
234 | name: StaticTestName("whatever"),
235 | ignore: false,
236 | should_panic: ShouldPanic::Yes,
237 | allow_fail: false,
238 | test_type: TestType::Unknown,
239 | },
240 | testfn: DynTestFn(Box::new(f)),
241 | };
242 | let (tx, rx) = channel();
243 | run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
244 | let result = rx.recv().unwrap().result;
245 | assert_eq!(result, TrFailedMsg("test did not panic as expected".to_string()));
246 | }
247 | 
248 | fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
249 | fn f() {}
250 | let desc = TestDescAndFn {
251 | desc: TestDesc {
252 | name: StaticTestName("whatever"),
253 | ignore: false,
254 | should_panic: ShouldPanic::No,
255 | allow_fail: false,
256 | test_type: TestType::Unknown,
257 | },
258 | testfn: DynTestFn(Box::new(f)),
259 | };
260 | let time_options = if report_time { Some(TestTimeOptions::default()) } else { None };
261 | 
262 | let test_opts = TestOpts { time_options, ..TestOpts::new() };
263 | let (tx, rx) = channel();
264 | run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
265 | let exec_time = rx.recv().unwrap().exec_time;
266 | exec_time
267 | }
268 | 
269 | #[test]
270 | fn test_should_not_report_time() {
271 | let exec_time = report_time_test_template(false);
272 | assert!(exec_time.is_none());
273 | }
274 | 
275 | #[test]
276 | fn test_should_report_time() {
277 | let exec_time = report_time_test_template(true);
278 | assert!(exec_time.is_some());
279 | }
280 | 
281 | fn time_test_failure_template(test_type: TestType) -> TestResult {
282 | fn f() {}
283 | let desc = TestDescAndFn {
284 | desc: TestDesc {
285 | name: StaticTestName("whatever"),
286 | ignore: false,
287 | should_panic: ShouldPanic::No,
288 | allow_fail: false,
289 | test_type,
290 | },
291 | testfn: DynTestFn(Box::new(f)),
292 | };
293 | // `Default` will initialize all the thresholds to 0 milliseconds.
294 | let mut time_options = TestTimeOptions::default();
295 | time_options.error_on_excess = true;
296 | 
297 | let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
298 | let (tx, rx) = channel();
299 | run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
300 | let result = rx.recv().unwrap().result;
301 | 
302 | result
303 | }
304 | 
305 | #[test]
306 | fn test_error_on_exceed() {
307 | let types = [TestType::UnitTest, TestType::IntegrationTest, TestType::DocTest];
308 | 
309 | for test_type in types.iter() {
310 | let result = time_test_failure_template(*test_type);
311 | 
312 | assert_eq!(result, TestResult::TrTimedFail);
313 | }
314 | 
315 | // Check that thresholds aren't applied to tests of unknown type.
316 | let result = time_test_failure_template(TestType::Unknown); 317 | assert_eq!(result, TestResult::TrOk); 318 | } 319 | 320 | fn typed_test_desc(test_type: TestType) -> TestDesc { 321 | TestDesc { 322 | name: StaticTestName("whatever"), 323 | ignore: false, 324 | should_panic: ShouldPanic::No, 325 | allow_fail: false, 326 | test_type, 327 | } 328 | } 329 | 330 | fn test_exec_time(millis: u64) -> TestExecTime { 331 | TestExecTime(Duration::from_millis(millis)) 332 | } 333 | 334 | #[test] 335 | fn test_time_options_threshold() { 336 | let unit = TimeThreshold::new(Duration::from_millis(50), Duration::from_millis(100)); 337 | let integration = TimeThreshold::new(Duration::from_millis(500), Duration::from_millis(1000)); 338 | let doc = TimeThreshold::new(Duration::from_millis(5000), Duration::from_millis(10000)); 339 | 340 | let options = TestTimeOptions { 341 | error_on_excess: false, 342 | colored: false, 343 | unit_threshold: unit.clone(), 344 | integration_threshold: integration.clone(), 345 | doctest_threshold: doc.clone(), 346 | }; 347 | 348 | let test_vector = [ 349 | (TestType::UnitTest, unit.warn.as_millis() - 1, false, false), 350 | (TestType::UnitTest, unit.warn.as_millis(), true, false), 351 | (TestType::UnitTest, unit.critical.as_millis(), true, true), 352 | (TestType::IntegrationTest, integration.warn.as_millis() - 1, false, false), 353 | (TestType::IntegrationTest, integration.warn.as_millis(), true, false), 354 | (TestType::IntegrationTest, integration.critical.as_millis(), true, true), 355 | (TestType::DocTest, doc.warn.as_millis() - 1, false, false), 356 | (TestType::DocTest, doc.warn.as_millis(), true, false), 357 | (TestType::DocTest, doc.critical.as_millis(), true, true), 358 | ]; 359 | 360 | for (test_type, time, expected_warn, expected_critical) in test_vector.iter() { 361 | let test_desc = typed_test_desc(*test_type); 362 | let exec_time = test_exec_time(*time as u64); 363 | 364 | assert_eq!(options.is_warn(&test_desc, &exec_time), *expected_warn); 365 | assert_eq!(options.is_critical(&test_desc, &exec_time), *expected_critical); 366 | } 367 | } 368 | 369 | #[test] 370 | fn parse_ignored_flag() { 371 | let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()]; 372 | let opts = parse_opts(&args).unwrap().unwrap(); 373 | assert_eq!(opts.run_ignored, RunIgnored::Only); 374 | } 375 | 376 | #[test] 377 | fn parse_show_output_flag() { 378 | let args = vec!["progname".to_string(), "filter".to_string(), "--show-output".to_string()]; 379 | let opts = parse_opts(&args).unwrap().unwrap(); 380 | assert!(opts.options.display_output); 381 | } 382 | 383 | #[test] 384 | fn parse_include_ignored_flag() { 385 | let args = vec![ 386 | "progname".to_string(), 387 | "filter".to_string(), 388 | "-Zunstable-options".to_string(), 389 | "--include-ignored".to_string(), 390 | ]; 391 | let opts = parse_opts(&args).unwrap().unwrap(); 392 | assert_eq!(opts.run_ignored, RunIgnored::Yes); 393 | } 394 | 395 | #[test] 396 | pub fn filter_for_ignored_option() { 397 | // When we run ignored tests the test filter should filter out all the 398 | // unignored tests and flip the ignore flag on the rest to false 399 | 400 | let mut opts = TestOpts::new(); 401 | opts.run_tests = true; 402 | opts.run_ignored = RunIgnored::Only; 403 | 404 | let tests = one_ignored_one_unignored_test(); 405 | let filtered = filter_tests(&opts, tests); 406 | 407 | assert_eq!(filtered.len(), 1); 408 | assert_eq!(filtered[0].desc.name.to_string(), "1"); 409 | assert!(!filtered[0].desc.ignore); 410 | 
}
411 | 
412 | #[test]
413 | pub fn run_include_ignored_option() {
414 | // When we run tests with "--include-ignored", the ignore flag should be set to false on
415 | // all tests, and no test should be filtered out
416 | 
417 | let mut opts = TestOpts::new();
418 | opts.run_tests = true;
419 | opts.run_ignored = RunIgnored::Yes;
420 | 
421 | let tests = one_ignored_one_unignored_test();
422 | let filtered = filter_tests(&opts, tests);
423 | 
424 | assert_eq!(filtered.len(), 2);
425 | assert!(!filtered[0].desc.ignore);
426 | assert!(!filtered[1].desc.ignore);
427 | }
428 | 
429 | #[test]
430 | pub fn exclude_should_panic_option() {
431 | let mut opts = TestOpts::new();
432 | opts.run_tests = true;
433 | opts.exclude_should_panic = true;
434 | 
435 | let mut tests = one_ignored_one_unignored_test();
436 | tests.push(TestDescAndFn {
437 | desc: TestDesc {
438 | name: StaticTestName("3"),
439 | ignore: false,
440 | should_panic: ShouldPanic::Yes,
441 | allow_fail: false,
442 | test_type: TestType::Unknown,
443 | },
444 | testfn: DynTestFn(Box::new(move || {})),
445 | });
446 | 
447 | let filtered = filter_tests(&opts, tests);
448 | 
449 | assert_eq!(filtered.len(), 2);
450 | assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
451 | }
452 | 
453 | #[test]
454 | pub fn exact_filter_match() {
455 | fn tests() -> Vec<TestDescAndFn> {
456 | vec!["base", "base::test", "base::test1", "base::test2"]
457 | .into_iter()
458 | .map(|name| TestDescAndFn {
459 | desc: TestDesc {
460 | name: StaticTestName(name),
461 | ignore: false,
462 | should_panic: ShouldPanic::No,
463 | allow_fail: false,
464 | test_type: TestType::Unknown,
465 | },
466 | testfn: DynTestFn(Box::new(move || {})),
467 | })
468 | .collect()
469 | }
470 | 
471 | let substr =
472 | filter_tests(&TestOpts { filters: vec!["base".into()], ..TestOpts::new() }, tests());
473 | assert_eq!(substr.len(), 4);
474 | 
475 | let substr = filter_tests(&TestOpts { filters: vec!["bas".into()], ..TestOpts::new() }, tests());
476 | assert_eq!(substr.len(), 4);
477 | 
478 | let substr =
479 | filter_tests(&TestOpts { filters: vec!["::test".into()], ..TestOpts::new() }, tests());
480 | assert_eq!(substr.len(), 3);
481 | 
482 | let substr =
483 | filter_tests(&TestOpts { filters: vec!["base::test".into()], ..TestOpts::new() }, tests());
484 | assert_eq!(substr.len(), 3);
485 | 
486 | let substr = filter_tests(
487 | &TestOpts { filters: vec!["test1".into(), "test2".into()], ..TestOpts::new() },
488 | tests(),
489 | );
490 | assert_eq!(substr.len(), 2);
491 | 
492 | let exact = filter_tests(
493 | &TestOpts { filters: vec!["base".into()], filter_exact: true, ..TestOpts::new() },
494 | tests(),
495 | );
496 | assert_eq!(exact.len(), 1);
497 | 
498 | let exact = filter_tests(
499 | &TestOpts { filters: vec!["bas".into()], filter_exact: true, ..TestOpts::new() },
500 | tests(),
501 | );
502 | assert_eq!(exact.len(), 0);
503 | 
504 | let exact = filter_tests(
505 | &TestOpts { filters: vec!["::test".into()], filter_exact: true, ..TestOpts::new() },
506 | tests(),
507 | );
508 | assert_eq!(exact.len(), 0);
509 | 
510 | let exact = filter_tests(
511 | &TestOpts { filters: vec!["base::test".into()], filter_exact: true, ..TestOpts::new() },
512 | tests(),
513 | );
514 | assert_eq!(exact.len(), 1);
515 | 
516 | let exact = filter_tests(
517 | &TestOpts {
518 | filters: vec!["base".into(), "base::test".into()],
519 | filter_exact: true,
520 | ..TestOpts::new()
521 | },
522 | tests(),
523 | );
524 | assert_eq!(exact.len(), 2);
525 | }
526 | 
527 | #[test]
528 | pub fn sort_tests() {
529 | let
mut opts = TestOpts::new(); 530 | opts.run_tests = true; 531 | 532 | let names = vec![ 533 | "sha1::test".to_string(), 534 | "isize::test_to_str".to_string(), 535 | "isize::test_pow".to_string(), 536 | "test::do_not_run_ignored_tests".to_string(), 537 | "test::ignored_tests_result_in_ignored".to_string(), 538 | "test::first_free_arg_should_be_a_filter".to_string(), 539 | "test::parse_ignored_flag".to_string(), 540 | "test::parse_include_ignored_flag".to_string(), 541 | "test::filter_for_ignored_option".to_string(), 542 | "test::run_include_ignored_option".to_string(), 543 | "test::sort_tests".to_string(), 544 | ]; 545 | let tests = { 546 | fn testfn() {} 547 | let mut tests = Vec::new(); 548 | for name in &names { 549 | let test = TestDescAndFn { 550 | desc: TestDesc { 551 | name: DynTestName((*name).clone()), 552 | ignore: false, 553 | should_panic: ShouldPanic::No, 554 | allow_fail: false, 555 | test_type: TestType::Unknown, 556 | }, 557 | testfn: DynTestFn(Box::new(testfn)), 558 | }; 559 | tests.push(test); 560 | } 561 | tests 562 | }; 563 | let filtered = filter_tests(&opts, tests); 564 | 565 | let expected = vec![ 566 | "isize::test_pow".to_string(), 567 | "isize::test_to_str".to_string(), 568 | "sha1::test".to_string(), 569 | "test::do_not_run_ignored_tests".to_string(), 570 | "test::filter_for_ignored_option".to_string(), 571 | "test::first_free_arg_should_be_a_filter".to_string(), 572 | "test::ignored_tests_result_in_ignored".to_string(), 573 | "test::parse_ignored_flag".to_string(), 574 | "test::parse_include_ignored_flag".to_string(), 575 | "test::run_include_ignored_option".to_string(), 576 | "test::sort_tests".to_string(), 577 | ]; 578 | 579 | for (a, b) in expected.iter().zip(filtered) { 580 | assert_eq!(*a, b.desc.name.to_string()); 581 | } 582 | } 583 | 584 | #[test] 585 | pub fn test_metricmap_compare() { 586 | let mut m1 = MetricMap::new(); 587 | let mut m2 = MetricMap::new(); 588 | m1.insert_metric("in-both-noise", 1000.0, 200.0); 589 | m2.insert_metric("in-both-noise", 1100.0, 200.0); 590 | 591 | m1.insert_metric("in-first-noise", 1000.0, 2.0); 592 | m2.insert_metric("in-second-noise", 1000.0, 2.0); 593 | 594 | m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0); 595 | m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0); 596 | 597 | m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0); 598 | m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0); 599 | 600 | m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0); 601 | m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0); 602 | 603 | m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0); 604 | m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0); 605 | } 606 | 607 | #[test] 608 | pub fn test_bench_once_no_iter() { 609 | fn f(_: &mut Bencher) {} 610 | bench::run_once(f); 611 | } 612 | 613 | #[test] 614 | pub fn test_bench_once_iter() { 615 | fn f(b: &mut Bencher) { 616 | b.iter(|| {}) 617 | } 618 | bench::run_once(f); 619 | } 620 | 621 | #[test] 622 | pub fn test_bench_no_iter() { 623 | fn f(_: &mut Bencher) {} 624 | 625 | let (tx, rx) = channel(); 626 | 627 | let desc = TestDesc { 628 | name: StaticTestName("f"), 629 | ignore: false, 630 | should_panic: ShouldPanic::No, 631 | allow_fail: false, 632 | test_type: TestType::Unknown, 633 | }; 634 | 635 | crate::bench::benchmark(desc, tx, true, f); 636 | rx.recv().unwrap(); 637 | } 638 | 639 | #[test] 640 | pub fn test_bench_iter() { 641 | fn 
f(b: &mut Bencher) {
642 | b.iter(|| {})
643 | }
644 | 
645 | let (tx, rx) = channel();
646 | 
647 | let desc = TestDesc {
648 | name: StaticTestName("f"),
649 | ignore: false,
650 | should_panic: ShouldPanic::No,
651 | allow_fail: false,
652 | test_type: TestType::Unknown,
653 | };
654 | 
655 | crate::bench::benchmark(desc, tx, true, f);
656 | rx.recv().unwrap();
657 | }
658 | 
659 | #[test]
660 | fn should_sort_failures_before_printing_them() {
661 | let test_a = TestDesc {
662 | name: StaticTestName("a"),
663 | ignore: false,
664 | should_panic: ShouldPanic::No,
665 | allow_fail: false,
666 | test_type: TestType::Unknown,
667 | };
668 | 
669 | let test_b = TestDesc {
670 | name: StaticTestName("b"),
671 | ignore: false,
672 | should_panic: ShouldPanic::No,
673 | allow_fail: false,
674 | test_type: TestType::Unknown,
675 | };
676 | 
677 | let mut out = PrettyFormatter::new(OutputLocation::Raw(Vec::new()), false, 10, false, None);
678 | 
679 | let st = console::ConsoleTestState {
680 | log_out: None,
681 | total: 0,
682 | passed: 0,
683 | failed: 0,
684 | ignored: 0,
685 | allowed_fail: 0,
686 | filtered_out: 0,
687 | measured: 0,
688 | exec_time: None,
689 | metrics: MetricMap::new(),
690 | failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
691 | options: Options::new(),
692 | not_failures: Vec::new(),
693 | time_failures: Vec::new(),
694 | };
695 | 
696 | out.write_failures(&st).unwrap();
697 | let s = match out.output_location() {
698 | &OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]),
699 | &OutputLocation::Pretty(_) => unreachable!(),
700 | };
701 | 
702 | let apos = s.find("a").unwrap();
703 | let bpos = s.find("b").unwrap();
704 | assert!(apos < bpos);
705 | }
706 | 
--------------------------------------------------------------------------------
/src/time.rs:
--------------------------------------------------------------------------------
1 | //! Module `time` contains everything related to measuring the execution time
2 | //! of unit tests.
3 | //! The purposes of this module:
4 | //! - Check whether a test has timed out.
5 | //! - Provide helpers for the `report-time` and `measure-time` options.
6 | //! - Provide newtypes for execution times.
7 | 
8 | use std::env;
9 | use std::fmt;
10 | use std::str::FromStr;
11 | use std::time::{Duration, Instant};
12 | 
13 | use super::types::{TestDesc, TestType};
14 | 
15 | pub const TEST_WARN_TIMEOUT_S: u64 = 60;
16 | 
17 | /// This small module contains constants used by the `report-time` option.
18 | /// These constant values will be used if the corresponding environment variables are not set.
19 | ///
20 | /// To override values for unit tests, set the environment variable `RUST_TEST_TIME_UNIT`;
21 | /// to override values for integration tests, set `RUST_TEST_TIME_INTEGRATION`;
22 | /// to override values for doctests, set `RUST_TEST_TIME_DOCTEST`.
23 | ///
24 | /// The expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 is the warn time
25 | /// and 200 the critical time, both in milliseconds.
26 | pub mod time_constants {
27 | use super::TEST_WARN_TIMEOUT_S;
28 | use std::time::Duration;
29 | 
30 | /// Environment variable for overriding the default threshold for unit tests.
31 | pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
32 | 
33 | // Unit tests are supposed to be really quick.
34 | pub const UNIT_WARN: Duration = Duration::from_millis(50);
35 | pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
36 | 
37 | /// Environment variable for overriding the default threshold for integration tests.
38 | pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
39 | 
40 | // Integration tests may have a lot of work, so they can take longer to execute.
41 | pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
42 | pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
43 | 
44 | /// Environment variable for overriding the default threshold for doctests.
45 | pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
46 | 
47 | // Doctests are similar to integration tests because they can include a lot of
48 | // initialization code.
49 | pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
50 | pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
51 | 
52 | // Do not assume anything about unknown tests; base their limits on the
53 | // `TEST_WARN_TIMEOUT_S` constant.
54 | pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
55 | pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
56 | }
57 | 
58 | /// Returns an `Instant` denoting when the test should be considered
59 | /// timed out.
60 | pub fn get_default_test_timeout() -> Instant {
61 | Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S)
62 | }
63 | 
64 | /// The measured execution time of a unit test.
65 | #[derive(Debug, Clone, PartialEq)]
66 | pub struct TestExecTime(pub Duration);
67 | 
68 | impl fmt::Display for TestExecTime {
69 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
70 | write!(f, "{:.3}s", self.0.as_secs_f64())
71 | }
72 | }
73 | 
74 | /// The measured execution time of the whole test suite.
75 | #[derive(Debug, Clone, Default, PartialEq)]
76 | pub struct TestSuiteExecTime(pub Duration);
77 | 
78 | impl fmt::Display for TestSuiteExecTime {
79 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
80 | write!(f, "{:.2}s", self.0.as_secs_f64())
81 | }
82 | }
83 | 
84 | /// Structure denoting time limits for test execution.
85 | #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
86 | pub struct TimeThreshold {
87 | pub warn: Duration,
88 | pub critical: Duration,
89 | }
90 | 
91 | impl TimeThreshold {
92 | /// Creates a new `TimeThreshold` instance with the provided durations.
93 | pub fn new(warn: Duration, critical: Duration) -> Self {
94 | Self { warn, critical }
95 | }
96 | 
97 | /// Attempts to create a `TimeThreshold` instance with values obtained
98 | /// from the environment variable, and returns `None` if the variable
99 | /// is not set.
100 | /// The environment variable format is expected to match `\d+,\d+`.
101 | ///
102 | /// # Panics
103 | ///
104 | /// Panics if the variable with the provided name is set but contains an
105 | /// inappropriate value.
106 | pub fn from_env_var(env_var_name: &str) -> Option<Self> {
107 | let durations_str = env::var(env_var_name).ok()?;
108 | 
109 | // Split the string into two substrings at the comma and try to parse the numbers.
110 | let mut durations = durations_str.splitn(2, ',').map(|v| {
111 | u64::from_str(v).unwrap_or_else(|_| {
112 | panic!(
113 | "Duration value in variable {} is expected to be a number, but got {}",
114 | env_var_name, v
115 | )
116 | })
117 | });
118 | 
119 | // Callback to be called if the environment variable has an unexpected structure.
120 | let panic_on_incorrect_value = || {
121 | panic!(
122 | "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
123 | env_var_name, durations_str
124 | );
125 | };
126 | 
127 | let (warn, critical) = (
128 | durations.next().unwrap_or_else(panic_on_incorrect_value),
129 | durations.next().unwrap_or_else(panic_on_incorrect_value),
130 | );
131 | 
132 | if warn > critical {
133 | panic!("Test execution warn time should be less than or equal to the critical time");
134 | }
135 | 
136 | Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
137 | }
138 | }
139 | 
140 | /// Structure with parameters for calculating test execution time.
141 | #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
142 | pub struct TestTimeOptions {
143 | /// Denotes whether exceeding a test's critical execution time limit should be
144 | /// considered a test failure.
145 | pub error_on_excess: bool,
146 | pub colored: bool,
147 | pub unit_threshold: TimeThreshold,
148 | pub integration_threshold: TimeThreshold,
149 | pub doctest_threshold: TimeThreshold,
150 | }
151 | 
152 | impl TestTimeOptions {
153 | pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
154 | let unit_threshold = TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
155 | .unwrap_or_else(Self::default_unit);
156 | 
157 | let integration_threshold =
158 | TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
159 | .unwrap_or_else(Self::default_integration);
160 | 
161 | let doctest_threshold = TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
162 | .unwrap_or_else(Self::default_doctest);
163 | 
164 | Self { error_on_excess, colored, unit_threshold, integration_threshold, doctest_threshold }
165 | }
166 | 
167 | pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
168 | exec_time.0 >= self.warn_time(test)
169 | }
170 | 
171 | pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
172 | exec_time.0 >= self.critical_time(test)
173 | }
174 | 
175 | fn warn_time(&self, test: &TestDesc) -> Duration {
176 | match test.test_type {
177 | TestType::UnitTest => self.unit_threshold.warn,
178 | TestType::IntegrationTest => self.integration_threshold.warn,
179 | TestType::DocTest => self.doctest_threshold.warn,
180 | TestType::Unknown => time_constants::UNKNOWN_WARN,
181 | }
182 | }
183 | 
184 | fn critical_time(&self, test: &TestDesc) -> Duration {
185 | match test.test_type {
186 | TestType::UnitTest => self.unit_threshold.critical,
187 | TestType::IntegrationTest => self.integration_threshold.critical,
188 | TestType::DocTest => self.doctest_threshold.critical,
189 | TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
190 | }
191 | }
192 | 
193 | fn default_unit() -> TimeThreshold {
194 | TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
195 | }
196 | 
197 | fn default_integration() -> TimeThreshold {
198 | TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
199 | }
200 | 
201 | fn default_doctest() -> TimeThreshold {
202 | TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
203 | }
204 | }
205 | 
--------------------------------------------------------------------------------
/src/types.rs:
--------------------------------------------------------------------------------
1 | //! Common types used by `libtest`.
2 | 
3 | use std::borrow::Cow;
4 | use std::fmt;
5 | 
6 | use super::bench::Bencher;
7 | use super::options;
8 | 
9 | pub use NamePadding::*;
10 | pub use TestFn::*;
11 | pub use TestName::*;
12 | 
13 | /// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
14 | /// conventions.
15 | #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
16 | pub enum TestType {
17 | /// Unit-tests are expected to be in the `src` folder of the crate.
18 | UnitTest,
19 | /// Integration-style tests are expected to be in the `tests` folder of the crate.
20 | IntegrationTest,
21 | /// Doctests are created manually by `librustdoc`, so they are a different type of test.
22 | DocTest,
23 | /// Tests for the sources that don't follow the project layout convention
24 | /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
25 | Unknown,
26 | }
27 | 
28 | #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
29 | pub enum NamePadding {
30 | PadNone,
31 | PadOnRight,
32 | }
33 | 
34 | // The name of a test. By convention this follows the rules for Rust
35 | // paths; i.e., it should be a series of identifiers separated by double
36 | // colons. This way if some test runner wants to arrange the tests
37 | // hierarchically it may.
38 | #[derive(Clone, PartialEq, Eq, Hash, Debug)]
39 | pub enum TestName {
40 | StaticTestName(&'static str),
41 | DynTestName(String),
42 | AlignedTestName(Cow<'static, str>, NamePadding),
43 | }
44 | 
45 | impl TestName {
46 | pub fn as_slice(&self) -> &str {
47 | match *self {
48 | StaticTestName(s) => s,
49 | DynTestName(ref s) => s,
50 | AlignedTestName(ref s, _) => &*s,
51 | }
52 | }
53 | 
54 | pub fn padding(&self) -> NamePadding {
55 | match self {
56 | &AlignedTestName(_, p) => p,
57 | _ => PadNone,
58 | }
59 | }
60 | 
61 | pub fn with_padding(&self, padding: NamePadding) -> TestName {
62 | let name = match *self {
63 | TestName::StaticTestName(name) => Cow::Borrowed(name),
64 | TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
65 | TestName::AlignedTestName(ref name, _) => name.clone(),
66 | };
67 | 
68 | TestName::AlignedTestName(name, padding)
69 | }
70 | }
71 | impl fmt::Display for TestName {
72 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
73 | fmt::Display::fmt(self.as_slice(), f)
74 | }
75 | }
76 | 
77 | /// Represents a benchmark function.
78 | pub trait TDynBenchFn: Send {
79 | fn run(&self, harness: &mut Bencher);
80 | }
81 | 
82 | // A function that runs a test. If the function returns successfully,
83 | // the test succeeds; if the function panics then the test fails. We
84 | // may need to come up with a more clever definition of test in order
85 | // to support isolation of tests into threads.
86 | pub enum TestFn {
87 | StaticTestFn(fn()),
88 | StaticBenchFn(fn(&mut Bencher)),
89 | DynTestFn(Box<dyn FnOnce() + Send>),
90 | DynBenchFn(Box<dyn TDynBenchFn>),
91 | }
92 | 
93 | impl TestFn {
94 | pub fn padding(&self) -> NamePadding {
95 | match *self {
96 | StaticTestFn(..) => PadNone,
97 | StaticBenchFn(..) => PadOnRight,
98 | DynTestFn(..) => PadNone,
99 | DynBenchFn(..) => PadOnRight,
100 | }
101 | }
102 | }
103 | 
104 | impl fmt::Debug for TestFn {
105 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
106 | f.write_str(match *self {
107 | StaticTestFn(..) => "StaticTestFn(..)",
108 | StaticBenchFn(..) => "StaticBenchFn(..)",
109 | DynTestFn(..) => "DynTestFn(..)",
110 | DynBenchFn(..) => "DynBenchFn(..)",
111 | })
112 | }
113 | }
114 | 
115 | // The definition of a single test.
A test runner will run a list of 116 | // these. 117 | #[derive(Clone, Debug, PartialEq, Eq, Hash)] 118 | pub struct TestDesc { 119 | pub name: TestName, 120 | pub ignore: bool, 121 | pub should_panic: options::ShouldPanic, 122 | pub allow_fail: bool, 123 | pub test_type: TestType, 124 | } 125 | 126 | impl TestDesc { 127 | pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String { 128 | let mut name = String::from(self.name.as_slice()); 129 | let fill = column_count.saturating_sub(name.len()); 130 | let pad = " ".repeat(fill); 131 | match align { 132 | PadNone => name, 133 | PadOnRight => { 134 | name.push_str(&pad); 135 | name 136 | } 137 | } 138 | } 139 | } 140 | 141 | #[derive(Debug)] 142 | pub struct TestDescAndFn { 143 | pub desc: TestDesc, 144 | pub testfn: TestFn, 145 | } 146 | --------------------------------------------------------------------------------
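Note on the `RUST_TEST_TIME_*` variables documented in /src/time.rs: each one takes a `warn,critical` pair of millisecond values. The standalone sketch below mirrors that parsing rule for illustration only; the helper name `parse_threshold` is hypothetical and not part of the crate, and unlike `TimeThreshold::from_env_var` it returns `None` on malformed input instead of panicking.

use std::time::Duration;

// Parse a "warn,critical" millisecond pair, mirroring the format accepted
// by `TimeThreshold::from_env_var` in src/time.rs. Hypothetical helper for
// illustration; the crate panics on malformed input rather than returning None.
fn parse_threshold(raw: &str) -> Option<(Duration, Duration)> {
    let mut parts = raw.splitn(2, ',').map(|v| v.trim().parse::<u64>().ok());
    let warn = parts.next()??;
    let critical = parts.next()??;
    // The crate requires the warn time to be <= the critical time.
    if warn > critical {
        return None;
    }
    Some((Duration::from_millis(warn), Duration::from_millis(critical)))
}

fn main() {
    // e.g. RUST_TEST_TIME_UNIT=100,200
    assert_eq!(
        parse_threshold("100,200"),
        Some((Duration::from_millis(100), Duration::from_millis(200)))
    );
    // Rejected: warn time exceeds critical time.
    assert_eq!(parse_threshold("200,100"), None);
}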