├── .azure-pipelines.yml ├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── ci ├── azure-install-rust.yml ├── azure-job-test-all.yml ├── run.sh └── tools.sh ├── libtest ├── Cargo.toml ├── README.md ├── formatters │ ├── json.rs │ ├── mod.rs │ ├── pretty.rs │ └── terse.rs ├── lib.rs └── stats.rs └── rustfmt.toml /.azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - master 3 | 4 | jobs: 5 | - template: ci/azure-job-test-all.yml 6 | parameters: 7 | vmImage: vs2017-win2016 8 | name: x86_64_msvc 9 | target: x86_64-pc-windows-msvc 10 | - template: ci/azure-job-test-all.yml 11 | parameters: 12 | vmImage: vs2017-win2016 13 | toolchain: nightly-x86_64-gnu 14 | name: x86_64_mingw 15 | target: x86_64-pc-windows-gnu 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | sudo: false 3 | rust: nightly 4 | 5 | matrix: 6 | fast_finish: true 7 | include: 8 | - name: "Tools" 9 | install: true 10 | script: sh ci/tools.sh 11 | 12 | # cross targets: 13 | - name: "aarch64-linux-android" 14 | env: TARGET=aarch64-linux-android CROSS=1 15 | - name: "aarch64-unknown-linux-gnu" 16 | env: TARGET=aarch64-unknown-linux-gnu CROSS=1 17 | - name: "arm-linux-androideabi" 18 | env: TARGET=arm-linux-androideabi CROSS=1 19 | - name: "arm-unknown-linux-gnueabi" 20 | env: TARGET=arm-unknown-linux-gnueabi CROSS=1 21 | - name: "arm-unknown-linux-musleabi" 22 | env: TARGET=arm-unknown-linux-musleabi CROSS=1 23 | - name: "armv7-linux-androideabi" 24 | env: TARGET=armv7-linux-androideabi CROSS=1 25 | - name: "armv7-unknown-linux-gnueabihf" 26 | 
env: TARGET=armv7-unknown-linux-gnueabihf CROSS=1 27 | - name: "armv7-unknown-linux-musleabihf" 28 | env: TARGET=armv7-unknown-linux-musleabihf CROSS=1 29 | - name: "i586-unknown-linux-gnu" 30 | env: TARGET=i586-unknown-linux-gnu CROSS=1 31 | addons: &gcc_multilib 32 | apt: 33 | packages: 34 | - gcc-multilib 35 | - name: "i586-unknown-linux-musl" 36 | env: TARGET=i586-unknown-linux-musl CROSS=1 37 | addons: &gcc_multilib 38 | apt: 39 | packages: 40 | - gcc-multilib 41 | 42 | - name: "i686-apple-darwin" 43 | env: TARGET=i686-apple-darwin 44 | os: osx 45 | osx_image: xcode10 46 | - name: "i686-linux-android" 47 | env: TARGET=i686-linux-android CROSS=1 48 | - name: "i686-pc-windows-gnu" 49 | env: TARGET=i686-pc-windows-gnu CROSS=1 50 | - name: "i686-unknown-freebsd" 51 | env: TARGET=i686-unknown-freebsd NORUN=1 CROSS=1 52 | - name: "i686-unknown-linux-gnu" 53 | env: TARGET=i686-unknown-linux-gnu CROSS=1 54 | addons: *gcc_multilib 55 | - name: "i686-unknown-linux-musl" 56 | env: TARGET=i686-unknown-linux-musl CROSS=1 57 | - name: "mips-unknown-linux-gnu" 58 | env: TARGET=mips-unknown-linux-gnu CROSS=1 59 | - name: "mips64-unknown-linux-gnuabi64" 60 | env: TARGET=mips64-unknown-linux-gnuabi64 CROSS=1 61 | - name: "mips64el-unknown-linux-gnuabi64" 62 | env: TARGET=mips64el-unknown-linux-gnuabi64 CROSS=1 63 | - name: "mipsel-unknown-linux-gnu" 64 | env: TARGET=mipsel-unknown-linux-gnu CROSS=1 65 | - name: "powerpc-unknown-linux-gnu" 66 | env: TARGET=powerpc-unknown-linux-gnu CROSS=1 67 | - name: "powerpc64-unknown-linux-gnu" 68 | env: TARGET=powerpc64-unknown-linux-gnu CROSS=1 69 | - name: "powerpc64le-unknown-linux-gnu" 70 | env: TARGET=powerpc64le-unknown-linux-gnu CROSS=1 71 | - name: "s390x-unknown-linux-gnu" 72 | env: TARGET=s390x-unknown-linux-gnu CROSS=1 NORUN=1 73 | - name: "sparc64-unknown-linux-gnu" 74 | env: TARGET=sparc64-unknown-linux-gnu CROSS=1 NORUN=1 75 | - name: "x86_64-apple-darwin" 76 | env: TARGET=x86_64-apple-darwin 77 | os: osx 78 | osx_image: 
xcode10 79 | install: true 80 | - name: "x86_64-linux-android" 81 | env: TARGET=x86_64-linux-android CROSS=1 82 | - name: "x86_64-sun-solaris" 83 | env: TARGET=x86_64-sun-solaris NORUN=1 CROSS=1 84 | - name: "x86_64-unknown-freebsd" 85 | env: TARGET=x86_64-unknown-freebsd NORUN=1 CROSS=1 86 | - name: "x86_64-unknown-linux-gnu" 87 | env: TARGET=x86_64-unknown-linux-gnu 88 | install: true 89 | - name: "x86_64-unknown-linux-musl" 90 | env: TARGET=x86_64-unknown-linux-musl CROSS=1 91 | - name: "x86_64-unknown-netbsd" 92 | env: TARGET=x86_64-unknown-netbsd NORUN=1 CROSS=1 93 | 94 | install: 95 | - travis_retry rustup target add $TARGET 96 | - | 97 | if [ "$CROSS" = "1" ]; then 98 | cargo install cross 99 | fi 100 | 101 | script: 102 | - cargo generate-lockfile 103 | - sh ci/run.sh "${TARGET}" 104 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ "libtest" ] -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **Note:** This repository is no longer in use. The `test` crate lives [in the `rust-lang/rust` repository](https://github.com/rust-lang/rust/tree/master/library/test). 2 | 3 | --- 4 | 5 | [![Build Status](https://travis-ci.com/rust-lang/libtest.svg?branch=master)](https://travis-ci.com/rust-lang/libtest) [![Build Status](https://dev.azure.com/rust-lang/libtest/_apis/build/status/libtest-CI?branchName=master)](https://dev.azure.com/rust-lang/libtest/_build/latest?definitionId=1&branchName=master) [![Latest Version]][crates.io] [![docs]][docs.rs] 6 | 7 | libtest - Rust's built-in unit-testing and benchmarking framework 8 | === 9 | 10 | See [The Rust Programming Language chapter on 11 | Testing](https://doc.rust-lang.org/book/ch11-00-testing.html). 
12 | 13 | ## Platform support 14 | 15 | * "build" shows whether the library compiles 16 | * "run" shows whether the full test-suite passes 17 | 18 | | Target | Build | Run | 19 | |-----------------------------------|-------|-----| 20 | | `aarch64-linux-android` | ✓ | ✓ | 21 | | `aarch64-unknown-linux-gnu` | ✓ | ✓ | 22 | | `arm-linux-androideabi` | ✓ | ✓ | 23 | | `arm-unknown-linux-gnueabi` | ✓ | ✓ | 24 | | `arm-unknown-linux-musleabi` | ✓ | ✓ | 25 | | `armv7-linux-androideabi` | ✓ | ✓ | 26 | | `armv7-unknown-linux-gnueabihf` | ✓ | ✓ | 27 | | `armv7-unknown-linux-musleabihf` | ✓ | ✓ | 28 | | `i586-unknown-linux-gnu` | ✓ | ✓ | 29 | | `i586-unknown-linux-musl` | ✓ | ✓ | 30 | | `i686-linux-android` | ✓ | ✓ | 31 | | `i686-pc-windows-gnu` | ✓ | ✓ | 32 | | `i686-apple-darwin` | ✓ | ✓ | 33 | | `i686-unknown-freebsd` | ✓ | ✗ | 34 | | `i686-unknown-linux-gnu` | ✓ | ✓ | 35 | | `i686-unknown-linux-musl` | ✓ | ✓ | 36 | | `mips-unknown-linux-gnu` | ✓ | ✓ | 37 | | `mips64-unknown-linux-gnuabi64` | ✓ | ✓ | 38 | | `mips64el-unknown-linux-gnuabi64` | ✓ | ✓ | 39 | | `mipsel-unknown-linux-gnu` | ✓ | ✓ | 40 | | `powerpc-unknown-linux-gnu` | ✓ | ✓ | 41 | | `powerpc64-unknown-linux-gnu` | ✓ | ✓ | 42 | | `powerpc64le-unknown-linux-gnu` | ✓ | ✓ | 43 | | `sparc64-unknown-linux-gnu` | ✓ | ✗ | 44 | | `s390x-unknown-linux-gnu` | ✓ | ✓ | 45 | | `x86_64-apple-darwin` | ✓ | ✓ | 46 | | `x86_64-sun-solaris` | ✓ | ✗ | 47 | | `x86_64-linux-android` | ✓ | ✓ | 48 | | `x86_64-pc-windows-gnu` | ✓ | ✓ | 49 | | `x86_64-pc-windows-msvc` | ✓ | ✓ | 50 | | `x86_64-unknown-freebsd` | ✓ | ✗ | 51 | | `x86_64-unknown-linux-gnu` | ✓ | ✓ | 52 | | `x86_64-unknown-linux-musl` | ✓ | ✓ | 53 | | `x86_64-unknown-netbsd` | ✓ | ✗ | 54 | 55 | ## License 56 | 57 | This project is licensed under either of 58 | 59 | * [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0) 60 | ([LICENSE-APACHE](LICENSE-APACHE)) 61 | 62 | * [MIT License](http://opensource.org/licenses/MIT) 63 | ([LICENSE-MIT](LICENSE-MIT)) 64 
| 65 | at your option. 66 | 67 | ## Contributing 68 | 69 | We welcome all people who want to contribute. 70 | 71 | Contributions in any form (issues, pull requests, etc.) to this project 72 | must adhere to Rust's [Code of Conduct]. 73 | 74 | Unless you explicitly state otherwise, any contribution intentionally submitted 75 | for inclusion in `libtest` by you, as defined in the Apache-2.0 license, shall 76 | be dual licensed as above, without any additional terms or conditions. 77 | 78 | [Code of Conduct]: https://www.rust-lang.org/en-US/conduct.html 79 | [Latest Version]: https://img.shields.io/crates/v/libtest.svg 80 | [crates.io]: https://crates.io/crates/libtest 81 | [docs]: https://docs.rs/libtest/badge.svg 82 | [docs.rs]: https://docs.rs/libtest/ 83 | -------------------------------------------------------------------------------- /ci/azure-install-rust.yml: -------------------------------------------------------------------------------- 1 | parameters: 2 | toolchain: 'nightly' 3 | 4 | steps: 5 | - bash: | 6 | curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain $TOOLCHAIN 7 | echo "##vso[task.setvariable variable=PATH;]$PATH:$HOME/.cargo/bin" 8 | displayName: Install rust 9 | condition: ne( variables['Agent.OS'], 'Windows_NT' ) 10 | env: 11 | TOOLCHAIN: ${{ parameters.toolchain }} 12 | - script: | 13 | curl -sSf -o rustup-init.exe https://win.rustup.rs 14 | rustup-init.exe -y --default-toolchain %TOOLCHAIN% 15 | echo "##vso[task.setvariable variable=PATH;]%PATH%;%USERPROFILE%\.cargo\bin" 16 | displayName: Install rust 17 | condition: eq( variables['Agent.OS'], 'Windows_NT' ) 18 | env: 19 | TOOLCHAIN: ${{ parameters.toolchain }} 20 | - script: | 21 | rustc -Vv 22 | cargo -V 23 | displayName: Query rust and cargo versions 24 | -------------------------------------------------------------------------------- /ci/azure-job-test-all.yml: -------------------------------------------------------------------------------- 1 | parameters: 2 | toolchain: 
'nightly' 3 | vmImage: 'ubuntu-16.04' 4 | name: '' 5 | 6 | jobs: 7 | - job: ${{ parameters.name }} 8 | pool: 9 | vmImage: ${{ parameters.vmImage }} 10 | steps: 11 | - template: azure-install-rust.yml 12 | parameters: 13 | toolchain: ${{ parameters.toolchain }} 14 | - script: cargo test -vv --all --target ${{ parameters.target }} 15 | - script: cargo test -vv --all --release --target ${{ parameters.target }} 16 | -------------------------------------------------------------------------------- /ci/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -ex 4 | 5 | : "${TARGET?The TARGET environment variable must be set.}" 6 | 7 | CARGO="cargo" 8 | if [ "${CROSS}" = "1" ]; then 9 | CARGO=cross 10 | fi 11 | 12 | CMD="test" 13 | if [ "${NORUN}" = "1" ]; then 14 | CMD=build 15 | fi 16 | 17 | "${CARGO}" "${CMD}" -vv --all --target="${TARGET}" 18 | "${CARGO}" "${CMD}" -vv --all --target="${TARGET}" --release 19 | -------------------------------------------------------------------------------- /ci/tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -ex 4 | 5 | # rustfmt 6 | if rustup component add rustfmt-preview ; then 7 | cargo-fmt --version 8 | cargo fmt --all -- --check 9 | fi 10 | 11 | # clippy 12 | if rustup component add clippy-preview ; then 13 | cargo-clippy --version 14 | cargo clippy --all -- -D clippy::pedantic 15 | fi 16 | 17 | # sh-check 18 | if command -v shellcheck ; then 19 | shellcheck --version 20 | shellcheck ci/*.sh 21 | fi 22 | -------------------------------------------------------------------------------- /libtest/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["The Rust Project Developers"] 3 | name = "libtest" 4 | version = "0.0.1" 5 | edition = "2018" 6 | description = "Rust's built in unit-test and micro-benchmarking framework" 7 | 
license = "MIT/Apache-2.0" 8 | documentation = "https://docs.rs/libterm" 9 | homepage = "https://github.com/rust-lang/libtest" 10 | repository = "https://github.com/rust-lang/libtest" 11 | readme = "README.md" 12 | 13 | [lib] 14 | name = "libtest" 15 | path = "lib.rs" 16 | crate-type = ["dylib", "rlib"] 17 | 18 | [dependencies] 19 | getopts = "0.2" 20 | term = "0.5" -------------------------------------------------------------------------------- /libtest/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /libtest/formatters/json.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | pub(crate) struct JsonFormatter { 4 | out: OutputLocation, 5 | } 6 | 7 | impl JsonFormatter { 8 | pub fn new(out: OutputLocation) -> Self { 9 | Self { out } 10 | } 11 | 12 | fn write_message(&mut self, s: &str) -> io::Result<()> { 13 | assert!(!s.contains('\n')); 14 | 15 | self.out.write_all(s.as_ref())?; 16 | self.out.write_all(b"\n") 17 | } 18 | 19 | fn write_event( 20 | &mut self, 21 | ty: &str, 22 | name: &str, 23 | evt: &str, 24 | extra: Option, 25 | ) -> io::Result<()> { 26 | if let Some(extras) = extra { 27 | self.write_message(&*format!( 28 | r#"{{ "type": "{}", "name": "{}", "event": "{}", {} }}"#, 29 | ty, 30 | EscapedString(name), 31 | evt, 32 | extras 33 | )) 34 | } else { 35 | self.write_message(&*format!( 36 | r#"{{ "type": "{}", "name": "{}", "event": "{}" }}"#, 37 | ty, 38 | EscapedString(name), 39 | evt 40 | )) 41 | } 42 | } 43 | } 44 | 45 | impl OutputFormatter for JsonFormatter { 46 | fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { 47 | self.write_message(&*format!( 48 | r#"{{ "type": "suite", "event": "started", "test_count": {} }}"#, 49 | test_count 50 | )) 51 | } 52 | 53 | fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { 54 | 
self.write_message(&*format!( 55 | r#"{{ "type": "test", "event": "started", "name": "{}" }}"#, 56 | EscapedString(desc.name.as_slice()) 57 | )) 58 | } 59 | 60 | fn write_result( 61 | &mut self, 62 | desc: &TestDesc, 63 | result: &TestResult, 64 | stdout: &[u8], 65 | ) -> io::Result<()> { 66 | match *result { 67 | TestResult::TrOk => { 68 | self.write_event("test", desc.name.as_slice(), "ok", None) 69 | } 70 | 71 | TestResult::TrFailed => { 72 | let extra_data = if stdout.is_empty() { 73 | None 74 | } else { 75 | Some(format!( 76 | r#""stdout": "{}""#, 77 | EscapedString(String::from_utf8_lossy(stdout)) 78 | )) 79 | }; 80 | 81 | self.write_event( 82 | "test", 83 | desc.name.as_slice(), 84 | "failed", 85 | extra_data, 86 | ) 87 | } 88 | 89 | TestResult::TrFailedMsg(ref m) => self.write_event( 90 | "test", 91 | desc.name.as_slice(), 92 | "failed", 93 | Some(format!(r#""message": "{}""#, EscapedString(m))), 94 | ), 95 | 96 | TestResult::TrIgnored => { 97 | self.write_event("test", desc.name.as_slice(), "ignored", None) 98 | } 99 | 100 | TestResult::TrAllowedFail => self.write_event( 101 | "test", 102 | desc.name.as_slice(), 103 | "allowed_failure", 104 | None, 105 | ), 106 | 107 | TestResult::TrBench(ref bs) => { 108 | let median = bs.ns_iter_summ.median as usize; 109 | let deviation = 110 | (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; 111 | 112 | let mbps = if bs.mb_s == 0 { 113 | String::new() 114 | } else { 115 | format!(r#", "mib_per_second": {}"#, bs.mb_s) 116 | }; 117 | 118 | let line = format!( 119 | "{{ \"type\": \"bench\", \ 120 | \"name\": \"{}\", \ 121 | \"median\": {}, \ 122 | \"deviation\": {}{} }}", 123 | desc.name, median, deviation, mbps 124 | ); 125 | 126 | self.write_message(&*line) 127 | } 128 | } 129 | } 130 | 131 | fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { 132 | self.write_message(&*format!( 133 | r#"{{ "type": "test", "event": "timeout", "name": "{}" }}"#, 134 | desc.name 135 | )) 136 | } 137 | 138 | fn 
write_run_finish( 139 | &mut self, 140 | state: &ConsoleTestState, 141 | ) -> io::Result { 142 | self.write_message(&*format!( 143 | "{{ \"type\": \"suite\", \ 144 | \"event\": \"{}\", \ 145 | \"passed\": {}, \ 146 | \"failed\": {}, \ 147 | \"allowed_fail\": {}, \ 148 | \"ignored\": {}, \ 149 | \"measured\": {}, \ 150 | \"filtered_out\": {} }}", 151 | if state.failed == 0 { "ok" } else { "failed" }, 152 | state.passed, 153 | state.failed + state.allowed_fail, 154 | state.allowed_fail, 155 | state.ignored, 156 | state.measured, 157 | state.filtered_out 158 | ))?; 159 | 160 | Ok(state.failed == 0) 161 | } 162 | } 163 | 164 | /// A formatting utility used to print strings with characters in need of escaping. 165 | /// Base code taken form `libserialize::json::escape_str` 166 | struct EscapedString>(S); 167 | 168 | impl> ::std::fmt::Display for EscapedString { 169 | fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { 170 | let mut start = 0; 171 | 172 | for (i, byte) in self.0.as_ref().bytes().enumerate() { 173 | let escaped = match byte { 174 | b'"' => "\\\"", 175 | b'\\' => "\\\\", 176 | b'\x00' => "\\u0000", 177 | b'\x01' => "\\u0001", 178 | b'\x02' => "\\u0002", 179 | b'\x03' => "\\u0003", 180 | b'\x04' => "\\u0004", 181 | b'\x05' => "\\u0005", 182 | b'\x06' => "\\u0006", 183 | b'\x07' => "\\u0007", 184 | b'\x08' => "\\b", 185 | b'\t' => "\\t", 186 | b'\n' => "\\n", 187 | b'\x0b' => "\\u000b", 188 | b'\x0c' => "\\f", 189 | b'\r' => "\\r", 190 | b'\x0e' => "\\u000e", 191 | b'\x0f' => "\\u000f", 192 | b'\x10' => "\\u0010", 193 | b'\x11' => "\\u0011", 194 | b'\x12' => "\\u0012", 195 | b'\x13' => "\\u0013", 196 | b'\x14' => "\\u0014", 197 | b'\x15' => "\\u0015", 198 | b'\x16' => "\\u0016", 199 | b'\x17' => "\\u0017", 200 | b'\x18' => "\\u0018", 201 | b'\x19' => "\\u0019", 202 | b'\x1a' => "\\u001a", 203 | b'\x1b' => "\\u001b", 204 | b'\x1c' => "\\u001c", 205 | b'\x1d' => "\\u001d", 206 | b'\x1e' => "\\u001e", 207 | b'\x1f' => "\\u001f", 208 | 
b'\x7f' => "\\u007f", 209 | _ => { 210 | continue; 211 | } 212 | }; 213 | 214 | if start < i { 215 | f.write_str(&self.0.as_ref()[start..i])?; 216 | } 217 | 218 | f.write_str(escaped)?; 219 | 220 | start = i + 1; 221 | } 222 | 223 | if start != self.0.as_ref().len() { 224 | f.write_str(&self.0.as_ref()[start..])?; 225 | } 226 | 227 | Ok(()) 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /libtest/formatters/mod.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | mod json; 4 | mod pretty; 5 | mod terse; 6 | 7 | pub(crate) use self::json::JsonFormatter; 8 | pub(crate) use self::pretty::PrettyFormatter; 9 | pub(crate) use self::terse::TerseFormatter; 10 | 11 | pub(crate) trait OutputFormatter { 12 | fn write_run_start(&mut self, test_count: usize) -> io::Result<()>; 13 | fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()>; 14 | fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()>; 15 | fn write_result( 16 | &mut self, 17 | desc: &TestDesc, 18 | result: &TestResult, 19 | stdout: &[u8], 20 | ) -> io::Result<()>; 21 | fn write_run_finish( 22 | &mut self, 23 | state: &ConsoleTestState, 24 | ) -> io::Result; 25 | } 26 | -------------------------------------------------------------------------------- /libtest/formatters/pretty.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | pub(crate) struct PrettyFormatter { 4 | out: OutputLocation, 5 | use_color: bool, 6 | 7 | /// Number of columns to fill when aligning names 8 | max_name_len: usize, 9 | 10 | is_multithreaded: bool, 11 | } 12 | 13 | impl PrettyFormatter { 14 | pub fn new( 15 | out: OutputLocation, 16 | use_color: bool, 17 | max_name_len: usize, 18 | is_multithreaded: bool, 19 | ) -> Self { 20 | Self { 21 | out, 22 | use_color, 23 | max_name_len, 24 | is_multithreaded, 25 | } 26 | } 27 | 28 | #[cfg(test)] 29 | pub fn 
output_location(&self) -> &OutputLocation { 30 | &self.out 31 | } 32 | 33 | pub fn write_ok(&mut self) -> io::Result<()> { 34 | self.write_short_result("ok", term::color::GREEN) 35 | } 36 | 37 | pub fn write_failed(&mut self) -> io::Result<()> { 38 | self.write_short_result("FAILED", term::color::RED) 39 | } 40 | 41 | pub fn write_ignored(&mut self) -> io::Result<()> { 42 | self.write_short_result("ignored", term::color::YELLOW) 43 | } 44 | 45 | pub fn write_allowed_fail(&mut self) -> io::Result<()> { 46 | self.write_short_result("FAILED (allowed)", term::color::YELLOW) 47 | } 48 | 49 | pub fn write_bench(&mut self) -> io::Result<()> { 50 | self.write_pretty("bench", term::color::CYAN) 51 | } 52 | 53 | pub fn write_short_result( 54 | &mut self, 55 | result: &str, 56 | color: term::color::Color, 57 | ) -> io::Result<()> { 58 | self.write_pretty(result, color)?; 59 | self.write_plain("\n") 60 | } 61 | 62 | pub fn write_pretty( 63 | &mut self, 64 | word: &str, 65 | color: term::color::Color, 66 | ) -> io::Result<()> { 67 | match self.out { 68 | OutputLocation::Pretty(ref mut term) => { 69 | if self.use_color { 70 | term.fg(color)?; 71 | } 72 | term.write_all(word.as_bytes())?; 73 | if self.use_color { 74 | term.reset()?; 75 | } 76 | term.flush() 77 | } 78 | OutputLocation::Raw(ref mut stdout) => { 79 | stdout.write_all(word.as_bytes())?; 80 | stdout.flush() 81 | } 82 | } 83 | } 84 | 85 | pub fn write_plain>(&mut self, s: S) -> io::Result<()> { 86 | let s = s.as_ref(); 87 | self.out.write_all(s.as_bytes())?; 88 | self.out.flush() 89 | } 90 | 91 | pub fn write_successes( 92 | &mut self, 93 | state: &ConsoleTestState, 94 | ) -> io::Result<()> { 95 | self.write_plain("\nsuccesses:\n")?; 96 | let mut successes = Vec::new(); 97 | let mut stdouts = String::new(); 98 | for &(ref f, ref stdout) in &state.not_failures { 99 | successes.push(f.name.to_string()); 100 | if !stdout.is_empty() { 101 | stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); 102 | let output = 
String::from_utf8_lossy(stdout); 103 | stdouts.push_str(&output); 104 | stdouts.push_str("\n"); 105 | } 106 | } 107 | if !stdouts.is_empty() { 108 | self.write_plain("\n")?; 109 | self.write_plain(&stdouts)?; 110 | } 111 | 112 | self.write_plain("\nsuccesses:\n")?; 113 | successes.sort(); 114 | for name in &successes { 115 | self.write_plain(&format!(" {}\n", name))?; 116 | } 117 | Ok(()) 118 | } 119 | 120 | pub fn write_failures( 121 | &mut self, 122 | state: &ConsoleTestState, 123 | ) -> io::Result<()> { 124 | self.write_plain("\nfailures:\n")?; 125 | let mut failures = Vec::new(); 126 | let mut fail_out = String::new(); 127 | for &(ref f, ref stdout) in &state.failures { 128 | failures.push(f.name.to_string()); 129 | if !stdout.is_empty() { 130 | fail_out.push_str(&format!("---- {} stdout ----\n", f.name)); 131 | let output = String::from_utf8_lossy(stdout); 132 | fail_out.push_str(&output); 133 | fail_out.push_str("\n"); 134 | } 135 | } 136 | if !fail_out.is_empty() { 137 | self.write_plain("\n")?; 138 | self.write_plain(&fail_out)?; 139 | } 140 | 141 | self.write_plain("\nfailures:\n")?; 142 | failures.sort(); 143 | for name in &failures { 144 | self.write_plain(&format!(" {}\n", name))?; 145 | } 146 | Ok(()) 147 | } 148 | 149 | fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { 150 | let name = desc.padded_name(self.max_name_len, desc.name.padding()); 151 | self.write_plain(&format!("test {} ... ", name))?; 152 | 153 | Ok(()) 154 | } 155 | } 156 | 157 | impl OutputFormatter for PrettyFormatter { 158 | fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { 159 | let noun = if test_count == 1 { "test" } else { "tests" }; 160 | self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) 161 | } 162 | 163 | fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { 164 | // When running tests concurrently, we should not print 165 | // the test's name as the result will be mis-aligned. 
166 | // When running the tests serially, we print the name here so 167 | // that the user can see which test hangs. 168 | if !self.is_multithreaded { 169 | self.write_test_name(desc)?; 170 | } 171 | 172 | Ok(()) 173 | } 174 | 175 | fn write_result( 176 | &mut self, 177 | desc: &TestDesc, 178 | result: &TestResult, 179 | _: &[u8], 180 | ) -> io::Result<()> { 181 | if self.is_multithreaded { 182 | self.write_test_name(desc)?; 183 | } 184 | 185 | match *result { 186 | TestResult::TrOk => self.write_ok(), 187 | TestResult::TrFailed | TestResult::TrFailedMsg(_) => { 188 | self.write_failed() 189 | } 190 | TestResult::TrIgnored => self.write_ignored(), 191 | TestResult::TrAllowedFail => self.write_allowed_fail(), 192 | TestResult::TrBench(ref bs) => { 193 | self.write_bench()?; 194 | self.write_plain(&format!(": {}\n", fmt_bench_samples(bs))) 195 | } 196 | } 197 | } 198 | 199 | fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { 200 | if self.is_multithreaded { 201 | self.write_test_name(desc)?; 202 | } 203 | 204 | self.write_plain(&format!( 205 | "test {} has been running for over {} seconds\n", 206 | desc.name, TEST_WARN_TIMEOUT_S 207 | )) 208 | } 209 | 210 | fn write_run_finish( 211 | &mut self, 212 | state: &ConsoleTestState, 213 | ) -> io::Result { 214 | if state.options.display_output { 215 | self.write_successes(state)?; 216 | } 217 | let success = state.failed == 0; 218 | if !success { 219 | self.write_failures(state)?; 220 | } 221 | 222 | self.write_plain("\ntest result: ")?; 223 | 224 | if success { 225 | // There's no parallelism at this point so it's safe to use color 226 | self.write_pretty("ok", term::color::GREEN)?; 227 | } else { 228 | self.write_pretty("FAILED", term::color::RED)?; 229 | } 230 | 231 | let s = if state.allowed_fail > 0 { 232 | format!( 233 | ". 
{} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n", 234 | state.passed, 235 | state.failed + state.allowed_fail, 236 | state.allowed_fail, 237 | state.ignored, 238 | state.measured, 239 | state.filtered_out 240 | ) 241 | } else { 242 | format!( 243 | ". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n", 244 | state.passed, state.failed, state.ignored, state.measured, state.filtered_out 245 | ) 246 | }; 247 | 248 | self.write_plain(&s)?; 249 | 250 | Ok(success) 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /libtest/formatters/terse.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | 3 | pub(crate) struct TerseFormatter { 4 | out: OutputLocation, 5 | use_color: bool, 6 | is_multithreaded: bool, 7 | /// Number of columns to fill when aligning names 8 | max_name_len: usize, 9 | 10 | test_count: usize, 11 | total_test_count: usize, 12 | } 13 | 14 | impl TerseFormatter { 15 | pub fn new( 16 | out: OutputLocation, 17 | use_color: bool, 18 | max_name_len: usize, 19 | is_multithreaded: bool, 20 | ) -> Self { 21 | Self { 22 | out, 23 | use_color, 24 | max_name_len, 25 | is_multithreaded, 26 | test_count: 0, 27 | total_test_count: 0, // initialized later, when write_run_start is called 28 | } 29 | } 30 | 31 | pub fn write_ok(&mut self) -> io::Result<()> { 32 | self.write_short_result(".", term::color::GREEN) 33 | } 34 | 35 | pub fn write_failed(&mut self) -> io::Result<()> { 36 | self.write_short_result("F", term::color::RED) 37 | } 38 | 39 | pub fn write_ignored(&mut self) -> io::Result<()> { 40 | self.write_short_result("i", term::color::YELLOW) 41 | } 42 | 43 | pub fn write_allowed_fail(&mut self) -> io::Result<()> { 44 | self.write_short_result("a", term::color::YELLOW) 45 | } 46 | 47 | pub fn write_bench(&mut self) -> io::Result<()> { 48 | self.write_pretty("bench", term::color::CYAN) 49 | } 50 | 51 | pub fn 
write_short_result( 52 | &mut self, 53 | result: &str, 54 | color: term::color::Color, 55 | ) -> io::Result<()> { 56 | self.write_pretty(result, color)?; 57 | if self.test_count % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 58 | { 59 | // we insert a new line every 100 dots in order to flush the 60 | // screen when dealing with line-buffered output (e.g., piping to 61 | // `stamp` in the rust CI). 62 | let out = format!( 63 | " {}/{}\n", 64 | self.test_count + 1, 65 | self.total_test_count 66 | ); 67 | self.write_plain(&out)?; 68 | } 69 | 70 | self.test_count += 1; 71 | Ok(()) 72 | } 73 | 74 | pub fn write_pretty( 75 | &mut self, 76 | word: &str, 77 | color: term::color::Color, 78 | ) -> io::Result<()> { 79 | match self.out { 80 | OutputLocation::Pretty(ref mut term) => { 81 | if self.use_color { 82 | term.fg(color)?; 83 | } 84 | term.write_all(word.as_bytes())?; 85 | if self.use_color { 86 | term.reset()?; 87 | } 88 | term.flush() 89 | } 90 | OutputLocation::Raw(ref mut stdout) => { 91 | stdout.write_all(word.as_bytes())?; 92 | stdout.flush() 93 | } 94 | } 95 | } 96 | 97 | pub fn write_plain>(&mut self, s: S) -> io::Result<()> { 98 | let s = s.as_ref(); 99 | self.out.write_all(s.as_bytes())?; 100 | self.out.flush() 101 | } 102 | 103 | pub fn write_outputs( 104 | &mut self, 105 | state: &ConsoleTestState, 106 | ) -> io::Result<()> { 107 | self.write_plain("\nsuccesses:\n")?; 108 | let mut successes = Vec::new(); 109 | let mut stdouts = String::new(); 110 | for &(ref f, ref stdout) in &state.not_failures { 111 | successes.push(f.name.to_string()); 112 | if !stdout.is_empty() { 113 | stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); 114 | let output = String::from_utf8_lossy(stdout); 115 | stdouts.push_str(&output); 116 | stdouts.push_str("\n"); 117 | } 118 | } 119 | if !stdouts.is_empty() { 120 | self.write_plain("\n")?; 121 | self.write_plain(&stdouts)?; 122 | } 123 | 124 | self.write_plain("\nsuccesses:\n")?; 125 | successes.sort(); 126 | for 
name in &successes { 127 | self.write_plain(&format!(" {}\n", name))?; 128 | } 129 | Ok(()) 130 | } 131 | 132 | pub fn write_failures( 133 | &mut self, 134 | state: &ConsoleTestState, 135 | ) -> io::Result<()> { 136 | self.write_plain("\nfailures:\n")?; 137 | let mut failures = Vec::new(); 138 | let mut fail_out = String::new(); 139 | for &(ref f, ref stdout) in &state.failures { 140 | failures.push(f.name.to_string()); 141 | if !stdout.is_empty() { 142 | fail_out.push_str(&format!("---- {} stdout ----\n", f.name)); 143 | let output = String::from_utf8_lossy(stdout); 144 | fail_out.push_str(&output); 145 | fail_out.push_str("\n"); 146 | } 147 | } 148 | if !fail_out.is_empty() { 149 | self.write_plain("\n")?; 150 | self.write_plain(&fail_out)?; 151 | } 152 | 153 | self.write_plain("\nfailures:\n")?; 154 | failures.sort(); 155 | for name in &failures { 156 | self.write_plain(&format!(" {}\n", name))?; 157 | } 158 | Ok(()) 159 | } 160 | 161 | fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { 162 | let name = desc.padded_name(self.max_name_len, desc.name.padding()); 163 | self.write_plain(&format!("test {} ... ", name))?; 164 | 165 | Ok(()) 166 | } 167 | } 168 | 169 | impl OutputFormatter for TerseFormatter { 170 | fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { 171 | self.total_test_count = test_count; 172 | let noun = if test_count == 1 { "test" } else { "tests" }; 173 | self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) 174 | } 175 | 176 | fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { 177 | // Remnants from old libtest code that used the padding value 178 | // in order to indicate benchmarks. 179 | // When running benchmarks, terse-mode should still print their name as if 180 | // it is the Pretty formatter. 
181 | if !self.is_multithreaded 182 | && desc.name.padding() == NamePadding::PadOnRight 183 | { 184 | self.write_test_name(desc)?; 185 | } 186 | 187 | Ok(()) 188 | } 189 | 190 | fn write_result( 191 | &mut self, 192 | desc: &TestDesc, 193 | result: &TestResult, 194 | _: &[u8], 195 | ) -> io::Result<()> { 196 | match *result { 197 | TestResult::TrOk => self.write_ok(), 198 | TestResult::TrFailed | TestResult::TrFailedMsg(_) => { 199 | self.write_failed() 200 | } 201 | TestResult::TrIgnored => self.write_ignored(), 202 | TestResult::TrAllowedFail => self.write_allowed_fail(), 203 | TestResult::TrBench(ref bs) => { 204 | if self.is_multithreaded { 205 | self.write_test_name(desc)?; 206 | } 207 | self.write_bench()?; 208 | self.write_plain(&format!(": {}\n", fmt_bench_samples(bs))) 209 | } 210 | } 211 | } 212 | 213 | fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { 214 | self.write_plain(&format!( 215 | "test {} has been running for over {} seconds\n", 216 | desc.name, TEST_WARN_TIMEOUT_S 217 | )) 218 | } 219 | 220 | fn write_run_finish( 221 | &mut self, 222 | state: &ConsoleTestState, 223 | ) -> io::Result { 224 | if state.options.display_output { 225 | self.write_outputs(state)?; 226 | } 227 | let success = state.failed == 0; 228 | if !success { 229 | self.write_failures(state)?; 230 | } 231 | 232 | self.write_plain("\ntest result: ")?; 233 | 234 | if success { 235 | // There's no parallelism at this point so it's safe to use color 236 | self.write_pretty("ok", term::color::GREEN)?; 237 | } else { 238 | self.write_pretty("FAILED", term::color::RED)?; 239 | } 240 | 241 | let s = if state.allowed_fail > 0 { 242 | format!( 243 | ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n", 244 | state.passed, 245 | state.failed + state.allowed_fail, 246 | state.allowed_fail, 247 | state.ignored, 248 | state.measured, 249 | state.filtered_out 250 | ) 251 | } else { 252 | format!( 253 | ". 
{} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n", 254 | state.passed, state.failed, state.ignored, state.measured, state.filtered_out 255 | ) 256 | }; 257 | 258 | self.write_plain(&s)?; 259 | 260 | Ok(success) 261 | } 262 | } 263 | -------------------------------------------------------------------------------- /libtest/lib.rs: -------------------------------------------------------------------------------- 1 | //! Rust's built-in unit-test and micro-benchmarking framework. 2 | #![cfg_attr(any(unix, target_os = "cloudabi", target_os = "fuchsia"), feature(libc, rustc_private))] 3 | #![feature(fnbox)] 4 | #![feature(set_stdio)] 5 | #![feature(panic_unwind)] 6 | #![feature(termination_trait_lib)] 7 | #![feature(test)] 8 | #![deny(rust_2018_idioms)] 9 | #![allow( 10 | clippy::pub_enum_variant_names, 11 | clippy::cast_possible_truncation, 12 | clippy::cast_sign_loss, 13 | clippy::cast_precision_loss 14 | )] 15 | 16 | use getopts; 17 | 18 | extern crate test; 19 | 20 | #[cfg(any(unix, target_os = "cloudabi", target_os = "fuchsia"))] 21 | extern crate libc; 22 | 23 | // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind 24 | // on aarch64-pc-windows-msvc, so we don't link libtest against 25 | // libunwind (for the time being), even though it means that 26 | // libtest won't be fully functional on this platform. 
27 | // 28 | // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437 29 | #[cfg(not(all(windows, target_arch = "aarch64")))] 30 | extern crate panic_unwind; 31 | 32 | use std::{ 33 | any::Any, 34 | borrow::Cow, 35 | boxed::FnBox, 36 | cmp, 37 | collections::BTreeMap, 38 | env, fmt, 39 | fs::File, 40 | io::{self, prelude::*}, 41 | panic::{catch_unwind, AssertUnwindSafe}, 42 | path::PathBuf, 43 | process::{self, Termination}, 44 | sync::{ 45 | mpsc::{channel, Sender}, 46 | Arc, Mutex, 47 | }, 48 | thread, 49 | time::{Duration, Instant}, 50 | }; 51 | 52 | const TEST_WARN_TIMEOUT_S: u64 = 60; 53 | const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode 54 | 55 | mod formatters; 56 | pub mod stats; 57 | 58 | use crate::formatters::{ 59 | JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter, 60 | }; 61 | 62 | /// Whether to execute tests concurrently or not 63 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 64 | pub enum Concurrent { 65 | Yes, 66 | No, 67 | } 68 | 69 | // The name of a test. By convention this follows the rules for rust 70 | // paths; i.e., it should be a series of identifiers separated by double 71 | // colons. This way if some test runner wants to arrange the tests 72 | // hierarchically it may. 
73 | 74 | #[derive(Clone, PartialEq, Eq, Hash, Debug)] 75 | pub enum TestName { 76 | StaticTestName(&'static str), 77 | DynTestName(String), 78 | AlignedTestName(Cow<'static, str>, NamePadding), 79 | } 80 | impl TestName { 81 | fn as_slice(&self) -> &str { 82 | match *self { 83 | TestName::StaticTestName(s) => s, 84 | TestName::DynTestName(ref s) => s, 85 | TestName::AlignedTestName(ref s, _) => &*s, 86 | } 87 | } 88 | 89 | fn padding(&self) -> NamePadding { 90 | match self { 91 | TestName::AlignedTestName(_, p) => *p, 92 | _ => NamePadding::PadNone, 93 | } 94 | } 95 | 96 | fn with_padding(&self, padding: NamePadding) -> Self { 97 | let name: Cow<'static, str> = match self { 98 | TestName::StaticTestName(name) => Cow::Borrowed(name), 99 | TestName::DynTestName(name) => Cow::Owned(name.to_owned()), 100 | TestName::AlignedTestName(name, _) => name.clone(), 101 | }; 102 | 103 | TestName::AlignedTestName(name, padding) 104 | } 105 | } 106 | impl fmt::Display for TestName { 107 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 108 | fmt::Display::fmt(self.as_slice(), f) 109 | } 110 | } 111 | 112 | #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] 113 | pub enum NamePadding { 114 | PadNone, 115 | PadOnRight, 116 | } 117 | 118 | impl TestDesc { 119 | fn padded_name(&self, column_count: usize, align: NamePadding) -> String { 120 | let mut name = String::from(self.name.as_slice()); 121 | let fill = column_count.saturating_sub(name.len()); 122 | let pad = " ".repeat(fill); 123 | match align { 124 | NamePadding::PadNone => name, 125 | NamePadding::PadOnRight => { 126 | name.push_str(&pad); 127 | name 128 | } 129 | } 130 | } 131 | } 132 | 133 | /// Represents a benchmark function. 134 | pub trait TDynBenchFn: Send { 135 | fn run(&self, harness: &mut Bencher); 136 | } 137 | 138 | // A function that runs a test. If the function returns successfully, 139 | // the test succeeds; if the function panics then the test fails. 
We 140 | // may need to come up with a more clever definition of test in order 141 | // to support isolation of tests into threads. 142 | pub enum TestFn { 143 | StaticTestFn(fn()), 144 | StaticBenchFn(fn(&mut Bencher)), 145 | DynTestFn(Box), 146 | DynBenchFn(Box), 147 | } 148 | 149 | impl TestFn { 150 | fn padding(&self) -> NamePadding { 151 | match *self { 152 | TestFn::StaticTestFn(..) | TestFn::DynTestFn(..) => { 153 | NamePadding::PadNone 154 | } 155 | TestFn::StaticBenchFn(..) | TestFn::DynBenchFn(..) => { 156 | NamePadding::PadOnRight 157 | } 158 | } 159 | } 160 | } 161 | 162 | impl fmt::Debug for TestFn { 163 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 164 | f.write_str(match *self { 165 | TestFn::StaticTestFn(..) => "StaticTestFn(..)", 166 | TestFn::StaticBenchFn(..) => "StaticBenchFn(..)", 167 | TestFn::DynTestFn(..) => "DynTestFn(..)", 168 | TestFn::DynBenchFn(..) => "DynBenchFn(..)", 169 | }) 170 | } 171 | } 172 | 173 | /// Manager of the benchmarking runs. 174 | /// 175 | /// This is fed into functions marked with `#[bench]` to allow for 176 | /// set-up & tear-down before running a piece of code repeatedly via a 177 | /// call to `iter`. 178 | #[derive(Clone)] 179 | pub struct Bencher { 180 | mode: BenchMode, 181 | summary: Option, 182 | pub bytes: u64, 183 | } 184 | 185 | #[derive(Clone, PartialEq, Eq)] 186 | pub enum BenchMode { 187 | Auto, 188 | Single, 189 | } 190 | 191 | #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] 192 | pub enum ShouldPanic { 193 | No, 194 | Yes, 195 | YesWithMessage(&'static str), 196 | } 197 | 198 | // The definition of a single test. A test runner will run a list of 199 | // these. 
200 | #[derive(Clone, Debug, PartialEq, Eq, Hash)] 201 | pub struct TestDesc { 202 | pub name: TestName, 203 | pub ignore: bool, 204 | pub should_panic: ShouldPanic, 205 | pub allow_fail: bool, 206 | } 207 | 208 | #[derive(Debug)] 209 | pub struct TestDescAndFn { 210 | pub desc: TestDesc, 211 | pub testfn: TestFn, 212 | } 213 | 214 | #[derive(Clone, PartialEq, Debug, Copy)] 215 | pub struct Metric { 216 | value: f64, 217 | noise: f64, 218 | } 219 | 220 | impl Metric { 221 | pub fn new(value: f64, noise: f64) -> Self { 222 | Self { value, noise } 223 | } 224 | } 225 | 226 | /// In case we want to add other options as well, just add them in this struct. 227 | #[derive(Copy, Clone, Debug, Default)] 228 | pub struct Options { 229 | display_output: bool, 230 | } 231 | 232 | impl Options { 233 | pub fn new() -> Self { 234 | Self::default() 235 | } 236 | 237 | pub fn display_output(mut self, display_output: bool) -> Self { 238 | self.display_output = display_output; 239 | self 240 | } 241 | } 242 | 243 | // The default console test runner. It accepts the command line 244 | // arguments and a vector of test_descs. 245 | pub fn test_main( 246 | args: &[String], 247 | tests: Vec, 248 | options: Options, 249 | ) { 250 | let mut opts = match parse_opts(args) { 251 | Some(Ok(o)) => o, 252 | Some(Err(msg)) => { 253 | eprintln!("error: {}", msg); 254 | process::exit(101); 255 | } 256 | None => return, 257 | }; 258 | 259 | opts.options = options; 260 | if opts.list { 261 | if let Err(e) = list_tests_console(&opts, tests) { 262 | eprintln!("error: io error when listing tests: {:?}", e); 263 | process::exit(101); 264 | } 265 | } else { 266 | match run_tests_console(&opts, tests) { 267 | Ok(true) => {} 268 | Ok(false) => process::exit(101), 269 | Err(e) => { 270 | eprintln!("error: io error when listing tests: {:?}", e); 271 | process::exit(101); 272 | } 273 | } 274 | } 275 | } 276 | 277 | // A variant optimized for invocation with a static test vector. 
278 | // This will panic (intentionally) when fed any dynamic tests, because 279 | // it is copying the static values out into a dynamic vector and cannot 280 | // copy dynamic values. It is doing this because from this point on 281 | // a Vec is used in order to effect ownership-transfer 282 | // semantics into parallel test runners, which in turn requires a Vec<> 283 | // rather than a &[]. 284 | pub fn test_main_static(tests: &[&TestDescAndFn]) { 285 | let args = env::args().collect::>(); 286 | let owned_tests = tests 287 | .iter() 288 | .map(|t| match t.testfn { 289 | TestFn::StaticTestFn(f) => TestDescAndFn { 290 | testfn: TestFn::StaticTestFn(f), 291 | desc: t.desc.clone(), 292 | }, 293 | TestFn::StaticBenchFn(f) => TestDescAndFn { 294 | testfn: TestFn::StaticBenchFn(f), 295 | desc: t.desc.clone(), 296 | }, 297 | _ => panic!("non-static tests passed to test::test_main_static"), 298 | }) 299 | .collect(); 300 | test_main(&args, owned_tests, Options::new()) 301 | } 302 | 303 | /// Invoked when unit tests terminate. Should panic if the unit 304 | /// Tests is considered a failure. By default, invokes `report()` 305 | /// and checks for a `0` result. 
pub fn assert_test_result<T: Termination>(result: T) { 307 | let code = result.report(); 308 | if code != 0 { 309 | panic!( 310 | "the test returned a termination value with a non-zero status code ({}) \ 311 | which indicates a failure (this most likely means your test returned \ 312 | an `Err(_)` value)", 313 | code, 314 | ); 315 | } 316 | } 317 | 318 | #[derive(Copy, Clone, Debug)] 319 | pub enum ColorConfig { 320 | AutoColor, 321 | AlwaysColor, 322 | NeverColor, 323 | } 324 | 325 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 326 | pub enum OutputFormat { 327 | Pretty, 328 | Terse, 329 | Json, 330 | } 331 | 332 | #[derive(Copy, Clone, Debug, PartialEq, Eq)] 333 | pub enum RunIgnored { 334 | Yes, 335 | No, 336 | Only, 337 | } 338 | 339 | #[derive(Debug)] 340 | pub struct TestOpts { 341 | pub list: bool, 342 | pub filter: Option<String>, 343 | pub filter_exact: bool, 344 | pub exclude_should_panic: bool, 345 | pub run_ignored: RunIgnored, 346 | pub run_tests: bool, 347 | pub bench_benchmarks: bool, 348 | pub logfile: Option<PathBuf>, 349 | pub nocapture: bool, 350 | pub color: ColorConfig, 351 | pub format: OutputFormat, 352 | pub test_threads: Option<usize>, 353 | pub skip: Vec<String>, 354 | pub options: Options, 355 | } 356 | 357 | impl TestOpts { 358 | #[cfg(test)] 359 | fn new() -> TestOpts { 360 | TestOpts { 361 | list: false, 362 | filter: None, 363 | filter_exact: false, 364 | exclude_should_panic: false, 365 | run_ignored: RunIgnored::No, 366 | run_tests: false, 367 | bench_benchmarks: false, 368 | logfile: None, 369 | nocapture: false, 370 | color: ColorConfig::AutoColor, 371 | format: OutputFormat::Pretty, 372 | test_threads: None, 373 | skip: vec![], 374 | options: Options::new(), 375 | } 376 | } 377 | } 378 | 379 | /// Result of parsing the options. 
380 | pub type OptRes = Result; 381 | 382 | fn optgroups() -> getopts::Options { 383 | let mut opts = getopts::Options::new(); 384 | opts.optflag("", "include-ignored", "Run ignored and not ignored tests") 385 | .optflag("", "ignored", "Run only ignored tests") 386 | .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic") 387 | .optflag("", "test", "Run tests and not benchmarks") 388 | .optflag("", "bench", "Run benchmarks instead of tests") 389 | .optflag("", "list", "List all tests and benchmarks") 390 | .optflag("h", "help", "Display this message (longer with --help)") 391 | .optopt( 392 | "", 393 | "logfile", 394 | "Write logs to the specified file instead \ 395 | of stdout", 396 | "PATH", 397 | ) 398 | .optflag( 399 | "", 400 | "nocapture", 401 | "don't capture stdout/stderr of each \ 402 | task, allow printing directly", 403 | ) 404 | .optopt( 405 | "", 406 | "test-threads", 407 | "Number of threads used for running tests \ 408 | in parallel", 409 | "n_threads", 410 | ) 411 | .optmulti( 412 | "", 413 | "skip", 414 | "Skip tests whose names contain FILTER (this flag can \ 415 | be used multiple times)", 416 | "FILTER", 417 | ) 418 | .optflag( 419 | "q", 420 | "quiet", 421 | "Display one character per test instead of one line. 
\ 422 | Alias to --format=terse", 423 | ) 424 | .optflag( 425 | "", 426 | "exact", 427 | "Exactly match filters rather than by substring", 428 | ) 429 | .optopt( 430 | "", 431 | "color", 432 | "Configure coloring of output: 433 | auto = colorize if stdout is a tty and tests are run on serially (default); 434 | always = always colorize output; 435 | never = never colorize output;", 436 | "auto|always|never", 437 | ) 438 | .optopt( 439 | "", 440 | "format", 441 | "Configure formatting of output: 442 | pretty = Print verbose output; 443 | terse = Display one character per test; 444 | json = Output a json document", 445 | "pretty|terse|json", 446 | ) 447 | .optopt( 448 | "Z", 449 | "", 450 | "Enable nightly-only flags: 451 | unstable-options = Allow use of experimental features", 452 | "unstable-options", 453 | ); 454 | opts 455 | } 456 | 457 | fn usage(binary: &str, options: &getopts::Options) { 458 | let message = format!("Usage: {} [OPTIONS] [FILTER]", binary); 459 | println!( 460 | r#"{usage} 461 | 462 | The FILTER string is tested against the name of all tests, and only those 463 | tests whose names contain the filter are run. 464 | 465 | By default, all tests are run in parallel. This can be altered with the 466 | --test-threads flag or the RUST_TEST_THREADS environment variable when running 467 | tests (set it to 1). 468 | 469 | All tests have their standard output and standard error captured by default. 470 | This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE 471 | environment variable to a value other than "0". Logging is not captured by default. 472 | 473 | Test Attributes: 474 | 475 | #[test] - Indicates a function is a test to be run. This function 476 | takes no arguments. 477 | #[bench] - Indicates a function is a benchmark to be run. This 478 | function takes one argument (test::Bencher). 
479 | #[should_panic] - This function (also labeled with #[test]) will only pass if 480 | the code causes a panic (an assertion failure or panic!) 481 | A message may be provided, which the failure string must 482 | contain: #[should_panic(expected = "foo")]. 483 | #[ignore] - When applied to a function which is already attributed as a 484 | test, then the test runner will ignore these tests during 485 | normal test runs. Running with --ignored or --include-ignored will run 486 | these tests."#, 487 | usage = options.usage(&message) 488 | ); 489 | } 490 | 491 | // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566 492 | fn is_nightly() -> bool { 493 | // Whether this is a feature-staged build, i.e., on the beta or stable channel 494 | let disable_unstable_features = 495 | option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); 496 | // Whether we should enable unstable features for bootstrapping 497 | let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); 498 | 499 | bootstrap || !disable_unstable_features 500 | } 501 | 502 | // Parses command line arguments into test options 503 | pub fn parse_opts(args: &[String]) -> Option { 504 | let mut allow_unstable = false; 505 | let opts = optgroups(); 506 | let args = args.get(1..).unwrap_or(args); 507 | let matches = match opts.parse(args) { 508 | Ok(m) => m, 509 | Err(f) => return Some(Err(f.to_string())), 510 | }; 511 | 512 | if let Some(opt) = matches.opt_str("Z") { 513 | if !is_nightly() { 514 | return Some(Err( 515 | "the option `Z` is only accepted on the nightly compiler" 516 | .into(), 517 | )); 518 | } 519 | 520 | if let "unstable-options" = &*opt { 521 | allow_unstable = true; 522 | } else { 523 | return Some(Err("Unrecognized option to `Z`".into())); 524 | } 525 | }; 526 | 527 | if matches.opt_present("h") { 528 | usage(&args[0], &opts); 529 | return None; 530 | } 531 | 532 | let filter = if matches.free.is_empty() { 533 | None 534 | } else { 535 | Some(matches.free[0].clone()) 536 | }; 
// NOTE(review): this is the tail of an option-parsing function whose head
// precedes this chunk. It returns Some(Err(message)) for invalid or
// nightly-only flags and Some(Ok(TestOpts)) on success.
// NOTE(review): generic parameters appear to have been stripped by the dump
// this file came from (e.g. `parse::()`, `Option,`, `Vec<(TestDesc, Vec)>`);
// restore them from upstream before trying to compile — TODO confirm.

    // --exclude-should-panic is gated on nightly (`allow_unstable`).
    let exclude_should_panic = matches.opt_present("exclude-should-panic");
    if !allow_unstable && exclude_should_panic {
        return Some(Err(
            "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
        ));
    }

    // --include-ignored is likewise nightly-only.
    let include_ignored = matches.opt_present("include-ignored");
    if !allow_unstable && include_ignored {
        return Some(Err(
            "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
        ));
    }

    // --include-ignored and --ignored are mutually exclusive; the pair of
    // booleans maps onto the three-valued RunIgnored mode.
    let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
        (true, true) => {
            return Some(Err(
                "the options --include-ignored and --ignored are mutually exclusive".into(),
            ));
        }
        (true, false) => RunIgnored::Yes,
        (false, true) => RunIgnored::Only,
        (false, false) => RunIgnored::No,
    };
    let quiet = matches.opt_present("quiet");
    let exact = matches.opt_present("exact");
    let list = matches.opt_present("list");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::from(&s));

    // --bench runs benchmarks; tests still run unless benchmarking was
    // requested without an explicit --test.
    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");

    // The flag wins; otherwise RUST_TEST_NOCAPTURE enables capture-disabling
    // for any value other than the literal "0".
    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = match env::var("RUST_TEST_NOCAPTURE") {
            Ok(val) => &val != "0",
            Err(_) => false,
        };
    }

    // --test-threads must parse as a number and must not be 0.
    let test_threads = match matches.opt_str("test-threads") {
        Some(n_str) => match n_str.parse::() {
            Ok(0) => {
                return Some(Err(
                    "argument for --test-threads must not be 0".to_string()
                ))
            }
            Ok(n) => Some(n),
            Err(e) => {
                return Some(Err(format!(
                    "argument for --test-threads must be a number > 0 \
                     (error: {})",
                    e
                )));
            }
        },
        None => None,
    };

    // --color: auto (default) / always / never.
    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => ColorConfig::AutoColor,
        Some("always") => ColorConfig::AlwaysColor,
        Some("never") => ColorConfig::NeverColor,

        Some(v) => {
            return Some(Err(format!(
                "argument for --color must be auto, always, or never (was \
                 {})",
                v
            )));
        }
    };

    // --format: pretty (default), terse, or json (nightly only).
    // With no explicit format, --quiet selects terse output.
    let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
        None if quiet => OutputFormat::Terse,
        Some("pretty") | None => OutputFormat::Pretty,
        Some("terse") => OutputFormat::Terse,
        Some("json") => {
            if !allow_unstable {
                return Some(Err(
                    "The \"json\" format is only accepted on the nightly compiler".into(),
                ));
            }
            OutputFormat::Json
        }

        Some(v) => {
            return Some(Err(format!(
                "argument for --format must be pretty, terse, or json (was \
                 {})",
                v
            )));
        }
    };

    let test_opts = TestOpts {
        list,
        filter,
        filter_exact: exact,
        exclude_should_panic,
        run_ignored,
        run_tests,
        bench_benchmarks,
        logfile,
        nocapture,
        color,
        format,
        test_threads,
        skip: matches.opt_strs("skip"),
        options: Options::new(),
    };

    Some(Ok(test_opts))
}

/// One benchmark's measurements: the iteration-time summary plus derived
/// throughput in MB/s (0 when the benchmark declared no byte count).
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary,
    mb_s: usize,
}

/// Outcome of running a single test or benchmark.
#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrFailedMsg(String),
    TrIgnored,
    TrAllowedFail,
    TrBench(BenchSamples),
}

// NOTE(review): this `unsafe impl Send` looks redundant — every field type
// visible here (Summary of f64s, String, usize) should be Send
// automatically. Confirm against the real field types before removing.
unsafe impl Send for TestResult {}

/// Where console output goes: a terminal handle (Pretty) or a plain
/// writer (Raw), e.g. non-tty stdout or an in-memory buffer in tests.
enum OutputLocation {
    Pretty(Box),
    Raw(T),
}

// Forward Write straight through to whichever sink is active.
impl Write for OutputLocation {
    fn write(&mut self, buf: &[u8]) -> io::Result {
        match *self {
            OutputLocation::Pretty(ref mut term) => term.write(buf),
            OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match *self {
            OutputLocation::Pretty(ref mut term) => term.flush(),
            OutputLocation::Raw(ref mut stdout) => stdout.flush(),
        }
    }
}

/// Mutable state accumulated by the console runner: per-category result
/// counters, collected metrics, captured output of failures, and the
/// optional logfile handle.
struct ConsoleTestState {
    log_out: Option,
    total: usize,
    passed: usize,
    failed: usize,
    ignored: usize,
    allowed_fail: usize,
    filtered_out: usize,
    measured: usize,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec)>,
    not_failures: Vec<(TestDesc, Vec)>,
    options: Options,
}

impl ConsoleTestState {
    /// Creates a zeroed state; opens the logfile eagerly when one was
    /// requested (so an unwritable path fails up front).
    pub fn new(opts: &TestOpts) -> io::Result {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
            None => None,
        };

        Ok(Self {
            log_out,
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            allowed_fail: 0,
            filtered_out: 0,
            measured: 0,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            not_failures: Vec::new(),
            options: opts.options,
        })
    }

    /// Appends `msg` to the logfile; silently a no-op when no logfile
    /// was configured.
    pub fn write_log>(&mut self, msg: S) -> io::Result<()> {
        let msg = msg.as_ref();
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => o.write_all(msg.as_bytes()),
        }
    }

    /// Logs one "<status> <name>" line for a finished test.
    pub fn write_log_result(
        &mut self,
        test: &TestDesc,
        result: &TestResult,
    ) -> io::Result<()> {
        self.write_log(format!(
            "{} {}\n",
            match *result {
                TestResult::TrOk => "ok".to_owned(),
                TestResult::TrFailed => "failed".to_owned(),
                TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg),
                TestResult::TrIgnored => "ignored".to_owned(),
                TestResult::TrAllowedFail => "failed (allowed)".to_owned(),
                TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
            },
            test.name
        ))
    }

    fn
current_test_count(&self) -> usize {
        // Sum of every category the console callback counts; compared
        // against `total` after the run to catch lost results.
        self.passed
            + self.failed
            + self.ignored
            + self.measured
            + self.allowed_fail
    }
}

// Format a number with thousands separators, e.g. 1234567 -> "1,234,567".
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    // Collect three-digit groups from least to most significant. This
    // handles any usize; the previous fixed power table [9, 6, 3, 0] left
    // the leading group of values >= 10^12 unseparated
    // (e.g. 1_234_000_000_000 -> "1234,000,000,000").
    let mut groups = [0_usize; 7]; // 7 groups cover the 20 digits of u64::MAX
    let mut count = 0;
    loop {
        groups[count] = n % 1000;
        count += 1;
        n /= 1000;
        if n == 0 {
            break;
        }
    }
    let mut output = String::new();
    // The most significant group is printed unpadded; every following
    // group is zero-padded to exactly three digits.
    for (i, &g) in groups[..count].iter().rev().enumerate() {
        if i == 0 {
            output.write_fmt(format_args!("{}", g)).unwrap();
        } else {
            output.push(sep);
            output.write_fmt(format_args!("{:03}", g)).unwrap();
        }
    }

    output
}

/// Renders one benchmark result as "<median> ns/iter (+/- <spread>)",
/// appending " = N MB/s" when a throughput figure is available.
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    use std::fmt::Write;
    let mut output = String::new();

    let median = bs.ns_iter_summ.median as usize;
    // Spread is max - min of the iteration-time summary.
    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;

    output
        .write_fmt(format_args!(
            "{:>11} ns/iter (+/- {})",
            fmt_thousands_sep(median, ','),
            fmt_thousands_sep(deviation, ',')
        ))
        .unwrap();
    if bs.mb_s != 0 {
        output
            .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
            .unwrap();
    }
    output
}

// List the tests to console, and optionally to logfile. Filters are honored.
/// Prints "name: test|benchmark" for every test surviving the filters,
/// mirrors each line to the logfile, and (unless terse) appends a
/// "N tests, M benchmarks" summary.
pub fn list_tests_console(
    opts: &TestOpts,
    tests: Vec,
) -> io::Result<()> {
    // "1 test" / "n tests" — naive English pluralisation.
    fn plural(count: u32, s: &str) -> String {
        match count {
            1 => format!("{} {}", 1, s),
            n => format!("{} {}s", n, s),
        }
    }

    let mut output = match term::stdout() {
        None => OutputLocation::Raw(io::stdout()),
        Some(t) => OutputLocation::Pretty(t),
    };

    // Terse format suppresses the trailing summary line below.
    let quiet = opts.format == OutputFormat::Terse;
    let mut st = ConsoleTestState::new(opts)?;

    let mut ntest = 0;
    let mut nbench = 0;

    for test in filter_tests(&opts, tests) {
        let TestDescAndFn {
            desc: TestDesc { name, .. },
            testfn,
        } = test;

        // Classify by the function variant and bump the matching counter.
        let fntype = match testfn {
            TestFn::StaticTestFn(..) | TestFn::DynTestFn(..) => {
                ntest += 1;
                "test"
            }
            TestFn::StaticBenchFn(..) | TestFn::DynBenchFn(..) => {
                nbench += 1;
                "benchmark"
            }
        };

        writeln!(output, "{}: {}", name, fntype)?;
        st.write_log(format!("{} {}\n", fntype, name))?;
    }

    if !quiet {
        // Blank separator line only when something was listed.
        if ntest != 0 || nbench != 0 {
            writeln!(output)?;
        }

        writeln!(
            output,
            "{}, {}",
            plural(ntest, "test"),
            plural(nbench, "benchmark")
        )?;
    }

    Ok(())
}

// A simple console test runner
pub fn run_tests_console(
    opts: &TestOpts,
    tests: Vec,
) -> io::Result {
    // Translates scheduler events into formatter output and accumulates
    // counters/failure captures in `st`.
    fn callback(
        event: &TestEvent,
        st: &mut ConsoleTestState,
        out: &mut dyn OutputFormatter,
    ) -> io::Result<()> {
        match (*event).clone() {
            TestEvent::TeFiltered(ref filtered_tests) => {
                st.total = filtered_tests.len();
                out.write_run_start(filtered_tests.len())
            }
            TestEvent::TeFilteredOut(filtered_out) => {
                st.filtered_out = filtered_out;
                Ok(())
            }
            TestEvent::TeWait(ref test) => out.write_test_start(test),
TestEvent::TeTimeout(ref test) => out.write_timeout(test),
            TestEvent::TeResult(test, result, stdout) => {
                // Log first, then render, then bucket the result.
                st.write_log_result(&test, &result)?;
                out.write_result(&test, &result, &*stdout)?;
                match result {
                    TestResult::TrOk => {
                        st.passed += 1;
                        st.not_failures.push((test, stdout));
                    }
                    TestResult::TrIgnored => st.ignored += 1,
                    TestResult::TrAllowedFail => st.allowed_fail += 1,
                    TestResult::TrBench(bs) => {
                        // Record median +/- spread as a named metric.
                        st.metrics.insert_metric(
                            test.name.as_slice(),
                            bs.ns_iter_summ.median,
                            bs.ns_iter_summ.max - bs.ns_iter_summ.min,
                        );
                        st.measured += 1
                    }
                    TestResult::TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                    TestResult::TrFailedMsg(msg) => {
                        // Append the failure note to the captured output so
                        // it is shown alongside the test's own stdout.
                        st.failed += 1;
                        let mut stdout = stdout;
                        stdout.extend_from_slice(
                            format!("note: {}", msg).as_bytes(),
                        );
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    // Width a test name contributes to column alignment: only names that
    // requested right-padding count; PadNone contributes 0.
    fn len_if_padded(t: &TestDescAndFn) -> usize {
        match t.testfn.padding() {
            NamePadding::PadNone => 0,
            NamePadding::PadOnRight => t.desc.name.as_slice().len(),
        }
    }

    let output = match term::stdout() {
        None => OutputLocation::Raw(io::stdout()),
        Some(t) => OutputLocation::Pretty(t),
    };

    // Use the same measure (`len_if_padded`) for both selecting and
    // reporting the maximum. The previous code selected the max by
    // `len_if_padded` but then reported the winner's *raw* name length,
    // which produced a spurious nonzero width when every test is PadNone.
    let max_name_len = tests
        .iter()
        .map(|t| len_if_padded(t))
        .max()
        .unwrap_or(0);

    let is_multithreaded =
        opts.test_threads.unwrap_or_else(get_concurrency) > 1;

    // Pick the formatter implementation requested on the command line.
    let mut out: Box = match opts.format {
        OutputFormat::Pretty => Box::new(PrettyFormatter::new(
            output,
            use_color(opts),
            max_name_len,
            is_multithreaded,
        )),
        OutputFormat::Terse => Box::new(TerseFormatter::new(
            output,
            use_color(opts),
            max_name_len,
            is_multithreaded,
        )),
        OutputFormat::Json =>
Box::new(JsonFormatter::new(output)),
    };
    let mut st = ConsoleTestState::new(opts)?;

    // Drive the scheduler; every event flows through the callback above.
    run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;

    // Every filtered-in test must have produced exactly one result.
    assert!(st.current_test_count() == st.total);

    out.write_run_finish(&st)
}

#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: TestName::StaticTestName("a"),
        ignore: false,
        should_panic: ShouldPanic::No,
        allow_fail: false,
    };

    let test_b = TestDesc {
        name: TestName::StaticTestName("b"),
        ignore: false,
        should_panic: ShouldPanic::No,
        allow_fail: false,
    };

    // Raw(Vec) captures the formatter output in memory for inspection.
    let mut out = PrettyFormatter::new(
        OutputLocation::Raw(Vec::new()),
        false,
        10,
        false,
    );

    let st = ConsoleTestState {
        log_out: None,
        total: 0,
        passed: 0,
        failed: 0,
        ignored: 0,
        allowed_fail: 0,
        filtered_out: 0,
        measured: 0,
        metrics: MetricMap::new(),
        // Deliberately inserted out of order ("b" before "a"); the
        // assertion below relies on write_failures sorting them.
        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
        options: Options::new(),
        not_failures: Vec::new(),
    };

    out.write_failures(&st).unwrap();
    let s = match out.output_location() {
        &OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]),
        &OutputLocation::Pretty(_) => unreachable!(),
    };

    let apos = s.find("a").unwrap();
    let bpos = s.find("b").unwrap();
    assert!(apos < bpos);
}

/// Whether console output should be colorized: always/never as requested,
/// or — in auto mode — only when output is uncaptured and stdout is a tty.
fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        ColorConfig::AutoColor => !opts.nocapture && stdout_isatty(),
        ColorConfig::AlwaysColor => true,
        ColorConfig::NeverColor => false,
    }
}

#[cfg(any(
    target_os = "cloudabi",
    target_os = "redox",
    all(target_arch = "wasm32", not(target_os = "emscripten")),
    all(target_vendor = "fortanix", target_env =
"sgx")
))]
fn stdout_isatty() -> bool {
    // FIXME: Implement isatty on Redox and SGX
    false
}
#[cfg(any(unix, target_os = "fuchsia"))]
fn stdout_isatty() -> bool {
    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
fn stdout_isatty() -> bool {
    // Hand-rolled Win32 bindings: a console handle answers GetConsoleMode
    // with nonzero; pipes/files do not.
    type DWORD = u32;
    type BOOL = i32;
    type HANDLE = *mut u8;
    type LPDWORD = *mut u32;
    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
    extern "system" {
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
    }
    unsafe {
        let handle = GetStdHandle(STD_OUTPUT_HANDLE);
        let mut out = 0;
        GetConsoleMode(handle, &mut out) != 0
    }
}

/// Events emitted by the scheduler to the console callback, in run order:
/// filtered-out count, filtered-in set, then per-test wait/timeout/result.
#[allow(clippy::large_enum_variant)] // FIXME
#[derive(Clone)]
pub enum TestEvent {
    TeFiltered(Vec),
    TeWait(TestDesc),
    TeResult(TestDesc, TestResult, Vec),
    TeTimeout(TestDesc),
    TeFilteredOut(usize),
}

// What a finished test sends back over the monitor channel.
pub type MonitorMsg = (TestDesc, TestResult, Vec);

/// Writer that appends into a shared capture buffer; used to redirect
/// print/panic output while a test runs. `flush` is a no-op.
struct Sink(Arc>>);
impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result {
        Write::write(&mut *self.0.lock().unwrap(), data)
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

/// Filters and schedules the tests, running up to `test_threads` of them
/// concurrently, and reports progress through `callback`.
pub fn run_tests(
    opts: &TestOpts,
    tests: Vec,
    mut callback: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::{self, HashMap};
    use std::hash::BuildHasherDefault;
    use std::sync::mpsc::RecvTimeoutError;
    // Use a deterministic hasher
    // (maps each in-flight test to its warn deadline).
    type TestMap = HashMap<
        TestDesc,
        Instant,
        BuildHasherDefault,
    >;
    // Removes and returns every test whose deadline has passed, so each
    // test produces at most one TeTimeout event.
    fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec {
        let now = Instant::now();
        let timed_out = running_tests
            .iter()
            .filter_map(|(desc, timeout)| {
                if now >= *timeout {
                    Some(desc.clone())
                } else {
                    None
                }
            })
            .collect();
        for test in &timed_out {
            running_tests.remove(test);
        }
        timed_out
    };

    // Time until the earliest deadline, saturating at zero; None when
    // nothing is in flight.
    fn calc_timeout(running_tests: &TestMap) -> Option {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now {
                *next_timeout - now
            } else {
                Duration::new(0, 0)
            }
        })
    };

    let tests_len = tests.len();

    let mut filtered_tests = filter_tests(opts, tests);
    // When not benchmarking, benchmarks are demoted to single-shot tests.
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    // Bake each test's padding preference into its name up front.
    let filtered_tests = {
        let mut filtered_tests = filtered_tests;
        for test in &mut filtered_tests {
            test.desc.name =
                test.desc.name.with_padding(test.testfn.padding());
        }

        filtered_tests
    };

    let filtered_out = tests_len - filtered_tests.len();
    callback(TestEvent::TeFilteredOut(filtered_out))?;

    let filtered_descs =
        filtered_tests.iter().map(|t| t.desc.clone()).collect();

    callback(TestEvent::TeFiltered(filtered_descs))?;

    // Split tests from benchmarks; benchmarks run serially at the end.
    let (filtered_tests, filtered_benchs): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| match e.testfn {
            TestFn::StaticTestFn(_) | TestFn::DynTestFn(_) => true,
            _ => false,
        });

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    // Reversed so pop() yields tests in their original (sorted) order.
    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::();

    let mut running_tests: TestMap = HashMap::default();

    if concurrency == 1 {
        // Fully synchronous path: run, receive, report, one at a time.
        while !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            callback(TestEvent::TeWait(test.desc.clone()))?;
            run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
            let (test, result, stdout) = rx.recv().unwrap();
            callback(TestEvent::TeResult(test, result, stdout))?;
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            // Top up to `concurrency` in-flight tests.
            while pending < concurrency && !remaining.is_empty() {
                let test = remaining.pop().unwrap();
                let timeout =
                    Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
                running_tests.insert(test.desc.clone(), timeout);
                callback(TestEvent::TeWait(test.desc.clone()))?; //here no pad
                run_test(
                    opts,
                    !opts.run_tests,
                    test,
                    tx.clone(),
                    Concurrent::Yes,
                );
                pending += 1;
            }

            // Wait for one result, emitting TeTimeout warnings for tests
            // that blow past their deadline while we wait.
            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&running_tests) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&mut running_tests) {
                        callback(TestEvent::TeTimeout(test))?;
                    }
                    if res != Err(RecvTimeoutError::Timeout) {
                        break;
                    }
                } else {
                    res =
                        rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let (desc, result, stdout) = res.unwrap();
            running_tests.remove(&desc);

            callback(TestEvent::TeResult(desc, result, stdout))?;
            pending -= 1;
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
for b in filtered_benchs {
            callback(TestEvent::TeWait(b.desc.clone()))?;
            run_test(opts, false, b, tx.clone(), Concurrent::No);
            let (test, result, stdout) = rx.recv().unwrap();
            callback(TestEvent::TeResult(test, result, stdout))?;
        }
    }
    Ok(())
}

/// Number of worker threads to use: RUST_TEST_THREADS when set (panics on
/// a non-positive or non-numeric value), otherwise the detected CPU count.
#[allow(deprecated)]
fn get_concurrency() -> usize {
    // Per-platform CPU counters; exactly one `num_cpus` is compiled in.
    #[cfg(windows)]
    #[allow(nonstandard_style)]
    fn num_cpus() -> usize {
        #[repr(C)]
        struct SYSTEM_INFO {
            wProcessorArchitecture: u16,
            wReserved: u16,
            dwPageSize: u32,
            lpMinimumApplicationAddress: *mut u8,
            lpMaximumApplicationAddress: *mut u8,
            dwActiveProcessorMask: *mut u8,
            dwNumberOfProcessors: u32,
            dwProcessorType: u32,
            dwAllocationGranularity: u32,
            wProcessorLevel: u16,
            wProcessorRevision: u16,
        }
        extern "system" {
            fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
        }
        unsafe {
            let mut sysinfo = std::mem::zeroed();
            GetSystemInfo(&mut sysinfo);
            sysinfo.dwNumberOfProcessors as usize
        }
    }

    #[cfg(target_os = "redox")]
    fn num_cpus() -> usize {
        // FIXME: Implement num_cpus on Redox
        1
    }

    #[cfg(any(
        all(target_arch = "wasm32", not(target_os = "emscripten")),
        all(target_vendor = "fortanix", target_env = "sgx")
    ))]
    fn num_cpus() -> usize {
        1
    }

    #[cfg(any(
        target_os = "android",
        target_os = "cloudabi",
        target_os = "emscripten",
        target_os = "fuchsia",
        target_os = "ios",
        target_os = "linux",
        target_os = "macos",
        target_os = "solaris"
    ))]
    fn num_cpus() -> usize {
        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
    }

    #[cfg(any(
        target_os = "freebsd",
        target_os = "dragonfly",
        target_os = "bitrig",
        target_os = "netbsd"
    ))]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);

        // Try sysconf first; fall back to the HW_NCPU sysctl, and finally
        // clamp to 1 so callers never see zero.
        unsafe {
            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
        }
        if cpus < 1 {
            let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            unsafe {
                libc::sysctl(
                    mib.as_mut_ptr(),
                    2,
                    &mut cpus as *mut _ as *mut _,
                    &mut cpus_size as *mut _ as *mut _,
                    ptr::null_mut(),
                    0,
                );
            }
            if cpus < 1 {
                cpus = 1;
            }
        }
        cpus as usize
    }

    #[cfg(target_os = "openbsd")]
    fn num_cpus() -> usize {
        use std::ptr;

        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];

        unsafe {
            libc::sysctl(
                mib.as_mut_ptr(),
                2,
                &mut cpus as *mut _ as *mut _,
                &mut cpus_size as *mut _ as *mut _,
                ptr::null_mut(),
                0,
            );
        }
        if cpus < 1 {
            cpus = 1;
        }
        cpus as usize
    }

    #[cfg(target_os = "haiku")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }

    #[cfg(target_os = "l4re")]
    fn num_cpus() -> usize {
        // FIXME: implement
        1
    }

    match env::var("RUST_TEST_THREADS") {
        Ok(s) => {
            let opt_n: Option = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!(
                    "RUST_TEST_THREADS is `{}`, should be a positive integer.",
                    s
                ),
            }
        }
        Err(..) => num_cpus(),
    }
}

/// Applies, in order: the name filter, the skip filters, the
/// should-panic exclusion, the run-ignored mode, then sorts by name.
pub fn filter_tests(
    opts: &TestOpts,
    tests: Vec,
) -> Vec {
    let mut filtered = tests;
    // Exact match or substring match, per --exact.
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        if opts.filter_exact {
            test_name == filter
        } else {
            test_name.contains(filter)
        }
    };

    // Remove tests that don't match the test filter
    if let Some(ref filter) = opts.filter {
        filtered.retain(|test| matches_filter(test, filter));
    }

    // Skip tests that match any of the skip filters
    filtered
        .retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));

    // Excludes #[should_panic] tests
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // maybe unignore tests
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered
                .iter_mut()
                .for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            // Keep only the ignored tests, then clear their flag so they run.
            filtered.retain(|test| test.desc.ignore);
            filtered
                .iter_mut()
                .for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| {
        t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())
    });

    filtered
}

/// Rewrites every benchmark into a test that runs the bench body exactly
/// once (via `bench::run_once`); plain tests pass through unchanged.
pub fn convert_benchmarks_to_tests(
    tests: Vec,
) -> Vec {
    // convert benchmarks to tests, if we're not benchmarking them
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                TestFn::DynBenchFn(bench) => {
                    TestFn::DynTestFn(Box::new(move || {
                        bench::run_once(|b| {
                            __rust_begin_short_backtrace(|| bench.run(b))
                        })
                    }))
                }
                TestFn::StaticBenchFn(benchfn) => {
TestFn::DynTestFn(Box::new(move || {
                        bench::run_once(|b| {
                            __rust_begin_short_backtrace(|| benchfn(b))
                        })
                    }))
                }
                f => f,
            };
            TestDescAndFn {
                desc: x.desc,
                testfn,
            }
        })
        .collect()
}

/// Runs a single test or benchmark, sending its (desc, result, captured
/// output) over `monitor_ch`. Ignored (or force-ignored) tests short-circuit
/// with TrIgnored without running.
pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    test: TestDescAndFn,
    monitor_ch: Sender,
    concurrency: Concurrent,
) {
    // Executes one test closure, optionally redirecting print/panic output
    // into a capture buffer, and reports the outcome over the channel.
    fn run_test_inner(
        desc: TestDesc,
        monitor_ch: Sender,
        nocapture: bool,
        testfn: Box,
        concurrency: Concurrent,
    ) {
        // Buffer for capturing standard I/O
        let data = Arc::new(Mutex::new(Vec::new()));
        let data2 = data.clone();

        let name = desc.name.clone();
        let runtest = move || {
            // Swap in Sink writers; remember the old ones to restore below.
            let oldio = if nocapture {
                None
            } else {
                Some((
                    io::set_print(Some(Box::new(Sink(data2.clone())))),
                    io::set_panic(Some(Box::new(Sink(data2)))),
                ))
            };

            // A panicking test is caught here and classified by calc_result.
            let result = catch_unwind(AssertUnwindSafe(testfn));

            if let Some((printio, panicio)) = oldio {
                io::set_print(printio);
                io::set_panic(panicio);
            };

            let test_result = calc_result(&desc, result);
            let stdout = data.lock().unwrap().to_vec();
            monitor_ch
                .send((desc.clone(), test_result, stdout))
                .unwrap();
        };

        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads =
            !cfg!(any(target_os = "emscripten", target_arch = "wasm32"));
        if concurrency == Concurrent::Yes && supports_threads {
            let cfg = thread::Builder::new().name(name.as_slice().to_owned());
            // NOTE(review): the JoinHandle is dropped, so the thread is
            // detached; completion is signalled only via monitor_ch.
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    let TestDescAndFn { desc, testfn } = test;

    // wasm32 (non-emscripten) aborts on panic, so should-panic tests
    // cannot be observed there — treat them as ignored.
    let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
        && !cfg!(target_os = "emscripten")
        && desc.should_panic != ShouldPanic::No;

    if force_ignore || desc.ignore || ignore_because_panic_abort {
        monitor_ch
            .send((desc, TestResult::TrIgnored, Vec::new()))
            .unwrap();
        return;
    }

    match testfn {
        TestFn::DynBenchFn(bencher) => {
            crate::bench::benchmark(
                desc,
                &monitor_ch,
                opts.nocapture,
                |harness| bencher.run(harness),
            );
        }
        TestFn::StaticBenchFn(benchfn) => {
            crate::bench::benchmark(
                desc,
                &monitor_ch,
                opts.nocapture,
                |harness| (benchfn)(harness),
            );
        }
        TestFn::DynTestFn(f) => {
            let cb = move || __rust_begin_short_backtrace(f);
            run_test_inner(
                desc,
                monitor_ch,
                opts.nocapture,
                Box::new(cb),
                concurrency,
            )
        }
        TestFn::StaticTestFn(f) => run_test_inner(
            desc,
            monitor_ch,
            opts.nocapture,
            Box::new(move || __rust_begin_short_backtrace(f)),
            concurrency,
        ),
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace(f: F) {
    f()
}

/// Classifies a caught test outcome against the test's should-panic
/// expectation, honoring `allow_fail` for otherwise-failing cases.
fn calc_result(
    desc: &TestDesc,
    task_result: Result<(), Box>,
) -> TestResult {
    match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => {
            TestResult::TrOk
        }
        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
            // Panic payloads are either String (from panic!("{}", ..)) or
            // &'static str (from panic!("literal")); check both.
            if err
                .downcast_ref::()
                .map(|e| &**e)
                .or_else(|| err.downcast_ref::<&'static str>().cloned())
                .map_or(false, |e| e.contains(msg))
            {
                TestResult::TrOk
            } else if desc.allow_fail {
                TestResult::TrAllowedFail
            } else {
                TestResult::TrFailedMsg(format!(
                    "Panic did not include expected string '{}'",
                    msg
                ))
            }
        }
        // Everything else (unexpected panic, or Ok when a panic was
        // expected) is a failure unless the test allows failing.
        _ if desc.allow_fail => TestResult::TrAllowedFail,
        _ => TestResult::TrFailed,
    }
}

/// Named benchmark metrics (value +/- noise), keyed by name.
#[derive(Clone, PartialEq, Default)]
pub struct MetricMap(BTreeMap);

impl MetricMap {
    pub fn new() -> Self {
        Self::default()
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric { value, noise };
        self.0.insert(name.to_owned(), m);
    }

    /// Renders all metrics as "name: value (+/- noise)" joined with ", ".
    pub fn fmt_metrics(&self) -> String {
        let v = self
            .0
            .iter()
            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
            .collect::>();
        v.join(", ")
    }
}

// Benchmarking

impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter(&mut self, mut inner: F)
    where
        F: FnMut() -> T,
    {
        // Single mode (run_once): execute the body once, record nothing.
        if self.mode == BenchMode::Single {
            ns_iter_inner(&mut inner, 1);
            return;
        }

        self.summary = Some(iter(&mut inner));
    }

    // Runs the benchmark function and returns whatever summary its body's
    // `iter` call recorded (None when `iter` was never called).
    pub fn bench(&mut self, mut f: F) -> Option
    where
        F: FnMut(&mut Self),
    {
        f(self);
        self.summary
    }
}

// Total nanoseconds in a Duration.
fn ns_from_dur(dur: Duration) -> u64 {
    dur.as_secs() * 1_000_000_000 + u64::from(dur.subsec_nanos())
}

// Time `k` black-boxed invocations of `inner`, in nanoseconds.
fn ns_iter_inner(inner: &mut F, k: u64) -> u64
where
    F: FnMut() -> T,
{
    let start = Instant::now();
    for _ in 0..k {
        test::black_box(inner());
    }
    ns_from_dur(start.elapsed())
}

/// Adaptive measurement loop: grows the per-sample iteration count until
/// the median stabilizes (or time runs out) and returns the winsorized
/// summary of ns-per-iteration samples.
pub fn iter(inner: &mut F) -> stats::Summary
where
    F: FnMut() -> T,
{
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).
    n = cmp::max(1, n);

    let mut total_run = Duration::new(0, 0);
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    loop {
        let loop_start = Instant::now();

        // 50 samples at n iterations each.
        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        // Second pass at 5x the iteration count, for convergence testing.
        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;
        }

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        // stable median.
        if loop_run > Duration::from_millis(100)
            && summ.median_abs_dev_pct < 1.0
            && summ.median - summ5.median < summ5.median_abs_dev
        {
            return summ5;
        }

        total_run += loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {
            return summ5;
        }

        // If we overflow here just return the results so far.
// We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = if n.checked_mul(10).is_some() {
            n * 2
        } else {
            return summ5;
        };
    }
}

pub mod bench {
    use super::{
        BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc,
        TestResult,
    };
    use crate::stats;
    use std::cmp;
    use std::io;
    use std::panic::{catch_unwind, AssertUnwindSafe};
    use std::sync::{Arc, Mutex};

    /// Runs one benchmark in Auto mode, capturing its output (unless
    /// `nocapture`) and sending the outcome over `monitor_ch`.
    pub fn benchmark(
        desc: TestDesc,
        monitor_ch: &Sender,
        nocapture: bool,
        f: F,
    ) where
        F: FnMut(&mut Bencher),
    {
        let mut bs = Bencher {
            mode: BenchMode::Auto,
            summary: None,
            bytes: 0,
        };

        // Shared capture buffer, same scheme as run_test_inner.
        let data = Arc::new(Mutex::new(Vec::new()));
        let data2 = data.clone();

        let oldio = if nocapture {
            None
        } else {
            Some((
                io::set_print(Some(Box::new(Sink(data2.clone())))),
                io::set_panic(Some(Box::new(Sink(data2)))),
            ))
        };

        let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

        if let Some((printio, panicio)) = oldio {
            io::set_print(printio);
            io::set_panic(panicio);
        };

        let test_result = match result {
            //bs.bench(f) {
            Ok(Some(ns_iter_summ)) => {
                // Throughput = bytes processed per iteration, scaled by
                // the median iteration time (clamped to >= 1 ns).
                let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
                let mb_s = bs.bytes * 1000 / ns_iter;

                let bs = BenchSamples {
                    ns_iter_summ,
                    mb_s: mb_s as usize,
                };
                TestResult::TrBench(bs)
            }
            Ok(None) => {
                // iter not called, so no data.
                // FIXME: error in this case?
                let samples: &mut [f64] = &mut [0.0_f64; 1];
                let bs = BenchSamples {
                    ns_iter_summ: stats::Summary::new(samples),
                    mb_s: 0,
                };
                TestResult::TrBench(bs)
            }
            Err(_) => TestResult::TrFailed,
        };

        let stdout = data.lock().unwrap().to_vec();
        monitor_ch.send((desc, test_result, stdout)).unwrap();
    }

    /// Runs a benchmark body exactly once (Single mode), discarding any
    /// summary — used when benchmarks are executed as plain tests.
    pub fn run_once(f: F)
    where
        F: FnMut(&mut Bencher),
    {
        let mut bs = Bencher {
            mode: BenchMode::Single,
            summary: None,
            bytes: 0,
        };
        bs.bench(f);
    }
}

#[cfg(test)]
mod tests {
    use crate::{
        bench, filter_tests, parse_opts, run_test, Bencher, Concurrent,
        MetricMap, RunIgnored, ShouldPanic, TestDesc, TestDescAndFn, TestFn,
        TestName, TestOpts, TestResult,
    };
    use std::sync::mpsc::channel;

    // Fixture: one ignored test ("1") and one unignored test ("2").
    fn one_ignored_one_unignored_test() -> Vec {
        vec![
            TestDescAndFn {
                desc: TestDesc {
                    name: TestName::StaticTestName("1"),
                    ignore: true,
                    should_panic: ShouldPanic::No,
                    allow_fail: false,
                },
                testfn: TestFn::DynTestFn(Box::new(move || {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: TestName::StaticTestName("2"),
                    ignore: false,
                    should_panic: ShouldPanic::No,
                    allow_fail: false,
                },
                testfn: TestFn::DynTestFn(Box::new(move || {})),
            },
        ]
    }

    #[test]
    pub fn do_not_run_ignored_tests() {
        // The body panics, so if the ignored test were actually run the
        // result would not be TrOk — but it must not be run at all.
        fn f() {
            panic!();
        }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: TestName::StaticTestName("whatever"),
                ignore: true,
                should_panic: ShouldPanic::No,
                allow_fail: false,
            },
            testfn: TestFn::DynTestFn(Box::new(f)),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
        let (_, res,
_) = rx.recv().unwrap(); 1884 | assert!(res != TestResult::TrOk); 1885 | } 1886 | 1887 | #[test] 1888 | pub fn ignored_tests_result_in_ignored() { 1889 | fn f() {} 1890 | let desc = TestDescAndFn { 1891 | desc: TestDesc { 1892 | name: TestName::StaticTestName("whatever"), 1893 | ignore: true, 1894 | should_panic: ShouldPanic::No, 1895 | allow_fail: false, 1896 | }, 1897 | testfn: TestFn::DynTestFn(Box::new(f)), 1898 | }; 1899 | let (tx, rx) = channel(); 1900 | run_test(&TestOpts::new(), false, desc, tx, Concurrent::No); 1901 | let (_, res, _) = rx.recv().unwrap(); 1902 | assert!(res == TestResult::TrIgnored); 1903 | } 1904 | 1905 | #[test] 1906 | fn test_should_panic() { 1907 | fn f() { 1908 | panic!(); 1909 | } 1910 | let desc = TestDescAndFn { 1911 | desc: TestDesc { 1912 | name: TestName::StaticTestName("whatever"), 1913 | ignore: false, 1914 | should_panic: ShouldPanic::Yes, 1915 | allow_fail: false, 1916 | }, 1917 | testfn: TestFn::DynTestFn(Box::new(f)), 1918 | }; 1919 | let (tx, rx) = channel(); 1920 | run_test(&TestOpts::new(), false, desc, tx, Concurrent::No); 1921 | let (_, res, _) = rx.recv().unwrap(); 1922 | assert!(res == TestResult::TrOk); 1923 | } 1924 | 1925 | #[test] 1926 | fn test_should_panic_good_message() { 1927 | fn f() { 1928 | panic!("an error message"); 1929 | } 1930 | let desc = TestDescAndFn { 1931 | desc: TestDesc { 1932 | name: TestName::StaticTestName("whatever"), 1933 | ignore: false, 1934 | should_panic: ShouldPanic::YesWithMessage("error message"), 1935 | allow_fail: false, 1936 | }, 1937 | testfn: TestFn::DynTestFn(Box::new(f)), 1938 | }; 1939 | let (tx, rx) = channel(); 1940 | run_test(&TestOpts::new(), false, desc, tx, Concurrent::No); 1941 | let (_, res, _) = rx.recv().unwrap(); 1942 | assert!(res == TestResult::TrOk); 1943 | } 1944 | 1945 | #[test] 1946 | fn test_should_panic_bad_message() { 1947 | fn f() { 1948 | panic!("an error message"); 1949 | } 1950 | let expected = "foobar"; 1951 | let failed_msg = "Panic did not include 
expected string"; 1952 | let desc = TestDescAndFn { 1953 | desc: TestDesc { 1954 | name: TestName::StaticTestName("whatever"), 1955 | ignore: false, 1956 | should_panic: ShouldPanic::YesWithMessage(expected), 1957 | allow_fail: false, 1958 | }, 1959 | testfn: TestFn::DynTestFn(Box::new(f)), 1960 | }; 1961 | let (tx, rx) = channel(); 1962 | run_test(&TestOpts::new(), false, desc, tx, Concurrent::No); 1963 | let (_, res, _) = rx.recv().unwrap(); 1964 | assert!( 1965 | res == TestResult::TrFailedMsg(format!( 1966 | "{} '{}'", 1967 | failed_msg, expected 1968 | )) 1969 | ); 1970 | } 1971 | 1972 | #[test] 1973 | fn test_should_panic_but_succeeds() { 1974 | fn f() {} 1975 | let desc = TestDescAndFn { 1976 | desc: TestDesc { 1977 | name: TestName::StaticTestName("whatever"), 1978 | ignore: false, 1979 | should_panic: ShouldPanic::Yes, 1980 | allow_fail: false, 1981 | }, 1982 | testfn: TestFn::DynTestFn(Box::new(f)), 1983 | }; 1984 | let (tx, rx) = channel(); 1985 | run_test(&TestOpts::new(), false, desc, tx, Concurrent::No); 1986 | let (_, res, _) = rx.recv().unwrap(); 1987 | assert!(res == TestResult::TrFailed); 1988 | } 1989 | 1990 | #[test] 1991 | fn parse_ignored_flag() { 1992 | let args = vec![ 1993 | "progname".to_string(), 1994 | "filter".to_string(), 1995 | "--ignored".to_string(), 1996 | ]; 1997 | let opts = parse_opts(&args).unwrap().unwrap(); 1998 | assert_eq!(opts.run_ignored, RunIgnored::Only); 1999 | } 2000 | 2001 | #[test] 2002 | fn parse_include_ignored_flag() { 2003 | let args = vec![ 2004 | "progname".to_string(), 2005 | "filter".to_string(), 2006 | "-Zunstable-options".to_string(), 2007 | "--include-ignored".to_string(), 2008 | ]; 2009 | let opts = parse_opts(&args).unwrap().unwrap(); 2010 | assert_eq!(opts.run_ignored, RunIgnored::Yes); 2011 | } 2012 | 2013 | #[test] 2014 | pub fn filter_for_ignored_option() { 2015 | // When we run ignored tests the test filter should filter out all the 2016 | // unignored tests and flip the ignore flag on the rest to 
false 2017 | 2018 | let mut opts = TestOpts::new(); 2019 | opts.run_tests = true; 2020 | opts.run_ignored = RunIgnored::Only; 2021 | 2022 | let tests = one_ignored_one_unignored_test(); 2023 | let filtered = filter_tests(&opts, tests); 2024 | 2025 | assert_eq!(filtered.len(), 1); 2026 | assert_eq!(filtered[0].desc.name.to_string(), "1"); 2027 | assert!(!filtered[0].desc.ignore); 2028 | } 2029 | 2030 | #[test] 2031 | pub fn run_include_ignored_option() { 2032 | // When we "--include-ignored" tests, the ignore flag should be set to false on 2033 | // all tests and no test filtered out 2034 | 2035 | let mut opts = TestOpts::new(); 2036 | opts.run_tests = true; 2037 | opts.run_ignored = RunIgnored::Yes; 2038 | 2039 | let tests = one_ignored_one_unignored_test(); 2040 | let filtered = filter_tests(&opts, tests); 2041 | 2042 | assert_eq!(filtered.len(), 2); 2043 | assert!(!filtered[0].desc.ignore); 2044 | assert!(!filtered[1].desc.ignore); 2045 | } 2046 | 2047 | #[test] 2048 | pub fn exclude_should_panic_option() { 2049 | let mut opts = TestOpts::new(); 2050 | opts.run_tests = true; 2051 | opts.exclude_should_panic = true; 2052 | 2053 | let mut tests = one_ignored_one_unignored_test(); 2054 | tests.push(TestDescAndFn { 2055 | desc: TestDesc { 2056 | name: TestName::StaticTestName("3"), 2057 | ignore: false, 2058 | should_panic: ShouldPanic::Yes, 2059 | allow_fail: false, 2060 | }, 2061 | testfn: TestFn::DynTestFn(Box::new(move || {})), 2062 | }); 2063 | 2064 | let filtered = filter_tests(&opts, tests); 2065 | 2066 | assert_eq!(filtered.len(), 2); 2067 | assert!(filtered 2068 | .iter() 2069 | .all(|test| test.desc.should_panic == ShouldPanic::No)); 2070 | } 2071 | 2072 | #[test] 2073 | pub fn exact_filter_match() { 2074 | fn tests() -> Vec { 2075 | vec!["base", "base::test", "base::test1", "base::test2"] 2076 | .into_iter() 2077 | .map(|name| TestDescAndFn { 2078 | desc: TestDesc { 2079 | name: TestName::StaticTestName(name), 2080 | ignore: false, 2081 | should_panic: 
ShouldPanic::No, 2082 | allow_fail: false, 2083 | }, 2084 | testfn: TestFn::DynTestFn(Box::new(move || {})), 2085 | }) 2086 | .collect() 2087 | } 2088 | 2089 | let substr = filter_tests( 2090 | &TestOpts { 2091 | filter: Some("base".into()), 2092 | ..TestOpts::new() 2093 | }, 2094 | tests(), 2095 | ); 2096 | assert_eq!(substr.len(), 4); 2097 | 2098 | let substr = filter_tests( 2099 | &TestOpts { 2100 | filter: Some("bas".into()), 2101 | ..TestOpts::new() 2102 | }, 2103 | tests(), 2104 | ); 2105 | assert_eq!(substr.len(), 4); 2106 | 2107 | let substr = filter_tests( 2108 | &TestOpts { 2109 | filter: Some("::test".into()), 2110 | ..TestOpts::new() 2111 | }, 2112 | tests(), 2113 | ); 2114 | assert_eq!(substr.len(), 3); 2115 | 2116 | let substr = filter_tests( 2117 | &TestOpts { 2118 | filter: Some("base::test".into()), 2119 | ..TestOpts::new() 2120 | }, 2121 | tests(), 2122 | ); 2123 | assert_eq!(substr.len(), 3); 2124 | 2125 | let exact = filter_tests( 2126 | &TestOpts { 2127 | filter: Some("base".into()), 2128 | filter_exact: true, 2129 | ..TestOpts::new() 2130 | }, 2131 | tests(), 2132 | ); 2133 | assert_eq!(exact.len(), 1); 2134 | 2135 | let exact = filter_tests( 2136 | &TestOpts { 2137 | filter: Some("bas".into()), 2138 | filter_exact: true, 2139 | ..TestOpts::new() 2140 | }, 2141 | tests(), 2142 | ); 2143 | assert_eq!(exact.len(), 0); 2144 | 2145 | let exact = filter_tests( 2146 | &TestOpts { 2147 | filter: Some("::test".into()), 2148 | filter_exact: true, 2149 | ..TestOpts::new() 2150 | }, 2151 | tests(), 2152 | ); 2153 | assert_eq!(exact.len(), 0); 2154 | 2155 | let exact = filter_tests( 2156 | &TestOpts { 2157 | filter: Some("base::test".into()), 2158 | filter_exact: true, 2159 | ..TestOpts::new() 2160 | }, 2161 | tests(), 2162 | ); 2163 | assert_eq!(exact.len(), 1); 2164 | } 2165 | 2166 | #[test] 2167 | pub fn sort_tests() { 2168 | let mut opts = TestOpts::new(); 2169 | opts.run_tests = true; 2170 | 2171 | let names = vec![ 2172 | "sha1::test".to_string(), 
2173 | "isize::test_to_str".to_string(), 2174 | "isize::test_pow".to_string(), 2175 | "test::do_not_run_ignored_tests".to_string(), 2176 | "test::ignored_tests_result_in_ignored".to_string(), 2177 | "test::first_free_arg_should_be_a_filter".to_string(), 2178 | "test::parse_ignored_flag".to_string(), 2179 | "test::parse_include_ignored_flag".to_string(), 2180 | "test::filter_for_ignored_option".to_string(), 2181 | "test::run_include_ignored_option".to_string(), 2182 | "test::sort_tests".to_string(), 2183 | ]; 2184 | let tests = { 2185 | fn testfn() {} 2186 | let mut tests = Vec::new(); 2187 | for name in &names { 2188 | let test = TestDescAndFn { 2189 | desc: TestDesc { 2190 | name: TestName::DynTestName((*name).clone()), 2191 | ignore: false, 2192 | should_panic: ShouldPanic::No, 2193 | allow_fail: false, 2194 | }, 2195 | testfn: TestFn::DynTestFn(Box::new(testfn)), 2196 | }; 2197 | tests.push(test); 2198 | } 2199 | tests 2200 | }; 2201 | let filtered = filter_tests(&opts, tests); 2202 | 2203 | let expected = vec![ 2204 | "isize::test_pow".to_string(), 2205 | "isize::test_to_str".to_string(), 2206 | "sha1::test".to_string(), 2207 | "test::do_not_run_ignored_tests".to_string(), 2208 | "test::filter_for_ignored_option".to_string(), 2209 | "test::first_free_arg_should_be_a_filter".to_string(), 2210 | "test::ignored_tests_result_in_ignored".to_string(), 2211 | "test::parse_ignored_flag".to_string(), 2212 | "test::parse_include_ignored_flag".to_string(), 2213 | "test::run_include_ignored_option".to_string(), 2214 | "test::sort_tests".to_string(), 2215 | ]; 2216 | 2217 | for (a, b) in expected.iter().zip(filtered) { 2218 | assert!(*a == b.desc.name.to_string()); 2219 | } 2220 | } 2221 | 2222 | #[test] 2223 | pub fn test_metricmap_compare() { 2224 | let mut m1 = MetricMap::new(); 2225 | let mut m2 = MetricMap::new(); 2226 | m1.insert_metric("in-both-noise", 1000.0, 200.0); 2227 | m2.insert_metric("in-both-noise", 1100.0, 200.0); 2228 | 2229 | 
m1.insert_metric("in-first-noise", 1000.0, 2.0); 2230 | m2.insert_metric("in-second-noise", 1000.0, 2.0); 2231 | 2232 | m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0); 2233 | m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0); 2234 | 2235 | m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0); 2236 | m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0); 2237 | 2238 | m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0); 2239 | m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0); 2240 | 2241 | m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0); 2242 | m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0); 2243 | } 2244 | 2245 | #[test] 2246 | pub fn test_bench_once_no_iter() { 2247 | fn f(_: &mut Bencher) {} 2248 | bench::run_once(f); 2249 | } 2250 | 2251 | #[test] 2252 | pub fn test_bench_once_iter() { 2253 | fn f(b: &mut Bencher) { 2254 | b.iter(|| {}) 2255 | } 2256 | bench::run_once(f); 2257 | } 2258 | 2259 | #[test] 2260 | pub fn test_bench_no_iter() { 2261 | fn f(_: &mut Bencher) {} 2262 | 2263 | let (tx, rx) = channel(); 2264 | 2265 | let desc = TestDesc { 2266 | name: TestName::StaticTestName("f"), 2267 | ignore: false, 2268 | should_panic: ShouldPanic::No, 2269 | allow_fail: false, 2270 | }; 2271 | 2272 | crate::bench::benchmark(desc, &tx, true, f); 2273 | rx.recv().unwrap(); 2274 | } 2275 | 2276 | #[test] 2277 | pub fn test_bench_iter() { 2278 | fn f(b: &mut Bencher) { 2279 | b.iter(|| {}) 2280 | } 2281 | 2282 | let (tx, rx) = channel(); 2283 | 2284 | let desc = TestDesc { 2285 | name: TestName::StaticTestName("f"), 2286 | ignore: false, 2287 | should_panic: ShouldPanic::No, 2288 | allow_fail: false, 2289 | }; 2290 | 2291 | crate::bench::benchmark(desc, &tx, true, f); 2292 | rx.recv().unwrap(); 2293 | } 2294 | } 2295 | -------------------------------------------------------------------------------- 
/libtest/stats.rs: -------------------------------------------------------------------------------- 1 | #![allow(missing_docs)] 2 | #![allow(deprecated)] // Float 3 | 4 | use std::cmp::Ordering::{self, Equal, Greater, Less}; 5 | use std::mem; 6 | 7 | fn local_cmp(x: f64, y: f64) -> Ordering { 8 | // arbitrarily decide that NaNs are larger than everything. 9 | if y.is_nan() { 10 | Less 11 | } else if x.is_nan() { 12 | Greater 13 | } else if x < y { 14 | Less 15 | } else if (x - y).abs() < std::f64::EPSILON { 16 | Equal 17 | } else { 18 | Greater 19 | } 20 | } 21 | 22 | fn local_sort(v: &mut [f64]) { 23 | v.sort_by(|x: &f64, y: &f64| local_cmp(*x, *y)); 24 | } 25 | 26 | /// Trait that provides simple descriptive statistics on a univariate set of numeric samples. 27 | pub trait Stats { 28 | /// Sum of the samples. 29 | /// 30 | /// Note: this method sacrifices performance at the altar of accuracy 31 | /// Depends on IEEE-754 arithmetic guarantees. See proof of correctness at: 32 | /// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric 33 | /// Predicates"][paper] 34 | /// 35 | /// [paper]: http://www.cs.cmu.edu/~quake-papers/robust-arithmetic.ps 36 | fn sum(&self) -> f64; 37 | 38 | /// Minimum value of the samples. 39 | fn min(&self) -> f64; 40 | 41 | /// Maximum value of the samples. 42 | fn max(&self) -> f64; 43 | 44 | /// Arithmetic mean (average) of the samples: sum divided by sample-count. 45 | /// 46 | /// See: 47 | fn mean(&self) -> f64; 48 | 49 | /// Median of the samples: value separating the lower half of the samples from the higher half. 50 | /// Equal to `self.percentile(50.0)`. 51 | /// 52 | /// See: 53 | fn median(&self) -> f64; 54 | 55 | /// Variance of the samples: bias-corrected mean of the squares of the differences of each 56 | /// sample from the sample mean. Note that this calculates the _sample variance_ rather than the 57 | /// population variance, which is assumed to be unknown. 
It therefore corrects the `(n-1)/n` 58 | /// bias that would appear if we calculated a population variance, by dividing by `(n-1)` rather 59 | /// than `n`. 60 | /// 61 | /// See: 62 | fn var(&self) -> f64; 63 | 64 | /// Standard deviation: the square root of the sample variance. 65 | /// 66 | /// Note: this is not a robust statistic for non-normal distributions. Prefer the 67 | /// `median_abs_dev` for unknown distributions. 68 | /// 69 | /// See: 70 | fn std_dev(&self) -> f64; 71 | 72 | /// Standard deviation as a percent of the mean value. See `std_dev` and `mean`. 73 | /// 74 | /// Note: this is not a robust statistic for non-normal distributions. Prefer the 75 | /// `median_abs_dev_pct` for unknown distributions. 76 | fn std_dev_pct(&self) -> f64; 77 | 78 | /// Scaled median of the absolute deviations of each sample from the sample median. This is a 79 | /// robust (distribution-agnostic) estimator of sample variability. Use this in preference to 80 | /// `std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled 81 | /// by the constant `1.4826` to allow its use as a consistent estimator for the standard 82 | /// deviation. 83 | /// 84 | /// See: 85 | fn median_abs_dev(&self) -> f64; 86 | 87 | /// Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`. 88 | fn median_abs_dev_pct(&self) -> f64; 89 | 90 | /// Percentile: the value below which `pct` percent of the values in `self` fall. For example, 91 | /// percentile(95.0) will return the value `v` such that 95% of the samples `s` in `self` 92 | /// satisfy `s <= v`. 93 | /// 94 | /// Calculated by linear interpolation between closest ranks. 95 | /// 96 | /// See: 97 | fn percentile(&self, pct: f64) -> f64; 98 | 99 | /// Quartiles of the sample: three values that divide the sample into four equal groups, each 100 | /// with 1/4 of the data. The middle value is the median. See `median` and `percentile`. 
This 101 | /// function may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but 102 | /// is otherwise equivalent. 103 | /// 104 | /// See also: 105 | fn quartiles(&self) -> (f64, f64, f64); 106 | 107 | /// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th 108 | /// percentile (3rd quartile). See `quartiles`. 109 | /// 110 | /// See also: 111 | fn iqr(&self) -> f64; 112 | } 113 | 114 | /// Extracted collection of all the summary statistics of a sample set. 115 | #[derive(Clone, PartialEq, Copy)] 116 | #[allow(missing_docs)] 117 | pub struct Summary { 118 | pub sum: f64, 119 | pub min: f64, 120 | pub max: f64, 121 | pub mean: f64, 122 | pub median: f64, 123 | pub var: f64, 124 | pub std_dev: f64, 125 | pub std_dev_pct: f64, 126 | pub median_abs_dev: f64, 127 | pub median_abs_dev_pct: f64, 128 | pub quartiles: (f64, f64, f64), 129 | pub iqr: f64, 130 | } 131 | 132 | impl Summary { 133 | /// Construct a new summary of a sample set. 134 | pub fn new(samples: &[f64]) -> Self { 135 | Self { 136 | sum: samples.sum(), 137 | min: samples.min(), 138 | max: samples.max(), 139 | mean: samples.mean(), 140 | median: samples.median(), 141 | var: samples.var(), 142 | std_dev: samples.std_dev(), 143 | std_dev_pct: samples.std_dev_pct(), 144 | median_abs_dev: samples.median_abs_dev(), 145 | median_abs_dev_pct: samples.median_abs_dev_pct(), 146 | quartiles: samples.quartiles(), 147 | iqr: samples.iqr(), 148 | } 149 | } 150 | } 151 | 152 | impl Stats for [f64] { 153 | // FIXME #11059 handle NaN, inf and overflow 154 | fn sum(&self) -> f64 { 155 | let mut partials = vec![]; 156 | 157 | for &x in self { 158 | let mut x = x; 159 | let mut j = 0; 160 | // This inner loop applies `hi`/`lo` summation to each 161 | // partial so that the list of partial sums remains exact. 
162 | for i in 0..partials.len() { 163 | let mut y: f64 = partials[i]; 164 | if x.abs() < y.abs() { 165 | mem::swap(&mut x, &mut y); 166 | } 167 | // Rounded `x+y` is stored in `hi` with round-off stored in 168 | // `lo`. Together `hi+lo` are exactly equal to `x+y`. 169 | let hi = x + y; 170 | let lo = y - (hi - x); 171 | if lo != 0.0 { 172 | partials[j] = lo; 173 | j += 1; 174 | } 175 | x = hi; 176 | } 177 | if j >= partials.len() { 178 | partials.push(x); 179 | } else { 180 | partials[j] = x; 181 | partials.truncate(j + 1); 182 | } 183 | } 184 | let zero: f64 = 0.0; 185 | partials.iter().fold(zero, |p, q| p + *q) 186 | } 187 | 188 | fn min(&self) -> f64 { 189 | assert!(!self.is_empty()); 190 | self.iter().fold(self[0], |p, q| p.min(*q)) 191 | } 192 | 193 | fn max(&self) -> f64 { 194 | assert!(!self.is_empty()); 195 | self.iter().fold(self[0], |p, q| p.max(*q)) 196 | } 197 | 198 | fn mean(&self) -> f64 { 199 | assert!(!self.is_empty()); 200 | self.sum() / (self.len() as f64) 201 | } 202 | 203 | fn median(&self) -> f64 { 204 | self.percentile(50_f64) 205 | } 206 | 207 | fn var(&self) -> f64 { 208 | if self.len() < 2 { 209 | 0.0 210 | } else { 211 | let mean = self.mean(); 212 | let mut v: f64 = 0.0; 213 | for s in self { 214 | let x = *s - mean; 215 | v += x * x; 216 | } 217 | // N.B., this is _supposed to be_ len-1, not len. If you 218 | // change it back to len, you will be calculating a 219 | // population variance, not a sample variance. 
220 | let denom = (self.len() - 1) as f64; 221 | v / denom 222 | } 223 | } 224 | 225 | fn std_dev(&self) -> f64 { 226 | self.var().sqrt() 227 | } 228 | 229 | fn std_dev_pct(&self) -> f64 { 230 | let hundred = 100_f64; 231 | (self.std_dev() / self.mean()) * hundred 232 | } 233 | 234 | fn median_abs_dev(&self) -> f64 { 235 | let med = self.median(); 236 | let abs_devs: Vec = 237 | self.iter().map(|&v| (med - v).abs()).collect(); 238 | // This constant is derived by smarter statistics brains than me, but it is 239 | // consistent with how R and other packages treat the MAD. 240 | let number = 1.4826; 241 | abs_devs.median() * number 242 | } 243 | 244 | fn median_abs_dev_pct(&self) -> f64 { 245 | let hundred = 100_f64; 246 | (self.median_abs_dev() / self.median()) * hundred 247 | } 248 | 249 | fn percentile(&self, pct: f64) -> f64 { 250 | let mut tmp = self.to_vec(); 251 | local_sort(&mut tmp); 252 | percentile_of_sorted(&tmp, pct) 253 | } 254 | 255 | fn quartiles(&self) -> (f64, f64, f64) { 256 | let mut tmp = self.to_vec(); 257 | local_sort(&mut tmp); 258 | let first = 25_f64; 259 | let a = percentile_of_sorted(&tmp, first); 260 | let second = 50_f64; 261 | let b = percentile_of_sorted(&tmp, second); 262 | let third = 75_f64; 263 | let c = percentile_of_sorted(&tmp, third); 264 | (a, b, c) 265 | } 266 | 267 | fn iqr(&self) -> f64 { 268 | let (a, _, c) = self.quartiles(); 269 | c - a 270 | } 271 | } 272 | 273 | // Helper function: extract a value representing the `pct` percentile of a 274 | // sorted sample-set, using linear interpolation. If samples are not sorted, 275 | // return nonsensical value. 
276 | fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 { 277 | assert!(!sorted_samples.is_empty()); 278 | if sorted_samples.len() == 1 { 279 | return sorted_samples[0]; 280 | } 281 | let zero = 0_f64; 282 | assert!(zero <= pct); 283 | let hundred = 100_f64; 284 | assert!(pct <= hundred); 285 | if (pct - hundred).abs() < std::f64::EPSILON { 286 | return sorted_samples[sorted_samples.len() - 1]; 287 | } 288 | let length = (sorted_samples.len() - 1) as f64; 289 | let rank = (pct / hundred) * length; 290 | let lower_rank = rank.floor(); 291 | let d = rank - lower_rank; 292 | let n = lower_rank as usize; 293 | let lo = sorted_samples[n]; 294 | let hi = sorted_samples[n + 1]; 295 | lo + (hi - lo) * d 296 | } 297 | 298 | /// Winsorize a set of samples, replacing values above the `100-pct` percentile 299 | /// and below the `pct` percentile with those percentiles themselves. This is a 300 | /// way of minimizing the effect of outliers, at the cost of biasing the sample. 301 | /// It differs from trimming in that it does not change the number of samples, 302 | /// just changes the values of those that are outliers. 303 | /// 304 | /// See: 305 | pub fn winsorize(samples: &mut [f64], pct: f64) { 306 | let mut tmp = samples.to_vec(); 307 | local_sort(&mut tmp); 308 | let lo = percentile_of_sorted(&tmp, pct); 309 | let hundred = 100_f64; 310 | let hi = percentile_of_sorted(&tmp, hundred - pct); 311 | for samp in samples { 312 | if *samp > hi { 313 | *samp = hi 314 | } else if *samp < lo { 315 | *samp = lo 316 | } 317 | } 318 | } 319 | 320 | // Test vectors generated from R, using the script src/etc/stat-test-vectors.r. 321 | 322 | #[cfg(test)] 323 | mod tests { 324 | use crate::stats::Stats; 325 | use crate::stats::Summary; 326 | use std::f64; 327 | use std::io; 328 | use std::io::prelude::*; 329 | 330 | macro_rules! 
assert_approx_eq { 331 | ($a: expr, $b: expr) => {{ 332 | let (a, b) = (&$a, &$b); 333 | assert!( 334 | (*a - *b).abs() < 1.0e-6, 335 | "{} is not approximately equal to {}", 336 | *a, 337 | *b 338 | ); 339 | }}; 340 | } 341 | 342 | fn check(samples: &[f64], summ: &Summary) { 343 | let summ2 = Summary::new(samples); 344 | 345 | let mut w = io::sink(); 346 | let w = &mut w; 347 | writeln!(w).unwrap(); 348 | 349 | assert_eq!(summ.sum, summ2.sum); 350 | assert_eq!(summ.min, summ2.min); 351 | assert_eq!(summ.max, summ2.max); 352 | assert_eq!(summ.mean, summ2.mean); 353 | assert_eq!(summ.median, summ2.median); 354 | 355 | // We needed a few more digits to get exact equality on these 356 | // but they're within float epsilon, which is 1.0e-6. 357 | assert_approx_eq!(summ.var, summ2.var); 358 | assert_approx_eq!(summ.std_dev, summ2.std_dev); 359 | assert_approx_eq!(summ.std_dev_pct, summ2.std_dev_pct); 360 | assert_approx_eq!(summ.median_abs_dev, summ2.median_abs_dev); 361 | assert_approx_eq!(summ.median_abs_dev_pct, summ2.median_abs_dev_pct); 362 | 363 | assert_eq!(summ.quartiles, summ2.quartiles); 364 | assert_eq!(summ.iqr, summ2.iqr); 365 | } 366 | 367 | #[test] 368 | fn test_min_max_nan() { 369 | let xs = &[1.0, 2.0, f64::NAN, 3.0, 4.0]; 370 | let summary = Summary::new(xs); 371 | assert_eq!(summary.min, 1.0); 372 | assert_eq!(summary.max, 4.0); 373 | } 374 | 375 | #[test] 376 | fn test_norm2() { 377 | let val = &[958.0000000000, 924.0000000000]; 378 | let summ = &Summary { 379 | sum: 1882.0000000000, 380 | min: 924.0000000000, 381 | max: 958.0000000000, 382 | mean: 941.0000000000, 383 | median: 941.0000000000, 384 | var: 578.0000000000, 385 | std_dev: 24.0416305603, 386 | std_dev_pct: 2.5549022912, 387 | median_abs_dev: 25.2042000000, 388 | median_abs_dev_pct: 2.6784484591, 389 | quartiles: (932.5000000000, 941.0000000000, 949.5000000000), 390 | iqr: 17.0000000000, 391 | }; 392 | check(val, summ); 393 | } 394 | #[test] 395 | fn test_norm10narrow() { 396 | let val = 
&[ 397 | 966.0000000000, 398 | 985.0000000000, 399 | 1110.0000000000, 400 | 848.0000000000, 401 | 821.0000000000, 402 | 975.0000000000, 403 | 962.0000000000, 404 | 1157.0000000000, 405 | 1217.0000000000, 406 | 955.0000000000, 407 | ]; 408 | let summ = &Summary { 409 | sum: 9996.0000000000, 410 | min: 821.0000000000, 411 | max: 1217.0000000000, 412 | mean: 999.6000000000, 413 | median: 970.5000000000, 414 | var: 16050.7111111111, 415 | std_dev: 126.6914010938, 416 | std_dev_pct: 12.6742097933, 417 | median_abs_dev: 102.2994000000, 418 | median_abs_dev_pct: 10.5408964451, 419 | quartiles: (956.7500000000, 970.5000000000, 1078.7500000000), 420 | iqr: 122.0000000000, 421 | }; 422 | check(val, summ); 423 | } 424 | #[test] 425 | fn test_norm10medium() { 426 | let val = &[ 427 | 954.0000000000, 428 | 1064.0000000000, 429 | 855.0000000000, 430 | 1000.0000000000, 431 | 743.0000000000, 432 | 1084.0000000000, 433 | 704.0000000000, 434 | 1023.0000000000, 435 | 357.0000000000, 436 | 869.0000000000, 437 | ]; 438 | let summ = &Summary { 439 | sum: 8653.0000000000, 440 | min: 357.0000000000, 441 | max: 1084.0000000000, 442 | mean: 865.3000000000, 443 | median: 911.5000000000, 444 | var: 48628.4555555556, 445 | std_dev: 220.5186059170, 446 | std_dev_pct: 25.4846418487, 447 | median_abs_dev: 195.7032000000, 448 | median_abs_dev_pct: 21.4704552935, 449 | quartiles: (771.0000000000, 911.5000000000, 1017.2500000000), 450 | iqr: 246.2500000000, 451 | }; 452 | check(val, summ); 453 | } 454 | #[test] 455 | fn test_norm10wide() { 456 | let val = &[ 457 | 505.0000000000, 458 | 497.0000000000, 459 | 1591.0000000000, 460 | 887.0000000000, 461 | 1026.0000000000, 462 | 136.0000000000, 463 | 1580.0000000000, 464 | 940.0000000000, 465 | 754.0000000000, 466 | 1433.0000000000, 467 | ]; 468 | let summ = &Summary { 469 | sum: 9349.0000000000, 470 | min: 136.0000000000, 471 | max: 1591.0000000000, 472 | mean: 934.9000000000, 473 | median: 913.5000000000, 474 | var: 239208.9888888889, 475 | std_dev: 
489.0899599142, 476 | std_dev_pct: 52.3146817750, 477 | median_abs_dev: 611.5725000000, 478 | median_abs_dev_pct: 66.9482758621, 479 | quartiles: (567.2500000000, 913.5000000000, 1331.2500000000), 480 | iqr: 764.0000000000, 481 | }; 482 | check(val, summ); 483 | } 484 | #[test] 485 | fn test_norm25verynarrow() { 486 | let val = &[ 487 | 991.0000000000, 488 | 1018.0000000000, 489 | 998.0000000000, 490 | 1013.0000000000, 491 | 974.0000000000, 492 | 1007.0000000000, 493 | 1014.0000000000, 494 | 999.0000000000, 495 | 1011.0000000000, 496 | 978.0000000000, 497 | 985.0000000000, 498 | 999.0000000000, 499 | 983.0000000000, 500 | 982.0000000000, 501 | 1015.0000000000, 502 | 1002.0000000000, 503 | 977.0000000000, 504 | 948.0000000000, 505 | 1040.0000000000, 506 | 974.0000000000, 507 | 996.0000000000, 508 | 989.0000000000, 509 | 1015.0000000000, 510 | 994.0000000000, 511 | 1024.0000000000, 512 | ]; 513 | let summ = &Summary { 514 | sum: 24926.0000000000, 515 | min: 948.0000000000, 516 | max: 1040.0000000000, 517 | mean: 997.0400000000, 518 | median: 998.0000000000, 519 | var: 393.2066666667, 520 | std_dev: 19.8294393937, 521 | std_dev_pct: 1.9888308788, 522 | median_abs_dev: 22.2390000000, 523 | median_abs_dev_pct: 2.2283567134, 524 | quartiles: (983.0000000000, 998.0000000000, 1013.0000000000), 525 | iqr: 30.0000000000, 526 | }; 527 | check(val, summ); 528 | } 529 | #[test] 530 | fn test_exp10a() { 531 | let val = &[ 532 | 23.0000000000, 533 | 11.0000000000, 534 | 2.0000000000, 535 | 57.0000000000, 536 | 4.0000000000, 537 | 12.0000000000, 538 | 5.0000000000, 539 | 29.0000000000, 540 | 3.0000000000, 541 | 21.0000000000, 542 | ]; 543 | let summ = &Summary { 544 | sum: 167.0000000000, 545 | min: 2.0000000000, 546 | max: 57.0000000000, 547 | mean: 16.7000000000, 548 | median: 11.5000000000, 549 | var: 287.7888888889, 550 | std_dev: 16.9643416875, 551 | std_dev_pct: 101.5828843560, 552 | median_abs_dev: 13.3434000000, 553 | median_abs_dev_pct: 116.0295652174, 554 | quartiles: 
(4.2500000000, 11.5000000000, 22.5000000000),
            iqr: 18.2500000000,
        };
        check(val, summ);
    }

    // The following tests compare `Summary` built from a fixed sample against
    // reference values (10 decimal places) computed with an external
    // statistics package. Names presumably encode distribution and sample
    // size (exp = exponential, binom = binomial, pois = Poisson,
    // unif = uniform) — TODO confirm against the generator script.
    #[test]
    fn test_exp10b() {
        let val = &[
            24.0000000000,
            17.0000000000,
            6.0000000000,
            38.0000000000,
            25.0000000000,
            7.0000000000,
            51.0000000000,
            2.0000000000,
            61.0000000000,
            32.0000000000,
        ];
        let summ = &Summary {
            sum: 263.0000000000,
            min: 2.0000000000,
            max: 61.0000000000,
            mean: 26.3000000000,
            median: 24.5000000000,
            var: 383.5666666667,
            std_dev: 19.5848580967,
            std_dev_pct: 74.4671410520,
            median_abs_dev: 22.9803000000,
            median_abs_dev_pct: 93.7971428571,
            quartiles: (9.5000000000, 24.5000000000, 36.5000000000),
            iqr: 27.0000000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_exp10c() {
        let val = &[
            71.0000000000,
            2.0000000000,
            32.0000000000,
            1.0000000000,
            6.0000000000,
            28.0000000000,
            13.0000000000,
            37.0000000000,
            16.0000000000,
            36.0000000000,
        ];
        let summ = &Summary {
            sum: 242.0000000000,
            min: 1.0000000000,
            max: 71.0000000000,
            mean: 24.2000000000,
            median: 22.0000000000,
            var: 458.1777777778,
            std_dev: 21.4050876611,
            std_dev_pct: 88.4507754589,
            median_abs_dev: 21.4977000000,
            median_abs_dev_pct: 97.7168181818,
            quartiles: (7.7500000000, 22.0000000000, 35.0000000000),
            iqr: 27.2500000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_exp25() {
        let val = &[
            3.0000000000,
            24.0000000000,
            1.0000000000,
            19.0000000000,
            7.0000000000,
            5.0000000000,
            30.0000000000,
            39.0000000000,
            31.0000000000,
            13.0000000000,
            25.0000000000,
            48.0000000000,
            1.0000000000,
            6.0000000000,
            42.0000000000,
            63.0000000000,
            2.0000000000,
            12.0000000000,
            108.0000000000,
            26.0000000000,
            1.0000000000,
            7.0000000000,
            44.0000000000,
            25.0000000000,
            11.0000000000,
        ];
        let summ = &Summary {
            sum: 593.0000000000,
            min: 1.0000000000,
            max: 108.0000000000,
            mean: 23.7200000000,
            median: 19.0000000000,
            var: 601.0433333333,
            std_dev: 24.5161851301,
            std_dev_pct: 103.3565983562,
            median_abs_dev: 19.2738000000,
            median_abs_dev_pct: 101.4410526316,
            quartiles: (6.0000000000, 19.0000000000, 31.0000000000),
            iqr: 25.0000000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_binom25() {
        let val = &[
            18.0000000000,
            17.0000000000,
            27.0000000000,
            15.0000000000,
            21.0000000000,
            25.0000000000,
            17.0000000000,
            24.0000000000,
            25.0000000000,
            24.0000000000,
            26.0000000000,
            26.0000000000,
            23.0000000000,
            15.0000000000,
            23.0000000000,
            17.0000000000,
            18.0000000000,
            18.0000000000,
            21.0000000000,
            16.0000000000,
            15.0000000000,
            31.0000000000,
            20.0000000000,
            17.0000000000,
            15.0000000000,
        ];
        let summ = &Summary {
            sum: 514.0000000000,
            min: 15.0000000000,
            max: 31.0000000000,
            mean: 20.5600000000,
            median: 20.0000000000,
            var: 20.8400000000,
            std_dev: 4.5650848842,
            std_dev_pct: 22.2037202539,
            median_abs_dev: 5.9304000000,
            median_abs_dev_pct: 29.6520000000,
            quartiles: (17.0000000000, 20.0000000000, 24.0000000000),
            iqr: 7.0000000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_pois25lambda30() {
        let val = &[
            27.0000000000,
            33.0000000000,
            34.0000000000,
            34.0000000000,
            24.0000000000,
            39.0000000000,
            28.0000000000,
            27.0000000000,
            31.0000000000,
            28.0000000000,
            38.0000000000,
            21.0000000000,
            33.0000000000,
            36.0000000000,
            29.0000000000,
            37.0000000000,
            32.0000000000,
            34.0000000000,
            31.0000000000,
            39.0000000000,
            25.0000000000,
            31.0000000000,
            32.0000000000,
            40.0000000000,
            24.0000000000,
        ];
        let summ = &Summary {
            sum: 787.0000000000,
            min: 21.0000000000,
            max: 40.0000000000,
            mean: 31.4800000000,
            median: 32.0000000000,
            var: 26.5933333333,
            std_dev: 5.1568724372,
            std_dev_pct: 16.3814245145,
            median_abs_dev: 5.9304000000,
            median_abs_dev_pct: 18.5325000000,
            quartiles: (28.0000000000, 32.0000000000, 34.0000000000),
            iqr: 6.0000000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_pois25lambda40() {
        let val = &[
            42.0000000000,
            50.0000000000,
            42.0000000000,
            46.0000000000,
            34.0000000000,
            45.0000000000,
            34.0000000000,
            49.0000000000,
            39.0000000000,
            28.0000000000,
            40.0000000000,
            35.0000000000,
            37.0000000000,
            39.0000000000,
            46.0000000000,
            44.0000000000,
            32.0000000000,
            45.0000000000,
            42.0000000000,
            37.0000000000,
            48.0000000000,
            42.0000000000,
            33.0000000000,
            42.0000000000,
            48.0000000000,
        ];
        let summ = &Summary {
            sum: 1019.0000000000,
            min: 28.0000000000,
            max: 50.0000000000,
            mean: 40.7600000000,
            median: 42.0000000000,
            var: 34.4400000000,
            std_dev: 5.8685603004,
            std_dev_pct: 14.3978417577,
            median_abs_dev: 5.9304000000,
            median_abs_dev_pct: 14.1200000000,
            quartiles: (37.0000000000, 42.0000000000, 45.0000000000),
            iqr: 8.0000000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_pois25lambda50() {
        let val = &[
            45.0000000000,
            43.0000000000,
            44.0000000000,
            61.0000000000,
            51.0000000000,
            53.0000000000,
            59.0000000000,
            52.0000000000,
            49.0000000000,
            51.0000000000,
            51.0000000000,
            50.0000000000,
            49.0000000000,
            56.0000000000,
            42.0000000000,
            52.0000000000,
            51.0000000000,
            43.0000000000,
            48.0000000000,
            48.0000000000,
            50.0000000000,
            42.0000000000,
            43.0000000000,
            42.0000000000,
            60.0000000000,
        ];
        let summ = &Summary {
            sum: 1235.0000000000,
            min: 42.0000000000,
            max: 61.0000000000,
            mean: 49.4000000000,
            median: 50.0000000000,
            var: 31.6666666667,
            std_dev: 5.6273143387,
            std_dev_pct: 11.3913245723,
            median_abs_dev: 4.4478000000,
            median_abs_dev_pct: 8.8956000000,
            quartiles: (44.0000000000, 50.0000000000, 52.0000000000),
            iqr: 8.0000000000,
        };
        check(val, summ);
    }
    #[test]
    fn test_unif25() {
        let val = &[
            99.0000000000,
            55.0000000000,
            92.0000000000,
            79.0000000000,
            14.0000000000,
            2.0000000000,
            33.0000000000,
            49.0000000000,
            3.0000000000,
            32.0000000000,
            84.0000000000,
            59.0000000000,
            22.0000000000,
            86.0000000000,
            76.0000000000,
            31.0000000000,
            29.0000000000,
            11.0000000000,
            41.0000000000,
            53.0000000000,
            45.0000000000,
            44.0000000000,
            98.0000000000,
            98.0000000000,
            7.0000000000,
        ];
        let summ = &Summary {
            sum: 1242.0000000000,
            min: 2.0000000000,
            max: 99.0000000000,
            mean: 49.6800000000,
            median: 45.0000000000,
            var: 1015.6433333333,
            std_dev: 31.8691595957,
            std_dev_pct: 64.1488719719,
            median_abs_dev: 45.9606000000,
            median_abs_dev_pct: 102.1346666667,
            quartiles: (29.0000000000, 45.0000000000, 79.0000000000),
            iqr: 50.0000000000,
        };
        check(val, summ);
    }

    // Exact-equality checks on f64 sums: the `Stats::sum` implementation is
    // expected to be error-compensated, so even a catastrophic-cancellation
    // case (1e30 + 1.2 - 1e30) must yield the exact small remainder.
    #[test]
    fn test_sum_f64s() {
        assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999);
    }
    #[test]
    fn test_sum_f64_between_ints_that_sum_to_0() {
        assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2);
    }
}

#[cfg(test)]
mod bench {
    extern crate test;
    use self::test::Bencher;
    use crate::stats::Stats;

    // Minimal benchmark: sum of a three-element array with cancellation.
    #[bench]
    pub fn sum_three_items(b: &mut Bencher) {
        b.iter(|| {
            [1e20f64, 1.5f64, -1e20f64].sum();
        })
    }

    // Sum over 500 elements cycling through widely-ranged magnitudes.
    #[bench]
    pub fn sum_many_f64(b: &mut Bencher) {
        let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60];
        // Fix: the turbofish target type was mangled to `collect::>()` in the
        // source dump; restore `::<Vec<_>>` so the collect target is explicit.
        let v = (0..500).map(|i| nums[i % 5]).collect::<Vec<_>>();

        b.iter(|| {
            v.sum();
        })
    }

    // Empty benchmark body: measures the harness's per-iteration overhead.
    #[bench]
    pub fn no_iter(_: &mut Bencher) {}
}
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | max_width = 79
--------------------------------------------------------------------------------