├── .github └── workflows │ └── tests.yaml ├── .gitignore ├── DETAILS.md ├── LICENSE ├── PLAN.md ├── README.md ├── devbox.json ├── devbox.lock ├── nupm.nuon ├── nutest ├── completions.nu ├── discover.nu ├── display │ ├── display_nothing.nu │ ├── display_table.nu │ └── display_terminal.nu ├── errors.nu ├── formatter.nu ├── mod.nu ├── orchestrator.nu ├── report │ ├── report_junit.nu │ └── report_nothing.nu ├── returns │ ├── returns_nothing.nu │ ├── returns_summary.nu │ └── returns_table.nu ├── runner.nu ├── store.nu └── theme.nu ├── resources ├── test-run-terminal.png └── test-run.png └── tests ├── display └── test_display_table_errors.nu ├── harness.nu ├── report └── test_report_junit.nu ├── test_completions.nu ├── test_discover.nu ├── test_errors.nu ├── test_external_tools.nu ├── test_formatter.nu ├── test_integration.nu ├── test_module.nu ├── test_orchestrator.nu ├── test_output.nu ├── test_runner.nu ├── test_store_locking.nu ├── test_store_manage.nu ├── test_store_query_tests.nu └── test_store_success.nu /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | workflow_dispatch: 5 | pull_request: 6 | push: 7 | branches: 8 | - main 9 | schedule: 10 | - cron: '0 7 * * *' # Run every morning at 7am UTC 11 | 12 | permissions: 13 | contents: read 14 | 15 | jobs: 16 | nutest-tests: 17 | name: Run Tests 18 | 19 | permissions: 20 | checks: write 21 | pull-requests: write 22 | 23 | strategy: 24 | fail-fast: true 25 | matrix: 26 | version: ["0.103.0", "*", "nightly"] # Earliest supported, latest and nightly 27 | platform: [ubuntu-latest, windows-latest, macos-latest] 28 | 29 | runs-on: ${{ matrix.platform }} 30 | 31 | steps: 32 | - uses: actions/checkout@v4 33 | 34 | - name: Install Nushell Binary 35 | uses: hustcer/setup-nu@v3 36 | with: 37 | version: ${{ matrix.version }} 38 | 39 | - name: Test Nutest 40 | shell: nu {0} 41 | run: | 42 | nu -c 'use nutest; ( 43 | nutest run-tests 44 | 
--fail 45 | --display terminal 46 | --report { type: junit, path: test-report.xml } 47 | --returns summary | to json | save --force test-summary.json 48 | )' 49 | 50 | - name: Publish Test Results 51 | uses: EnricoMi/publish-unit-test-result-action@v2 52 | if: runner.os == 'Linux' && always() 53 | with: 54 | files: test-report.xml 55 | 56 | - name: Publish Test Results 57 | uses: EnricoMi/publish-unit-test-result-action/macos@v2 58 | if: runner.os == 'macOS' && always() 59 | with: 60 | files: test-report.xml 61 | 62 | - name: Publish Test Results 63 | uses: EnricoMi/publish-unit-test-result-action/windows@v2 64 | if: runner.os == 'Windows' && always() 65 | with: 66 | files: test-report.xml 67 | 68 | - name: Publish Test Summary 69 | if: runner.os == 'Linux' && matrix.version == '*' && github.ref == 'refs/heads/main' 70 | shell: nu {0} 71 | run: | 72 | let gist_id = "0cbdca67f966d7ea2e6e1eaf7c9083a3" 73 | let filename = "test-summary.json" 74 | 75 | let data = { 76 | files: { 77 | "test-summary.json": { 78 | content: (open --raw $filename) 79 | } 80 | } 81 | } 82 | 83 | ( 84 | $data | http patch 85 | --redirect-mode "follow" 86 | --content-type "application/json" 87 | --headers { 88 | "Authorization": $"Bearer ${{ secrets.GIST_TOKEN }}" 89 | "Accept": "application/vnd.github+json" 90 | "X-GitHub-Api-Version": "2022-11-28" 91 | } 92 | $"https://api.github.com/gists/($gist_id)" 93 | ) | ignore 94 | 95 | nushell-tests: 96 | name: Run Nushell Tests 97 | 98 | strategy: 99 | fail-fast: true 100 | matrix: 101 | platform: [ubuntu-latest, windows-latest, macos-latest] 102 | 103 | runs-on: ${{ matrix.platform }} 104 | 105 | steps: 106 | - uses: actions/checkout@v4 107 | 108 | - name: Checkout Nushell 109 | uses: actions/checkout@v4 110 | with: 111 | repository: nushell/nushell 112 | ref: main 113 | path: nushell 114 | 115 | - name: Install Nushell Binary 116 | uses: hustcer/setup-nu@v3 117 | with: 118 | version: "nightly" 119 | 120 | - name: Test Nushell 121 | # Nushell used 
here so use of workspace directory works consistently across platforms 122 | shell: nu {0} 123 | run: nu -c $"use ($env.GITHUB_WORKSPACE)/nutest; nutest run-tests --fail --path tests" 124 | working-directory: nushell/crates/nu-std 125 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .devbox 3 | -------------------------------------------------------------------------------- /DETAILS.md: -------------------------------------------------------------------------------- 1 | # Implementation Details 2 | 3 | ## How Does It Work? 4 | 5 | Nutest discovers tests by scanning matching files in the path, sourcing that code and collecting test annotations on methods via `scope commands`. The file patterns detected are `test_*.nu`, `test-*.nu`, `*_test.nu` and `*-test.nu` to match the most common styles and file groupings. 6 | 7 | Each test file, which include multiple tests (a suite) is dispatched to run on a single Nu subshell. 8 | 9 | Test results are captured using encoded events written to stdout (one event per line), which carry all the required context indicating what suite and test it is associated with, allowing tests to be run in parallel. 10 | 11 | Success and failure events are implied by an error being thrown, such as an assertion failure. 12 | 13 | Output from tests is captured by aliasing the print command. This may include structured data, which is preserved in the event data. This is achieved by converting each item sent to a print command as nuon and then encoding as base64, which ensures a single line event is emitted even if it contains multi-line text. 14 | 15 | All events for all suites and tests being run are then collated, ready to use to present to the user with flexible output and rendering. 
16 | 17 | ## Concurrency 18 | 19 | Nutest runs both test suites (a file of tests) and each test in parallel with minimal Nu subshells. 20 | 21 | There are two levels of concurrency used in Nutest, leveraging `par-each`, where the following are run concurrently: 22 | - Suites (file of tests). 23 | - Tests within a suite. 24 | 25 | This means that an 8-core CPU would run 8 suites concurrently and within each suite, it would run 8 tests concurrently. This might suggest Nutest potentially causing excessive CPU context switching, and the run taking longer than is strictly needed. However, this is not necessarily the case as Nushell leverages [Rayon](https://github.com/rayon-rs/rayon) for `par-each`, which purports to be efficient at managing the number of threads and of scheduling work across available CPU cores. For more on this, see Rayon's notion of [potential concurrency](https://smallcultfollowing.com/babysteps/blog/2015/12/18/rayon-data-parallelism-in-rust/), the dynamic nature of its [parallel iterators](https://github.com/rayon-rs/rayon?tab=readme-ov-file#parallel-iterators-and-more) and the underlying use of Rust's [available parallelism](https://doc.rust-lang.org/stable/std/thread/fn.available_parallelism.html). However, it's still not clear how well this works across multiple processes. 26 | 27 | Additionally, given the kinds of use-cases Nushell is used for, many tests are likely to be I/O bound. 28 | 29 | Feedback on how well this works in practice is very welcome. 30 | 31 | ## SQLite 32 | 33 | Given Nutest runs as much as possible concurrently, this puts an unusual level of pressure on SQLite that collects test results and the output. For this reason, INSERTs sometimes fail and so a retry mechanism has been added to attempt to insert the data again up to a particular maximum tries at which point Nutest may give up and throw an error. The retries have had some stress testing to come to a pragmatic value, but please let us know if you're seeing issues. 
34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Kieron Wilkinson 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /PLAN.md: -------------------------------------------------------------------------------- 1 | # Planned Features and Ideas 2 | 3 | ## Known Issues 4 | 5 | - The ordering of before/after all output is not reflected well as they are only kept in the database once for the suite and then re-produced for each test. A better strategy might be to reflect them as before-all and after-all output events in the database (using another record field?), and then query and order them appropriately in the final output. 
6 | - After-all/after-each processing may not happen if before-all/before-each commands fail: 7 | - Currently, a test will be marked as failed on the first before-each that fails, the test will not be run and neither will the after-each. So a before-each that creates temporary files before a failure will not be removed. 8 | - Similarly, execution will stop on the first after-each that fails. 9 | - Same for before-all and after-all. 10 | - We could try to accumulate as much context as possible, but it doesn't seem worth complicating the existing design currently. 11 | 12 | ## Post v1 Roadmap 13 | 14 | - Ensure badges are still generated even if tests fail 15 | - This is a case where multiple outputs would be useful 16 | - Or perhaps better, we could expose a data api. E.g. --results-hook { |provider| ... } 17 | - Get Topiary Nushell formatting working as commit hook (if it's readable) 18 | - JUnit test reports: 19 | - Add error information into the expected JUnit failure elements 20 | - Add test output 21 | - Investigate use of styling of errors and strip as necessary 22 | - Fluent assertion module with pluggable matchers. 23 | - Generate test coverage (in llvm-cov format to allow combining with Nushell coverage) 24 | 25 | ## Future Ideas 26 | 27 | - Support matchers in `list-tests` (a trivial win) 28 | - Allow before-all and before-each to be specified without returning context 29 | - Optimisation: If nothing requires test output (e.g. summary), we can avoid having to process it 30 | - Optionally write decoded event stream to file to help debug Nutest itself. 31 | - Optionally allow running ignored tests. 32 | - Better support for direct-to-stdout tests by external tools that don't use the print statement. Allow running with sequential or subshell-based processing to capture output. Or even auto-detect and re-run tests. 33 | - Detect flaky tests by re-running failed tests a few times. 
34 | - More sophisticated change display rather than simple assertion module output, e.g. differences in records and tables, perhaps displayed as tables 35 | - Perhaps highlight differences in output using background colours like a diff tool. 36 | - Pluggable displays and reports 37 | - Test timing. 38 | - Dynamic terminal UI, showing the currently executing suites and tests. 39 | - This will resolve not being able to see the currently running tests in the terminal display 40 | - Would include things like a progress bar, running total of completed, fails, skips, etc. 41 | - Would retain error information and output on tail failure 42 | - If we save historical test run timings, we could: 43 | - Estimate time left 44 | - Provide difference reports to provide idea of regressions 45 | - Stream test results. Each suite is run in a separate nu process via `complete` and therefore each suite's results are not reported until the whole suite completed. There are some limitations here due to not being able to process Nushell sub-processes concurrently. However, we may be able to avoid the `complete` command to resolve this. This would also help better reflect current status in the event-based terminal UI. 
46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nutest 2 | 3 | ![CI/CD](https://github.com/vyadh/nutest/actions/workflows/tests.yaml/badge.svg) 4 | ![Tests](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.total&label=Tests) 5 | ![Passed](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.passed&label=Passed&color=%2331c654) 6 | ![Failed](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.failed&label=Failed&color=red) 7 | ![Skipped](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.skipped&label=Skipped&color=yellow) 8 | 9 | A [Nushell](https://www.nushell.sh) test framework. 10 | 11 | ![An example nutest run](resources/test-run.png) 12 | 13 | *^ Tests are structured data that can be processed just like any other table.* 14 | 15 | ![An example nutest run](resources/test-run-terminal.png) 16 | 17 | *^ Terminal mode - test results appear as they complete.* 18 | 19 | 20 | ## Requirements 21 | 22 | Needs Nushell 0.103.0 or later. 23 | If Nushell 0.101.0+ is required, use Nutest v1.0.1. 24 | 25 | 26 | ## Motivation 27 | 28 | Writing tests in Nushell is both powerful and expressive. Not only for testing Nushell code, but also other things, such as APIs, infrastructure, and other scripts. Nutest aims to encourage writing tests for all sorts of things by making testing more accessible. 
29 | 30 | 31 | ## Install and Run 32 | 33 | ### Using [nupm](https://github.com/nushell/nupm) 34 | 35 | First-time installation: 36 | 37 | ```nushell 38 | git clone https://github.com/vyadh/nutest.git 39 | do { cd nutest; git checkout <version> } # Where <version> is the latest release 40 | nupm install nutest --path 41 | ``` 42 | 43 | Usage: 44 | 45 | ```nushell 46 | cd <location of your tests> 47 | use nutest 48 | nutest run-tests 49 | ``` 50 | 51 | ### Standalone 52 | 53 | First-time installation: 54 | 55 | ```nushell 56 | git clone https://github.com/vyadh/nutest.git 57 | do { cd nutest; git checkout <version> } # Where <version> is the latest release 58 | cp -r nutest/nutest <your scripts folder> 59 | ``` 60 | 61 | Usage: 62 | 63 | ```nushell 64 | cd <location of your tests> 65 | use nutest 66 | nutest run-tests 67 | ``` 68 | 69 | 70 | ## Writing Tests 71 | 72 | ### Test Suites 73 | 74 | A test suite (a Nushell file containing tests) is recognised by nutest as a filename matching one of the following patterns somewhere within the search path, being the working directory tree or via `--path`: 75 | - `test_*.nu` 76 | - `test-*.nu` 77 | - `*_test.nu` 78 | - `*-test.nu` 79 | 80 | ### Test Commands 81 | 82 | **Nutest** uses Nushell command attributes as a tag system for tests; test discovery will ignore non-tagged commands. It supports: 83 | 84 | | attribute | description | 85 | |----------------|-----------------------------------------| 86 | | `@test` | this is the main tag to annotate tests. | 87 | | `@before-all` | this is run once before all tests. | 88 | | `@before-each` | this is run before each test. | 89 | | `@after-all` | this is run once after all tests. | 90 | | `@after-each` | this is run after each test. | 91 | | `@ignore` | ignores the test but still collects it. 
| 92 | 93 | For example: 94 | 95 | ```nushell 96 | use std assert 97 | use std/testing * 98 | 99 | @before-each 100 | def setup [] { 101 | print "before each" 102 | { 103 | data: "xxx" 104 | } 105 | } 106 | 107 | @test 108 | def "some-data is xxx" [] { 109 | let context = $in 110 | print $"Running test A: ($context.data)" 111 | assert equal "xxx" $context.data 112 | } 113 | 114 | @test 115 | def "is one equal one" [] { 116 | print $"Running test B: ($in.data)" 117 | assert equal 1 1 118 | } 119 | 120 | @test 121 | def "is two equal two" [] { 122 | print $"Running test C: ($in.data)" 123 | assert equal 2 2 124 | } 125 | 126 | @after-each 127 | def cleanup [] { 128 | let context = $in 129 | print "after each" 130 | print $context 131 | } 132 | ``` 133 | 134 | Will return: 135 | ``` 136 | ╭───────────┬──────────────────┬────────┬─────────────────────╮ 137 | │ suite │ test │ result │ output │ 138 | ├───────────┼──────────────────┼────────┼─────────────────────┤ 139 | │ test_base │ is one equal one │ PASS │ before each │ 140 | │ │ │ │ Running test B: xxx │ 141 | │ │ │ │ after each │ 142 | │ │ │ │ {data: xxx} │ 143 | │ test_base │ is two equal two │ PASS │ before each │ 144 | │ │ │ │ Running test C: xxx │ 145 | │ │ │ │ after each │ 146 | │ │ │ │ {data: xxx} │ 147 | │ test_base │ some-data is xxx │ PASS │ before each │ 148 | │ │ │ │ Running test A: xxx │ 149 | │ │ │ │ after each │ 150 | │ │ │ │ {data: xxx} │ 151 | ╰───────────┴──────────────────┴────────┴─────────────────────╯ 152 | ``` 153 | 154 | 155 | ## Current Features 156 | 157 | - [x] Supports using Nushell attributes (e.g. 
`@test`) 158 | - Note: The previous format of `#[test]` annotations is still supported but deprecated 159 | - [x] Flexible test definitions 160 | - [x] Setup/teardown with created context available to tests 161 | - [x] Filtering of suites and tests 162 | - [x] Terminal completions for suites and tests 163 | - [x] Outputting test results in various ways, including queryable Nushell data tables 164 | - [x] Test output captured and shown against test results 165 | - [x] Parallel test execution and concurrency control 166 | - [x] CI/CD support 167 | - [x] Non-zero exit code in the form of a `--fail` flag 168 | - [x] Test report integration compatible with a wide array of tools 169 | 170 | ### Flexible Tests 171 | 172 | Supports running test scripts in flexible configurations, whether defined as a 173 | Nushell module or scripts that reference other Nushell commands. 174 | 175 | Scripts being tested can either be utilised from their public interface as a module via `use <module>.nu`, or tested via their private interface by `source <module>.nu`. 176 | 177 | Tests are not limited to use with just Nushell scripts. Nutest combined with the power of Nushell can be used to test command-line tools, APIs, infrastructure or bash/other scripts. Add in use of something like [WireMock](https://wiremock.org) and Nushell's `http` commands, and mocked HTTP endpoints can be configured for tools under test with the convenience of Nushell records and defined with the test. 178 | 179 | ### Context and Setup/Teardown 180 | 181 | Specify before/after stages for each test via `@before-each` and `@after-each` annotations, or for all tests via `@before-all` and `@after-all`. 182 | 183 | These setup/teardown commands can also be used to generate contexts used by each test, see Writing Tests section for an example. 
184 | 185 | ### Filtering Suites and Tests 186 | 187 | Allows filter of suites and tests to run via a pattern, such as: 188 | ```nushell 189 | run-tests --match-suites api --match-tests test[0-9] 190 | ``` 191 | This will run all files that include `api` in the name and tests that contain `test` followed by a digit. 192 | 193 | ### Completions 194 | 195 | Completions are available not only for normal command values, they are also available for suites and tests, making it easier to run specific suites and tests from the command line. 196 | 197 | For example, typing the following and pressing tab will show all available suites that contain the word `api`: 198 | ```nushell 199 | run-tests --match-suites api 200 | ``` 201 | 202 | Typing the following and pressing tab will show all available tests that contain the word `parse`: 203 | ```nushell 204 | run-tests --match-tests parse 205 | ``` 206 | 207 | While test discovery is done concurrently and performant even with many test files, you can specify `--match-suites ` before `--match-tests` to greatly reduce the amount of work nutest needs to do to find the tests you want to run. 208 | 209 | ### Results Output 210 | 211 | There are several ways to output test results in nutest: 212 | - Displaying to the terminal 213 | - Returning data for pipelines 214 | - Reporting to file 215 | 216 | #### Terminal Display 217 | 218 | By default, nutest displays tests in a textual format so they can be displayed as they complete, or explicitly as `--display terminal`. Results can also be displayed as a table using `--display table`, which will appear at the end of the run. Examples of these two display types can be seen in the screenshots above. 219 | 220 | Terminal output can also be turned off using `--display nothing`. 221 | 222 | #### Returning Data 223 | 224 | In line with the Nushell philosophy, tests results are also data that can be queried and manipulated. 
For example, to show only tests that need attention using: 225 | 226 | ```nushell 227 | run-tests --returns table | where result in [SKIP, FAIL] 228 | ``` 229 | 230 | Alternatively, you can return a summary of the test run as a record using: 231 | ```nushell 232 | run-tests --returns summary 233 | ``` 234 | 235 | Which will be shown as: 236 | ``` 237 | ╭─────────┬────╮ 238 | │ total │ 54 │ 239 | │ passed │ 50 │ 240 | │ failed │ 1 │ 241 | │ skipped │ 3 │ 242 | ╰─────────┴────╯ 243 | ``` 244 | 245 | This particular feature is used to generate the badges at the top of this README as part of the CI test run. 246 | 247 | If a `--returns` is specified, the display report will be deactivated by default, but can be re-enabled by using a `--display` option explicitly. 248 | 249 | The combination of `--display` and `--returns` can be used to both see the running tests and also query and manipulate the output once it is complete. It is also helpful for saving output to a file in a format not supported out of the box by the reporting functionality. 250 | 251 | #### Reporting to File 252 | 253 | Lastly, tests reports can be output to file. See the CI/CD Integration for more details. 254 | 255 | 256 | ### Test Output 257 | 258 | Output from the `print` command to stdout and stderr will be captured and shown against test results, which is useful for debugging failing tests. 259 | 260 | Output of external commands cannot currently be captured unless specifically handled in the tests by outputting using the `print` command. 261 | 262 | 263 | ### Parallel Test Execution 264 | 265 | Tests written in Nutest are run concurrently by default. 266 | 267 | This is a good design constraint for self-contained tests that run efficiently. The default concurrency strategy is geared for CPU-bound tests, maximising the use of available CPU cores. However, some cases may need adjustment to run efficiently. 
For example, IO-bound tests may benefit from lower concurrency and tests waiting on external resources may benefit by not being limited to the available CPU cores. 268 | 269 | The level of concurrency can be adjusted or even disabled by specifying the `--strategy { threads: <count> }` option to the `run-tests` command, where `<count>` is the number of concurrently executing machine threads. The default handles the concurrency level automatically based on the available hardware. 270 | 271 | See the Concurrency section under How Does It Work? for more details. 272 | 273 | The concurrency level can also be specified at the suite-level by way of a `strategy` annotation. For example, the following strategy will run all tests in the suite sequentially: 274 | 275 | ```nushell 276 | #[strategy] 277 | def threads []: nothing -> record { 278 | { threads: 1 } 279 | } 280 | ``` 281 | 282 | This would be beneficial in a project where most tests should run concurrently by default, but a subset perhaps require exclusive access to a resource, or one that needs a setup/teardown cycle via `before-each` and `after-each`. 283 | 284 | 285 | ### CI/CD Support 286 | 287 | #### Exit Codes 288 | 289 | In normal operation the tests will be run and the results will be returned as a table with the exit code always set to 0. To avoid manually checking the results, the `--fail` flag can be used to set the exit code to 1 if any tests fail. In this mode, if a test fails, the results will only be printed in the default format and cannot be interrogated due to the need to invoke `exit 1` without a result. 290 | 291 | ```nushell 292 | run-tests --fail 293 | ``` 294 | 295 | This is useful for CI/CD pipelines where it is desirable to fail the current 296 | job. However, note that using this directly in your shell will exit your shell session! 
297 | 298 | ### Test Report Integration 299 | 300 | In order to integrate with CI/CD tools, such as the excellent [GitHub Action to Publish Test Results](https://github.com/EnricoMi/publish-unit-test-result-action), you can output the result in the JUnit XML format. The JUnit format was chosen simply as it appears to have the widest level of support by tooling. The report can be created by by specifying the `--report` option to the `run-tests` command: 301 | 302 | ```nushell 303 | run-tests --fail --report { type: junit, path: "test-report.xml" } 304 | ``` 305 | 306 | ### Badges 307 | 308 | ![Tests](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.total&label=Tests) 309 | ![Passed](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.passed&label=Passed&color=%2331c654) 310 | ![Failed](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.failed&label=Failed&color=red) 311 | ![Skipped](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fgist.githubusercontent.com%2Fvyadh%2F0cbdca67f966d7ea2e6e1eaf7c9083a3%2Fraw%2Ftest-summary.json&query=%24.skipped&label=Skipped&color=yellow) 312 | 313 | The above badges serve as an example of how to directly leverage nutest for downstream use. In this case, these badges are generated from the last run on the main branch by saving a summary of the test run to a Gist and leveraging the [shields.io](https://shields.io) project by to query that data by generating a [Dynamic JSON Badge](https://shields.io/badges/dynamic-json-badge). You can see how that can be achieved by looking at [the GitHub Actions workflow in this repository](.github/workflows/tests.yaml). 
314 | 315 | ## Alternative Tools 316 | 317 | Nushell has an internal runner for the standard library `testing.nu` but is not itself part of the standard library. 318 | 319 | The Nushell package manager [Nupm](https://github.com/nushell/nupm), provides module-focused testing for exported commands. 320 | -------------------------------------------------------------------------------- /devbox.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.13.7/.schema/devbox.schema.json", 3 | "packages": [ 4 | "nushell@latest" 5 | ], 6 | "shell": { 7 | "init_hook": [ 8 | "nu" 9 | ] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /devbox.lock: -------------------------------------------------------------------------------- 1 | { 2 | "lockfile_version": "1", 3 | "packages": { 4 | "nushell@latest": { 5 | "last_modified": "2024-12-26T01:25:00Z", 6 | "resolved": "github:NixOS/nixpkgs/16e046229f3b4f53257973a5532bcbb72457d2f2#nushell", 7 | "source": "devbox-search", 8 | "version": "0.101.0", 9 | "systems": { 10 | "aarch64-darwin": { 11 | "outputs": [ 12 | { 13 | "name": "out", 14 | "path": "/nix/store/gjgck7ays80f1fkhwc4ny5jwcdpi093h-nushell-0.101.0", 15 | "default": true 16 | } 17 | ], 18 | "store_path": "/nix/store/gjgck7ays80f1fkhwc4ny5jwcdpi093h-nushell-0.101.0" 19 | }, 20 | "aarch64-linux": { 21 | "outputs": [ 22 | { 23 | "name": "out", 24 | "path": "/nix/store/ciymw62irm1ny3cd3m9px72l7l8myrqh-nushell-0.101.0", 25 | "default": true 26 | } 27 | ], 28 | "store_path": "/nix/store/ciymw62irm1ny3cd3m9px72l7l8myrqh-nushell-0.101.0" 29 | }, 30 | "x86_64-darwin": { 31 | "outputs": [ 32 | { 33 | "name": "out", 34 | "path": "/nix/store/z0f7kgp7pl3iyzdqns8gk084g6dchkvj-nushell-0.101.0", 35 | "default": true 36 | } 37 | ], 38 | "store_path": "/nix/store/z0f7kgp7pl3iyzdqns8gk084g6dchkvj-nushell-0.101.0" 39 | }, 40 | "x86_64-linux": { 
41 | "outputs": [ 42 | { 43 | "name": "out", 44 | "path": "/nix/store/sbkllymdppc0fkv6c6qhw81194xzvmkd-nushell-0.101.0", 45 | "default": true 46 | } 47 | ], 48 | "store_path": "/nix/store/sbkllymdppc0fkv6c6qhw81194xzvmkd-nushell-0.101.0" 49 | } 50 | } 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /nupm.nuon: -------------------------------------------------------------------------------- 1 | { 2 | name: "nutest" 3 | type: "module" 4 | version: "1.0.0" 5 | description: "Nutest, The Nushell test framework" 6 | license: "MIT" 7 | } 8 | -------------------------------------------------------------------------------- /nutest/completions.nu: -------------------------------------------------------------------------------- 1 | use discover.nu 2 | 3 | export def "nu-complete display" []: nothing -> record> { 4 | { 5 | options: { 6 | sort: false 7 | } 8 | completions: [ 9 | [value description]; 10 | [ 11 | "none" # rename nothing 12 | "No display output during test run (default when returning a result)." 13 | ] 14 | [ 15 | "terminal" 16 | "Output test results as they complete (default when returning nothing)." 17 | ] 18 | [ 19 | "table" 20 | "A table listing all tests with decorations and color." 21 | ] 22 | ] 23 | } 24 | } 25 | 26 | export def "nu-complete returns" []: nothing -> record> { 27 | { 28 | options: { 29 | sort: false 30 | } 31 | completions: [ 32 | [value description]; 33 | [ 34 | "nothing" 35 | "Returns no results from the test run." 36 | ] 37 | [ 38 | "table" 39 | "Returns a table listing all test results." 40 | ] 41 | [ 42 | "summary" 43 | "Returns a summary of the test results." 
44 | ] 45 | ] 46 | } 47 | } 48 | 49 | export def "nu-complete suites" [context: string]: nothing -> record { 50 | let options = $context | parse-command-context 51 | let suites = $options.path 52 | | discover suite-files --matcher $options.suite 53 | | each { path parse | get stem } 54 | | sort 55 | 56 | { 57 | options: { 58 | completion_algorithm: "prefix" 59 | positional: false # Use substring matching 60 | } 61 | completions: $suites 62 | } 63 | } 64 | 65 | export def "nu-complete tests" [context: string]: nothing -> record { 66 | let options = $context | parse-command-context 67 | 68 | let tests = $options.path 69 | | discover suite-files --matcher $options.suite 70 | | discover test-suites --matcher $options.test 71 | | each { |suite| $suite.tests | where { $in.type in ["test", "ignore"] } } 72 | | flatten 73 | | sort 74 | | each { 75 | if ($in.name | str contains " ") { 76 | $'"($in.name)"' 77 | } else { 78 | $in.name 79 | } 80 | } 81 | 82 | { 83 | options: { 84 | completion_algorithm: "prefix" 85 | positional: false # Use substring matching 86 | } 87 | completions: $tests 88 | } 89 | } 90 | 91 | def parse-command-context []: string -> record { 92 | let options = ( 93 | $in 94 | # Strip everything before the actual arguments 95 | | str replace --regex '^.*?--' '--' 96 | # Group into parameter name and value pairs, being: table 97 | | parse --regex '--(?P[-\w]+)\s+(?P[^--]+)' 98 | # Extract into a table that can be converted into a record of "name: value" pairs 99 | | each { |pair| [ ($pair | get name), ($pair | get value | str trim) ] } 100 | | into record 101 | ) 102 | 103 | { 104 | suite: ($options | get-or-null "match-suites" | default ".*") 105 | test: ($options | get-or-null "match-tests" | default ".*") 106 | path: ($options | get-or-null "path" | default ".") 107 | } 108 | } 109 | 110 | # A slight variation on get, which also translates empty strings to null 111 | def get-or-null [name: string]: record -> string { 112 | let value = $in | get 
--ignore-errors $name 113 | if ($value | is-empty) { 114 | null 115 | } else { 116 | $value 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /nutest/discover.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | 3 | const default_pattern = '**/{*[\-_]test,test[\-_]*}.nu' 4 | 5 | # Also see the filtering in runner.nu 6 | const supported_types = [ 7 | "test", 8 | "ignore", 9 | "before-all", 10 | "after-all", 11 | "before-each", 12 | "after-each", 13 | "strategy" 14 | ] 15 | 16 | export def suite-files [ 17 | --glob: string = $default_pattern 18 | --matcher: string = ".*" 19 | ]: string -> list { 20 | 21 | let path = $in 22 | $path 23 | | list-files $glob 24 | | where ($it | path parse | get stem) =~ $matcher 25 | } 26 | 27 | def list-files [ pattern: string ]: string -> list { 28 | let path = $in 29 | if ($path | path type) == file { 30 | [$path] 31 | } else { 32 | cd $path 33 | glob $pattern 34 | } 35 | } 36 | 37 | export def test-suites [ 38 | --matcher: string = ".*" 39 | ]: list -> table> { 40 | 41 | let suite_files = $in 42 | let result = $suite_files 43 | | par-each { discover-suite $in } 44 | | filter-tests $matcher 45 | 46 | # The following manifests the data to avoid laziness causing errors to be thrown in the wrong context 47 | # Some parser errors might be a `list`, collecting will cause it to be thrown here 48 | $result | collect 49 | # Others are only apparent collecting the tests table 50 | $result | each { |suite| $suite.tests | collect } 51 | 52 | $result 53 | } 54 | 55 | def discover-suite [test_file: string]: nothing -> record> { 56 | let query = test-query $test_file 57 | let result = (^$nu.current-exe --no-config-file --commands $query) 58 | | complete 59 | 60 | if $result.exit_code == 0 { 61 | parse-suite $test_file ($result.stdout | from nuon) 62 | } else { 63 | error make { msg: $result.stderr } 64 | } 65 | } 66 | 67 | # Query any method 
with attributes or a specific tag in the description 68 | # This may include non-test commands but they will be filtered out later 69 | def test-query [file: string]: nothing -> string { 70 | let query = " 71 | scope commands 72 | | where ( $it.type == 'custom' and ( 73 | ($it.attributes | is-not-empty) or ($it.description =~ '\\[[a-z-]+\\]') 74 | )) 75 | | each { |item| { 76 | name: $item.name 77 | attributes: ($item.attributes | get name) 78 | description: $item.description 79 | } } 80 | | to nuon 81 | " 82 | $"source ($file); ($query)" 83 | } 84 | 85 | def parse-suite [ 86 | test_file: string 87 | tests: list, description: string>> 88 | ]: nothing -> record> { 89 | 90 | { 91 | name: ($test_file | path parse | get stem) 92 | path: $test_file 93 | tests: ($tests | each { parse-test $in }) 94 | } 95 | } 96 | 97 | def parse-test [ 98 | test: record, description: string> 99 | ]: nothing -> record { 100 | 101 | { 102 | name: $test.name 103 | type: ($test | parse-type) 104 | } 105 | } 106 | 107 | def parse-type []: record, description: string> -> string { 108 | let metadata = $in 109 | 110 | $metadata.attributes 111 | | append ($metadata.description | description-attributes) 112 | | where $it in $supported_types 113 | | get 0 --ignore-errors 114 | | default "unsupported" 115 | } 116 | 117 | def description-attributes []: string -> list { 118 | $in | parse --regex '.*\[([a-z-]+)\].*' | get capture0 119 | } 120 | 121 | def filter-tests [ 122 | matcher: string 123 | ]: table> -> table> { 124 | 125 | let tests = $in 126 | $tests 127 | | each { |suite| 128 | { 129 | name: $suite.name 130 | path: $suite.path 131 | tests: ( $suite.tests 132 | # Filter out unsupported types 133 | | where $it.type in $supported_types 134 | # Filter only 'test' and 'ignore' by pattern 135 | | where ($it.type != "test" and $it.type != "ignore") or $it.name =~ $matcher 136 | ) 137 | } 138 | } 139 | # Remove suites that have no actual tests to run 140 | | where ($it.tests | where type in ["test", 
"ignore"] | is-not-empty) 141 | } 142 | -------------------------------------------------------------------------------- /nutest/display/display_nothing.nu: -------------------------------------------------------------------------------- 1 | 2 | export def create []: nothing -> record { 3 | { 4 | name: "display nothing" 5 | run-start: { || ignore } 6 | run-complete: { || ignore } 7 | test-start: { |row| ignore } 8 | test-complete: { |row| ignore } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /nutest/display/display_table.nu: -------------------------------------------------------------------------------- 1 | # Collects results into a table to display 2 | 3 | use ../store.nu 4 | use ../theme.nu 5 | use ../formatter.nu 6 | 7 | export def create []: nothing -> record { 8 | let theme = theme standard 9 | let error_format = "compact" 10 | let formatter = formatter pretty $theme $error_format 11 | 12 | { 13 | name: "display table" 14 | run-start: { || ignore } 15 | run-complete: { || print (query-results $theme $formatter) } 16 | test-start: { |row| ignore } 17 | test-complete: { |row| ignore } 18 | 19 | # Easier testing 20 | results: { query-results $theme $formatter } 21 | } 22 | } 23 | 24 | def query-results [ 25 | theme: closure 26 | formatter: closure 27 | ]: nothing -> table { 28 | 29 | store query | each { |row| 30 | { 31 | suite: ({ type: "suite", text: $row.suite } | do $theme) 32 | test: ({ type: "test", text: $row.test } | do $theme) 33 | result: (format-result $row.result $theme) 34 | output: ($row.output | do $formatter) 35 | } 36 | } 37 | } 38 | 39 | def format-result [result: string, theme: closure]: nothing -> string { 40 | match $result { 41 | "PASS" => ({ type: "pass", text: $result } | do $theme) 42 | "SKIP" => ({ type: "skip", text: $result } | do $theme) 43 | "FAIL" => ({ type: "fail", text: $result } | do $theme) 44 | _ => $result 45 | } 46 | } 47 | 
-------------------------------------------------------------------------------- /nutest/display/display_terminal.nu: -------------------------------------------------------------------------------- 1 | # Displays test results in the terminal as they are output. 2 | 3 | use ../store.nu 4 | use ../theme.nu 5 | use ../formatter.nu 6 | 7 | export def create []: nothing -> record { 8 | let theme = theme standard 9 | let error_format = "rendered" 10 | let formatter = formatter pretty $theme $error_format 11 | 12 | { 13 | name: "display terminal" 14 | run-start: { start-suite } 15 | run-complete: { complete-suite } 16 | test-start: { |row| start-test $row } 17 | test-complete: { |row| $row | complete-test $theme $formatter } 18 | } 19 | } 20 | 21 | def start-suite []: nothing -> nothing { 22 | print "Running tests..." 23 | } 24 | 25 | def complete-suite []: nothing -> nothing { 26 | let results = store query 27 | let by_result = $results | group-by result 28 | 29 | let total = $results | length 30 | let passed = $by_result | count "PASS" 31 | let failed = $by_result | count "FAIL" 32 | let skipped = $by_result | count "SKIP" 33 | 34 | let output = $"($total) total, ($passed) passed, ($failed) failed, ($skipped) skipped" 35 | print $"Test run completed: ($output)" 36 | } 37 | 38 | def count [key: string]: record -> int { 39 | $in 40 | | get --ignore-errors $key 41 | | default [] 42 | | length 43 | } 44 | 45 | def start-test [row: record]: nothing -> nothing { 46 | } 47 | 48 | def complete-test [theme: closure, formatter: closure]: record -> nothing { 49 | let event = $in 50 | let suite = { type: "suite", text: $event.suite } | do $theme 51 | let test = { type: "test", text: $event.test } | do $theme 52 | 53 | let result = store query-test $event.suite $event.test 54 | if ($result | is-empty) { 55 | error make { msg: $"No test results found for: ($event)" } 56 | } 57 | let row = $result | first 58 | let formatted = format-result $row.result $theme 59 | 60 | if ($row.output 
| is-not-empty) { 61 | let output = $row.output | format-output $formatter 62 | print $"($formatted) ($suite) ($test)\n($output)" 63 | } else { 64 | print $"($formatted) ($suite) ($test)" 65 | } 66 | } 67 | 68 | def format-result [result: string, theme: closure]: nothing -> string { 69 | match $result { 70 | "PASS" => ({ type: "pass", text: $result } | do $theme) 71 | "SKIP" => ({ type: "skip", text: $result } | do $theme) 72 | "FAIL" => ({ type: "fail", text: $result } | do $theme) 73 | _ => $result 74 | } 75 | } 76 | 77 | def format-output [formatter: closure]: table -> string { 78 | let output = $in 79 | let formatted = $output | do $formatter 80 | if ($formatted | describe) == "string" { 81 | $formatted | indent 82 | } else { 83 | $formatted 84 | } 85 | } 86 | 87 | def indent []: string -> string { 88 | " " + ($in | str replace --all "\n" "\n ") 89 | } 90 | -------------------------------------------------------------------------------- /nutest/errors.nu: -------------------------------------------------------------------------------- 1 | # The backtrace errors in 0.103 are helpful but not while testing Nutest itself 2 | # This script allow us to unpack them so they look like the original error given that 3 | # for whatever reason, `$env.NU_BACKTRACE = 0` doesn't appear to work 4 | 5 | export def unwrap-error []: record -> record { 6 | let original = $in | select msg rendered json 7 | 8 | mut error = $original 9 | mut json = $error.json | from json 10 | while (("inner" in $json) and ($json.inner | is-not-empty)) { 11 | $json = $error.json | from json | get inner | first 12 | $error = $error | merge { 13 | msg: $json.msg 14 | json: ($json | to json) 15 | } 16 | } 17 | 18 | $original | merge { 19 | msg: $error.msg 20 | rendered: ($error.rendered | last-rendered) 21 | labels: $json.labels 22 | json: $error.json 23 | } 24 | } 25 | 26 | def last-rendered []: string -> string { 27 | let rendered = $in 28 | let lines = $rendered | lines 29 | let errors_start = $lines 
30 | | enumerate 31 | | where item like "^Error: *" 32 | | get index 33 | 34 | if (($errors_start | is-empty) or (($errors_start | length) == 1)) { # boolean "or", not a pipe: the pipe discarded the is-empty result 35 | $rendered 36 | } else { 37 | $lines 38 | | slice ($errors_start | last).. 39 | | str join "\n" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /nutest/formatter.nu: -------------------------------------------------------------------------------- 1 | use errors.nu 2 | 3 | # A formatter that preserves the data as-is, including stream metadata, useful for tests. 4 | export def preserved []: nothing -> closure { 5 | { $in } 6 | } 7 | 8 | # A formatter that preserves the data only, useful for querying. 9 | export def unformatted []: nothing -> closure { 10 | #table> -> list 11 | { 12 | $in 13 | | each { |message| $message.items } 14 | | flatten 15 | } 16 | } 17 | 18 | # A formatter that formats items as a string against a theme 19 | export def pretty [ 20 | theme: closure 21 | error_format: string 22 | ]: nothing -> closure { 23 | 24 | #table> -> string 25 | { 26 | let events = $in 27 | $events 28 | | each { |event| $event | pretty-format-event $theme $error_format } 29 | | str join "\n" 30 | } 31 | } 32 | 33 | def pretty-format-event [ 34 | theme: closure 35 | error_format: string 36 | ]: record> -> string { 37 | 38 | let event = $in 39 | match $event { 40 | { stream: "output", items: $items } => { 41 | $items | str join "\n" 42 | } 43 | { stream: "error", items: $items } => { 44 | let formatted = $items | each { $in | pretty-format-item $error_format } 45 | let text = ($formatted | str join "\n") 46 | { type: "warning", text: $text } | do $theme 47 | } 48 | } 49 | } 50 | 51 | def pretty-format-item [error_format: string]: any -> any { 52 | let item = $in 53 | if ($item | looks-like-error) { 54 | $item | format-error $error_format 55 | } else { 56 | $item 57 | } 58 | } 59 | 60 | def looks-like-error []: any -> bool { 61 | let value = $in 62 | if ($value | describe |
str starts-with "record") { 63 | let columns = $value | columns 64 | ("msg" in $columns) and ("rendered" in $columns) and ("json" in $columns) 65 | } else { 66 | false 67 | } 68 | } 69 | 70 | # returns: string|record 71 | def format-error [error_format: string]: record -> any { 72 | let error = $in 73 | match $error_format { 74 | "rendered" => ($error | error-format-rendered) 75 | "compact" => ($error | error-format-compact) 76 | "record" => $error 77 | _ => (error make { msg: $"Unknown error format: ($error_format)" }) 78 | } 79 | } 80 | 81 | # Rendered errors have useful info for terminal mode but too much for table data 82 | def error-format-rendered []: record -> string { 83 | $in.rendered 84 | } 85 | 86 | def error-format-compact []: record -> string { 87 | let error = $in | errors unwrap-error 88 | 89 | let json = $error.json | from json 90 | let message = $json.msg 91 | let help = $json | get help? 92 | let labels = $json | get labels? 93 | 94 | if $help != null { 95 | $"($message)\n($help)" 96 | } else if ($labels != null) { 97 | let detail = $labels | each { |label| 98 | $label | get text 99 | # Not sure why this is in the middle of the error json... 100 | | str replace --all "originates from here" '' 101 | } | str join "\n" 102 | 103 | if ($message | str contains "Assertion failed") { 104 | let formatted = ($detail 105 | | str replace --all --regex '\n[ ]+Left' "|>Left" 106 | | str replace --all --regex '\n[ ]+Right' "|>Right" 107 | | str replace --all --regex '[\n\r]+' '\n' 108 | | str replace --all "|>" "\n|>") 109 | | str join "" 110 | [$message, ...($formatted | lines)] | str join "\n" 111 | } else { 112 | [$message, ...($detail | lines)] | str join "\n" 113 | } 114 | } else { 115 | $message 116 | } 117 | } 118 | 119 | -------------------------------------------------------------------------------- /nutest/mod.nu: -------------------------------------------------------------------------------- 1 | 2 | # This module is for running tests.
3 | # 4 | # Example Usage: 5 | # use nutest; nutest run-tests 6 | 7 | # Discover annotated test commands. 8 | export def list-tests [ 9 | --path: string # Location of tests (defaults to current directory) 10 | ]: nothing -> table { 11 | 12 | use discover.nu 13 | 14 | let path = $path | default $env.PWD | check-path 15 | let suites = $path | discover suite-files | discover test-suites 16 | 17 | $suites | each { |suite| 18 | $suite.tests 19 | | where { $in.type in ["test", "ignore"] } 20 | | each { |test| { suite: $suite.name, test: $test.name } } 21 | } | flatten | sort-by suite test 22 | } 23 | 24 | # todo thread panic on below options? 25 | use completions.nu * 26 | 27 | # Discover and run annotated test commands. 28 | export def run-tests [ 29 | --path: path # Location of tests (defaults to current directory) 30 | --match-suites: string@"nu-complete suites" # Regular expression to match against suite names (defaults to all) 31 | --match-tests: string@"nu-complete tests" # Regular expression to match against test names (defaults to all) 32 | --strategy: record # Overrides test run behaviour, such as test concurrency (defaults to automatic) 33 | --display: string@"nu-complete display" # Display during test run (defaults to terminal, or none if result specified) 34 | --returns: string@"nu-complete returns" = "nothing" # Results to return in a pipeline (defaults to nothing) 35 | --report: record # Save a test report to file, e.g. 
`{ type: "junit", path: "report.xml" }` 36 | --fail # Print results and exit with non-zero status if any tests fail (useful for CI/CD systems) 37 | ]: nothing -> any { 38 | 39 | use discover.nu 40 | use orchestrator.nu 41 | use store.nu 42 | 43 | let path = $path | default $env.PWD | check-path 44 | let suite = $match_suites | default ".*" 45 | let test = $match_tests | default ".*" 46 | let strategy = $strategy | select-strategy 47 | let display = $display | select-display $returns 48 | let returns = $returns | select-returns 49 | let report = $report | select-report 50 | 51 | # Discovered suites are of the type: 52 | # list>> 53 | 54 | let test_suites = $path 55 | | discover suite-files --matcher $suite 56 | | discover test-suites --matcher $test 57 | 58 | store create 59 | 60 | do $display.run-start 61 | $test_suites | (orchestrator run-suites $display $strategy) 62 | do $display.run-complete 63 | 64 | let result = do $returns.results 65 | let success = store success 66 | try { do $report.save $result } catch { |error| print -e $error } 67 | 68 | store delete 69 | 70 | # To reflect the exit code we need to print the results instead 71 | if ($fail and not $success) { 72 | print $result 73 | exit 1 74 | } else { 75 | $result 76 | } 77 | } 78 | 79 | def check-path []: string -> string { 80 | let path = $in 81 | if (not ($path | path exists)) { 82 | error make { msg: $"Path doesn't exist: ($path)" } 83 | } 84 | $path 85 | } 86 | 87 | def select-strategy []: any -> record { 88 | let strategy = $in | default { } 89 | let default = { 90 | # Rather than using `sys cpu` (an expensive operation), platform-specific 91 | # mechanisms, or complicating the code with different invocations of par-each, 92 | # we can leverage that Rayon's default behaviour can be activated by setting 93 | # the number of threads to 0. See [ThreadPoolBuilder.num_threads](https://docs.rs/rayon/latest/rayon/struct.ThreadPoolBuilder.html#method.num_threads). 
94 | # This is also what the par-each implementation does. 95 | threads: 0 96 | } 97 | $default | merge $strategy 98 | } 99 | 100 | # A display implements the event processor interface of the orchestrator 101 | def select-display [ 102 | returns_option: any 103 | ]: any -> record { 104 | 105 | let display_option = $in 106 | let display_option = match $display_option { 107 | null if $returns_option != null and $returns_option != "nothing" => "nothing" 108 | null => "terminal" 109 | _ => $display_option 110 | } 111 | 112 | match $display_option { 113 | "nothing" => { 114 | use display/display_nothing.nu 115 | display_nothing create 116 | } 117 | "terminal" => { 118 | use display/display_terminal.nu 119 | display_terminal create 120 | } 121 | "table" => { 122 | use display/display_table.nu 123 | display_table create 124 | } 125 | _ => { 126 | error make { msg: $"Unknown display: ($display_option)" } 127 | } 128 | } 129 | } 130 | 131 | # The `returns` provides data to downstream pipeline steps 132 | def select-returns []: any -> record { 133 | let returns_option = $in 134 | 135 | match $returns_option { 136 | "nothing" => { 137 | use returns/returns_nothing.nu 138 | returns_nothing create 139 | } 140 | "summary" => { 141 | use returns/returns_summary.nu 142 | returns_summary create 143 | } 144 | "table" => { 145 | use returns/returns_table.nu 146 | returns_table create 147 | } 148 | _ => { 149 | error make { msg: $"Unknown return: ($returns_option)" } 150 | } 151 | } 152 | } 153 | 154 | def select-report []: any -> record { 155 | let report_option = $in 156 | 157 | match $report_option { 158 | null => { 159 | use report/report_nothing.nu 160 | report_nothing create 161 | } 162 | { type: "junit", path: $path } => { 163 | use report/report_junit.nu 164 | report_junit create $path 165 | } 166 | _ => { 167 | error make { msg: $"Unknown report: ($report_option)" } 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- 
/nutest/orchestrator.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | use store.nu 3 | 4 | # This script generates the test suite data and embeds a runner into a nushell sub-process to execute. 5 | 6 | # INPUT DATA STRUCTURES 7 | # 8 | # test: 9 | # { 10 | # name: string 11 | # type: string 12 | # } 13 | # 14 | # suite: 15 | # { 16 | # name: string 17 | # path: string 18 | # tests: list 19 | # } 20 | export def run-suites [ 21 | event_processor: record 22 | strategy: record 23 | ]: list> -> nothing { 24 | 25 | $in | par-each --threads $strategy.threads { |suite| 26 | run-suite $event_processor $strategy $suite.name $suite.path $suite.tests 27 | } 28 | } 29 | 30 | def run-suite [ 31 | event_processor: record 32 | strategy: record 33 | suite: string 34 | path: string 35 | tests: table 36 | ] { 37 | let plan_data = create-suite-plan-data $tests 38 | 39 | # Run with forced colour to get colourised rendered error output 40 | let result = with-env { FORCE_COLOR: true } { 41 | const runner_module = path self "runner.nu" 42 | (^$nu.current-exe 43 | --commands $" 44 | use ($runner_module) * 45 | source ($path) 46 | nutest-299792458-execute-suite ($strategy | to nuon) ($suite) ($plan_data) 47 | ") 48 | } | complete 49 | 50 | # Useful for understanding plan 51 | #print $'($plan_data)' 52 | 53 | if $result.exit_code == 0 { 54 | for line in ($result.stdout | lines) { 55 | try { 56 | let event = $line | from nuon 57 | 58 | # Useful for understanding event stream 59 | #print ($event | table --expand) 60 | 61 | $event | process-event $event_processor 62 | } catch { |error| 63 | if $error.msg == "error when loading nuon text" { 64 | # Test printed direct to stdout so runner could not capture output, 65 | # which means we cannot associate with a specific test 66 | print -e $"Warning: Non-captured output for '($suite)': ($line)" 67 | } else { 68 | $error.raw 69 | } 70 | } 71 | } 72 | } else { 73 | # This is only triggered on 
a suite-level failure so not caught by the embedded runner 74 | # This replicates this suite-level failure down to each test 75 | for test in $tests { 76 | let template = { timestamp: (date now | format date "%+"), suite: $suite, test: $test.name } 77 | $template | merge { type: "start", payload: null } | process-event $event_processor 78 | $template | merge { type: "result", payload: "FAIL" } | process-event $event_processor 79 | $template | merge (as-error-output $result.stderr) | process-event $event_processor 80 | $template | merge { type: "finish", payload: null } | process-event $event_processor 81 | } 82 | } 83 | } 84 | 85 | export def create-suite-plan-data [tests: table]: nothing -> string { 86 | let plan_data = $tests 87 | | each { |test| create-test-plan-data $test } 88 | | str join ", " 89 | 90 | $"[ ($plan_data) ]" 91 | } 92 | 93 | def create-test-plan-data [test: record]: nothing -> string { 94 | $'{ name: "($test.name)", type: "($test.type)", execute: { ($test.name) } }' 95 | } 96 | 97 | # Need to encode orchestrator errors as the runner would do, and compatible with the store output 98 | def as-error-output [error: string]: nothing -> record { 99 | { 100 | type: "output" 101 | payload: ({ stream: "error", items: [$error] } | to nuon | encode base64) 102 | } 103 | } 104 | 105 | def process-event [ 106 | event_processor: record 107 | ] { 108 | let event = $in 109 | let template = { suite: $event.suite, test: $event.test } 110 | 111 | match $event { 112 | { type: "start" } => { 113 | do $event_processor.test-start $template 114 | } 115 | { type: "finish" } => { 116 | do $event_processor.test-complete $template 117 | } 118 | { type: "result" } => { 119 | let message = $template | merge { result: $event.payload } 120 | store insert-result $message 121 | } 122 | { type: "output" } => { 123 | let decoded = $event.payload | decode base64 | decode 124 | let message = $template | merge { data: $decoded } 125 | store insert-output $message 126 | } 127 | } 128 
| } 129 | -------------------------------------------------------------------------------- /nutest/report/report_junit.nu: -------------------------------------------------------------------------------- 1 | use ../store.nu 2 | 3 | # JUnit XML format. 4 | # https://llg.cubic.org/docs/junit 5 | # Example: 6 | # 7 | # 8 | # 9 | # 10 | # 11 | # 12 | # 13 | # 14 | # 15 | # 16 | # 17 | # 18 | # 19 | 20 | export def create [path: string]: nothing -> record { 21 | { 22 | name: "report junit" 23 | results: { create-report } 24 | save: { create-report | save $path } 25 | } 26 | } 27 | 28 | def create-report []: nothing -> string { 29 | query-results | collect | to junit 30 | } 31 | 32 | def query-results []: nothing -> table> { 33 | store query | each { |row| 34 | { 35 | suite: $row.suite 36 | test: $row.test 37 | result: $row.result 38 | output: $row.output 39 | } 40 | } 41 | } 42 | 43 | export def "to junit" []: table> -> string { 44 | $in | testsuites | to xml --self-closed --indent 2 45 | } 46 | 47 | # 48 | def testsuites []: table> -> record { 49 | let rows = $in 50 | let stats = $rows | count 51 | { 52 | tag: "testsuites" 53 | attributes: { 54 | name: "nutest" 55 | tests: $"($stats.total)" 56 | disabled: $"($stats.skipped)" 57 | failures: $"($stats.failed)" 58 | } 59 | content: ( 60 | $rows 61 | | group-by suite 62 | | items { |_, suite_results| 63 | $suite_results | testsuite 64 | } 65 | ) 66 | } 67 | } 68 | 69 | # 70 | def testsuite []: table> -> record { 71 | let rows = $in 72 | if ($rows | is-empty) { 73 | error make { msg: "No test entries" } 74 | } 75 | 76 | let suite = $rows | first | get suite 77 | let stats = $rows | count 78 | { 79 | tag: "testsuite" 80 | attributes: { 81 | name: $suite 82 | tests: $"($stats.total)" 83 | disabled: $"($stats.skipped)" 84 | failures: $"($stats.failed)" 85 | } 86 | content: ($rows | each { testcase }) 87 | } 88 | } 89 | 90 | # 91 | # 92 | # 93 | # 94 | # 95 | # 96 | # 97 | def testcase []: record> -> record> { 98 | let test = 
$in 99 | { 100 | tag: "testcase" 101 | attributes: { 102 | name: $test.test 103 | classname: $test.suite 104 | } 105 | content: ( 106 | match $test.result { 107 | "PASS" => [] 108 | "FAIL" => [{ 109 | tag: "failure" 110 | attributes: { 111 | type: "Error" # Exception class name 112 | message: "" # Error message, e.g. e.getMessage() 113 | } 114 | content: [""] # Failure detail 115 | }] 116 | "SKIP" => [{ 117 | tag: "skipped" 118 | }] 119 | } 120 | ) 121 | } 122 | } 123 | 124 | def count []: table -> record { 125 | let rows = $in 126 | { 127 | total: ($rows | length) 128 | failed: ($rows | where result == "FAIL" | length) 129 | skipped: ($rows | where result == "SKIP" | length) 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /nutest/report/report_nothing.nu: -------------------------------------------------------------------------------- 1 | 2 | export def create []: nothing -> record { 3 | { 4 | name: "report nothing" 5 | save: { || ignore } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /nutest/returns/returns_nothing.nu: -------------------------------------------------------------------------------- 1 | 2 | export def create []: nothing -> record { 3 | { 4 | name: "returns nothing" 5 | results: { null } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /nutest/returns/returns_summary.nu: -------------------------------------------------------------------------------- 1 | # Collects summary of results to return 2 | 3 | use ../store.nu 4 | 5 | export def create []: nothing -> record { 6 | { 7 | name: "returns summary" 8 | results: { query-summary } 9 | } 10 | } 11 | 12 | def query-summary []: nothing -> record { 13 | let results = store query 14 | let by_result = $results | group-by result 15 | 16 | { 17 | total: ($results | length) 18 | passed: ($by_result | count "PASS") 19 | failed: ($by_result | count "FAIL") 20 
| skipped: ($by_result | count "SKIP") 21 | } 22 | } 23 | 24 | def count [key: string]: record -> int { 25 | $in 26 | | get --ignore-errors $key 27 | | default [] 28 | | length 29 | } 30 | -------------------------------------------------------------------------------- /nutest/returns/returns_table.nu: -------------------------------------------------------------------------------- 1 | use ../store.nu 2 | use ../formatter.nu 3 | 4 | export def create []: nothing -> record { 5 | let formatter = formatter unformatted 6 | 7 | { 8 | name: "returns table" 9 | results: { query-results $formatter } 10 | } 11 | } 12 | 13 | def query-results [ 14 | formatter: closure 15 | ]: nothing -> table { 16 | 17 | store query | each { |row| 18 | { 19 | suite: $row.suite 20 | test: $row.test 21 | result: $row.result 22 | output: ($row.output | do $formatter) 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /nutest/runner.nu: -------------------------------------------------------------------------------- 1 | # 2 | # This script is used by the runner to directly invoke tests from the plan data. 3 | # 4 | # INPUT DATA STRUCTURES 5 | # 6 | # suite_data: 7 | # [ 8 | # { 9 | # name: string 10 | # type: string 11 | # execute: closure 12 | # } 13 | # ] 14 | # 15 | # Where: 16 | # `type` can be "test", "before-all", etc. 17 | # `execute` is the closure function of `type` 18 | # 19 | 20 | # Note: The below commands all have a prefix to avoid possible conflicts with user test files. 

# Run one discovered suite. The test-name env vars are reset to avoid
# collisions around tests within tests (nutest testing itself).
export def nutest-299792458-execute-suite [
    default_strategy: record
    suite: string
    suite_data: table
] {
    # We reset the test name to avoid collisions around tests within tests
    with-env { NU_TEST_SUITE_NAME: $suite, NU_TEST_NAME: null } {
        nutest-299792458-execute-suite-internal $default_strategy $suite_data
    }

    # Don't output any result
    null
}

# Partition the suite plan by directive type and drive the
# before-all / tests / after-all lifecycle, emitting events throughout.
def nutest-299792458-execute-suite-internal [
    default_strategy: record
    suite_data: table
] {

    let plan = $suite_data | group-by type

    def find-or-default [key: string, default: record]: record -> record {
        let values = $in | get --ignore-errors $key
        if ($values | is-empty) { $default } else { $values | first }
    }
    def get-or-empty [key: string]: record -> list {
        $in | get --ignore-errors $key | default []
    }

    # Also see the list in discover.nu
    let strategy = $plan | find-or-default "strategy" { execute: { {} } } # Closure in record
    let before_all = $plan | get-or-empty "before-all"
    let before_each = $plan | get-or-empty "before-each"
    let after_each = $plan | get-or-empty "after-each"
    let after_all = $plan | get-or-empty "after-all"
    let tests = $plan | get-or-empty "test"
    let ignored = $plan | get-or-empty "ignore"

    # Highlight skipped tests first as there is no error handling required
    nutest-299792458-force-result $ignored "SKIP"

    try {
        let strategy = $default_strategy | merge (do $strategy.execute)
        let context_all = { } | nutest-299792458-execute-before $before_all
        $tests | nutest-299792458-execute-tests $strategy $context_all $before_each $after_each
        $context_all | nutest-299792458-execute-after $after_all
    } catch { |error|
        # This should only happen when strategy or before/after all fails, so mark all tests failed
        # Each test run has its own exception handling so is not expected to trigger this
        nutest-299792458-force-error $tests $error
    }
}

# Run each test (possibly in parallel, per the strategy's thread count),
# wrapping each one in start/finish events.
def nutest-299792458-execute-tests [
    strategy: record
    context_all: record
    before_each: list
    after_each: list
]: list -> nothing {

    let tests = $in

    $tests | par-each --threads $strategy.threads { |test|
        # Allow print output to be associated with specific tests by adding name to the environment
        with-env { NU_TEST_NAME: $test.name } {
            nutest-299792458-emit "start"
            nutest-299792458-execute-test $context_all $before_each $after_each $test
            nutest-299792458-emit "finish"
        }
    }
}

# Run a single test with its before-each/after-each hooks; every stage has
# its own error handling so a failure in one stage cannot mask another.
def nutest-299792458-execute-test [
    context_all: record
    before_each: list
    after_each: list
    test: record
] {
    let context = try {
        $context_all | nutest-299792458-execute-before $before_each
    } catch { |error|
        nutest-299792458-fail $error
        return
    }

    try {
        $context | do $test.execute
        nutest-299792458-emit "result" "PASS"
        # Note that although we have emitted PASS the after-each may still fail (see below)
    } catch { |error|
        nutest-299792458-fail $error
    }

    try {
        $context | nutest-299792458-execute-after $after_each
    } catch { |error|
        # It's possible to get a test PASS above then emit FAIL when processing after-each.
        # This needs to be handled by the store. We could work around it here, but since we have
        # to handle for after-all outside concurrent processing of tests anyway this is simpler.
        nutest-299792458-fail $error
    }
}

# Emit a fixed result (e.g. SKIP) for each test without executing it.
def nutest-299792458-force-result [tests: list, status: string] {
    for test in $tests {
        with-env { NU_TEST_NAME: $test.name } {
            nutest-299792458-emit "start"
            nutest-299792458-emit "result" $status
            nutest-299792458-emit "finish"
        }
    }
}

# Mark every test as failed with the given error (used when suite setup fails).
def nutest-299792458-force-error [tests: list, error: record] {
    for test in $tests {
        with-env { NU_TEST_NAME: $test.name } {
            nutest-299792458-emit "start"
            nutest-299792458-fail $error
            nutest-299792458-emit "finish"
        }
    }
}

# Fold the before-each/all hooks over the initial context; each hook must
# return a record (merged into the context) or nothing.
def nutest-299792458-execute-before [items: list]: record -> record {
    let initial_context = $in
    $items | reduce --fold $initial_context { |it, acc|
        let next = (do $it.execute) | default { }
        let type = $next | describe
        if (not ($type | str starts-with "record")) {
            error make { msg: $"The before-each/all command '($it.name)' must return a record or nothing, not '($type)'" }
        }
        $acc | merge $next
    }
}

# Run the after-each/all hooks against the accumulated context.
def nutest-299792458-execute-after [items: list]: record -> nothing {
    let context = $in
    for item in $items {
        $context | do $item.execute
    }
}

# Emit a FAIL result and print the error details to stderr.
def nutest-299792458-fail [error: record] {
    nutest-299792458-emit "result" "FAIL"
    # Exclude raw so it can be converted to Nuon
    # Exclude debug as it reduces noise in the output
    print -e ($error | reject raw debug)
}

# Keep a reference to the internal print command
alias nutest-299792458-print = print

# Override the print command to provide context for output
export def print [--stderr (-e), --raw (-r), --no-newline (-n), ...rest: any] {
    # Capture the stream type to allow downstream rendering to differentiate between the two
    let stream = if $stderr { "error" } else { "output" }

    # Associate the stream type with the list of data items being output
    let output = {
        stream: $stream
        items: $rest
    }

    # Encode to nuon to preserve datatypes of what is being printed for display-specific rendering
    # Encode to base64 to avoid newlines in any strings breaking the line-based protocol
    let encoded = $output | to nuon --raw | encode base64

    nutest-299792458-emit output $encoded
}

# Emit a suite/test event as a single nuon-encoded line (the line-based protocol).
def nutest-299792458-emit [type: string, payload: any = null] {
    let event = {
        timestamp: (date now | format date "%+")
        suite: $env.NU_TEST_SUITE_NAME
        test: $env.NU_TEST_NAME?
        type: $type
        payload: $payload
    }

    let packet = $event | to nuon --raw

    nutest-299792458-print $packet
}
--------------------------------------------------------------------------------
/nutest/store.nu:
--------------------------------------------------------------------------------
use errors.nu

# We use `query db` here rather than `stor create` as we need full SQLite features
export def create [] {
    delete

    let db = stor open

    $db | query db "
        CREATE TABLE nu_test_results (
            suite TEXT NOT NULL,
            test TEXT NULL,
            result TEXT NOT NULL,
            PRIMARY KEY (suite, test)
        )
    "

    $db | query db "
        CREATE TABLE nu_test_output (
            suite TEXT NOT NULL,
            test TEXT NULL,
            data TEXT NOT NULL
        )
    "

    $db | query db "
        CREATE INDEX idx_suite_test ON nu_test_output (suite, test)
    "
}

# We close the store so tests of this do not open the store multiple times
export def delete [] {
    let db = stor open
    $db | query db "DROP TABLE IF EXISTS nu_test_results"
    $db | query db "DROP TABLE IF EXISTS nu_test_output"
}

# Insert (or upsert) a single test result row.
export def insert-result [ row: record ] {
    retry-on-lock "nu_test_results" {
        stor open | query db "
            INSERT INTO nu_test_results (suite, test, result)
            VALUES (:suite, :test, :result)
            ON CONFLICT(suite, test)
            DO UPDATE SET result = excluded.result
        " --params {
            suite: $row.suite
            test: $row.test
            result: $row.result
        }
    }

    # Unfortunately some inserts silently fail and it's not clear why.
    # It does seem that Nushell performs `query db` with different code to `stor insert`
    # and using query to insert seems wrong, but we need the conflict handling above.
    # So as a horrible hack, we check for insertion and retry if it fails.
    if (query-test $row.suite $row.test | is-empty) {
        sleep 10ms
        insert-result $row
    }
}

# Test is "any" as it can be a string or null if emitted from before/after all
export def insert-output [ row: record ] {
    retry-on-lock "nu_test_output" {
        $row | stor insert --table-name nu_test_output
    }
}

# Parallel execution of tests causes contention on the SQLite database,
# which leads to failed inserts or missing data.
def retry-on-lock [table: string, operation: closure] {
    # We should eventually give up as an error flagging a bug is better than an infinite loop
    # Through stress testing, this number should be good for 500 tests with 50 lines of output/error
    let max_attempts = 20
    mut attempt = $max_attempts
    # Fix: track success explicitly. The previous `if $attempt == 0` check after
    # the loop reported a spurious failure when the operation succeeded on the
    # very last attempt (attempt had already been decremented to zero).
    mut succeeded = false
    while (not $succeeded) and $attempt > 0 {
        $attempt -= 1
        try {
            do $operation
            $succeeded = true
        } catch { |e|
            let error = $e | errors unwrap-error
            let reason = ($error.json | from json).labels?.0?.text?
            if $reason == $"database table is locked: ($table)" {
                # Retry after a random sleep to avoid contention
                sleep (random int ..25 | into duration --unit ms)
            } else {
                $e.raw # Rethrow anything else
            }
        }
    }
    if not $succeeded {
        error make { msg: $"Failed to insert into ($table) after ($max_attempts) attempts" }
    }
}

# True when no recorded result is a FAIL.
export def success []: nothing -> bool {
    let has_failures = stor open | query db "
        SELECT EXISTS (
            SELECT 1
            FROM nu_test_results
            WHERE result = 'FAIL'
        ) AS failures
    " | get failures.0 | into bool

    not $has_failures
}

# NOTE(review): the generic parameters of the original table<...> return type
# annotations below were lost in extraction; plain `table` is a compatible widening.
export def query []: nothing -> table {
    let db = stor open
    $db | query db "
        SELECT suite, test, result
        FROM nu_test_results
        ORDER BY suite, test
    " | insert output { |row|
        query-output $db $row.suite $row.test
    }
}

export def query-test [
    suite: string
    test: string
]: nothing -> table {

    let db = stor open
    query-result $db $suite $test
    | insert output { |row|
        query-output $db $row.suite $row.test
    }
}

def query-result [
    db: any
    suite: string
    test: string
]: nothing -> table {

    $db
    | query db "
        SELECT suite, test, result
        FROM nu_test_results
        WHERE suite = :suite AND test = :test
    " --params { suite: $suite test: $test }
}

def query-output [
    db: any
    suite: string
    test: string
]: nothing -> table {

    let result = $db | query db "
        SELECT data
        FROM nu_test_output
        -- A test is NULL when emitted from before/after all
        WHERE suite = :suite AND (test = :test OR test IS NULL)
    " --params { suite: $suite test: $test }

    $result
    | get data # The column name
    | each { $in | from nuon }
}
--------------------------------------------------------------------------------
/nutest/theme.nu:
--------------------------------------------------------------------------------

# Theme without any colouring or decoration: pass the text through unchanged.
export def none []: nothing -> closure {
    {
        match $in {
            { type: _, text: $text } => $text
        }
    }
}

# Standard theme: colour/emoji per message type.
# Consistency fix: each arm now interpolates the bound $text capture rather
# than mixing $text and $in.text (behaviour is identical, the arm binds the
# same record field).
export def standard []: nothing -> closure {
    {
        match $in {
            { type: "pass", text: $text } => $"✅ (ansi green)($text)(ansi reset)"
            { type: "skip", text: $text } => $"🚧 (ansi yellow)($text)(ansi reset)"
            { type: "fail", text: $text } => $"❌ (ansi red)($text)(ansi reset)"
            { type: "warning", text: $text } => $"(ansi yellow)($text)(ansi reset)"
            { type: "error", text: $text } => $"(ansi red)($text)(ansi reset)"
            { type: "suite", text: $text } => $"(ansi light_blue)($text)(ansi reset)"
            { type: "test", text: $text } => $text
        }
    }
}
--------------------------------------------------------------------------------
/resources/test-run-terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vyadh/nutest/f5b251469aa51c3a95fd1caeac8acbcbccb91a27/resources/test-run-terminal.png
--------------------------------------------------------------------------------
/resources/test-run.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vyadh/nutest/f5b251469aa51c3a95fd1caeac8acbcbccb91a27/resources/test-run.png
--------------------------------------------------------------------------------
/tests/display/test_display_table_errors.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
use ../harness.nu
use ../../nutest/formatter.nu
use ../../nutest/theme.nu
use ../../nutest/display/display_table.nu
use ../../nutest/errors.nu


@before-all
def setup-tests []: record -> record {
    $in | harness setup-tests
}

@after-all
def cleanup-tests []: record -> nothing {
    $in | harness cleanup-tests
}

@before-each
def setup-test []: record -> record {
    $in | harness setup-test
}

@after-each
def cleanup-test []: record -> nothing {
    $in | harness cleanup-test
}

@test
def "assertion is compact" [] {
    let test = {
        assert equal 1 2
    }

    let result = $in | run $test

    assert equal ($result.output | trim-all) ("
        Assertion failed.
        These are not equal.
        |>Left : '1'
        |>Right : '2'
    " | trim-all)
}

@test
def "basic compact" [] {
    let code = {
        error make { msg: 'some error' }
    }

    let result = $in | run $code

    assert equal $result.output "some error"
}

@test
def "full unformatted" [] {
    let code = {
        let variable = 'span source'

        error make {
            msg: 'a decorated error'
            label: {
                text: 'happened here'
                span: (metadata $variable).span
            }
            help: 'some help'
        }
    }

    # Use default 'unformatted' formatter
    let result = $in | run $code

    let error = $result.data.output.0 | errors unwrap-error
    let details = $error.json | from json
    assert equal ($details.msg) "a decorated error"
    assert equal ($details.labels.0.text) "happened here"
    assert equal ($details.help) "some help"
}

@test
def "full compact" [] {
    let code = {
        let variable = 'span source'

        error make {
            msg: 'a decorated error'
            label: {
                text: 'happened here'
                span: (metadata $variable).span
            }
            help: 'some help'
        }
    }

    let result = $in | run $code

    assert equal $result.output "a decorated error\nsome help"
}

# See test_integration.nu / "terminal display with rendered error" for rendered test

# Run the closure as a test via the harness and return both the raw result
# data and the rendered (ANSI-stripped) table output for that test.
def run [code: closure]: record -> record {
    let result = $in | harness run $code
    assert equal $result.result "FAIL"

    let output = do (display_table create).results
    | where test == $result.test
    | first
    | get output
    | ansi strip

    {
        data: $result
        output: $output
    }
}

def trim-all []: string -> string {
    $in | str trim | str replace --all --regex '[\n\r\t ]+' ' '
}
--------------------------------------------------------------------------------
/tests/harness.nu:
--------------------------------------------------------------------------------
use ../nutest/orchestrator.nu
use ../nutest/returns/returns_table.nu
use ../nutest/theme.nu
use ../nutest/formatter.nu
use ../nutest/store.nu

# A harness for running tests against nutest itself.

# Encapsulate before-all behaviour
export def setup-tests []: record -> record {
    store create
    $in
}

# Encapsulate after-all behaviour
export def cleanup-tests []: record -> nothing {
    store delete
}

# Encapsulate before-each behaviour
export def setup-test []: record -> record {
    $in | merge {
        temp_dir: (mktemp --tmpdir --directory)
    }
}

# Encapsulate after-each behaviour
export def cleanup-test []: record -> nothing {
    if $in.temp_dir? != null {
        rm --recursive $in.temp_dir
    }
}

# Wrap the given closure as a uniquely-named test in a temporary suite, run it
# through the orchestrator, and return its result row.
export def run [
    code: closure
    strategy: record = { }
]: record -> record {

    let context = $in
    let temp = $context.temp_dir
    let returns = returns_table create
    let strategy = { threads: 1 } | merge $strategy

    let test = random chars
    let suite = $code | create-closure-suite $temp $test
    [$suite] | orchestrator run-suites (noop-event-processor) $strategy
    let results = do $returns.results

    let result = $results | where test == $test
    if ($result | is-empty) {
        error make { msg: $"No results found for test: ($test)" }
    } else {
        $result | first
    }
}

def noop-event-processor []: nothing -> record {
    {
        run-start: { || ignore }
        run-complete: { || ignore }
        test-start: { |row| ignore }
        test-complete: { |row| ignore }
    }
}

# Persist the closure's source into a generated suite file and describe it in
# the shape the orchestrator expects.
def create-closure-suite [temp: string, test: string]: closure -> record {
    let path = $temp | path join $"suite.nu"
    let code = view source $in

    $"
    use std/assert
    use std/testing *

    def ($test) [] {
        do ($code)
    }
    " | save --append $path

    {
        name: "suite"
        path: $path
        tests: [{ name: $test, type: "test" }]
    }
}

def trim-all []: string -> string {
    $in | str trim | str replace --all --regex '[\n\t ]+' ' '
}
--------------------------------------------------------------------------------
/tests/report/test_report_junit.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
source ../../nutest/report/report_junit.nu

@test
def "count when no tests" [] {
    let data = []

    let result = $data | count

    assert equal $result {
        total: 0
        failed: 0
        skipped: 0
    }
}

@test
def "count with suites of all states" [] {
| let data = [ 21 | { suite: "suite1", test: "test1A", result: "PASS" } 22 | { suite: "suite1", test: "test1B", result: "PASS" } 23 | { suite: "suite1", test: "test1C", result: "PASS" } 24 | { suite: "suite1", test: "test1D", result: "FAIL" } 25 | { suite: "suite1", test: "test1E", result: "FAIL" } 26 | { suite: "suite1", test: "test1F", result: "SKIP" } 27 | 28 | { suite: "suite2", test: "test2A", result: "SKIP" } 29 | { suite: "suite2", test: "test2B", result: "SKIP" } 30 | { suite: "suite2", test: "test2C", result: "SKIP" } 31 | { suite: "suite2", test: "test2D", result: "PASS" } 32 | { suite: "suite2", test: "test2E", result: "PASS" } 33 | { suite: "suite2", test: "test2F", result: "FAIL" } 34 | 35 | { suite: "suite3", test: "test3A", result: "PASS" } 36 | ] 37 | 38 | assert equal ($data | count) { 39 | total: 13 40 | failed: 3 41 | skipped: 4 42 | } 43 | 44 | assert equal ($data | where suite == "suite1" | count) { 45 | total: 6 46 | failed: 2 47 | skipped: 1 48 | } 49 | assert equal ($data | where suite == "suite2" | count) { 50 | total: 6 51 | failed: 1 52 | skipped: 3 53 | } 54 | assert equal ($data | where suite == "suite3" | count) { 55 | total: 1 56 | failed: 0 57 | skipped: 0 58 | } 59 | } 60 | 61 | @test 62 | def "testcase pass" [] { 63 | let data = { suite: "suite", test: "test", result: "PASS", output: [] } 64 | 65 | let result = $data | testcase | to xml --self-closed 66 | 67 | assert equal $result (' 68 | 69 | ' | strip-xml-whitespace) 70 | } 71 | 72 | @test 73 | def "testcase fail" [] { 74 | let data = { suite: "suite", test: "test", result: "FAIL", output: [] } 75 | 76 | let result = $data | testcase | to xml --self-closed 77 | 78 | assert equal $result (' 79 | 80 | 81 | 82 | ' | strip-xml-whitespace) 83 | } 84 | 85 | @test 86 | def "testcase skip" [] { 87 | let data = { suite: "suite", test: "test", result: "SKIP", output: [] } 88 | 89 | let result = $data | testcase | to xml --self-closed 90 | 91 | assert equal $result (' 92 | 93 | 94 | 95 | ' 
| strip-xml-whitespace) 96 | } 97 | 98 | @test 99 | def "testsuite with no tests" [] { 100 | let data = [] 101 | 102 | try { 103 | $data | testsuite | to xml --self-closed 104 | assert false "Should have errored" 105 | } catch { |error| 106 | assert equal $error.msg "No test entries" 107 | } 108 | } 109 | 110 | @test 111 | def "testsuite with test stats" [] { 112 | let data = [[suite, test, result, output]; 113 | ["suite1", "test1A", "PASS", []] 114 | ["suite1", "test1B", "PASS", []] 115 | ["suite1", "test1C", "PASS", []] 116 | ["suite1", "test1D", "FAIL", []] 117 | ["suite1", "test1E", "FAIL", []] 118 | ["suite1", "test1F", "SKIP", []] 119 | ] 120 | 121 | let result = $data | testsuite | to xml --self-closed 122 | 123 | assert str contains $result (' 124 | 125 | ' | strip-xml-whitespace) 126 | } 127 | 128 | @test 129 | def "testsuite with tests" [] { 130 | let data = [[suite, test, result, output]; 131 | ["suite1", "test1A", "PASS", []] 132 | ["suite1", "test1B", "FAIL", []] 133 | ["suite1", "test1C", "SKIP", []] 134 | ] 135 | 136 | let result = $data | testsuite | to xml --self-closed 137 | 138 | assert equal $result (' 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | ' | strip-xml-whitespace) 149 | } 150 | 151 | @test 152 | def "testsuites with suites" [] { 153 | let data = [[suite, test, result, output]; 154 | ["suite1", "testA", "PASS", []] 155 | ["suite2", "testB", "FAIL", []] 156 | ["suite3", "testC", "SKIP", []] 157 | ] 158 | 159 | let result = $data | testsuites | to xml --self-closed 160 | 161 | assert equal $result (' 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | ' | strip-xml-whitespace) 178 | } 179 | 180 | def strip-xml-whitespace []: string -> string { 181 | $in | str trim | str replace --all --regex '>[\n\r ]+<' '><' 182 | } 183 | -------------------------------------------------------------------------------- /tests/test_completions.nu: 
--------------------------------------------------------------------------------
use std/assert
use std/testing *
source ../nutest/completions.nu

# Create a temporary directory as the per-test context.
@before-each
def setup [] {
    let temp = mktemp --directory
    {
        temp: $temp
    }
}

@after-each
def cleanup [] {
    let context = $in
    rm --recursive $context.temp
}

@test
def "parse with empty option" [] {
    let result = "nutest run-tests --returns table --match-suites " | parse-command-context

    assert equal $result {
        suite: ".*"
        test: ".*"
        path: "."
    }
}

@test
def "parse with specified option" [] {
    let result = "nutest run-tests --returns table --match-suites orc" | parse-command-context

    assert equal $result {
        suite: "orc"
        test: ".*"
        path: "."
    }
}

@test
def "parse with extra space" [] {
    # NOTE(review): the test name suggests repeated spaces in the command line,
    # but the extracted text shows single spaces — possibly collapsed during
    # extraction; verify against the original file.
    let result = "nutest run-tests --match-suites orc" | parse-command-context

    assert equal $result {
        suite: "orc"
        test: ".*"
        path: "."
    }
}

@test
def "parse when fully specified" [] {
    let result = "nutest run-tests --match-suites sui --match-tests te --path ../something" | parse-command-context

    assert equal $result {
        suite: "sui"
        test: "te"
        path: "../something"
    }
}

@test
def "parse with space in value" [] {
    let result = 'nutest run-tests --match-tests "parse some" --path ../something' | parse-command-context

    assert equal $result {
        suite: ".*"
        test: "\"parse some\""
        path: "../something"
    }
}

@test
def "parse with prior commands" [] {
    let result = "use nutest; nutest run-tests --match-suites sui --match-tests te --path ../something" | parse-command-context

    assert equal $result {
        suite: "sui"
        test: "te"
        path: "../something"
    }
}

@test
def "complete suites" [] {
    let temp = $in.temp
    touch ($temp | path join "test_foo.nu")
    touch ($temp | path join "test_bar.nu")
    touch ($temp | path join "test_baz.nu")

    let result = nu-complete suites $"--path ($temp) --match-suites ba"

    assert equal $result.completions [
        "test_bar"
        "test_baz"
    ]
}

@test
def "complete tests" [] {
    let temp = $in.temp

    let temp = $in.temp
    let test_file_1 = $temp | path join "test_1.nu"
    let test_file_2 = $temp | path join "test_2.nu"

    "
    use std/testing *

    @test
    def some_foo1 [] { }
    " | save $test_file_1
    '
    use std/testing *

    @test
    def "some foo2" [] { }
    @ignore
    def some_foo3 [] { }
    @before-each
    def some_foo4 [] { }
    @test
    def some_foo5 [] { }
    ' | save $test_file_2


    touch ($temp | path join "test_foo.nu")
    touch ($temp | path join "test_bar.nu")
    touch ($temp | path join "test_baz.nu")

    let result = nu-complete tests $"--path ($temp) --match-suites _2 --match-tests foo[1234]"

    assert equal $result.completions [
        # foo1 is excluded via suite pattern
        '"some foo2"' # Commands with spaces are quoted
        "some_foo3"
        # foo4 is excluded as it's not a test
        # foo5 is excluded test pattern
    ]
}
--------------------------------------------------------------------------------
/tests/test_discover.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
use ../nutest/discover.nu

# Create a temporary directory as the per-test context.
@before-each
def setup [] {
    let temp = mktemp --directory
    {
        temp: $temp
    }
}

@after-each
def cleanup [] {
    let context = $in
    rm --recursive $context.temp
}

@test
def "suite files with none available" [] {
    let temp = $in.temp

    let result = $temp | discover suite-files

    assert equal $result []
}

@test
def "suite files with specified file path" [] {
    let temp = $in.temp
    let file = $temp | path join "test_foo.nu"
    touch $file

    let result = $file | discover suite-files

    assert equal $result [
        ($temp | path join "test_foo.nu")
    ]
}

@test
def "suite files with default glob" [] {
    let temp = $in.temp
    mkdir ($temp | path join "subdir")

    touch ($temp | path join "test_foo.nu")
    touch ($temp | path join "test-foo2.nu")
    touch ($temp | path join "bar_test.nu")
    touch ($temp | path join "bar2-test.nu")
    touch ($temp | path join "subdir" "test_baz.nu")

    let result = $temp | discover suite-files | sort

    assert equal $result [
        ($temp | path join "bar2-test.nu" | path expand)
        ($temp | path join "bar_test.nu" | path expand)
        ($temp | path join "subdir" "test_baz.nu" | path expand)
        ($temp | path join "test-foo2.nu" | path expand)
        ($temp | path join "test_foo.nu" | path expand)
    ]
}

@test
def "suite files via specified glob" [] {
    let temp = $in.temp

    touch ($temp | path join "test_foo.nu")
    touch ($temp | path join "any.nu")

    let result = $temp | discover suite-files --glob "**/*.nu" | sort

    assert equal $result [
        ($temp | path join "any.nu" | path expand)
        ($temp | path join "test_foo.nu" | path expand)
    ]
}

@test
def "suite files with matcher" [] {
    let temp = $in.temp
    mkdir ($temp | path join "subdir")

    touch ($temp | path join "test_foo.nu")
    touch ($temp | path join "test-foo2.nu")
    touch ($temp | path join "bar_test.nu")
    touch ($temp | path join "bar2-test.nu")
    touch ($temp | path join "subdir" "test_baz.nu")

    let result = $temp | discover suite-files --matcher "ba" | sort

    assert equal $result [
        ($temp | path join "bar2-test.nu" | path expand)
        ($temp | path join "bar_test.nu" | path expand)
        ($temp | path join "subdir" "test_baz.nu" | path expand)
    ]
}

@test
def "list tests when no suites" [] {
    let temp = $in.temp
    let suite_files = []

    let result = $suite_files | discover test-suites

    assert equal $result []
}

@test
def "tests for all supported test directives" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"

    "
    use std/testing *

    @test
    def attr-test [] { }
    @ignore
    def attr-ignore [] { }
    @before-all
    def attr-before-all [] { }
    @after-all
    def attr-after-all [] { }
    @before-each
    def attr-before-each [] { }
    @after-each
    def attr-after-each [] { }

    #[test]
    def desc-test [] { }
    #[ignore]
    def desc-ignore [] { }
    #[before-all]
    def desc-before-all [] { }
    #[after-all]
    def desc-after-all [] { }
    #[before-each]
    def desc-before-each [] { }
    #[after-each]
    def desc-after-each [] { }

    #[strategy]
    def desc-strategy [] { }
    " | save $test_file

    let result = [$test_file] | discover test-suites | sort

    assert equal $result [{
        name: "test"
        path: $test_file
        tests: [
            { name: "attr-after-all", type: "after-all" }
            { name: "attr-after-each", type: "after-each" }
            { name: "attr-before-all", type: "before-all" }
            { name: "attr-before-each", type: "before-each" }
            { name: "attr-ignore", type: "ignore" }
            # todo no equivalent to @strategy yet
            #{ name: "attr-strategy", type: "strategy" }
            { name: "attr-test", type: "test" }
            { name: "desc-after-all", type: "after-all" }
            { name: "desc-after-each", type: "after-each" }
            { name: "desc-before-all", type: "before-all" }
            { name: "desc-before-each", type: "before-each" }
            { name: "desc-ignore", type: "ignore" }
            { name: "desc-strategy", type: "strategy" }
            { name: "desc-test", type: "test" }
        ]
    }]
}

@test
def "tests with an unsupported attribute specified first" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"

    "
    use std/testing *

    alias \"attr other\" = echo

    @other
    @test
    def some-test [] {
    }
    " | save $test_file

    let result = [$test_file] | discover test-suites | sort

    assert equal $result [{
        name: "test"
        path: $test_file
        tests: [
            { name: "some-test", type: "test" }
        ]
    }]
}

@test
def "tests with an unsupported description and supported attribute" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"

    "
    use std/testing *

    #[other]
    @test
    def some-test [] {
    }
    " | save $test_file

    let result = [$test_file] | discover test-suites | sort

    assert equal $result [{
        name: "test"
        path: $test_file
        tests: [
            { name: "some-test", type: "test" }
        ]
    }]
}

@test
def "tests with an unsupported attribute and supported description" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"

    "
    use std/testing *

    alias \"attr other\" = echo

    #[test]
    @other
    def some-test [] {
    }
    " | save $test_file

    let result = [$test_file] | discover test-suites | sort

    assert equal $result [{
        name: "test"
        path: $test_file
        tests: [
            { name: "some-test", type: "test" }
        ]
    }]
}

@test
def "tests for unsupported test directives are not discovered" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"

    "
    use std/testing *

    alias \"attr two\" = echo

    #[one]
    @two
    def some-command [] {
    }

    @test
    def stub [] {
    }
    " | save $test_file

    let result = [$test_file] | discover test-suites | sort

    assert equal $result [{
        name: "test"
        path: $test_file
        tests: [
            { name: "stub", type: "test" }
        ]
    }]
}

@test
def "tests in multiple suites" [] {
    let temp = $in.temp
    let test_file_1 = $temp | path join "test_1.nu"
    let test_file_2 = $temp | path join "test_2.nu"
    let suite_files = [$test_file_1, $test_file_2]

    "
    use std/testing *

    @test
    def test_foo [] { }
    @test
    def test_bar [] { }
    " | save $test_file_1

    "
    use std/testing *

    @test
    def test_baz [] { }
    def test_qux [] { }
    #[other]
    def test_quux [] { }
    " | save $test_file_2

    let result = $suite_files | discover test-suites | sort

    assert equal $result [
        {
            name: "test_1"
            path: $test_file_1
            tests: [
                { name: "test_bar", type: "test" }
                { name: "test_foo", type: "test" }
            ]
        }
        {
            name: "test_2"
            path: $test_file_2
            tests: [
                { name: "test_baz", type: "test" }
                # Unsupported types removed
            ]
        }
    ]
}

@test
def "tests for suites with matcher" [] {
    let temp = $in.temp
    let test_file_1 = $temp | path join "test_1.nu"
    let test_file_2 = $temp | path join "test_2.nu"
    let suite_files = [$test_file_1, $test_file_2]

    "
    use std/testing *

    @test
    def test_foo [] { }
    @ignore
    def test_bar [] { }
    " | save $test_file_1

    "
    use std/testing *

    @test
    def test_baz [] { }
    @ignore
    def test_qux [] { }
    " | save $test_file_2

    let result = $suite_files | discover test-suites --matcher "ba" | sort

    assert equal $result [
        {
            name: "test_1"
            path: $test_file_1
            tests: [
                { name: "test_bar", type: "ignore" }
            ]
        }
        {
            name: "test_2"
            path: $test_file_2
            tests: [
                { name: "test_baz", type: "test" }
            ]
        }
    ]
}

@test
def "tests suites retaining non-tests when no-match" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"
    let suite_files = [$test_file]

    "
    use std/testing *

    @ignore
    def test_foo [] { }

    @test
    def test_bar [] { }

    @before-each
    def test_baz [] { }

    @after-all
    def test_qux [] { }
    " | save $test_file

    let result = $suite_files | discover test-suites --matcher "ba" | sort

    assert equal $result [
        {
            name: "test"
            path: $test_file
            tests: [
                { name: "test_bar", type: "test" }
                { name: "test_baz", type: "before-each" }
                { name: "test_qux", type: "after-all" }
            ]
        }
    ]
}

@test
def "tests suites excluded suites with no test matches" [] {
    let temp = $in.temp
    let test_file = $temp | path join "test.nu"
    let suite_files = [$test_file]

    "
    use std/testing *

    @test
    def test_foo [] { }

    @test
    def test_bar [] { }

    @ignore
    def test_baz [] { }

    #[other]
    def test_qux [] { }
    " | save $test_file

    let result = $suite_files | discover test-suites --matcher "qux" | sort

    assert equal $result [ ]
}
--------------------------------------------------------------------------------
/tests/test_errors.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
source ../nutest/errors.nu

# Enable backtraces so errors carry their chain for the unwrap tests.
@before-all
def setup [] {
    $env.NU_BACKTRACE = 1
}

@after-all
def teardown [] {
    $env.NU_BACKTRACE = 0
}

@test
def normal-error-is-unmodified [] {
    let error = try { error make { msg: "normal error", help: "help text" } } catch { $in }

    let result = $error | unwrap-error

    assert equal $result.msg "normal error"
    assert ($result.rendered | ansi strip | find --regex "^Error: [ ×x\n]+ normal error.*" | is-not-empty)
    assert equal ($result.json | from json | select msg help) {
        msg: "normal error"
        help: "help text"
    }
}

@test
def chained-error-is-unwrapped [] {
    def throw-error [] {
        error make { msg: "original error", help: "help text" }
    }
    def calling-function [] {
        throw-error
    }
    let error = try { calling-function } catch { $in }

    let result = $error | unwrap-error

    assert equal $result.msg "original error"
    assert ($result.rendered | ansi strip | find --regex "^Error: [ ×x\n]+ original error.*" | is-not-empty)
    assert equal ($result.json | from json | select msg help) {
        msg: "original error"
        help: "help text"
    }
}

@test
def nested-chain-error-is-unwrapped [] {
    def throw-error [] {
        error make { msg: "original error", help: "help text" }
    }
    def nested-error [] {
        throw-error
    }
    def calling-function [] {
        nested-error
    }
    let error = try { calling-function } catch { $in }

    let result = $error | unwrap-error

    assert equal $result.msg "original error"
    assert ($result.rendered | ansi strip | find --regex "^Error: [ ×x\n]+ original error.*" | is-not-empty)
    assert equal ($result.json | from json | select msg help) {
        msg: "original error"
        help: "help text"
    }
}
--------------------------------------------------------------------------------
/tests/test_external_tools.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
use harness.nu

# This suite tests use of external tools that would send output to stdout or stderr
# directly rather than what would otherwise be captured by runner aliasing of `print`.
7 | 8 | @before-all 9 | def setup-tests []: record -> record { 10 | $in | harness setup-tests 11 | } 12 | 13 | @after-all 14 | def cleanup-tests []: record -> nothing { 15 | $in | harness cleanup-tests 16 | } 17 | 18 | @before-each 19 | def setup-test []: record -> record { 20 | $in | harness setup-test 21 | } 22 | 23 | @after-each 24 | def cleanup-test []: record -> nothing { 25 | $in | harness cleanup-test 26 | } 27 | 28 | @test 29 | def non-captured-output-is-ignored [] { 30 | let code = { 31 | ^$nu.current-exe --version # This will print direct to stdout 32 | print "Only this text" 33 | } 34 | 35 | let result = $in | harness run $code 36 | 37 | assert equal ($result | reject suite test) { 38 | result: "PASS" 39 | output: ["Only this text"] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /tests/test_formatter.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | use std/testing * 3 | use ../nutest/formatter.nu 4 | use ../nutest/theme.nu 5 | 6 | # The follow tests provide a unit-test focused view of the formatter module. 7 | # More comprehensive integration tests can be found in output and error tests. 
8 | 9 | @test 10 | def data-and-metadata [] { 11 | let formatter = formatter preserved 12 | 13 | assert equal ([] | do $formatter) [] 14 | 15 | assert equal ([ 16 | { stream: "output", items: [1, 2, 3]} 17 | { stream: "error", items: ["a", "b", "c"]} 18 | ] | do $formatter) [ 19 | { stream: "output", items: [1, 2, 3]} 20 | { stream: "error", items: ["a", "b", "c"]} 21 | ] 22 | } 23 | 24 | @test 25 | def data-only [] { 26 | let formatter = formatter unformatted 27 | 28 | assert equal ([] | do $formatter) [] 29 | 30 | assert equal ([ 31 | { stream: "output", items: [1, 2, 3]} 32 | { stream: "error", items: ["a", "b", "c"]} 33 | ] | do $formatter) [ 34 | 1, 2, 3, "a", "b", "c" 35 | ] 36 | } 37 | 38 | @test 39 | def pretty-with-theme-none [] { 40 | let formatter = formatter pretty (theme none) "compact" 41 | 42 | assert equal ([] | do $formatter) "" 43 | 44 | assert equal ([ 45 | { stream: "error", items: [1, 2, 3]} 46 | ] | do $formatter) "1\n2\n3" 47 | 48 | assert equal ([ 49 | { stream: "output", items: [1, 2, 3]} 50 | { stream: "error", items: ["a", "b", "c"]} 51 | ] | do $formatter) ( 52 | "1\n2\n3\na\nb\nc" 53 | ) 54 | } 55 | 56 | @test 57 | def pretty-with-theme-standard [] { 58 | let formatter = formatter pretty (theme standard) "compact" 59 | 60 | assert equal ([] | do $formatter) "" 61 | 62 | assert equal ([ 63 | { stream: "output", items: [1, 2, 3]} 64 | { stream: "error", items: ["a", "b", "c"]} 65 | ] | do $formatter) ( 66 | $"1\n2\n3\n(ansi yellow)a\nb\nc(ansi reset)" 67 | ) 68 | } 69 | 70 | @test 71 | def "pretty with rendered error" [] { 72 | let formatter = formatter pretty (theme standard) "rendered" 73 | 74 | assert equal ([] | do $formatter) "" 75 | 76 | assert equal ([ 77 | { stream: "error", items: [ 78 | { 79 | msg: 'placeholder' 80 | json: '[]' 81 | rendered: 'a wonderfully decorated error' 82 | } 83 | ]} 84 | ] | do $formatter | ansi strip) ( 85 | "a wonderfully decorated error" 86 | ) 87 | } 88 | 
-------------------------------------------------------------------------------- /tests/test_integration.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | use std/testing * 3 | 4 | # To avoid collisions with the database, we run each test in a subshell. 5 | 6 | @before-each 7 | def setup []: nothing -> record { 8 | let temp = mktemp --tmpdir --directory 9 | setup-tests $temp 10 | 11 | { 12 | temp: $temp 13 | } 14 | } 15 | 16 | def setup-tests [temp: string] { 17 | let test_file_1 = $temp | path join "test_1.nu" 18 | let test_file_2 = $temp | path join "test_2.nu" 19 | 20 | " 21 | use std/testing * 22 | 23 | @test 24 | def test_foo [] { print oof } 25 | @test 26 | def test_bar [] { print -e rab } 27 | " | save $test_file_1 28 | 29 | " 30 | use std/testing * 31 | 32 | @test 33 | def test_baz [] { print zab } 34 | @ignore 35 | def test_qux [] { print xuq } 36 | def test_quux [] { print xuuq } 37 | " | save $test_file_2 38 | } 39 | 40 | @after-each 41 | def cleanup [] { 42 | let context = $in 43 | rm --recursive $context.temp 44 | } 45 | 46 | @test 47 | def with-default-table-options [] { 48 | let temp = $in.temp 49 | 50 | let results = test-run $"run-tests --path '($temp)' --returns table" 51 | 52 | assert equal $results [ 53 | { suite: test_1, test: test_bar, result: "PASS", output: ["rab"] } 54 | { suite: test_1, test: test_foo, result: "PASS", output: ["oof"] } 55 | { suite: test_2, test: test_baz, result: "PASS", output: ["zab"] } 56 | { suite: test_2, test: test_qux, result: "SKIP", output: [] } 57 | ] 58 | } 59 | 60 | @test 61 | def with-different-returns [] { 62 | let temp = $in.temp 63 | 64 | let results = test-run $"run-tests --path '($temp)' --returns summary" 65 | 66 | assert equal $results { 67 | total: 4 68 | passed: 3 69 | failed: 0 70 | skipped: 1 71 | } 72 | } 73 | 74 | @test 75 | def with-specific-file [] { 76 | let temp = $in.temp 77 | let path = $temp | path join "test_2.nu" 78 | 79 | let 
results = test-run $"run-tests --path '($path)' --returns table" 80 | 81 | assert equal $results [ 82 | { suite: test_2, test: test_baz, result: "PASS", output: ["zab"] } 83 | { suite: test_2, test: test_qux, result: "SKIP", output: [] } 84 | ] 85 | } 86 | 87 | @test 88 | def with-matching-suite-and-test [] { 89 | let temp = $in.temp 90 | 91 | let results = test-run $"run-tests --path '($temp)' --returns table --match-suites _1 --match-tests test_ba[rz]" 92 | 93 | assert equal $results [ 94 | { suite: test_1, test: test_bar, result: "PASS", output: ["rab"] } 95 | ] 96 | } 97 | 98 | @test 99 | def "fail option still returns result on passing tests" [] { 100 | let temp = $in.temp 101 | 102 | let result = ( 103 | ^$nu.current-exe 104 | --no-config-file 105 | --commands ($" 106 | use nutest * 107 | run-tests --path ($temp) --fail --display table --returns summary 108 | | get total" + ' | $"Total: ($in)"' 109 | ) 110 | ) | complete 111 | 112 | let output = $result.stdout | ansi strip 113 | assert ($output =~ "test_1[ │]+test_foo[ │]+✅ PASS[ │]+") "Table is output" 114 | assert ($output =~ "Total: 4") "Result is available to query" 115 | assert equal $result.exit_code 0 "Exit code is 0" 116 | } 117 | 118 | @test 119 | def "fail option exit code on failing tests" [] { 120 | let temp = $in.temp 121 | let test_file_3 = $temp | path join "test_3.nu" 122 | " 123 | use std/testing * 124 | 125 | @test 126 | def test_quux [] { error make { msg: 'Ouch' } } 127 | " | save $test_file_3 128 | 129 | let result = ( 130 | ^$nu.current-exe 131 | --no-config-file 132 | --commands $" 133 | use nutest * 134 | run-tests --path ($temp) --returns table --fail 135 | " 136 | ) | complete 137 | 138 | let output = $result.stdout | ansi strip 139 | assert ($output =~ "test_3[ │]+test_quux[ │]+FAIL[ │]+") "Tests are output" 140 | assert equal $result.exit_code 1 141 | } 142 | 143 | @test 144 | def useful-error-on-non-existent-path [] { 145 | let missing_path = ["non", "existant", "path"] | path 
join 146 | let result = ( 147 | ^$nu.current-exe 148 | --no-config-file 149 | --commands $" 150 | use nutest * 151 | run-tests --path ($missing_path) 152 | " 153 | ) | complete 154 | 155 | assert str contains $result.stderr $"Path doesn't exist: " 156 | assert str contains $result.stderr $missing_path 157 | assert equal $result.exit_code 1 158 | } 159 | 160 | @test 161 | def with-summary-returns [] { 162 | let temp = $in.temp 163 | let test_file_3 = $temp | path join "test_3.nu" 164 | " 165 | use std/testing * 166 | 167 | @test 168 | def test_quux [] { error make { msg: 'Ouch' } } 169 | @ignore 170 | def test_oof [] { } 171 | " | save $test_file_3 172 | 173 | let results = test-run $"run-tests --path '($temp)' --returns summary" 174 | 175 | assert equal $results { 176 | total: 6 177 | passed: 3 178 | failed: 1 179 | skipped: 2 180 | } 181 | } 182 | 183 | @test 184 | def list-tests-as-table [] { 185 | let temp = $in.temp 186 | 187 | " 188 | use std/testing * 189 | 190 | @test 191 | def test_zat [] { print oof } 192 | @before-each 193 | def setup [] { print -e rab } 194 | " | save ($temp | path join "test_3.nu") 195 | 196 | let results = test-run $"list-tests --path ($temp)" 197 | 198 | assert equal $results [ 199 | { suite: test_1, test: test_bar } 200 | { suite: test_1, test: test_foo } 201 | { suite: test_2, test: test_baz } 202 | { suite: test_2, test: test_qux } 203 | { suite: test_3, test: test_zat } 204 | ] 205 | } 206 | 207 | @test 208 | def "terminal display" [] { 209 | let temp = $in.temp 210 | let test_file_3 = $temp | path join "test_3.nu" 211 | " 212 | use std/testing * 213 | 214 | @test 215 | def test_quux [] { error make { msg: 'Ouch' } } 216 | @ignore 217 | def test_oof [] { } 218 | " | save $test_file_3 219 | 220 | let results = test-run-raw $"run-tests --path '($temp)' --display terminal --strategy { threads: 1 }" 221 | | ansi strip 222 | 223 | # The ordering of the suites is currently indeterminate so we need to match tests specifically 224 | 
assert ($results | str starts-with "Running tests...") 225 | assert ($results =~ "✅ PASS test_1 test_foo\n oof") 226 | assert ($results =~ "✅ PASS test_1 test_bar\n rab") 227 | assert ($results =~ "✅ PASS test_2 test_baz\n zab") 228 | assert ($results =~ "🚧 SKIP test_2 test_qux") 229 | # We use '.' as version 0.101.0 used '×', newer versions use 'x' 230 | # We need to account for chained errors introduced in 0.103.0 here 231 | assert ($results =~ "❌ FAIL test_3 test_quux(?:.|\n)*?Error:[\n ]+[×x] Ouch") 232 | assert ($results =~ "🚧 SKIP test_3 test_oof") 233 | assert ($results | str ends-with "Test run completed: 6 total, 3 passed, 1 failed, 2 skipped\n") 234 | } 235 | 236 | @test 237 | def "terminal display with rendered error" [] { 238 | let temp = $in.temp 239 | let test_file_3 = $temp | path join "test_3.nu" 240 | " 241 | use std/testing * 242 | 243 | @test 244 | def test_quux [] { 245 | let variable = 'span source' 246 | 247 | error make { 248 | msg: 'a decorated error' 249 | label: { 250 | text: 'happened here' 251 | span: (metadata $variable).span 252 | } 253 | help: 'some help' 254 | } 255 | } 256 | " | save $test_file_3 257 | 258 | let results = test-run-raw $"run-tests --path '($test_file_3)' --display terminal --strategy { threads: 1 }" 259 | | ansi strip 260 | 261 | assert str contains $results "a decorated error" 262 | assert str contains $results "happened here" 263 | assert str contains $results "some help" 264 | } 265 | 266 | @test 267 | def with-junit-report [] { 268 | let temp = $in.temp 269 | let test_file_3 = $temp | path join "test_3.nu" 270 | " 271 | use std/testing * 272 | 273 | @test 274 | def test_quux [] { error make { msg: 'Ouch' } } 275 | @ignore 276 | def test_oof [] { } 277 | " | save $test_file_3 278 | let report_path = $temp | path join "report.xml" 279 | 280 | test-run-raw $"run-tests --path '($temp)' --report { type: junit, path: '($report_path)' }" 281 | 282 | assert equal ($report_path | open --raw | strip-xml-whitespace) (' 283 
| 284 | 285 | 286 | 287 | 288 | 289 | 290 | 291 | 292 | 293 | 294 | 295 | 296 | 297 | 298 | 299 | 300 | 301 | 302 | ' | strip-xml-whitespace) 303 | } 304 | 305 | def test-run [command: string] { 306 | let result = ( 307 | ^$nu.current-exe 308 | --no-config-file 309 | --commands $" 310 | use nutest * 311 | ($command) | to nuon 312 | " 313 | ) | complete 314 | 315 | if $result.exit_code != 0 { 316 | $"[sub-process failed: ($result.stderr)]" 317 | } else { 318 | $result.stdout | from nuon 319 | } 320 | } 321 | 322 | def test-run-raw [command: string]: nothing -> string { 323 | let result = ( 324 | ^$nu.current-exe 325 | --no-config-file 326 | --commands $" 327 | use nutest * 328 | ($command) 329 | " 330 | ) | complete 331 | 332 | if $result.exit_code != 0 { 333 | $"[sub-process failed: ($result.stderr)]" 334 | } else { 335 | $result.stdout 336 | } 337 | } 338 | 339 | def strip-xml-whitespace []: string -> string { 340 | $in | str trim | str replace --all --regex '>[\n\r ]+<' '><' 341 | } 342 | -------------------------------------------------------------------------------- /tests/test_module.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | use std/testing * 3 | source ../nutest/mod.nu 4 | 5 | @test 6 | def "strategy default" [] { 7 | assert equal (null | select-strategy) { threads: 0 } 8 | } 9 | 10 | @test 11 | def "strategy override" [] { 12 | assert equal ({ threads: 1 } | select-strategy) { threads: 1 } 13 | assert equal ({ other: "abc" } | select-strategy) { threads: 0, other: "abc" } 14 | } 15 | 16 | @test 17 | def "display default" [] { 18 | assert equal (null | select-display null | get name) "display terminal" 19 | assert equal (null | select-display "nothing" | get name) "display terminal" 20 | } 21 | 22 | @test 23 | def "display defaults to nothing with result" [] { 24 | assert equal (null | select-display "table" | get name) "display nothing" 25 | assert equal (null | select-display "summary" | get 
name) "display nothing" 26 | } 27 | 28 | @test 29 | def "display retains specified with result" [] { 30 | assert equal ("terminal" | select-display "table" | get name) "display terminal" 31 | assert equal ("table" | select-display "summary" | get name) "display table" 32 | } 33 | 34 | @test 35 | def "returns default" [] { 36 | assert equal ("nothing" | select-returns | get name) "returns nothing" 37 | assert equal (do ("nothing" | select-returns | get results)) null 38 | } 39 | 40 | @test 41 | def "returns options" [] { 42 | assert equal ("summary" | select-returns | get name) "returns summary" 43 | assert equal ("table" | select-returns | get name) "returns table" 44 | } 45 | 46 | @test 47 | def "report default" [] { 48 | assert equal (null | select-report | get name) "report nothing" 49 | } 50 | 51 | @test 52 | def "report junit" [] { 53 | assert equal ({ type: junit, path: "report.xml" } | select-report | get name) "report junit" 54 | } 55 | -------------------------------------------------------------------------------- /tests/test_orchestrator.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | use std/testing * 3 | use ../nutest/orchestrator.nu [ 4 | create-suite-plan-data 5 | run-suites 6 | ] 7 | use ../nutest/store.nu 8 | use ../nutest/theme.nu 9 | use ../nutest/formatter.nu 10 | 11 | @test 12 | def validate-test-plan [] { 13 | let tests = [ 14 | { name: "test_a", type: "test" } 15 | { name: "test_b", type: "test" } 16 | { name: "setup", type: "before-all" } 17 | { name: "cleanup", type: "after-each" } 18 | ] 19 | 20 | let plan = create-suite-plan-data $tests 21 | 22 | assert equal $plan ('[ 23 | { name: "test_a", type: "test", execute: { test_a } }, 24 | { name: "test_b", type: "test", execute: { test_b } }, 25 | { name: "setup", type: "before-all", execute: { setup } }, 26 | { name: "cleanup", type: "after-each", execute: { cleanup } } 27 | ]' | trim) 28 | } 29 | 30 | def trim []: string -> string { 
31 | $in | str replace --all --regex '[\n\r ]+' ' ' 32 | } 33 | 34 | # We also need to ensure we narrow down results to the unique ones used in each test. 35 | @before-all 36 | def setup-store []: nothing -> record { 37 | store create 38 | { } 39 | } 40 | 41 | @after-all 42 | def teardown-store [] { 43 | store delete 44 | } 45 | 46 | @before-each 47 | def setup-temp-dir []: nothing -> record { 48 | let temp = mktemp --tmpdir --directory 49 | { temp: $temp } 50 | } 51 | 52 | @after-each 53 | def cleanup-temp-dir [] { 54 | let context = $in 55 | rm --recursive $context.temp 56 | } 57 | 58 | @test 59 | def run-suite-with-no-tests [] { 60 | let context = $in 61 | let temp = $context.temp 62 | let test_file = $temp | path join "test.nu" 63 | touch $test_file 64 | 65 | let suites = [{name: "none", path: $test_file, tests: []}] 66 | let results = $suites | test-run $context 67 | 68 | assert equal $results [] 69 | } 70 | 71 | @test 72 | def run-suite-with-passing-test [] { 73 | let context = $in 74 | let temp = $context.temp 75 | 76 | let suite = "assert equal 1 1" | create-single-test-suite $temp "passing" 77 | let suites = [{ name: $suite.name, path: $suite.path, tests: $suite.tests }] 78 | let results = $suites | test-run $context 79 | 80 | assert equal $results [ 81 | { 82 | suite: "passing" 83 | test: "passing" 84 | result: "PASS" 85 | output: [] 86 | } 87 | ] 88 | } 89 | 90 | @test 91 | def run-suite-with-ignored-test [] { 92 | let context = $in 93 | let temp = $context.temp 94 | 95 | mut suite = create-suite $temp "ignored" 96 | let suites = [ ("assert equal 1 2" | append-test $temp $suite "ignored-test" --type "ignore") ] 97 | let results = $suites | test-run $context 98 | 99 | assert equal $results [ 100 | { 101 | suite: "ignored" 102 | test: "ignored-test" 103 | result: "SKIP" 104 | output: [] 105 | } 106 | ] 107 | } 108 | 109 | @test 110 | def run-suite-with-broken-test [] { 111 | let context = $in 112 | let temp = $context.temp 113 | 114 | let test_file = $temp 
| path join "broken-test.nu" 115 | "def broken-test" | save $test_file # Parse error 116 | let tests = [{ name: "broken-test", type: "test" }] 117 | let suites = [{ name: "broken", path: $test_file, tests: $tests }] 118 | let results = $suites | test-run $context 119 | 120 | assert equal ($results | reject output) [ 121 | { 122 | suite: "broken" 123 | test: "broken-test" 124 | result: "FAIL" 125 | } 126 | ] 127 | 128 | let output = $results | get output | str join "\n" 129 | assert str contains $output "Missing required positional argument" 130 | assert str contains $output "def broken-test" 131 | } 132 | 133 | @test 134 | def run-suite-with-missing-test [] { 135 | let context = $in 136 | let temp = $context.temp 137 | 138 | let test_file = $temp | path join "missing-test.nu" 139 | touch $test_file 140 | let tests = [{ name: "missing-test", type: "test" }] 141 | let suites = [{ name: "missing", path: $test_file, tests: $tests }] 142 | let results = $suites | test-run $context 143 | 144 | assert equal ($results | reject output) [ 145 | { 146 | suite: "missing" 147 | test: "missing-test" 148 | result: "FAIL" 149 | } 150 | ] 151 | 152 | let output = $results | get output | first 153 | assert str contains ($output.items | str join '') "`missing-test` is neither a Nushell built-in or a known external command" 154 | } 155 | 156 | @test 157 | def run-suite-with-failing-test [] { 158 | let context = $in 159 | let temp = $context.temp 160 | 161 | let suite = "assert equal 1 2" | create-single-test-suite $temp "failing" 162 | let suites = [{ name: $suite.name, path: $suite.path, tests: $suite.tests }] 163 | let results = $suites | test-run $context 164 | 165 | assert equal ($results | reject output) [ 166 | { 167 | suite: "failing" 168 | test: "failing" 169 | result: "FAIL" 170 | } 171 | ] 172 | 173 | let output = $results | get output | each { |data| $data.items | str join '' } | str join "\n" 174 | assert str contains $output "Assertion failed." 
175 | assert str contains $output "These are not equal." 176 | } 177 | 178 | @test 179 | def run-suite-with-multiple-tests [] { 180 | let context = $in 181 | let temp = $context.temp 182 | 183 | mut suite = create-suite $temp "multi" 184 | let suite = "assert equal 1 1" | append-test $temp $suite "test1" 185 | let suite = "assert equal 1 2" | append-test $temp $suite "test2" 186 | let results = [ $suite ] | test-run $context | reject output 187 | 188 | assert equal $results [ 189 | { 190 | suite: "multi" 191 | test: "test1" 192 | result: "PASS" 193 | } 194 | { 195 | suite: "multi" 196 | test: "test2" 197 | result: "FAIL" 198 | } 199 | ] 200 | } 201 | 202 | @test 203 | def run-multiple-suites [] { 204 | let context = $in 205 | let temp = $context.temp 206 | 207 | mut suite1 = create-suite $temp "suite1" 208 | let suite1 = "assert equal 1 1" | append-test $temp $suite1 "test1" 209 | let suite1 = "assert equal 1 2" | append-test $temp $suite1 "test2" 210 | mut suite2 = create-suite $temp "suite2" 211 | let suite2 = "assert equal 1 1" | append-test $temp $suite2 "test3" 212 | let suite2 = "assert equal 1 2" | append-test $temp $suite2 "test4" 213 | let results = [$suite1, $suite2] | test-run $context | reject output 214 | 215 | assert equal $results ([ 216 | { suite: "suite1", test: "test1", result: "PASS" } 217 | { suite: "suite1", test: "test2", result: "FAIL" } 218 | { suite: "suite2", test: "test3", result: "PASS" } 219 | { suite: "suite2", test: "test4", result: "FAIL" } 220 | ] | sort-by suite test) 221 | } 222 | 223 | @test 224 | def run-test-with-output [] { 225 | let context = $in 226 | let temp = $context.temp 227 | 228 | mut suite = create-suite $temp "test-with-output" 229 | let suites = [ ("print 1 2; print -e 3 4" | append-test $temp $suite "test") ] 230 | let results = $suites | test-run $context 231 | 232 | assert equal $results [ 233 | { 234 | suite: "test-with-output" 235 | test: "test" 236 | result: "PASS" 237 | output: [[stream, items]; ["output", 
[1, 2]], ["error", [3, 4]]] 238 | } 239 | ] 240 | } 241 | 242 | @test 243 | def run-before-after-with-output [] { 244 | let context = $in 245 | let temp = $context.temp 246 | 247 | mut suite = create-suite $temp "all-with-output" 248 | let suite = ("print bao; print -e bao" | append-test $temp $suite "ba" --type "before-all") 249 | let suite = ("print beo; print -e beo" | append-test $temp $suite "be" --type "before-each") 250 | let suite = ("print to; print -e te" | append-test $temp $suite "test") 251 | let suite = ("print aeo; print -e aee" | append-test $temp $suite "ae" --type "after-each") 252 | let suite = ("print aao; print -e aae" | append-test $temp $suite "aa" --type "after-all") 253 | let results = [$suite] | test-run $context 254 | 255 | assert equal $results [ 256 | { 257 | suite: "all-with-output" 258 | test: "test" 259 | result: "PASS" 260 | output: [ 261 | [stream, items]; 262 | ["output", ["bao"]], ["error", ["bao"]] 263 | # Since only one before/after all in DB, we cannot guarantee order 264 | ["output", ["aao"]], ["error", ["aae"]] 265 | ["output", ["beo"]], ["error", ["beo"]] 266 | ["output", ["to"]], ["error", ["te"]] 267 | ["output", ["aeo"]], ["error", ["aee"]] 268 | ] 269 | } 270 | ] 271 | } 272 | 273 | # This test is to ensure that even though we get multiple results for a test, 274 | # (both a PASS then a FAIL) the end result is just a FAIL 275 | @test 276 | def after-all-failure-should-mark-all-failed [] { 277 | let context = $in 278 | let temp = $context.temp 279 | 280 | mut suite = create-suite $temp "suite" 281 | let suite = "assert equal 1 1" | append-test $temp $suite "test1" 282 | let suite = "assert equal 1 1" | append-test $temp $suite "test2" 283 | let suite = "assert equal 1 2" | append-test $temp $suite "after-all" --type "after-all" 284 | let results = [ $suite ] | test-run $context | reject output 285 | 286 | assert equal $results ([ 287 | { 288 | suite: "suite" 289 | test: "test1" 290 | result: "FAIL" 291 | } 292 | { 293 | 
suite: "suite" 294 | test: "test2" 295 | result: "FAIL" 296 | } 297 | ] | sort-by test) 298 | } 299 | 300 | 301 | def test-run [context: record]: list -> list { 302 | let suites = $in 303 | 304 | $suites | run-suites (noop-event-processor) { threads: 1 } 305 | 306 | let results = store query 307 | $results | where suite in ($suites | get name) 308 | } 309 | 310 | def noop-event-processor []: nothing -> record { 311 | { 312 | run-start: { || ignore } 313 | run-complete: { || ignore } 314 | test-start: { |row| ignore } 315 | test-complete: { |row| ignore } 316 | } 317 | } 318 | 319 | def create-single-test-suite [temp: string, test: string]: string -> record { 320 | let suite = create-suite $temp $test 321 | $in | append-test $temp $suite $test 322 | } 323 | 324 | def create-suite [temp: string, suite: string]: nothing -> record { 325 | let path = $temp | path join $"($suite).nu" 326 | 327 | $" 328 | use std/assert 329 | use std/testing * 330 | " | save $path 331 | 332 | { 333 | name: $suite 334 | path: $path 335 | tests: [] 336 | } 337 | } 338 | 339 | def append-test [temp: string, suite: record, test: string, --type: string = "test"]: string -> record { 340 | let path = $temp | path join $"($suite.name).nu" 341 | 342 | $" 343 | def ($test) [] { 344 | ($in) 345 | } 346 | " | save --append $path 347 | 348 | $suite | merge { 349 | tests: ($suite.tests | append { name: $test, type: $type }) 350 | } 351 | } 352 | -------------------------------------------------------------------------------- /tests/test_output.nu: -------------------------------------------------------------------------------- 1 | use std/assert 2 | use std/testing * 3 | use harness.nu 4 | use ../nutest/formatter.nu 5 | use ../nutest/store.nu 6 | 7 | # This suite ensures that various printed outputs are represented as would be 8 | # expected if the test code was being run directly and interactively. 
9 | 10 | @before-all 11 | def setup-tests []: record -> record { 12 | $in | harness setup-tests 13 | } 14 | 15 | @after-all 16 | def cleanup-tests []: record -> nothing { 17 | $in | harness cleanup-tests 18 | } 19 | 20 | @before-each 21 | def setup-test []: record -> record { 22 | $in | harness setup-test 23 | } 24 | 25 | @after-each 26 | def cleanup-test []: record -> nothing { 27 | $in | harness cleanup-test 28 | } 29 | 30 | @test 31 | def nulls [] { 32 | let code = { print null } 33 | let output = $in | run $code 34 | assert equal $output [ [null] ] 35 | 36 | let code = { print null null null} 37 | let output = $in | run $code 38 | assert equal $output [ [null, null, null] ] 39 | } 40 | 41 | @test 42 | def numbers [] { 43 | let code = { print 1 } 44 | let output = $in | run $code 45 | assert equal $output [ [1] ] 46 | 47 | let code = { print 1 2 3} 48 | let output = $in | run $code 49 | assert equal $output [ [1, 2, 3] ] 50 | } 51 | 52 | @test 53 | def strings [] { 54 | let code = { print "str" } 55 | let output = $in | run $code 56 | assert equal $output [ ["str"] ] 57 | 58 | let code = { print "one" "two" "three" } 59 | let output = $in | run $code 60 | assert equal $output [ ["one", "two", "three"] ] 61 | } 62 | 63 | @test 64 | def durations [] { 65 | let code = { print 2min } 66 | let output = $in | run $code 67 | assert equal $output [ [2min] ] 68 | } 69 | 70 | @test 71 | def lists [] { 72 | let code = { print [] } 73 | let output = $in | run $code 74 | assert equal $output [ [[]] ] 75 | 76 | let code = { print [1, "two", 3] } 77 | let output = $in | run $code 78 | assert equal $output [ [[1, two, 3]] ] 79 | 80 | let code = { print [1, "two", 3] [4, "five", 6] } 81 | let output = $in | run $code 82 | assert equal $output [ [[1, two, 3], [4, five, 6]] ] 83 | } 84 | 85 | @test 86 | def records [] { 87 | let code = { print {} } 88 | let output = $in | run $code 89 | assert equal $output [ [{}] ] 90 | 91 | let code = { print { a: 1, b: "two" } } 92 | let output 
= $in | run $code 93 | assert equal $output [ [{a: 1, b: two}] ] 94 | 95 | let code = { print { a: 1, b: "two" } { c: 3, d: "four" } } 96 | let output = $in | run $code 97 | assert equal $output [ [{a: 1, b: "two"}, {c: 3, d: "four"}] ] 98 | } 99 | 100 | @test 101 | def tables [] { 102 | let code = { print ([[a, b, c]; [1, 2, 3]] | take 0) } 103 | let output = $in | run $code 104 | assert equal $output [ [[]] ] 105 | 106 | let code = { print [[a, b, c]; [1, "two", 3], [4, "five", 6]] } 107 | let output = $in | run $code 108 | assert equal $output [ [[{a: 1, b: two, c: 3}, {a: 4, b: five, c: 6}]] ] 109 | 110 | let code = { print [[a, b, c]; [1, "two", 3]] [[d, e, f]; [4, "five", 6]] } 111 | let output = $in | run $code 112 | assert equal $output [ [[{a: 1, b: two, c: 3}], [{d: 4, e: five, f: 6}]] ] 113 | } 114 | 115 | @test 116 | def "table in record" [] { 117 | let code = { print { a: 1, b: [[c, d]; [1, 2]] } } 118 | let output = $in | run $code 119 | assert equal $output [ [{a: 1, b: [{c: 1, d: 2}]}] ] 120 | } 121 | 122 | @test 123 | def "record in table" [] { 124 | let code = { print [[a, b]; [1, {c: 2, d: 3}]] } 125 | let output = $in | run $code 126 | assert equal $output [ [[[a, b]; [1, {c: 2, d: 3}]]] ] 127 | } 128 | 129 | @test 130 | def "capture print fidelity" [] { 131 | let code = { print 1; print 2 3; print "more" "args" } 132 | let output = $in | run $code 133 | assert equal $output [ [1], [2, 3], ["more", "args"] ] 134 | } 135 | 136 | def run [code: closure]: record -> list { 137 | let result = $in | harness run $code 138 | assert equal $result.result "PASS" 139 | 140 | query-results 141 | | where test == $result.test 142 | | first 143 | | get output 144 | | each { |row| $row.items } # Unpack from stream record 145 | } 146 | 147 | def query-results []: nothing -> table { 148 | store query | each { |row| 149 | { 150 | suite: $row.suite 151 | test: $row.test 152 | result: $row.result 153 | output: $row.output 154 | } 155 | } 156 | } 157 | 
--------------------------------------------------------------------------------
/tests/test_runner.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
use ../nutest/formatter.nu
use ../nutest/theme.nu
use ../nutest/errors.nu

# Messages emitted by the `success` / `warning` / `failure` helpers below and
# asserted against throughout this suite.
const success_message = "I'd much rather be happy than right any day"
const warning_message = "Don't Panic"
const failure_message = "No tea"

@test
def execute-plan-empty [] {
    let plan = []

    let results = test-run "empty-suite" $plan

    assert equal $results []
}

@test
def execute-plan-test [] {
    let plan = [
        { name: "testing", type: "test", execute: "{ success }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "testing", "start", null ]
        [ "suite", "testing", "output", { stream: "output", items: [$success_message] } ]
        [ "suite", "testing", "result", "PASS" ]
        [ "suite", "testing", "finish", null ]
    ]
}

@test
def execute-plan-with-error [] {
    let plan = [
        { name: "testing", type: "test", execute: "{ failure }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "testing", "start", null ]
        [ "suite", "testing", "result", "FAIL" ]
        [ "suite", "testing", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "testing", "finish", null ]
    ]
}

@test
def execute-plan-tests [] {
    let plan = [
        { name: "test_success", type: "test", execute: "{ success }" }
        { name: "test_success_warning", type: "test", execute: "{ warning; success }" }
        { name: "test_failure", type: "test", execute: "{ failure }" }
        { name: "test_half_failure", type: "test", execute: "{ success; warning; failure }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results ([
        [suite test type payload];
        [ "suite", "test_success", "start", null ]
        [ "suite", "test_success", "output", { stream: "output", items: [$success_message] } ]
        [ "suite", "test_success", "result", "PASS" ]
        [ "suite", "test_success", "finish", null ]
        [ "suite", "test_success_warning", "start", null ]
        [ "suite", "test_success_warning", "output", { stream: "error", items: [$warning_message] } ]
        [ "suite", "test_success_warning", "output", { stream: "output", items: [$success_message] } ]
        [ "suite", "test_success_warning", "result", "PASS" ]
        [ "suite", "test_success_warning", "finish", null ]
        [ "suite", "test_failure", "start", null ]
        [ "suite", "test_failure", "result", "FAIL" ]
        [ "suite", "test_failure", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test_failure", "finish", null ]
        [ "suite", "test_half_failure", "start", null ]
        [ "suite", "test_half_failure", "output", { stream: "output", items: [$success_message] } ]
        [ "suite", "test_half_failure", "output", { stream: "error", items: [$warning_message] } ]
        [ "suite", "test_half_failure", "result", "FAIL" ]
        [ "suite", "test_half_failure", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test_half_failure", "finish", null ]
    ] | sort-by suite test)
}

@test
def execute-test-types-basic [] {
    let plan = [
        { name: "bool", type: "test", execute: "{ print true }" }
        { name: "datetime", type: "test", execute: "{ print 2022-02-02T14:30:00+05:00 }" }
        { name: "duration", type: "test", execute: "{ print 2min }" }
        { name: "filesize", type: "test", execute: "{ print 8KiB }" }
        { name: "float", type: "test", execute: "{ print 0.5 }" }
        { name: "int", type: "test", execute: "{ print 1 }" }
    ]

    let results = test-run "types" $plan | where type == "output"

    assert equal $results [
        [suite test type payload];
        [ "types", "bool", "output", { stream: "output", items: [true] } ]
        [ "types", "datetime", "output", { stream: "output", items: [2022-02-02T14:30:00+05:00] } ]
        [ "types", "duration", "output", { stream: "output", items: [2min] } ]
        [ "types", "filesize", "output", { stream: "output", items: [8KiB] } ]
        [ "types", "float", "output", { stream: "output", items: [0.5] } ]
        [ "types", "int", "output", { stream: "output", items: [1] } ]
    ]
}

@test
def execute-test-types-structured [] {
    let plan = [
        { name: "list", type: "test", execute: "{ print [1, '2', 3min] }" }
        { name: "record", type: "test", execute: "{ print { a: 1, b: 2 } }" }
    ]

    let results = test-run "types" $plan | where type in ["result", "output", "error"]

    assert equal $results [
        [suite test type payload];
        [ "types", "list", "output", { stream: "output", items: [[1, "2", 3min]] } ]
        [ "types", "list", "result", "PASS" ]
        [ "types", "record", "output", { stream: "output", items: [{a: 1, b: 2}] } ]
        [ "types", "record", "result", "PASS" ]
    ]
}

@test
def execute-test-with-multiple-lines [] {
    let plan = [
        { name: "multi-print", type: "test", execute: "{ print 'one'; print 'two' }" }
        { name: "print-rest", type: "test", execute: "{ print 'one' 'two' }" }
        { name: "with-newlines", type: "test", execute: "{ print 'one\ntwo' }" }
    ]

    let results = test-run "suite" $plan | where type == "output"

    assert equal $results [
        [suite test type payload];
        [ "suite", "multi-print", "output", { stream: "output", items: ["one"] } ]
        [ "suite", "multi-print", "output", { stream: "output", items: ["two"] } ]
        [ "suite", "print-rest", "output", { stream: "output", items: ["one", "two"] } ]
        [ "suite", "with-newlines", "output", { stream: "output", items: ["one\ntwo"] } ]
    ]
}

@test
def execute-test-with-multiple-lines-deep [] {
    let plan = [
        { name: "list", type: "test", execute: "{ print [1, '2\n3', 4min] }" }
        { name: "record", type: "test", execute: "{ print { a: 1, b: '2\n3' } }" }
    ]

    let results = test-run "types" $plan | where type in ["result", "output", "error"]

    assert equal $results [
        [suite test type payload];
        [ "types", "list", "output", { stream: "output", items: [[1, "2\n3", 4min]] } ]
        [ "types", "list", "result", "PASS" ]
        [ "types", "record", "output", { stream: "output", items: [{a: 1, b: "2\n3"}] } ]
        [ "types", "record", "result", "PASS" ]
    ]
}

@test
def execute-before-each-test [] {
    let plan = [
        { name: "test", type: "test", execute: "{ assert-context-received }" }
        { name: "before-each", type: "before-each", execute: "{ get-context }" }
    ]

    let results = test-run "before-suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "before-suite", "test", "start", null ]
        [ "before-suite", "test", "output", { stream: "output", items: ["What do you get if you multiply six by nine?", 42] } ]
        [ "before-suite", "test", "result", "PASS" ]
        [ "before-suite", "test", "finish", null ]
    ]
}

@test
def execute-after-each-test [] {
    let plan = [
        { name: "test", type: "test", execute: "{ assert-context-received }" }
        { name: "setup", type: "before-each", execute: "{ get-context }" }
        { name: "cleanup", type: "after-each", execute: "{ assert-context-received }" }
    ]

    let results = test-run "after-suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "after-suite", "test", "start", null ]
        [ "after-suite", "test", "output", { stream: "output", items: ["What do you get if you multiply six by nine?", 42] } ]
        [ "after-suite", "test", "result", "PASS" ]
        [ "after-suite", "test", "output", { stream: "output", items: ["What do you get if you multiply six by nine?", 42] } ]
        [ "after-suite", "test", "finish", null ]
    ]
}

@test
def execute-before-and-after-each-captures-output [] {
    let plan = [
        { name: "before-each", type: "before-each", execute: "{ success; get-context }" }
        { name: "test1", type: "test", execute: "{ noop }" }
        { name: "test2", type: "test", execute: "{ noop }" }
        { name: "after-each", type: "after-each", execute: "{ warning }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "test1", "start", null ]
        [ "suite", "test1", "output", { stream: "output", items: [$success_message] } ]
        [ "suite", "test1", "result", "PASS" ]
        [ "suite", "test1", "output", { stream: "error", items: [$warning_message] } ]
        [ "suite", "test1", "finish", null ]
        [ "suite", "test2", "start", null ]
        [ "suite", "test2", "output", { stream: "output", items: [$success_message] } ]
        [ "suite", "test2", "result", "PASS" ]
        [ "suite", "test2", "output", { stream: "error", items: [$warning_message] } ]
        [ "suite", "test2", "finish", null ]
    ]
}

# This kind of output is not associated with tests by the runner
@test
def execute-before-and-after-all-captures-output [] {
    let plan = [
        { name: "before-all", type: "before-all", execute: "{ print 1; print -e 2; get-context }" }
        { name: "test1", type: "test", execute: "{ print 3; print -e 4 }" }
        { name: "test2", type: "test", execute: "{ print 5; print -e 6 }" }
        { name: "after-all", type: "after-all", execute: "{ print 7; print -e 8 }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "test1", "start", null ]
        [ "suite", "test1", "output", { stream: "output", items: [3] } ]
        [ "suite", "test1", "output", { stream: "error", items: [4] } ]
        [ "suite", "test1", "result", "PASS" ]
        [ "suite", "test1", "finish", null ]
        [ "suite", "test2", "start", null ]
        [ "suite", "test2", "output", { stream: "output", items: [5] } ]
        [ "suite", "test2", "output", { stream: "error", items: [6] } ]
        [ "suite", "test2", "result", "PASS" ]
        [ "suite", "test2", "finish", null ]
        # Ordering is due to this suite performing sorting due to parallelism
        [ "suite", null, "output", { stream: "output", items: [1] } ]
        [ "suite", null, "output", { stream: "error", items: [2] } ]
        [ "suite", null, "output", { stream: "output", items: [7] } ]
        [ "suite", null, "output", { stream: "error", items: [8] } ]
    ]
}

@test
def execute-before-each-error-handling [] {
    let plan = [
        { name: "test", type: "test", execute: "{ noop }" }
        { name: "before-each", type: "before-each", execute: "{ failure }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "test", "start", null ]
        [ "suite", "test", "result", "FAIL" ]
        [ "suite", "test", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test", "finish", null ]
    ]
}

@test
def execute-after-each-error-handling [] {
    let plan = [
        { name: "test", type: "test", execute: "{ noop }" }
        { name: "after-each", type: "after-each", execute: "{ failure }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "test", "start", null ]
        [ "suite", "test", "result", "PASS" ] # The test passed
        [ "suite", "test", "result", "FAIL" ] # But after-each failed
        [ "suite", "test", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test", "finish", null ]
    ]
}

@test
def execute-before-all-error-handling [] {
    let plan = [
        { name: "test1", type: "test", execute: "{ noop }" }
        { name: "test2", type: "test", execute: "{ noop }" }
        { name: "before-all", type: "before-all", execute: "{ failure }" }
    ]

    let results = test-run "suite" $plan

    assert equal $results [
        [suite test type payload];
        [ "suite", "test1", "start", null ]
        [ "suite", "test1", "result", "FAIL" ]
        [ "suite", "test1", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test1", "finish", null ]
        [ "suite", "test2", "start", null ]
        [ "suite", "test2", "result", "FAIL" ]
        [ "suite", "test2", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test2", "finish", null ]
    ]
}

@test
def execute-after-all-error-handling [] {
    let plan = [
        { name: "test1", type: "test", execute: "{ noop }" }
        { name: "test2", type: "test", execute: "{ noop }" }
        { name: "after-all", type: "after-all", execute: "{ failure }" }
    ]

    let results = test-run "suite" $plan

    # Note how the test passes first and then fails because of the after-all failure
    assert equal $results [
        [suite test type payload];
        [ "suite", "test1", "start", null ]
        [ "suite", "test1", "result", "PASS" ]
        [ "suite", "test1", "finish", null ]
        [ "suite", "test1", "start", null ]
        [ "suite", "test1", "result", "FAIL" ]
        [ "suite", "test1", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test1", "finish", null ]
        [ "suite", "test2", "start", null ]
        [ "suite", "test2", "result", "PASS" ]
        [ "suite", "test2", "finish", null ]
        [ "suite", "test2", "start", null ]
        [ "suite", "test2", "result", "FAIL" ]
        [ "suite", "test2", "output", { stream: "error", items: [$failure_message] } ]
        [ "suite", "test2", "finish", null ]
    ]
}

# --- Helpers referenced from the serialised plans above ---

def noop [] {
}

def success [] {
    print $success_message
}

def warning [] {
    print -e $warning_message
}

def failure [] {
    error make { msg: $failure_message }
}

# Canonical context record produced by before-each/all helpers.
def get-context [] {
    {
        question: "What do you get if you multiply six by nine?"
        answer: 42
    }
}

# Prints the received context (so the test output can be asserted) and
# verifies it matches `get-context` exactly.
def assert-context-received [] {
    let context = $in
    print ($context | get question) ($context | get answer)
    assert equal $context (get-context)
}

@test
def signature-before-that-returns-nothing [] {
    let plan = [
        { name: "all-has-output", type: "before-all", execute: "{ { value1: 'preserved-all' } }" }
        { name: "all-no-output", type: "before-all", execute: "{ null }" }
        { name: "each-has-output", type: "before-each", execute: "{ { value2: 'preserved-each' } }" }
        { name: "each-no-output", type: "before-each", execute: "{ null }" }
        { name: "test", type: "test", execute: "{ print $in.value1; print $in.value2 }" }
    ]

    let result = test-run "suite" $plan |
        where type in ["result", "output", "error"]

    assert equal $result [
        [suite test type payload];
        [ "suite", "test", "output", { stream: "output", items: [ "preserved-all" ] } ]
        [ "suite", "test", "output", { stream: "output", items: [ "preserved-each" ] } ]
        [ "suite", "test", "result", "PASS" ]
    ]
}

@test
def signature-after-that-accepts-nothing [] {
    let plan = [
        { name: "some-context", type: "before-all", execute: "{ { key: 'value' } }" }
        { name: "test", type: "test", execute: "{ noop }" }
        { name: "each-no-input", type: "after-each", execute: "{ after-no-input }" }
        { name: "all-no-input", type: "after-all", execute: "{ after-no-input }" }
    ]

    let result = test-run "suite" $plan |
        where type in ["result", "output", "error"]

    assert equal $result [
        [suite test type payload];
        [ "suite", "test", "result", "PASS" ]
    ]
}

def after-no-input []: nothing -> nothing {
}

@test
def signature-before-each-that-returns-non-record [] {
    let plan = [
        { name: "returns-string", type: "before-each", execute: "{ 'value' }" }
        { name: "test", type: "test", execute: "{ noop }" }
    ]

    let result = test-run "suite" $plan |
        where type in ["result", "output", "error"]

    assert equal $result [
        [suite test type payload];
        [ "suite", "test", "result", "FAIL" ]
        [ "suite", "test", "output", { stream: "error", items: [
            "The before-each/all command 'returns-string' must return a record or nothing, not 'string'"
        ] } ]
    ]
}

@test
def signature-before-all-that-returns-non-record [] {
    let plan = [
        { name: "returns-string", type: "before-all", execute: "{ 'value' }" }
        { name: "test", type: "test", execute: "{ noop }" }
    ]

    let result = test-run "suite" $plan |
        where type in ["result", "output", "error"]

    assert equal $result [
        [suite test type payload];
        [ "suite", "test", "result", "FAIL" ]
        [ "suite", "test", "output", { stream: "error", items: [
            "The before-each/all command 'returns-string' must return a record or nothing, not 'string'"
        ] } ]
    ]
}

@test
def signature-after-that-accepts-non-record [] {
    let plan = [
        [name, type, execute];
        ["context", "before-all", "{ { key: context } }"]
        ["test", "test", "{ noop }"]
        ["accepts-string", "after-all", "{ accepts-string }"]
    ]

    let result = test-run "suite" $plan |
        where type in ["result", "output"]

    if (supports-non-record-types) {
        assert equal $result [
            [suite test type payload];
            # Nushell currently allows this, perhaps because we're not using the type as a string.
            # We still test to capture unintended behaviour changes.
            [
                "suite"
                "test"
                "result"
                "PASS"
            ]
            [
                "suite"
                "test"
                "result"
                "FAIL"
            ]
            [
                "suite"
                "test"
                "output"
                { stream: "error", items: ["Input type not supported."] }
            ]
        ]
    }
}

# True when the running Nushell supports non-record input types for
# after-each/all commands: nightly builds, or any release >= 0.101.1.
def supports-non-record-types []: nothing -> bool {
    let version_str = version | get version
    if ($version_str | str contains "nightly") {
        return true
    } else {
        # Only supported on Nushell >= 0.101.1.
        # Compare the dotted version component-wise: a naive
        # `major >= 0 and minor >= 101 and patch >= 1` check would wrongly
        # reject versions such as 0.102.0 or 1.0.0.
        let version = $version_str | split row '.' | first 3 | each { into int }
        if $version.0 != 0 {
            $version.0 > 0
        } else if $version.1 != 101 {
            $version.1 > 101
        } else {
            $version.2 >= 1
        }
    }
}

def accepts-string []: string -> nothing {
    print $in
}

@test
def full-cycle-context [] {
    let plan = [
        { name: "before-all", type: "before-all", execute: "{ fc-before-all }" }
        { name: "before-each", type: "before-each", execute: "{ fc-before-each }" }
        { name: "test1", type: "test", execute: "{ fc-test }" }
        { name: "test2", type: "test", execute: "{ fc-test }" }
        { name: "after-each", type: "after-each", execute: "{ fc-after-each }" }
        { name: "after-all", type: "after-all", execute: "{ fc-after-all }" }
    ]

    let results = test-run "full-cycle" $plan

    assert equal $results ([
        [suite test type payload];
        # Before all is only executed once at the beginning
        [ "full-cycle", null, "output", { stream: "output", items: ["ba"] } ]

        [ "full-cycle", "test1", "start", null ]
        [ "full-cycle", "test1", "output", { stream: "output", items: [ "b" ] } ]
        [ "full-cycle", "test1", "output", { stream: "output", items: [ "t" ] } ]
        [ "full-cycle", "test1", "result", "PASS" ]
        [ "full-cycle", "test1", "output", { stream: "output", items: [ "a" ] } ]
        [ "full-cycle", "test1", "finish", null ]

        [ "full-cycle", "test2", "start", null ]
        [ "full-cycle", "test2", "output", { stream: "output", items: [ "b" ] } ]
        [ "full-cycle", "test2", "output", { stream: "output", items: [ "t" ] } ]
        [ "full-cycle", "test2", "result", "PASS" ]
        [ "full-cycle", "test2", "output", { stream: "output", items: [ "a" ] } ]
        [ "full-cycle", "test2", "finish", null ]

        # After all is only executed once at the end
        [ "full-cycle", null, "output", { stream: "output", items: ["aa"] } ]
    ] | sort-by suite test)
}

def fc-before-all []: record -> record {
    print "ba"
    { before-all: true }
}

def fc-before-each []: record -> record {
    print "b"

    $in | merge { before: true }
}

def fc-test []: record -> nothing {
    print "t"
    assert equal $in {
        before-all: true
        before: true
    }
}

def fc-after-each []: record -> nothing {
    print "a"
}

def fc-after-all []: record -> nothing {
    print "aa"
}

# Runs the given plan in a fresh Nushell subprocess via the nutest runner,
# then parses the emitted nuon event lines into a sorted, timestamp-free
# table suitable for exact assertions.
def test-run [suite: string, plan: list]: nothing -> table {
    const this_file = path self
    let result = (
        ^$nu.current-exe
            --no-config-file
            --commands $"
                use nutest/runner.nu *
                source ($this_file)
                nutest-299792458-execute-suite { threads: 0 } ($suite) ($plan)
            "
    ) | complete

    if $result.exit_code != 0 {
        error make { msg: $result.stderr }
    }

    (
        $result.stdout
            | lines
            | each { $in | from nuon }
            | sort-by suite test
            | reject timestamp
            | update payload { |row|
                if ($row.type in ["output", "error"]) {
                    # Decode output to testable format
                    ($row.payload | decode-output)
                } else {
                    $row.payload
                }
            }
    )
}

# Decode a base64/nuon-encoded output payload into a testable table of
# { stream, items } events, with error items reduced to their messages.
def decode-output []: string -> table<stream: string, items: list<any>> {
    $in | decode base64 | decode | from nuon | reformat-errors
}

# Replace full error records within each event's items by their `msg` text.
def reformat-errors []: table<stream: string, items: list<any>> -> table<stream: string, items: list<any>> {
    $in | update items { |event|
        $event.items | each { |item|
            if ($item | looks-like-error) {
                $item | errors unwrap-error | get msg
            } else {
                $item
            }
        }
    }
}

# Heuristic: a record with msg/rendered/json columns is a captured error.
def looks-like-error []: any -> bool {
    let value = $in
    if ($value | describe | str starts-with "record") {
        let columns = $value | columns
        ("msg" in $columns) and ("rendered" in $columns) and ("json" in $columns)
    } else {
        false
    }
}
--------------------------------------------------------------------------------
/tests/test_store_locking.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
source ../nutest/store.nu
use ../nutest/errors.nu

@before-all
def create-store []: record -> record {
    create
    { }
}

@after-all
def delete-store [] {
    delete
}

# Each test tracks its retry attempts in a temp file so the count survives
# the closure invocations performed by `retry-on-lock`.
@before-each
def create-state-file []: record -> record {
    let state_file = mktemp
    { state_file: $state_file }
}

@after-each
def delete-state-file []: record -> nothing {
    let state_file = $in.state_file
    rm -f $state_file
}

def initialise-attempts-file []: record -> nothing {
    let context = $in
    "0" | save -f $context.state_file
}

def new-attempt []: record -> nothing {
    let context = $in
    ($context | attempt-count) + 1 | save -f $context.state_file
}

def attempt-count []: record -> int {
    let context = $in
    (open $context.state_file | into int)
}

@test
def retry-on-table-lock-fails [] {
    let context = $in
    $context | initialise-attempts-file
    let table = "test_table"

    let operation = {
        $context | new-attempt
        throw-database-locked-error $table
    }

    try {
        retry-on-lock $table $operation
        assert false # Should not reach here
    } catch { |e|
        let result = $e | errors unwrap-error | get json | from json | get msg
        assert str contains $result $"Failed to insert into ($table) after"
    }
    assert equal ($context | attempt-count) 20
}

@test
def retry-on-table-lock-eventually-succeeds [] {
    let context = $in
    $context | initialise-attempts-file
    let table = "test_table"

    let operation = {
        $context | new-attempt
        if ($context | attempt-count) < 5 {
            throw-database-locked-error $table
        }
    }

    try {
        retry-on-lock $table $operation
    } catch { |e|
        assert false # Should not reach here
    }
    assert equal ($context | attempt-count) 5
}

@test
def retry-on-table-lock-throws-other-errors [] {
    let context = $in
    $context | initialise-attempts-file
    let table = "test_table"

    let operation = {
        $context | new-attempt
        error make { msg: "some other error" }
    }

    try {
        retry-on-lock $table $operation
        assert false # Should not reach here
    } catch { |e|
        let result = $e | errors unwrap-error | get json | from json | get msg
        assert equal $result "some other error"
    }
    assert equal ($context | attempt-count) 1
}

# Mimics the SQLite "table is locked" error shape that `retry-on-lock`
# detects and retries on.
def throw-database-locked-error [table: string] {
    error make {
        msg: "database error"
        label: {
            text: $"database table is locked: ($table)"
        }
    }
}
--------------------------------------------------------------------------------
/tests/test_store_manage.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
use ../nutest/store.nu

# NOTE(review): `#[strategy]` is the comment-annotation form, unlike the
# `@`-attributes used elsewhere in these files — confirm nutest still
# recognises it before modernising.
#[strategy]
def sequential []: nothing -> record {
    { threads: 1 }
}

@before-each
def create-test-dir []: record -> record {
    let temp = mktemp --tmpdir --directory
    {
        temp: $temp
    }
}

@after-each
def cleanup-test-dir [] {
    let context = $in
    rm --recursive $context.temp
}

@test
def "delete a created store" [] {
    # The created store is tracked by the store module itself, so the
    # (previously unused) binding of the create result is not needed.
    store create
    store delete
}

@test
def "delete succeeds even no results tables" [] {
    store delete
}

@test
def "runs with previous unclean run" [] {
    let context = $in
    let temp = $context.temp

    # Simulate a crashed run leaving a store behind, then verify a fresh
    # run resets it rather than failing.
    let result = (
        ^$nu.current-exe
            --no-config-file
            --commands $"
                use nutest/store.nu
                store create

                use nutest
                nutest run-tests --path '($temp)' --returns table
            "
    ) | complete

    if $result.exit_code != 0 {
        print $result.stderr
        assert false "Resets result store on new run"
    }
}
--------------------------------------------------------------------------------
/tests/test_store_query_tests.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
use ../nutest/store.nu

#[strategy]
def sequential []: nothing -> record {
    { threads: 1 }
}

@before-each
def create-store []: record -> record {
    store create
    { }
}

@after-each
def delete-store [] {
    store delete
}

# Seed results across three suites, including output for one failure and a
# PASS-then-FAIL sequence (possible when an `after-all` errors).
def create-suites [] {
    store insert-result { suite: "suite1", test: "pass1", result: "PASS" }
    store insert-result { suite: "suite2", test: "pass1", result: "PASS" }
    store insert-result { suite: "suite2", test: "fail1", result: "FAIL" }
    store insert-output { suite: "suite2", test: "fail1", data: ([{stream: "output", items: ["line"]}] | to nuon) }
    store insert-result { suite: "suite3", test: "fail1", result: "PASS" }
    # Pass then fail possible for `after-all` error
    store insert-result { suite: "suite3", test: "fail1", result: "FAIL" }
}

@test
def "query tests" [] {
    create-suites

    let results = store query

    assert equal $results [
        { suite: "suite1", test: "pass1", result: "PASS", output: [] }
        { suite: "suite2", test: "fail1", result: "FAIL", output: [ [{ stream: "output", items: ["line"]}] ] }
        { suite: "suite2", test: "pass1", result: "PASS", output: [] }
        { suite: "suite3", test: "fail1", result: "FAIL", output: [] }
    ]
}

@test
def "query for specific test" [] {
    create-suites

    let results = store query-test "suite2" "fail1"

    assert equal $results [
        { suite: "suite2", test: "fail1", result: "FAIL", output: [ [{ stream: "output", items: ["line"] }] ]}
    ]
}

@test
def "query with before or after all output" [] {
    # Suite-level output (test: null) is attributed to every test in that suite.
    store insert-output { suite: "suite1", test: null, data: ([{stream: "output", items: ["abc"]}] | to nuon) }
    store insert-result { suite: "suite1", test: "pass1", result: "PASS" }
    store insert-result { suite: "suite1", test: "pass2", result: "PASS" }
    store insert-result { suite: "suite2", test: "pass3", result: "PASS" }

    let results = store query

    assert equal $results [
        [suite, test, result, output];
        ["suite1", "pass1", PASS, [ [[stream, items]; [output, [abc]]] ]]
        ["suite1", "pass2", PASS, [ [[stream, items]; [output, [abc]]] ]]
        ["suite2", "pass3", PASS, [ ]]
    ]
}
--------------------------------------------------------------------------------
/tests/test_store_success.nu:
--------------------------------------------------------------------------------
use std/assert
use std/testing *
source ../nutest/store.nu

#[strategy]
def sequential []: nothing -> record {
    { threads: 1 }
}

@before-each
def create-store []: record -> record {
    create
    { }
}

@after-each
def delete-store [] {
    delete
}

@test
def result-success-when-no-tests [] {
    let result = success

    assert equal $result true
}

@test
def result-failure-when-failing-tests [] {
    insert-result { suite: "suite", test: "pass1", result: "PASS" }
    insert-result { suite: "suite", test: "failure", result: "FAIL" }
    insert-result { suite: "suite", test: "pass2", result: "PASS" }

    let result = success

    assert equal $result false
}

@test
def result-success-when-only-passing-tests [] {
    insert-result { suite: "suite", test: "pass1", result: "PASS" }
    insert-result { suite: "suite", test: "pass2", result: "PASS" }

    let result = success

    assert equal $result true
}