├── .env.example ├── .gitmodules ├── .nvmrc ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.md ├── adrs ├── TEMPLATE.md ├── adr-001-changing-error-detection-mechanism.md ├── adr-002-using-booleans.md ├── adr-003-parallel-testing.md └── adr-004-metadata-prefix.md ├── bashunit ├── bpkg.json ├── build.sh ├── install.sh ├── package-lock.json ├── package.json └── src ├── assert.sh ├── assert_arrays.sh ├── assert_files.sh ├── assert_folders.sh ├── assert_snapshot.sh ├── assertions.sh ├── bashunit.sh ├── benchmark.sh ├── check_os.sh ├── clock.sh ├── colors.sh ├── console_header.sh ├── console_results.sh ├── dependencies.sh ├── dev └── debug.sh ├── env.sh ├── globals.sh ├── helpers.sh ├── io.sh ├── main.sh ├── math.sh ├── parallel.sh ├── reports.sh ├── runner.sh ├── skip_todo.sh ├── state.sh ├── str.sh ├── test_doubles.sh └── upgrade.sh /.env.example: -------------------------------------------------------------------------------- 1 | BASHUNIT_DEFAULT_PATH= 2 | BASHUNIT_DEV_LOG= 3 | BASHUNIT_BOOTSTRAP= 4 | BASHUNIT_LOG_JUNIT= 5 | BASHUNIT_REPORT_HTML= 6 | 7 | # Booleans 8 | BASHUNIT_PARALLEL_RUN= 9 | BASHUNIT_SHOW_HEADER= 10 | BASHUNIT_HEADER_ASCII_ART= 11 | BASHUNIT_SIMPLE_OUTPUT= 12 | BASHUNIT_STOP_ON_FAILURE= 13 | BASHUNIT_SHOW_EXECUTION_TIME= 14 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "example/tools/bashunit"] 2 | path = example/tools/bashunit 3 | url = git@github.com:TypedDevs/bashunit.git 4 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | lts/jod 2 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Unreleased 4 | 5 | - Fix typo "to has been called" 6 | - Add weekly downloads to the docs 7 | - Fix parallel runner 8 | - Count data providers when counting total tests 9 | - Add benchmark feature 10 | - Support placeholder `::ignore::` in snapshots 11 | - Add project overview docs 12 | - Improve clock performance 13 | - Make install.sh args more flexible 14 | - Improve Windows detection allowing parallel tests on Git Bash, MSYS and Cygwin 15 | 16 | ## [0.20.0](https://github.com/TypedDevs/bashunit/compare/0.19.1...0.20.0) - 2025-06-01 17 | 18 | - Fix asserts on test doubles in subshell 19 | - Allow interpolating arguments in data providers output 20 | - Deprecate `# data_provider` in favor of `# @data_provider` 21 | - Allow `assert_have_been_called_with` to check arguments of specific calls 22 | - Enable parallel tests on Windows 23 | - Add `assert_not_called` 24 | - Improve `find_total_tests` performance 25 | - Added `assert_match_snapshot_ignore_colors` 26 | - Optimize `runner::parse_result_sync` 27 | - Fix `parse_result_parallel` template 28 | 29 | ## [0.19.1](https://github.com/TypedDevs/bashunit/compare/0.19.0...0.19.1) - 2025-05-23 30 | 31 | - Replace `#!/bin/bash` with `#!/usr/bin/env bash` 32 | - Usage printf with awk, which correctly handles float rounding and improves portability 33 | 34 | ## [0.19.0](https://github.com/TypedDevs/bashunit/compare/0.18.0...0.19.0) - 2025-02-19 35 | 36 | - Fixed false negative with `set -e` 37 | - Fixed name rendered when having `test_test_*` 38 | - Fixed duplicate function detection 39 | - Fixed display test with multiple 
outputs spanning multiple lines
 40 | - Improved output: adding a space between each test file
 41 | - Removed `BASHUNIT_DEV_MODE` in favor of `BASHUNIT_DEV_LOG`
 42 | - Added source file and line on global dev function `log`
 43 | 
 44 | ## [0.18.0](https://github.com/TypedDevs/bashunit/compare/0.17.0...0.18.0) - 2024-10-16
 45 | 
 46 | - Added `-p|--parallel` to enable running tests in parallel
 47 |   - Enabled only on macOS and Ubuntu
 48 | - Added `assert_file_contains` and `assert_file_not_contains`
 49 | - Added `assert_true` and `assert_false`
 50 | - Added `BASHUNIT_DEV_LOG`
 51 | - Added global util functions
 52 |   - current_dir
 53 |   - current_filename
 54 |   - caller_filename
 55 |   - caller_line
 56 |   - current_timestamp
 57 |   - is_command_available
 58 |   - random_str
 59 |   - temp_file
 60 |   - temp_dir
 61 |   - cleanup_temp_files
 62 |   - log
 63 | - Add default env values:
 64 |   - `BASHUNIT_DEFAULT_PATH="tests"`
 65 |   - `BASHUNIT_BOOTSTRAP="tests/bootstrap.sh"`
 66 | - Add check that git is installed to `install.sh`
 67 | - Add `-vvv|--verbose` to display internal details of each test
 68 | - Fixed `-S|--stop-on-failure` behaviour
 69 | - Improved time taken display
 70 | - Improved cleanup of temporary files and directories
 71 | - Improved CI test speed by running them in parallel
 72 | - Removed git dependency for stable installations
 73 | - Rename option `--verbose` to `--detailed`
 74 |   - which is the default display behaviour, the opposite of `--simple`
 75 | - Added `assert_not_same`
 76 | 
 77 | ## [0.17.0](https://github.com/TypedDevs/bashunit/compare/0.16.0...0.17.0) - 2024-10-01
 78 | 
 79 | - Fixed simple output for non-successful states
 80 | - Added support for Alpine (Linux distro)
 81 | - Added optional file-path as 2nd arg to `--debug` option
 82 | - Added runtime duration per test
 83 | - Added defer expressions with `eval` when using standalone assertions
 84 | - Display failing tests after running the entire suite
 85 | - Improved runtime error handling
 86 | - Simplified total tests display on the header
 87 | - Renamed `BASHUNIT_TESTS_ENV` to `BASHUNIT_BOOTSTRAP`
 92 | - Remove deprecated assertions
 93 | - Some required dependencies are now optional: perl, coreutils
 94 | - Upgrade and install scripts can now use `wget` if `curl` is not installed
 95 | - Tests can also be timed using `EPOCHREALTIME` on supported systems
 96 | - Switch to testing the environment for capabilities
 97 |   - rather than assuming various operating systems and Linux distributions have programs installed
 98 | 
 99 | ## [0.16.0](https://github.com/TypedDevs/bashunit/compare/0.15.0...0.16.0) - 2024-09-15
100 | 
101 | - Fixed `clock::now` "can't locate Time" error when it is not available
102 | - Fixed failing tests on `command not found` and `unbound variable` errors
103 | - Fixed wrong total tests number
104 | - Update GitHub Actions installation steps documentation
105 | - Added `assert_files_equals`, `assert_files_not_equals`
106 | - Added `BASHUNIT_TESTS_ENV`
107 | 
108 | ## [0.15.0](https://github.com/TypedDevs/bashunit/compare/0.14.0...0.15.0) - 2024-09-01
109 | 
110 | - Fixed `--filter|-f` to work with `test_*` matching function name input.
111 | - Added assertions to log file
112 | - Rename the current `assert_equals` to `assert_same`
113 | - Rename `assert_equals_ignore_colors` to `assert_equals` and ignore all special chars
114 | - Data providers support multiple arguments
115 | - Remove `multi-invokers` in favor of `data providers`
116 | - Remove trailing slashes `/` from the test directories' names in the output.
117 | - Align "Expected" and "but got" in `assert_*` failure messages.
118 | - Change `-v` to be the shortcut for `--version`
119 | - Add `-vvv` as shortcut for `--verbose`
120 | - Fix wrong commit id when installing beta
121 | - Display the total number of tests upfront when running bashunit
122 | - Add `BASHUNIT_` prefix to all .env config keys
123 |   - BASHUNIT_SHOW_HEADER
124 |   - BASHUNIT_HEADER_ASCII_ART
125 |   - BASHUNIT_SIMPLE_OUTPUT
126 |   - BASHUNIT_STOP_ON_FAILURE
127 |   - BASHUNIT_SHOW_EXECUTION_TIME
128 |   - BASHUNIT_DEFAULT_PATH
129 |   - BASHUNIT_LOG_JUNIT
130 |   - BASHUNIT_REPORT_HTML
131 | 
132 | ## [0.14.0](https://github.com/TypedDevs/bashunit/compare/0.13.0...0.14.0) - 2024-07-14
133 | 
134 | - Fix `echo` so it does not break test execution results
135 | - Add bashunit facade to enable custom assertions
136 | - Document how to verify the `sha256sum` of the final executable
137 | - Generate checksum on build
138 | - Enable display execution time on macOS with `SHOW_EXECUTION_TIME`
139 | - Support for displaying the clock without `perl` (for non-macOS)
140 | - Enable strict mode
141 | - Add `-l|--log-junit <file>` option
142 | - Add `-r|--report-html <file>` option
143 | - Add `--debug` option
144 | - Add `dump` and `dd` functions for local debugging
145 | 
146 | ## [0.13.0](https://github.com/TypedDevs/bashunit/compare/0.12.0...0.13.0) - 2024-06-23
147 | 
148 | - Allow calling assertions standalone outside tests
149 | - Add the latest version when installing beta
150 | - Add `assert_line_count`
151 | - Add hash to the installation script when installing a beta version
152 | - Add GitHub Actions to installation doc
153 | 
154 | ## [0.12.0](https://github.com/TypedDevs/bashunit/compare/0.11.0...0.12.0) - 2024-06-11
155 | 
156 | - Add missing assertion in non-stable versions
157 | - Fix test with `rm` command on macOS
158 | - Add multi-invokers; consolidate parameterized-testing documentation
159 | - Add `fail()` function
160 | - Remove all test mocks after each test case
161 | 
162 | ## [0.11.0](https://github.com/TypedDevs/bashunit/compare/0.10.1...0.11.0) - 2024-03-02
163 | 
164 | - Add `--upgrade` option to `./bashunit`
165 | - Remove support for the deprecated `setUp`, `tearDown`, `setUpBeforeScript` and `tearDownAfterScript` functions
166 | - Optimize test execution time
167 | - Test functions are now run in the order they're defined in a test file
168 | - Increase contrast of test results
169 | 
170 | ## [0.10.1](https://github.com/TypedDevs/bashunit/compare/0.10.0...0.10.1) - 2023-11-13
171 | 
172 | - Fix finding tests inside folders
173 | - Add current date to the beta installation version
174 | 
175 | ## [0.10.0](https://github.com/TypedDevs/bashunit/compare/0.9.0...0.10.0) - 2023-11-09
176 | 
177 | - Installer no longer needs git
178 | - Add `assert_contains_ignore_case`
179 | - Add `assert_equals_ignore_colors`
180 | - Add `assert_match_snapshot`
181 | - Add `SHOW_EXECUTION_TIME` to environment config
182 | - Add docs for environment variables
183 | - Improve data provider output
184 | - Add .env variable `DEFAULT_PATH`
185 | - Improve duplicated function names output
186 | - Allow installing (non-stable) beta using the installer
187 | 
188 | ## 
[0.9.0](https://github.com/TypedDevs/bashunit/compare/0.8.0...0.9.0) - 2023-10-15
189 | 
190 | - Optimised docs fonts (served directly from origin instead of the Google Fonts _proxy_)
191 | - Add Brew installation to docs
192 | - Add `--help` option
193 | - Add `-e|--env` option
194 | - Add `-S|--stop-on-failure` option
195 | - Add `data_provider`
196 | - Add blog posts to the website
197 | - Add `assert_string_not_starts_with`
198 | - Add `assert_string_starts_with`
199 | - Add `assert_string_ends_with`
200 | - Add `assert_string_not_ends_with`
201 | - Add `assert_less_than`
202 | - Add `assert_less_or_equal_than`
203 | - Add `assert_greater_than`
204 | - Add `assert_greater_or_equal_than`
205 | 
206 | ## [0.8.0](https://github.com/TypedDevs/bashunit/compare/0.7.0...0.8.0) - 2023-10-08
207 | 
208 | - Rename these functions from camelCase to snake_case:
209 |   - `setUp` -> `set_up`
210 |   - `tearDown` -> `tear_down`
211 |   - `setUpBeforeScript` -> `set_up_before_script`
212 |   - `tearDownAfterScript` -> `tear_down_after_script`
213 | - Add --version option
214 | - Add -v|--verbose option
215 | - Add ASCII art logo
216 | - Find all tests in a directory
217 | - Add skip and todo functions
218 | - Add SIMPLE_OUTPUT to `.env`
219 | - Allow using `main` or `latest` when using install.sh
220 | 
221 | ## [0.7.0](https://github.com/TypedDevs/bashunit/compare/0.6.0...0.7.0) - 2023-10-02
222 | 
223 | - Added `--simple` argument for a simpler output
224 | - Manage errors when test execution fails
225 | - Split install and build scripts
226 | - Added these functions
227 |   - `mock`
228 |   - `spy`
229 |   - `assert_have_been_called`
230 |   - `assert_have_been_called_with`
231 |   - `assert_have_been_called_times`
232 |   - `assert_file_exists`
233 |   - `assert_file_not_exists`
234 |   - `assert_is_file_empty`
235 |   - `assert_is_file`
236 |   - `assert_directory_exists`
237 |   - `assert_directory_not_exists`
238 |   - `assert_is_directory`
239 |   - `assert_is_directory_empty`
240 |   - `assert_is_directory_not_empty`
241 |   - `assert_is_directory_readable`
242 |   - `assert_is_directory_not_readable`
243 |   - `assert_is_directory_writable`
244 |   - `assert_is_directory_not_writable`
245 | - Rename assertions from camelCase to snake_case:
246 |   - `assertEquals` -> `assert_equals`
247 |   - `assertNotEquals` -> `assert_not_equals`
248 |   - `assertEmpty` -> `assert_empty`
249 |   - `assertNotEmpty` -> `assert_not_empty`
250 |   - `assertContains` -> `assert_contains`
251 |   - `assertNotContains` -> `assert_not_contains`
252 |   - `assertMatches` -> `assert_matches`
253 |   - `assertNotMatches` -> `assert_not_matches`
254 |   - `assertExitCode` -> `assert_exit_code`
255 |   - `assertSuccessfulCode` -> `assert_successful_code`
256 |   - `assertGeneralError` -> `assert_general_error`
257 |   - `assertCommandNotFound` -> `assert_command_not_found`
258 |   - `assertArrayContains` -> `assert_array_contains`
259 |   - `assertArrayNotContains` -> `assert_array_not_contains`
260 | 
261 | ## [0.6.0](https://github.com/TypedDevs/bashunit/compare/0.5.0...0.6.0) - 2023-09-19
262 | 
263 | - Added `assertExitCode`
264 | - Added `assertSuccessfulCode`
265 | - Added `assertGeneralError`
266 | - Added `assertCommandNotFound`
267 | - Added `assertArrayContains`
268 | - Added `assertArrayNotContains`
269 | - Added `assertEmpty`
270 | - Added `assertNotEmpty`
271 | - Added `setUp`, `setUpBeforeScript`, `tearDown` and `tearDownAfterScript` functions, executed before and/or after each test and/or the whole script
272 | - Improved the readability of asserts by using guard clauses
273 | - Update documentation
274 | - Add support for static analysis on macOS
275 | - Fix bug with watcher for the development of bashunit
276 | - Fix error when counting assertions
277 | - Added pipeline to add contributors to the readme
278 | - Added documentation with VitePress
279 | - Stop runner when duplicate test functions are found
280 | 
281 | ## [0.5.0](https://github.com/TypedDevs/bashunit/compare/0.4.0...0.5.0) - 2023-09-10
282 | 
283 | - Added logo
284 | - Added `assertNotEquals`
285 | - Added `assertMatches`
286 | - Added `assertNotMatches`
287 | - Added `make test/watch` to run your tests every second
288 | - Added time taken to run the tests in ms (Linux only)
289 | - Simplified assertions over test results
290 | - Added acceptance tests to the library
291 | - Added pre-commit to the project
292 | - Allow parallel tests to run based on a .env configuration, enabled by default
293 | - Added static analysis tools to the deployment pipelines
294 | - New summary output
295 | 
296 | ## [0.4.0](https://github.com/TypedDevs/bashunit/compare/0.3.0...0.4.0) - 2023-09-08
297 | 
298 | - Better output colors and symbols
299 | - Add option `--filter` to `./bashunit` script
300 |   - Trigger tests filtered by name
301 | - Change the output styles
302 |   - Emojis
303 |   - Colors
304 |   - Bolds
305 | - Added count to all tests
306 | 
307 | ## [0.3.0](https://github.com/TypedDevs/bashunit/compare/0.2.0...0.3.0) - 2023-09-07
308 | 
309 | - Added `assertContains`
310 | - Added `assertNotContains`
311 | - Display Passed tests in green, and Failed tests in red
312 | - Avoid stopping the run after a single failing test
313 | 
314 | ## [0.2.0](https://github.com/TypedDevs/bashunit/compare/0.1.0...0.2.0) - 2023-09-05
315 | 
316 | - Fix test functions being kept in memory after running them
317 | - Create a `./bashunit` entry point
318 | - Change ROOT_DIR to BASHUNIT_ROOT_DIR
319 | - Allow writing tests with camelCase as well
320 | - Allow running the example log_test from anywhere
321 | 
322 | ## [0.1.0](https://github.com/TypedDevs/bashunit/compare/27269c2...0.1.0) - 2023-09-04
323 | 
324 | - Added `assertEquals` function
325 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
 1 | MIT License
 2 | 
 3 | Copyright (c) 2023 TypedDevs
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL=/bin/bash 2 | 3 | -include .env 4 | 5 | STATIC_ANALYSIS_CHECKER := $(shell which shellcheck 2> /dev/null) 6 | LINTER_CHECKER := $(shell which ec 2> /dev/null) 7 | GIT_DIR = $(shell git rev-parse --git-dir 2> /dev/null) 8 | 9 | OS:= 10 | ifeq ($(OS),Windows_NT) 11 | OS +=WIN32 12 | ifeq ($(PROCESSOR_ARCHITECTURE),AMD64) 13 | OS +=_AMD64 14 | endif 15 | ifeq ($(PROCESSOR_ARCHITECTURE),x86) 16 | OS +=_IA32 17 | endif 18 | else 19 | UNAME_S := $(shell uname -s) 20 | ifeq ($(UNAME_S),Linux) 21 | OS+=LINUX 22 | endif 23 | ifeq ($(UNAME_S),Darwin) 24 | OS+=OSX 25 | endif 26 | UNAME_P := $(shell uname -p) 27 | ifeq ($(UNAME_P),x86_64) 28 | OS +=_AMD64 29 | endif 30 | ifneq ($(filter %86,$(UNAME_P)),) 31 | OS+=_IA32 32 | endif 33 | ifneq ($(filter arm%,$(UNAME_P)),) 34 | OS+=_ARM 35 | endif 36 | endif 37 | 38 | help: 39 | @echo "" 40 | @echo "Usage: make [command]" 41 | @echo "" 42 | @echo "Commands:" 43 | @echo " test Run the tests" 44 | @echo " test/list List all tests under the tests directory" 45 | @echo " test/watch Automatically run tests every second" 46 | @echo " docker/alpine Run into a Docker Linux/Alpine:latest image" 47 | @echo " pre_commit/install Install the pre-commit hook" 48 | @echo " pre_commit/run Function that will be called when the pre-commit hook runs" 49 | @echo " sa Run shellcheck static analysis tool" 50 | @echo " lint Run editorconfig linter tool" 51 | 52 | SRC_SCRIPTS_DIR=src 53 | TEST_SCRIPTS_DIR=tests 54 | EXAMPLE_TEST_SCRIPTS=./example/logic_test.sh 55 | PRE_COMMIT_SCRIPTS_FILE=./bin/pre-commit 56 | 57 | TEST_SCRIPTS = $(wildcard $(TEST_SCRIPTS_DIR)/*/*[tT]est.sh) 58 | 59 | test/list: 60 | @echo "Test scripts found:" 61 | @echo $(TEST_SCRIPTS) | tr ' ' '\n' 62 | 63 | test: $(TEST_SCRIPTS) 64 | @./bashunit $(TEST_SCRIPTS) 65 | 66 | test/watch: $(TEST_SCRIPTS) 67 | @./bashunit $(TEST_SCRIPTS) 68 | @fswatch -m poll_monitor -or $(SRC_SCRIPTS_DIR) $(TEST_SCRIPTS_DIR) .env Makefile | xargs -n1 ./bashunit $(TEST_SCRIPTS) 69 | 70 | docker/alpine: 71 | @docker run --rm -it -v "$(shell pwd)":/project -w /project alpine:latest \ 72 | sh -c "apk add bash make shellcheck git && bash" 73 | 74 | docker/ubuntu: 75 | @docker run --rm -it -v "$(shell pwd)":/project -w /project ubuntu:latest \ 76 | sh -c "apt update && apt install -y bash make shellcheck git && bash" 77 | 78 | pre_commit/install: 79 | @echo "Installing pre-commit hook" 80 | cp $(PRE_COMMIT_SCRIPTS_FILE) $(GIT_DIR)/hooks/ 81 | 82 | pre_commit/run: test sa lint 83 | 84 | sa: 85 | ifndef STATIC_ANALYSIS_CHECKER 86 | @printf "\e[1m\e[31m%s\e[0m\n" "Shellcheck not installed: Static analysis not performed!" && exit 1 87 | else 88 | @find . -name "*.sh" -not -path "./local/*" | xargs shellcheck -xC && printf "\e[1m\e[32m%s\e[0m\n" "ShellCheck: OK!" 89 | endif 90 | 91 | lint: 92 | ifndef LINTER_CHECKER 93 | @printf "\e[1m\e[31m%s\e[0m\n" "Editorconfig not installed: Lint not performed!" && exit 1 94 | else 95 | @ec -config .editorconfig && printf "\e[1m\e[32m%s\e[0m\n" "editorconfig-check: OK!" 96 | endif 97 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
[Badges: Tests · Static analysis · Publish Docs · Editorconfig checker · MIT Software License]

[Logo: bashunit]

A simple testing framework for bash scripts

Test your bash scripts in the fastest and simplest way, and discover the most modern bash testing framework.

## Description

**bashunit** is a comprehensive and lightweight testing framework for Bash, focused on the development experience.
It boasts hundreds of assertions and functionalities like spies, mocks, providers and more, offers concise and clear documentation, and has a very active community.

## Documentation

You can find the complete documentation for **bashunit** online, including installation instructions and the various features it provides, in the [official bashunit documentation](https://bashunit.typeddevs.com).

## Contribute

You are welcome to contribute by reporting issues, sharing ideas, or opening pull requests.

Make sure to read our [contribution guide](.github/CONTRIBUTING.md) where you will find, among other things, how to set up your environment with the various tools we use to develop this framework.

## Contributors

[Contributors list]
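## Quick example

A minimal, illustrative sketch of a test file (the file and function names are hypothetical): a test is a plain bash function whose name starts with `test_`, using assertions such as `assert_same`.

```bash
# tests/example_test.sh (illustrative only)
function test_greets_the_world() {
  local actual
  actual="$(echo "Hello World")"

  assert_same "Hello World" "$actual"
}
```

bashunit discovers files matching `*test.sh`, so such a file can be run with `./bashunit tests`.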
53 | -------------------------------------------------------------------------------- /adrs/TEMPLATE.md: -------------------------------------------------------------------------------- 1 | # [short title of solved problem and solution] 2 | 3 | * Status: [proposed | rejected | accepted | deprecated | … | superseded by [ADR-0005](0005-example.md)] 4 | * Deciders: [list everyone involved in the decision] 5 | * Date: [YYYY-MM-DD when the decision was last updated] 6 | 7 | Technical Story: [description | ticket/issue URL] 8 | 9 | ## Context and Problem Statement 10 | 11 | [Describe the context and problem statement, e.g., in free form using two to three sentences. You may want to articulate the problem in form of a question.] 12 | 13 | ## Decision Drivers 14 | 15 | * [driver 1, e.g., a force, facing concern, …] 16 | * [driver 2, e.g., a force, facing concern, …] 17 | * … 18 | 19 | ## Considered Options 20 | 21 | * [option 1] 22 | * [option 2] 23 | * [option 3] 24 | * … 25 | 26 | ## Decision Outcome 27 | 28 | Chosen option: "[option 1]", because [justification. e.g., only option, which meets k.o. criterion decision driver | which resolves force force | … | comes out best (see below)]. 29 | 30 | ### Positive Consequences 31 | 32 | * [e.g., improvement of quality attribute satisfaction, follow-up decisions required, …] 33 | * … 34 | 35 | ### Negative Consequences 36 | 37 | * [e.g., compromising quality attribute, follow-up decisions required, …] 38 | * … 39 | 40 | ## Pros and Cons of the Options 41 | 42 | ### [option 1] 43 | 44 | [example | description | pointer to more information | …] 45 | 46 | * Good, because [argument a] 47 | * Good, because [argument b] 48 | * Bad, because [argument c] 49 | * … 50 | 51 | ### [option 2] 52 | 53 | [example | description | pointer to more information | …] 54 | 55 | * Good, because [argument a] 56 | * Good, because [argument b] 57 | * Bad, because [argument c] 58 | * … 59 | 60 | ### [option 3] 61 | 62 | [example | description | pointer to more information | …] 63 | 64 | * Good, because [argument a] 65 | * Good, because [argument b] 66 | * Bad, because [argument c] 67 | * … 68 | 69 | ## Links 70 | 71 | * [Link type] [Link to ADR] 72 | * … 73 | -------------------------------------------------------------------------------- /adrs/adr-001-changing-error-detection-mechanism.md: -------------------------------------------------------------------------------- 1 | # Title: Changing Error Detection Mechanism in Bashunit 2 | 3 | * Status: accepted 4 | * Authors: @Tito-Kati, with consensus from @khru and @Chemaclass 5 | * Date: 2023-10-14 6 | 7 | Technical Story: 8 | - Issue: [TypedDevs/bashunit#182](https://github.com/TypedDevs/bashunit/issues/182) 9 | - Pull Request: [TypedDevs/bashunit#189](https://github.com/TypedDevs/bashunit/pull/189) 10 | 11 | ## Context and Problem Statement 12 | 13 | In the existing setup of bashunit, error detection within tests was based on return codes along with `set -e`. 14 | This mechanism would interrupt a test script if any execution within the script returned an error code other than 0. 15 | A specific scenario was identified where a non-existing function call within a test did not cause the test to fail as it should, as illustrated in issue [#182](https://github.com/TypedDevs/bashunit/issues/182). 16 | 17 | ## Considered Options 18 | * Use stderr instead return codes and set -e. 
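For illustration, a sketch of the failing scenario from issue [#182](https://github.com/TypedDevs/bashunit/issues/182) under this option (the helper name is hypothetical): any output on stderr, such as bash's `command not found` complaint below, marks the test as failed.

```bash
function test_calls_a_missing_helper() {
  missing_helper  # hypothetical undefined function: bash reports
                  # "command not found" on stderr, failing the test
  assert_same "foo" "foo"
}
```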
19 | 20 | ## Decision Outcome 21 | 22 | To rectify this, a new error detection mechanism was proposed in pull request [#189](https://github.com/TypedDevs/bashunit/pull/189). 23 | The changes shifted error detection from relying on return codes to utilizing stderr. 24 | Now, if any execution within a script writes something to stderr, it will be considered as failed. 25 | This adjustment also changes the behavior of the test runner slightly as tests will now run to the end even if there’s a failure at the beginning, aligning the behavior across different scenarios. 26 | 27 | ### Positive Consequences 28 | 29 | The consequences include: 30 | - Enabling true Test Driven Development (TDD) in bashunit by ensuring that tests fail as expected when there's an error, providing a more accurate and reliable testing environment. 31 | - Altering the runner's behavior to continue executing tests even after an initial failure, which may be viewed as strange but is consistent with the new error detection mechanism. 32 | - Refining error reporting to align with standard practices, providing more descriptive insight into the errors. 33 | 34 | ### Negative Consequences 35 | 36 | Unknown at the moment. 37 | -------------------------------------------------------------------------------- /adrs/adr-002-using-booleans.md: -------------------------------------------------------------------------------- 1 | # Title: Using native bash booleans 2 | 3 | * Status: accepted 4 | * Authors: @Chemaclass 5 | * Date: 2024-10-03 6 | 7 | Technical Story: 8 | - Pull Request: [TypedDevs/bashunit#345](https://github.com/TypedDevs/bashunit/pull/345#discussion_r1782226289) 9 | 10 | ## Context and Problem Statement 11 | 12 | We are using booleans with different syntax in different parts of the project. 13 | 14 | ## Considered Options 15 | 16 | * Use true and false as `0`, `1` native shell booleans 17 | * Use true and false as strings: `"true"`, `"false"` 18 | * Use true and false as native programs: `true`, `false` 19 | 20 | ## Decision Outcome 21 | 22 | To keep consistency in the project, we want to use the standard and best practices of booleans while 23 | keeping a great DX. 24 | 25 | When using return, we must use a number: 26 | - `return 0` # for success 27 | - `return 1` # for failure 28 | 29 | When using variables, we must use `true` and `false` as commands (not strings!): 30 | - `true` is a command that always returns a successful exit code (0) 31 | - `false` is a command that always returns a failure exit code (1) 32 | 33 | When possible, extract a condition into a function. For example: 34 | ```bash 35 | function env::is_show_header_enabled() { 36 | # this is a string comparison because it is coming from the .env 37 | [[ "$BASHUNIT_SHOW_HEADER" == "true" ]] 38 | } 39 | ``` 40 | Usage 41 | ```bash 42 | if env::is_show_header_enabled; then 43 | # ... 44 | fi 45 | ``` 46 | 47 | ### Positive Consequences 48 | 49 | We keep the native shell boolean syntax in conditions. 50 | 51 | ### Negative Consequences 52 | 53 | Not that I am aware of. 
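As a self-contained sketch combining both rules (the function is illustrative, not part of the codebase):

```bash
function is_even() {
  local n="$1"
  local result=false  # executed later as the `false` command, not compared as a string

  if (( n % 2 == 0 )); then
    result=true
  fi

  $result  # runs `true` or `false`, so the function returns 0 or 1
}

if is_even 4; then
  echo "4 is even"  # reached because is_even returned 0 (success)
fi
```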
54 | 
--------------------------------------------------------------------------------
/adrs/adr-003-parallel-testing.md:
--------------------------------------------------------------------------------
 1 | # Title: Parallel testing
 2 | 
 3 | * Status: accepted
 4 | * Authors: @Chemaclass
 5 | * Date: 2024-10-11
 6 | 
 7 | Technical Story:
 8 | - Pull Request: [TypedDevs/bashunit#358](https://github.com/TypedDevs/bashunit/pull/358)
 9 | 
10 | ## Context and Problem Statement
11 | 
12 | We aim to enhance testing performance by running tests in parallel processes while capturing and aggregating results effectively.
13 | 
14 | ## Considered Options
15 | 
16 | - Implement parallel execution using subprocesses.
17 | - Aggregate test results from temporary files.
18 | - Use a spinner for user feedback during result aggregation.
19 | 
20 | ## Decision Outcome
21 | 
22 | - Implemented parallel test execution using subprocesses.
23 | - Each test creates a temporary directory to store results, later aggregated.
24 | 
25 | ### Positive Consequences
26 | 
27 | - Reduced test execution time considerably.
28 | - Clear feedback via a spinner during aggregation.
29 | 
30 | ### Negative Consequences
31 | 
32 | - Potential complexity in handling temporary files and managing subprocesses, especially during interruptions.
33 | 
34 | ## Technical Details
35 | 
36 | When the `--parallel` flag is used, each test file is run in its own subprocess by calling:
37 | 
38 | > runner::call_test_functions "$test_file" "$filter" 2>/dev/null &
39 | 
40 | Each test script creates a temporary directory and stores individual test results in temp files.
41 | After all tests finish, the results are aggregated by traversing these directories and files.
42 | This approach ensures isolation of test execution while improving performance by running tests concurrently.
43 | 
44 | The aggregation (which collects all test outcomes into a final result set) is handled by the function:
45 | 
46 | > parallel::aggregate_test_results "$TEMP_DIR_PARALLEL_TEST_SUITE"
47 | 
--------------------------------------------------------------------------------
/adrs/adr-004-metadata-prefix.md:
--------------------------------------------------------------------------------
 1 | # Title: Prefix metadata comments with @
 2 | 
 3 | * Status: accepted
 4 | * Authors: @Chemaclass
 5 | * Date: 2025-05-29
 6 | 
 7 | ## Context and Problem Statement
 8 | 
 9 | Data providers are defined via a special comment `# data_provider`. We want to
10 | clearly differentiate these meta comments from ordinary comments.
11 | 
12 | ## Considered Options
13 | 
14 | * Keep using `# data_provider` as is.
15 | * Introduce an `@` prefix for special comments while supporting the old syntax.
16 | 
17 | ## Decision Outcome
18 | 
19 | We decided to prefix the metadata provider directives with `@`,
20 | e.g. using `# @data_provider provider_name`.
21 | 
22 | > The previous form without the prefix is still supported for backward compatibility but is now deprecated.
23 | 
24 | ### Positive Consequences
25 | 
26 | * Highlights special bashunit directives clearly.
27 | * Allows future directives to consistently use the `@` prefix.
28 | 
29 | ### Negative Consequences
30 | 
31 | * Projects must eventually update old comments to the new syntax.
32 | 
33 | ## Technical Details
34 | 
35 | `helper::get_provider_data` now matches both `# @data_provider` and the old
36 | `# data_provider` when locating provider functions.
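A usage sketch (the provider and test names are illustrative; as described in the docs, each line echoed by the provider becomes one invocation of the test, receiving the values as arguments):

```bash
function provide_fruits() {
  echo "apple"
  echo "banana"
}

# @data_provider provide_fruits
function test_fruit_is_not_empty() {
  local fruit="$1"
  assert_not_empty "$fruit"
}
```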
38 | -------------------------------------------------------------------------------- /bashunit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # shellcheck disable=SC2034 5 | declare -r BASHUNIT_VERSION="0.20.0" 6 | 7 | # shellcheck disable=SC2155 8 | declare -r BASHUNIT_ROOT_DIR="$(dirname "${BASH_SOURCE[0]}")" 9 | export BASHUNIT_ROOT_DIR 10 | 11 | source "$BASHUNIT_ROOT_DIR/src/dev/debug.sh" 12 | source "$BASHUNIT_ROOT_DIR/src/check_os.sh" 13 | source "$BASHUNIT_ROOT_DIR/src/str.sh" 14 | source "$BASHUNIT_ROOT_DIR/src/globals.sh" 15 | source "$BASHUNIT_ROOT_DIR/src/dependencies.sh" 16 | source "$BASHUNIT_ROOT_DIR/src/io.sh" 17 | source "$BASHUNIT_ROOT_DIR/src/math.sh" 18 | source "$BASHUNIT_ROOT_DIR/src/parallel.sh" 19 | source "$BASHUNIT_ROOT_DIR/src/env.sh" 20 | source "$BASHUNIT_ROOT_DIR/src/clock.sh" 21 | source "$BASHUNIT_ROOT_DIR/src/state.sh" 22 | source "$BASHUNIT_ROOT_DIR/src/colors.sh" 23 | source "$BASHUNIT_ROOT_DIR/src/console_header.sh" 24 | source "$BASHUNIT_ROOT_DIR/src/console_results.sh" 25 | source "$BASHUNIT_ROOT_DIR/src/helpers.sh" 26 | source "$BASHUNIT_ROOT_DIR/src/upgrade.sh" 27 | source "$BASHUNIT_ROOT_DIR/src/assertions.sh" 28 | source "$BASHUNIT_ROOT_DIR/src/reports.sh" 29 | source "$BASHUNIT_ROOT_DIR/src/runner.sh" 30 | source "$BASHUNIT_ROOT_DIR/src/bashunit.sh" 31 | source "$BASHUNIT_ROOT_DIR/src/main.sh" 32 | 33 | _ASSERT_FN="" 34 | _FILTER="" 35 | _RAW_ARGS=() 36 | _ARGS=() 37 | _BENCH_MODE=false 38 | 39 | check_os::init 40 | clock::init 41 | 42 | # Argument parsing 43 | while [[ $# -gt 0 ]]; do 44 | case "$1" in 45 | -a|--assert) 46 | _ASSERT_FN="$2" 47 | shift 48 | ;; 49 | -f|--filter) 50 | _FILTER="$2" 51 | shift 52 | ;; 53 | -s|--simple) 54 | export BASHUNIT_SIMPLE_OUTPUT=true 55 | ;; 56 | --detailed) 57 | export BASHUNIT_SIMPLE_OUTPUT=false 58 | ;; 59 | --debug) 60 | OUTPUT_FILE="${2:-}" 61 | if [[ -n "$OUTPUT_FILE" ]]; then 62 | exec > "$OUTPUT_FILE" 2>&1 63 | fi 64 | set -x 65 | ;; 66 | -b|--bench) 67 | _BENCH_MODE=true 68 | export BASHUNIT_BENCH_MODE=true 69 | source "$BASHUNIT_ROOT_DIR/src/benchmark.sh" 70 | ;; 71 | -S|--stop-on-failure) 72 | export BASHUNIT_STOP_ON_FAILURE=true 73 | ;; 74 | -p|--parallel) 75 | export BASHUNIT_PARALLEL_RUN=true 76 | ;; 77 | --no-parallel) 78 | export BASHUNIT_PARALLEL_RUN=false 79 | ;; 80 | -e|--env|--boot) 81 | # shellcheck disable=SC1090 82 | source "$2" 83 | shift 84 | ;; 85 | -l|--log-junit) 86 | export BASHUNIT_LOG_JUNIT="$2" 87 | shift 88 | ;; 89 | -r|--report-html) 90 | export BASHUNIT_REPORT_HTML="$2" 91 | shift 92 | ;; 93 | -vvv|--verbose) 94 | export BASHUNIT_VERBOSE=true 95 | ;; 96 | -v|--version) 97 | console_header::print_version 98 | trap '' EXIT && exit 0 99 | ;; 100 | --upgrade) 101 | upgrade::upgrade 102 | trap '' EXIT && exit 0 103 | ;; 104 | -h|--help) 105 | console_header::print_help 106 | trap '' EXIT && exit 0 107 | ;; 108 | *) 109 | _RAW_ARGS+=("$1") 110 | ;; 111 | esac 112 | shift 113 | done 114 | 115 | # Expand positional arguments after all options have been processed 116 | if [[ ${#_RAW_ARGS[@]} -gt 0 ]]; then 117 | pattern='*[tT]est.sh' 118 | [[ "$_BENCH_MODE" == true ]] && pattern='*[bB]ench.sh' 119 | for arg in "${_RAW_ARGS[@]}"; do 120 | while IFS= read -r file; do 121 | _ARGS+=("$file") 122 | done < <(helper::find_files_recursive "$arg" "$pattern") 123 | done 124 | fi 125 | 126 | # Optional bootstrap 127 | # shellcheck disable=SC1090 128 | [[ -f "${BASHUNIT_BOOTSTRAP:-}" ]] && source 
"$BASHUNIT_BOOTSTRAP" 129 | 130 | set +eu 131 | 132 | ################# 133 | # Main execution 134 | ################# 135 | if [[ -n "$_ASSERT_FN" ]]; then 136 | main::exec_assert "$_ASSERT_FN" "${_ARGS[@]}" 137 | elif [[ "$_BENCH_MODE" == true ]]; then 138 | main::exec_benchmarks "$_FILTER" "${_ARGS[@]}" 139 | else 140 | main::exec_tests "$_FILTER" "${_ARGS[@]}" 141 | fi 142 | -------------------------------------------------------------------------------- /bpkg.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bashunit", 3 | "description": "A simple testing library for bash scripts.", 4 | "scripts": [ "bashunit" ], 5 | "install": "install -b bashunit ${PREFIX:-/usr/local}/bin/bashunit", 6 | "global": "false" 7 | } 8 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source src/check_os.sh 4 | 5 | BASHUNIT_ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 6 | export BASHUNIT_ROOT_DIR 7 | 8 | function build() { 9 | local out=$1 10 | 11 | build::generate_bin "$out" 12 | build::generate_checksum "$out" 13 | 14 | echo "⚡️ Build completed ⚡️" 15 | } 16 | 17 | function build::verify() { 18 | local out=$1 19 | 20 | echo "Verifying build ⏱️" 21 | 22 | "$out" tests \ 23 | --simple \ 24 | --parallel \ 25 | --log-junit "bin/log-junit.xml" \ 26 | --report-html "bin/report.html" \ 27 | --stop-on-failure 28 | 29 | # shellcheck disable=SC2181 30 | if [[ $? -eq 0 ]]; then 31 | echo "✅ Build verified ✅" 32 | fi 33 | } 34 | 35 | function build::generate_bin() { 36 | local out=$1 37 | local temp 38 | temp="$(dirname "$out")/temp.sh" 39 | 40 | echo '#!/usr/bin/env bash' > "$temp" 41 | echo "Generating bashunit in the '$(dirname "$out")' folder..." 42 | 43 | for file in $(build::dependencies); do 44 | build::process_file "$file" "$temp" 45 | done 46 | 47 | cat bashunit >> "$temp" 48 | grep -v '^source' "$temp" > "$out" 49 | rm "$temp" 50 | chmod u+x "$out" 51 | } 52 | 53 | # Recursive function to process each file and any files it sources 54 | function build::process_file() { 55 | local file=$1 56 | local temp=$2 57 | 58 | { 59 | echo "# $(basename "$file")" 60 | tail -n +2 "$file" >> "$temp" 61 | echo "" 62 | } >> "$temp" 63 | 64 | # Search for any 'source' lines in the current file 65 | grep '^source ' "$file" | while read -r line; do 66 | # Extract the path from the 'source' command 67 | local sourced_file 68 | sourced_file=$(echo "$line" | awk '{print $2}' | sed 's/^"//;s/"$//') # Remove any quotes 69 | 70 | # Handle cases where the path uses $BASHUNIT_ROOT_DIR or other variables 71 | sourced_file=$(eval echo "$sourced_file") 72 | 73 | # Handle relative paths if necessary 74 | if [[ ! 
"$sourced_file" =~ ^/ ]]; then 75 | sourced_file="$(dirname "$file")/$sourced_file" 76 | fi 77 | 78 | # Recursively process the sourced file if it exists 79 | if [[ -f "$sourced_file" ]]; then 80 | build::process_file "$sourced_file" "$temp" 81 | fi 82 | done 83 | } 84 | 85 | function build::dependencies() { 86 | deps=( 87 | "src/check_os.sh" 88 | "src/str.sh" 89 | "src/globals.sh" 90 | "src/dependencies.sh" 91 | "src/io.sh" 92 | "src/math.sh" 93 | "src/parallel.sh" 94 | "src/env.sh" 95 | "src/clock.sh" 96 | "src/state.sh" 97 | "src/colors.sh" 98 | "src/console_header.sh" 99 | "src/console_results.sh" 100 | "src/helpers.sh" 101 | "src/upgrade.sh" 102 | "src/assertions.sh" 103 | "src/reports.sh" 104 | "src/runner.sh" 105 | "src/bashunit.sh" 106 | "src/main.sh" 107 | ) 108 | 109 | echo "${deps[@]}" 110 | } 111 | 112 | function build::generate_checksum() { 113 | local out=$1 114 | 115 | if [[ "$_OS" == "Windows" ]]; then 116 | return 117 | fi 118 | 119 | # Use a single command for both macOS and Linux 120 | if command -v shasum &>/dev/null; then 121 | checksum=$(shasum -a 256 "$out") 122 | else 123 | checksum=$(sha256sum "$out") 124 | fi 125 | 126 | echo "$checksum" > "$(dirname "$out")/checksum" 127 | echo "$checksum" 128 | } 129 | 130 | ######################## 131 | ######### MAIN ######### 132 | ######################## 133 | 134 | DIR="bin" 135 | SHOULD_VERIFY_BUILD=false 136 | 137 | for arg in "$@"; do 138 | case $arg in 139 | -v|--verify) 140 | SHOULD_VERIFY_BUILD=true 141 | ;; 142 | *) 143 | DIR=$arg 144 | ;; 145 | esac 146 | done 147 | 148 | mkdir -p "$DIR" 149 | OUT="$DIR/bashunit" 150 | 151 | build "$OUT" 152 | 153 | if [[ $SHOULD_VERIFY_BUILD == true ]]; then 154 | build::verify "$OUT" 155 | fi 156 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=SC2155 3 | # shellcheck disable=SC2164 4 | 5 | function is_git_installed() { 6 | command -v git > /dev/null 2>&1 7 | } 8 | 9 | function get_latest_tag() { 10 | local repository_url=$1 11 | 12 | git ls-remote --tags "$repository_url" | 13 | awk '{print $2}' | 14 | sed 's|^refs/tags/||' | 15 | sort -Vr | 16 | head -n 1 17 | } 18 | 19 | function build_and_install_beta() { 20 | echo "> Downloading non-stable version: 'beta'" 21 | 22 | if ! is_git_installed; then 23 | echo "Error: git is not installed." >&2 24 | exit 1 25 | fi 26 | 27 | git clone --depth 1 --no-tags "$BASHUNIT_GIT_REPO" temp_bashunit 2>/dev/null 28 | cd temp_bashunit 29 | ./build.sh >/dev/null 30 | local latest_commit=$(git rev-parse --short=7 HEAD) 31 | # shellcheck disable=SC2103 32 | cd .. 
33 | 34 | local beta_version=$(printf "(non-stable) beta after %s [%s] 🐍 #%s" \ 35 | "$LATEST_BASHUNIT_VERSION" \ 36 | "$(date +'%Y-%m-%d')" \ 37 | "$latest_commit") 38 | 39 | sed -i -e 's/BASHUNIT_VERSION=".*"/BASHUNIT_VERSION="'"$beta_version"'"/g' temp_bashunit/bin/bashunit 40 | cp temp_bashunit/bin/bashunit ./ 41 | rm -rf temp_bashunit 42 | } 43 | 44 | function install() { 45 | if [[ $VERSION != 'latest' ]]; then 46 | TAG="$VERSION" 47 | echo "> Downloading a concrete version: '$TAG'" 48 | else 49 | echo "> Downloading the latest version: '$TAG'" 50 | fi 51 | 52 | if command -v curl > /dev/null 2>&1; then 53 | curl -L -O -J "$BASHUNIT_GIT_REPO/releases/download/$TAG/bashunit" 2>/dev/null 54 | elif command -v wget > /dev/null 2>&1; then 55 | wget "$BASHUNIT_GIT_REPO/releases/download/$TAG/bashunit" 2>/dev/null 56 | else 57 | echo "Cannot download bashunit: curl or wget not found." 58 | fi 59 | chmod u+x "bashunit" 60 | } 61 | 62 | ######################### 63 | ######### MAIN ########## 64 | ######################### 65 | 66 | # Defaults 67 | DIR="lib" 68 | VERSION="latest" 69 | 70 | function is_version() { 71 | [[ "$1" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ || "$1" == "latest" || "$1" == "beta" ]] 72 | } 73 | 74 | # Parse arguments flexibly 75 | if [[ $# -eq 1 ]]; then 76 | if is_version "$1"; then 77 | VERSION="$1" 78 | else 79 | DIR="$1" 80 | fi 81 | elif [[ $# -eq 2 ]]; then 82 | if is_version "$1"; then 83 | VERSION="$1" 84 | DIR="$2" 85 | elif is_version "$2"; then 86 | DIR="$1" 87 | VERSION="$2" 88 | else 89 | echo "Invalid arguments. Expected version or directory." >&2 90 | exit 1 91 | fi 92 | fi 93 | 94 | BASHUNIT_GIT_REPO="https://github.com/TypedDevs/bashunit" 95 | if is_git_installed; then 96 | LATEST_BASHUNIT_VERSION="$(get_latest_tag "$BASHUNIT_GIT_REPO")" 97 | else 98 | LATEST_BASHUNIT_VERSION="0.20.0" 99 | fi 100 | TAG="$LATEST_BASHUNIT_VERSION" 101 | 102 | cd "$(dirname "$0")" 103 | rm -f "$DIR"/bashunit 104 | [ -d "$DIR" ] || mkdir "$DIR" 105 | cd "$DIR" 106 | 107 | if [[ $VERSION == 'beta' ]]; then 108 | build_and_install_beta 109 | else 110 | install 111 | fi 112 | 113 | echo "> bashunit has been installed in the '$DIR' folder" 114 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bashunit-docs", 3 | "version": "0.20.0", 4 | "checksum": "d1eed647b705ff91a3ce233b06cf300fcaf540a411404e2287d50c66699773a8", 5 | "description": "Docs for bashunit a simple testing library for bash scripts", 6 | "main": "index.js", 7 | "repository": "git@github.com:TypedDevs/bashunit.git", 8 | "author": "TypedDevs ", 9 | "license": "MIT", 10 | "type": "module", 11 | "scripts": { 12 | "docs:dev": "vitepress dev docs", 13 | "docs:build": "vitepress build docs", 14 | "docs:preview": "vitepress preview docs" 15 | }, 16 | "dependencies": { 17 | "chart.js": "^4.4.9", 18 | "vanilla-tilt": "^1.8.1" 19 | }, 20 | "devDependencies": { 21 | "vitepress": "^1.6.3", 22 | "vue": "^3.5.16" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/assert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function fail() { 4 | local message="${1:-${FUNCNAME[1]}}" 5 | 6 | local label 7 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 8 | state::add_assertions_failed 9 | console_results::print_failure_message "${label}" "$message" 10 | } 11 | 12 | 
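# Usage sketch (illustrative, not part of the source): `fail` records a
# failure with a custom message, so it can guard conditions that plain
# assertions do not cover, e.g.:
#
#   function test_config_file_is_present() {
#     [[ -f "app.conf" ]] || fail "expected app.conf to exist"
#   }
#
# With no argument, the message defaults to the calling function's name.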
function assert_true() { 13 | local actual="$1" 14 | 15 | # Check for expected literal values first 16 | case "$actual" in 17 | "true"|"0") state::add_assertions_passed; return ;; 18 | "false"|"1") handle_bool_assertion_failure "true or 0" "$actual"; return ;; 19 | esac 20 | 21 | # Run command or eval and check the exit code 22 | run_command_or_eval "$actual" 23 | local exit_code=$? 24 | 25 | if [[ $exit_code -ne 0 ]]; then 26 | handle_bool_assertion_failure "command or function with zero exit code" "exit code: $exit_code" 27 | else 28 | state::add_assertions_passed 29 | fi 30 | } 31 | 32 | function assert_false() { 33 | local actual="$1" 34 | 35 | # Check for expected literal values first 36 | case "$actual" in 37 | "false"|"1") state::add_assertions_passed; return ;; 38 | "true"|"0") handle_bool_assertion_failure "false or 1" "$actual"; return ;; 39 | esac 40 | 41 | # Run command or eval and check the exit code 42 | run_command_or_eval "$actual" 43 | local exit_code=$? 44 | 45 | if [[ $exit_code -eq 0 ]]; then 46 | handle_bool_assertion_failure "command or function with non-zero exit code" "exit code: $exit_code" 47 | else 48 | state::add_assertions_passed 49 | fi 50 | } 51 | 52 | function run_command_or_eval() { 53 | local cmd="$1" 54 | 55 | if [[ "$cmd" =~ ^eval ]]; then 56 | eval "${cmd#eval }" &> /dev/null 57 | elif [[ "$(command -v "$cmd")" =~ ^alias ]]; then 58 | eval "$cmd" &> /dev/null 59 | else 60 | "$cmd" &> /dev/null 61 | fi 62 | return $? 63 | } 64 | 65 | function handle_bool_assertion_failure() { 66 | local expected="$1" 67 | local got="$2" 68 | local label 69 | label="$(helper::normalize_test_function_name "${FUNCNAME[2]}")" 70 | 71 | state::add_assertions_failed 72 | console_results::print_failed_test "$label" "$expected" "but got " "$got" 73 | } 74 | 75 | function assert_same() { 76 | local expected="$1" 77 | local actual="$2" 78 | 79 | if [[ "$expected" != "$actual" ]]; then 80 | local label 81 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 82 | state::add_assertions_failed 83 | console_results::print_failed_test "${label}" "${expected}" "but got " "${actual}" 84 | return 85 | fi 86 | 87 | state::add_assertions_passed 88 | } 89 | 90 | function assert_equals() { 91 | local expected="$1" 92 | local actual="$2" 93 | 94 | # Remove ANSI escape sequences (color codes) 95 | local actual_cleaned 96 | actual_cleaned=$(echo -e "$actual" | sed -r "s/\x1B\[[0-9;]*[mK]//g") 97 | local expected_cleaned 98 | expected_cleaned=$(echo -e "$expected" | sed -r "s/\x1B\[[0-9;]*[mK]//g") 99 | 100 | # Remove all control characters and whitespace (optional, depending on your needs) 101 | actual_cleaned=$(echo "$actual_cleaned" | tr -d '[:cntrl:]') 102 | expected_cleaned=$(echo "$expected_cleaned" | tr -d '[:cntrl:]') 103 | 104 | if [[ "$expected_cleaned" != "$actual_cleaned" ]]; then 105 | local label 106 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 107 | state::add_assertions_failed 108 | console_results::print_failed_test "${label}" "${expected_cleaned}" "but got " "${actual_cleaned}" 109 | return 110 | fi 111 | 112 | state::add_assertions_passed 113 | } 114 | 115 | function assert_not_equals() { 116 | local expected="$1" 117 | local actual="$2" 118 | 119 | # Remove ANSI escape sequences (color codes) 120 | local actual_cleaned 121 | actual_cleaned=$(echo -e "$actual" | sed -r "s/\x1B\[[0-9;]*[mK]//g") 122 | local expected_cleaned 123 | expected_cleaned=$(echo -e "$expected" | sed -r "s/\x1B\[[0-9;]*[mK]//g") 124 | 125 | # Remove all control 
characters and whitespace (optional, depending on your needs) 126 | actual_cleaned=$(echo "$actual_cleaned" | tr -d '[:cntrl:]') 127 | expected_cleaned=$(echo "$expected_cleaned" | tr -d '[:cntrl:]') 128 | 129 | if [[ "$expected_cleaned" == "$actual_cleaned" ]]; then 130 | local label 131 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 132 | state::add_assertions_failed 133 | console_results::print_failed_test "${label}" "${expected_cleaned}" "but got " "${actual_cleaned}" 134 | return 135 | fi 136 | 137 | state::add_assertions_passed 138 | } 139 | 140 | function assert_empty() { 141 | local expected="$1" 142 | 143 | if [[ "$expected" != "" ]]; then 144 | local label 145 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 146 | state::add_assertions_failed 147 | console_results::print_failed_test "${label}" "to be empty" "but got " "${expected}" 148 | return 149 | fi 150 | 151 | state::add_assertions_passed 152 | } 153 | 154 | function assert_not_empty() { 155 | local expected="$1" 156 | 157 | if [[ "$expected" == "" ]]; then 158 | local label 159 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 160 | state::add_assertions_failed 161 | console_results::print_failed_test "${label}" "to not be empty" "but got " "${expected}" 162 | return 163 | fi 164 | 165 | state::add_assertions_passed 166 | } 167 | 168 | function assert_not_same() { 169 | local expected="$1" 170 | local actual="$2" 171 | 172 | if [[ "$expected" == "$actual" ]]; then 173 | local label 174 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 175 | state::add_assertions_failed 176 | console_results::print_failed_test "${label}" "${expected}" "but got " "${actual}" 177 | return 178 | fi 179 | 180 | state::add_assertions_passed 181 | } 182 | 183 | function assert_contains() { 184 | local expected="$1" 185 | local actual_arr=("${@:2}") 186 | local actual 187 | actual=$(printf '%s\n' "${actual_arr[@]}") 188 | 189 | if ! [[ $actual == *"$expected"* ]]; then 190 | local label 191 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 192 | state::add_assertions_failed 193 | console_results::print_failed_test "${label}" "${actual}" "to contain" "${expected}" 194 | return 195 | fi 196 | 197 | state::add_assertions_passed 198 | } 199 | 200 | function assert_contains_ignore_case() { 201 | local expected="$1" 202 | local actual="$2" 203 | 204 | shopt -s nocasematch 205 | 206 | if ! [[ $actual =~ $expected ]]; then 207 | local label 208 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 209 | state::add_assertions_failed 210 | console_results::print_failed_test "${label}" "${actual}" "to contain" "${expected}" 211 | shopt -u nocasematch 212 | return 213 | fi 214 | 215 | shopt -u nocasematch 216 | state::add_assertions_passed 217 | } 218 | 219 | function assert_not_contains() { 220 | local expected="$1" 221 | local actual_arr=("${@:2}") 222 | local actual 223 | actual=$(printf '%s\n' "${actual_arr[@]}") 224 | 225 | if [[ $actual == *"$expected"* ]]; then 226 | local label 227 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 228 | state::add_assertions_failed 229 | console_results::print_failed_test "${label}" "${actual}" "to not contain" "${expected}" 230 | return 231 | fi 232 | 233 | state::add_assertions_passed 234 | } 235 | 236 | function assert_matches() { 237 | local expected="$1" 238 | local actual_arr=("${@:2}") 239 | local actual 240 | actual=$(printf '%s\n' "${actual_arr[@]}") 241 | 242 | if ! 
[[ $actual =~ $expected ]]; then 243 | local label 244 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 245 | state::add_assertions_failed 246 | console_results::print_failed_test "${label}" "${actual}" "to match" "${expected}" 247 | return 248 | fi 249 | 250 | state::add_assertions_passed 251 | } 252 | 253 | function assert_not_matches() { 254 | local expected="$1" 255 | local actual_arr=("${@:2}") 256 | local actual 257 | actual=$(printf '%s\n' "${actual_arr[@]}") 258 | 259 | if [[ $actual =~ $expected ]]; then 260 | local label 261 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 262 | state::add_assertions_failed 263 | console_results::print_failed_test "${label}" "${actual}" "to not match" "${expected}" 264 | return 265 | fi 266 | 267 | state::add_assertions_passed 268 | } 269 | 270 | function assert_exit_code() { 271 | local actual_exit_code=${3-"$?"} 272 | local expected_exit_code="$1" 273 | 274 | if [[ "$actual_exit_code" -ne "$expected_exit_code" ]]; then 275 | local label 276 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 277 | state::add_assertions_failed 278 | console_results::print_failed_test "${label}" "${actual_exit_code}" "to be" "${expected_exit_code}" 279 | return 280 | fi 281 | 282 | state::add_assertions_passed 283 | } 284 | 285 | function assert_successful_code() { 286 | local actual_exit_code=${3-"$?"} 287 | local expected_exit_code=0 288 | 289 | if [[ "$actual_exit_code" -ne "$expected_exit_code" ]]; then 290 | local label 291 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 292 | state::add_assertions_failed 293 | console_results::print_failed_test "${label}" "${actual_exit_code}" "to be exactly" "${expected_exit_code}" 294 | return 295 | fi 296 | 297 | state::add_assertions_passed 298 | } 299 | 300 | function assert_general_error() { 301 | local actual_exit_code=${3-"$?"} 302 | local expected_exit_code=1 303 | 304 | if [[ "$actual_exit_code" -ne "$expected_exit_code" ]]; then 305 | local label 306 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 307 | state::add_assertions_failed 308 | console_results::print_failed_test "${label}" "${actual_exit_code}" "to be exactly" "${expected_exit_code}" 309 | return 310 | fi 311 | 312 | state::add_assertions_passed 313 | } 314 | 315 | function assert_command_not_found() { 316 | local actual_exit_code=${3-"$?"} 317 | local expected_exit_code=127 318 | 319 | if [[ $actual_exit_code -ne "$expected_exit_code" ]]; then 320 | local label 321 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 322 | state::add_assertions_failed 323 | console_results::print_failed_test "${label}" "${actual_exit_code}" "to be exactly" "${expected_exit_code}" 324 | return 325 | fi 326 | 327 | state::add_assertions_passed 328 | } 329 | 330 | function assert_string_starts_with() { 331 | local expected="$1" 332 | local actual_arr=("${@:2}") 333 | local actual 334 | actual=$(printf '%s\n' "${actual_arr[@]}") 335 | 336 | if ! 
[[ $actual =~ ^"$expected"* ]]; then 337 | local label 338 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 339 | state::add_assertions_failed 340 | console_results::print_failed_test "${label}" "${actual}" "to start with" "${expected}" 341 | return 342 | fi 343 | 344 | state::add_assertions_passed 345 | } 346 | 347 | function assert_string_not_starts_with() { 348 | local expected="$1" 349 | local actual="$2" 350 | 351 | if [[ $actual =~ ^"$expected"* ]]; then 352 | local label 353 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 354 | state::add_assertions_failed 355 | console_results::print_failed_test "${label}" "${actual}" "to not start with" "${expected}" 356 | return 357 | fi 358 | 359 | state::add_assertions_passed 360 | } 361 | 362 | function assert_string_ends_with() { 363 | local expected="$1" 364 | local actual_arr=("${@:2}") 365 | local actual 366 | actual=$(printf '%s\n' "${actual_arr[@]}") 367 | 368 | if ! [[ $actual =~ .*"$expected"$ ]]; then 369 | local label 370 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 371 | state::add_assertions_failed 372 | console_results::print_failed_test "${label}" "${actual}" "to end with" "${expected}" 373 | return 374 | fi 375 | 376 | state::add_assertions_passed 377 | } 378 | 379 | function assert_string_not_ends_with() { 380 | local expected="$1" 381 | local actual_arr=("${@:2}") 382 | local actual 383 | actual=$(printf '%s\n' "${actual_arr[@]}") 384 | 385 | if [[ $actual =~ .*"$expected"$ ]]; then 386 | local label 387 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 388 | state::add_assertions_failed 389 | console_results::print_failed_test "${label}" "${actual}" "to not end with" "${expected}" 390 | return 391 | fi 392 | 393 | state::add_assertions_passed 394 | } 395 | 396 | function assert_less_than() { 397 | local expected="$1" 398 | local actual="$2" 399 | 400 | if ! [[ "$actual" -lt "$expected" ]]; then 401 | local label 402 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 403 | state::add_assertions_failed 404 | console_results::print_failed_test "${label}" "${actual}" "to be less than" "${expected}" 405 | return 406 | fi 407 | 408 | state::add_assertions_passed 409 | } 410 | 411 | function assert_less_or_equal_than() { 412 | local expected="$1" 413 | local actual="$2" 414 | 415 | if ! [[ "$actual" -le "$expected" ]]; then 416 | local label 417 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 418 | state::add_assertions_failed 419 | console_results::print_failed_test "${label}" "${actual}" "to be less or equal than" "${expected}" 420 | return 421 | fi 422 | 423 | state::add_assertions_passed 424 | } 425 | 426 | function assert_greater_than() { 427 | local expected="$1" 428 | local actual="$2" 429 | 430 | if ! [[ "$actual" -gt "$expected" ]]; then 431 | local label 432 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 433 | state::add_assertions_failed 434 | console_results::print_failed_test "${label}" "${actual}" "to be greater than" "${expected}" 435 | return 436 | fi 437 | 438 | state::add_assertions_passed 439 | } 440 | 441 | function assert_greater_or_equal_than() { 442 | local expected="$1" 443 | local actual="$2" 444 | 445 | if ! 
[[ "$actual" -ge "$expected" ]]; then 446 | local label 447 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 448 | state::add_assertions_failed 449 | console_results::print_failed_test "${label}" "${actual}" "to be greater or equal than" "${expected}" 450 | return 451 | fi 452 | 453 | state::add_assertions_passed 454 | } 455 | 456 | function assert_line_count() { 457 | local expected="$1" 458 | local input_arr=("${@:2}") 459 | local input_str 460 | input_str=$(printf '%s\n' "${input_arr[@]}") 461 | 462 | if [ -z "$input_str" ]; then 463 | local actual=0 464 | else 465 | local actual 466 | actual=$(echo "$input_str" | wc -l | tr -d '[:blank:]') 467 | additional_new_lines=$(grep -o '\\n' <<< "$input_str" | wc -l | tr -d '[:blank:]') 468 | ((actual+=additional_new_lines)) 469 | fi 470 | 471 | if [[ "$expected" != "$actual" ]]; then 472 | local label 473 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 474 | 475 | state::add_assertions_failed 476 | console_results::print_failed_test "${label}" "${input_str}"\ 477 | "to contain number of lines equal to" "${expected}"\ 478 | "but found" "${actual}" 479 | return 480 | fi 481 | 482 | state::add_assertions_passed 483 | } 484 | -------------------------------------------------------------------------------- /src/assert_arrays.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function assert_array_contains() { 4 | local expected="$1" 5 | local label 6 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 7 | shift 8 | 9 | local actual=("${@}") 10 | 11 | if ! [[ "${actual[*]}" == *"$expected"* ]]; then 12 | state::add_assertions_failed 13 | console_results::print_failed_test "${label}" "${actual[*]}" "to contain" "${expected}" 14 | return 15 | fi 16 | 17 | state::add_assertions_passed 18 | } 19 | 20 | function assert_array_not_contains() { 21 | local expected="$1" 22 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 23 | shift 24 | local actual=("$@") 25 | 26 | if [[ "${actual[*]}" == *"$expected"* ]]; then 27 | state::add_assertions_failed 28 | console_results::print_failed_test "${label}" "${actual[*]}" "to not contain" "${expected}" 29 | return 30 | fi 31 | 32 | state::add_assertions_passed 33 | } 34 | -------------------------------------------------------------------------------- /src/assert_files.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function assert_file_exists() { 4 | local expected="$1" 5 | local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 6 | 7 | if [[ ! -f "$expected" ]]; then 8 | state::add_assertions_failed 9 | console_results::print_failed_test "${label}" "${expected}" "to exist but" "do not exist" 10 | return 11 | fi 12 | 13 | state::add_assertions_passed 14 | } 15 | 16 | function assert_file_not_exists() { 17 | local expected="$1" 18 | local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 19 | 20 | if [[ -f "$expected" ]]; then 21 | state::add_assertions_failed 22 | console_results::print_failed_test "${label}" "${expected}" "to not exist but" "the file exists" 23 | return 24 | fi 25 | 26 | state::add_assertions_passed 27 | } 28 | 29 | function assert_is_file() { 30 | local expected="$1" 31 | local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 32 | 33 | if [[ ! 
-f "$expected" ]]; then 34 | state::add_assertions_failed 35 | console_results::print_failed_test "${label}" "${expected}" "to be a file" "but is not a file" 36 | return 37 | fi 38 | 39 | state::add_assertions_passed 40 | } 41 | 42 | function assert_is_file_empty() { 43 | local expected="$1" 44 | local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 45 | 46 | if [[ -s "$expected" ]]; then 47 | state::add_assertions_failed 48 | console_results::print_failed_test "${label}" "${expected}" "to be empty" "but is not empty" 49 | return 50 | fi 51 | 52 | state::add_assertions_passed 53 | } 54 | 55 | function assert_files_equals() { 56 | local expected="$1" 57 | local actual="$2" 58 | 59 | if [[ "$(diff -u "$expected" "$actual")" != '' ]] ; then 60 | local label 61 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 62 | state::add_assertions_failed 63 | 64 | console_results::print_failed_test "${label}" "${expected}" "Compared" "${actual}" \ 65 | "Diff" "$(diff -u "$expected" "$actual" | sed '1,2d')" 66 | return 67 | fi 68 | 69 | state::add_assertions_passed 70 | } 71 | 72 | function assert_files_not_equals() { 73 | local expected="$1" 74 | local actual="$2" 75 | 76 | if [[ "$(diff -u "$expected" "$actual")" == '' ]] ; then 77 | local label 78 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 79 | state::add_assertions_failed 80 | 81 | console_results::print_failed_test "${label}" "${expected}" "Compared" "${actual}" \ 82 | "Diff" "Files are equals" 83 | return 84 | fi 85 | 86 | state::add_assertions_passed 87 | } 88 | 89 | function assert_file_contains() { 90 | local file="$1" 91 | local string="$2" 92 | 93 | if ! grep -F -q "$string" "$file"; then 94 | local label 95 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 96 | state::add_assertions_failed 97 | 98 | console_results::print_failed_test "${label}" "${file}" "to contain" "${string}" 99 | return 100 | fi 101 | 102 | state::add_assertions_passed 103 | } 104 | 105 | function assert_file_not_contains() { 106 | local file="$1" 107 | local string="$2" 108 | 109 | if grep -q "$string" "$file"; then 110 | local label 111 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 112 | state::add_assertions_failed 113 | 114 | console_results::print_failed_test "${label}" "${file}" "to not contain" "${string}" 115 | return 116 | fi 117 | 118 | state::add_assertions_passed 119 | } 120 | -------------------------------------------------------------------------------- /src/assert_folders.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function assert_directory_exists() { 4 | local expected="$1" 5 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 6 | 7 | if [[ ! 
-d "$expected" ]]; then 8 | state::add_assertions_failed 9 | console_results::print_failed_test "${label}" "${expected}" "to exist but" "do not exist" 10 | return 11 | fi 12 | 13 | state::add_assertions_passed 14 | } 15 | 16 | function assert_directory_not_exists() { 17 | local expected="$1" 18 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 19 | 20 | if [[ -d "$expected" ]]; then 21 | state::add_assertions_failed 22 | console_results::print_failed_test "${label}" "${expected}" "to not exist but" "the directory exists" 23 | return 24 | fi 25 | 26 | state::add_assertions_passed 27 | } 28 | 29 | function assert_is_directory() { 30 | local expected="$1" 31 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 32 | 33 | if [[ ! -d "$expected" ]]; then 34 | state::add_assertions_failed 35 | console_results::print_failed_test "${label}" "${expected}" "to be a directory" "but is not a directory" 36 | return 37 | fi 38 | 39 | state::add_assertions_passed 40 | } 41 | 42 | function assert_is_directory_empty() { 43 | local expected="$1" 44 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 45 | 46 | if [[ ! -d "$expected" || -n "$(ls -A "$expected")" ]]; then 47 | state::add_assertions_failed 48 | console_results::print_failed_test "${label}" "${expected}" "to be empty" "but is not empty" 49 | return 50 | fi 51 | 52 | state::add_assertions_passed 53 | } 54 | 55 | function assert_is_directory_not_empty() { 56 | local expected="$1" 57 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 58 | 59 | if [[ ! -d "$expected" || -z "$(ls -A "$expected")" ]]; then 60 | state::add_assertions_failed 61 | console_results::print_failed_test "${label}" "${expected}" "to not be empty" "but is empty" 62 | return 63 | fi 64 | 65 | state::add_assertions_passed 66 | } 67 | 68 | function assert_is_directory_readable() { 69 | local expected="$1" 70 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 71 | 72 | if [[ ! -d "$expected" || ! -r "$expected" || ! -x "$expected" ]]; then 73 | state::add_assertions_failed 74 | console_results::print_failed_test "${label}" "${expected}" "to be readable" "but is not readable" 75 | return 76 | fi 77 | 78 | state::add_assertions_passed 79 | } 80 | 81 | function assert_is_directory_not_readable() { 82 | local expected="$1" 83 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 84 | 85 | if [[ ! -d "$expected" ]] || [[ -r "$expected" && -x "$expected" ]]; then 86 | state::add_assertions_failed 87 | console_results::print_failed_test "${label}" "${expected}" "to be not readable" "but is readable" 88 | return 89 | fi 90 | 91 | state::add_assertions_passed 92 | } 93 | 94 | function assert_is_directory_writable() { 95 | local expected="$1" 96 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 97 | 98 | if [[ ! -d "$expected" || ! -w "$expected" ]]; then 99 | state::add_assertions_failed 100 | console_results::print_failed_test "${label}" "${expected}" "to be writable" "but is not writable" 101 | return 102 | fi 103 | 104 | state::add_assertions_passed 105 | } 106 | 107 | function assert_is_directory_not_writable() { 108 | local expected="$1" 109 | local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}" 110 | 111 | if [[ ! 
-d "$expected" || -w "$expected" ]]; then 112 | state::add_assertions_failed 113 | console_results::print_failed_test "${label}" "${expected}" "to be not writable" "but is writable" 114 | return 115 | fi 116 | 117 | state::add_assertions_passed 118 | } 119 | -------------------------------------------------------------------------------- /src/assert_snapshot.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function snapshot::match_with_placeholder() { 4 | local actual="$1" 5 | local snapshot="$2" 6 | local placeholder="${BASHUNIT_SNAPSHOT_PLACEHOLDER:-::ignore::}" 7 | local token="__BASHUNIT_IGNORE__" 8 | 9 | local sanitized_snapshot="${snapshot//$placeholder/$token}" 10 | local regex 11 | regex=$(printf '%s' "$sanitized_snapshot" | sed -e 's/[.[\\^$*+?{}()|]/\\&/g') 12 | regex="${regex//$token/(.|\\n)*}" 13 | regex="^${regex}$" 14 | 15 | if command -v perl >/dev/null 2>&1; then 16 | if REGEX="$regex" perl -0 -e 'my $r=$ENV{REGEX}; exit((join("",<>)) =~ /$r/s ? 0 : 1);' <<< "$actual"; then 17 | return 0 18 | else 19 | return 1 20 | fi 21 | else 22 | # fallback: only supports single-line ignores 23 | local fallback_pattern 24 | fallback_pattern=$(printf '%s' "$snapshot" | sed "s|$placeholder|.*|g") 25 | # escape other special regex chars 26 | fallback_pattern=$(printf '%s' "$fallback_pattern" | sed -e 's/[][\.^$*+?{}|()]/\\&/g') 27 | fallback_pattern="^${fallback_pattern}$" 28 | 29 | if printf '%s\n' "$actual" | grep -Eq "$fallback_pattern"; then 30 | return 0 31 | else 32 | return 1 33 | fi 34 | fi 35 | } 36 | 37 | function assert_match_snapshot() { 38 | local actual 39 | actual=$(echo -n "$1" | tr -d '\r') 40 | local directory 41 | directory="./$(dirname "${BASH_SOURCE[1]}")/snapshots" 42 | local test_file 43 | test_file="$(helper::normalize_variable_name "$(basename "${BASH_SOURCE[1]}")")" 44 | local snapshot_name 45 | snapshot_name="$(helper::normalize_variable_name "${FUNCNAME[1]}").snapshot" 46 | local snapshot_file 47 | snapshot_file="${directory}/${test_file}.${snapshot_name}" 48 | 49 | if [[ ! -f "$snapshot_file" ]]; then 50 | mkdir -p "$directory" 51 | echo "$actual" > "$snapshot_file" 52 | 53 | state::add_assertions_snapshot 54 | return 55 | fi 56 | 57 | local snapshot 58 | snapshot=$(tr -d '\r' < "$snapshot_file") 59 | 60 | if ! snapshot::match_with_placeholder "$actual" "$snapshot"; then 61 | local label 62 | label=$(helper::normalize_test_function_name "${FUNCNAME[1]}") 63 | 64 | state::add_assertions_failed 65 | console_results::print_failed_snapshot_test "$label" "$snapshot_file" 66 | 67 | return 68 | fi 69 | 70 | state::add_assertions_passed 71 | } 72 | 73 | function assert_match_snapshot_ignore_colors() { 74 | local actual 75 | actual=$(echo -n "$1" | sed -r 's/\x1B\[[0-9;]*[mK]//g' | tr -d '\r') 76 | 77 | local directory 78 | directory="./$(dirname "${BASH_SOURCE[1]}")/snapshots" 79 | local test_file 80 | test_file="$(helper::normalize_variable_name "$(basename "${BASH_SOURCE[1]}")")" 81 | local snapshot_name 82 | snapshot_name="$(helper::normalize_variable_name "${FUNCNAME[1]}").snapshot" 83 | local snapshot_file 84 | snapshot_file="${directory}/${test_file}.${snapshot_name}" 85 | 86 | if [[ ! -f "$snapshot_file" ]]; then 87 | mkdir -p "$directory" 88 | echo "$actual" > "$snapshot_file" 89 | 90 | state::add_assertions_snapshot 91 | return 92 | fi 93 | 94 | local snapshot 95 | snapshot=$(tr -d '\r' < "$snapshot_file") 96 | 97 | if ! 
snapshot::match_with_placeholder "$actual" "$snapshot"; then 98 | local label 99 | label=$(helper::normalize_test_function_name "${FUNCNAME[1]}") 100 | 101 | state::add_assertions_failed 102 | console_results::print_failed_snapshot_test "$label" "$snapshot_file" 103 | 104 | return 105 | fi 106 | 107 | state::add_assertions_passed 108 | } 109 | -------------------------------------------------------------------------------- /src/assertions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | source "$BASHUNIT_ROOT_DIR/src/assert.sh" 4 | source "$BASHUNIT_ROOT_DIR/src/assert_arrays.sh" 5 | source "$BASHUNIT_ROOT_DIR/src/assert_files.sh" 6 | source "$BASHUNIT_ROOT_DIR/src/assert_folders.sh" 7 | source "$BASHUNIT_ROOT_DIR/src/assert_snapshot.sh" 8 | source "$BASHUNIT_ROOT_DIR/src/skip_todo.sh" 9 | source "$BASHUNIT_ROOT_DIR/src/test_doubles.sh" 10 | -------------------------------------------------------------------------------- /src/bashunit.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # This file provides a facade to developers who want 4 | # to interact with the internals of bashunit. 5 | # e.g. adding custom assertions 6 | 7 | function bashunit::assertion_failed() { 8 | local expected=$1 9 | local actual=$2 10 | local failure_condition_message=${3:-"but got "} 11 | 12 | local label 13 | label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")" 14 | state::add_assertions_failed 15 | console_results::print_failed_test "${label}" "${expected}" \ 16 | "$failure_condition_message" "${actual}" 17 | } 18 | 19 | function bashunit::assertion_passed() { 20 | state::add_assertions_passed 21 | } 22 | -------------------------------------------------------------------------------- /src/benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | _BENCH_NAMES=() 4 | _BENCH_REVS=() 5 | _BENCH_ITS=() 6 | _BENCH_AVERAGES=() 7 | _BENCH_MAX_MILLIS=() 8 | 9 | function benchmark::parse_annotations() { 10 | local fn_name=$1 11 | local script=$2 12 | local revs=1 13 | local its=1 14 | local max_ms="" 15 | 16 | local annotation 17 | annotation=$(awk "/function[[:space:]]+${fn_name}[[:space:]]*\(/ {print prev; exit} {prev=\$0}" "$script") 18 | 19 | if [[ $annotation =~ @revs=([0-9]+) ]]; then 20 | revs="${BASH_REMATCH[1]}" 21 | elif [[ $annotation =~ @revolutions=([0-9]+) ]]; then 22 | revs="${BASH_REMATCH[1]}" 23 | fi 24 | 25 | if [[ $annotation =~ @its=([0-9]+) ]]; then 26 | its="${BASH_REMATCH[1]}" 27 | elif [[ $annotation =~ @iterations=([0-9]+) ]]; then 28 | its="${BASH_REMATCH[1]}" 29 | fi 30 | 31 | if [[ $annotation =~ @max_ms=([0-9.]+) ]]; then 32 | max_ms="${BASH_REMATCH[1]}" 33 | fi 34 | # @max_ms sets an average-duration threshold in milliseconds; unlike 35 | # @revs and @its, it has no long-form alias. 36 | 37 | if [[ -n "$max_ms" ]]; then 38 | echo "$revs" "$its" "$max_ms" 39 | else 40 | echo "$revs" "$its" 41 | fi 42 | } 43 | 44 | function benchmark::add_result() { 45 | _BENCH_NAMES+=("$1") 46 | _BENCH_REVS+=("$2") 47 | _BENCH_ITS+=("$3") 48 | _BENCH_AVERAGES+=("$4") 49 | _BENCH_MAX_MILLIS+=("$5") 50 | } 51 | 52 | # shellcheck disable=SC2155 53 | function benchmark::run_function() { 54 | local fn_name=$1 55 | local revs=$2 56 | local its=$3 57 | local max_ms=$4 58 | local durations=() 59 | 60 | for ((i=1; i<=its; i++)); do 61 | local start_time=$(clock::now) 62 | ( 63 | for ((r=1; r<=revs; r++)); do 64 | "$fn_name" >/dev/null 
2>&1 65 | done 66 | ) 67 | local end_time=$(clock::now) 68 | local dur_ns=$(math::calculate "($end_time - $start_time)") 69 | local dur_ms=$(math::calculate "$dur_ns / 1000000") 70 | durations+=("$dur_ms") 71 | 72 | if env::is_bench_mode_enabled; then 73 | local label="$(helper::normalize_test_function_name "$fn_name")" 74 | local line="$label [$i/$its] ${dur_ms} ms" 75 | state::print_line "successful" "$line" 76 | fi 77 | done 78 | 79 | local sum=0 80 | for d in "${durations[@]}"; do 81 | sum=$(math::calculate "$sum + $d") 82 | done 83 | local avg=$(math::calculate "$sum / ${#durations[@]}") 84 | benchmark::add_result "$fn_name" "$revs" "$its" "$avg" "$max_ms" 85 | } 86 | 87 | function benchmark::print_results() { 88 | if ! env::is_bench_mode_enabled; then 89 | return 90 | fi 91 | 92 | if (( ${#_BENCH_NAMES[@]} == 0 )); then 93 | return 94 | fi 95 | 96 | if env::is_simple_output_enabled; then 97 | printf "\n" 98 | fi 99 | 100 | printf "\nBenchmark Results (avg ms)\n" 101 | print_line 80 "=" 102 | printf "\n" 103 | 104 | local has_threshold=false 105 | for val in "${_BENCH_MAX_MILLIS[@]}"; do 106 | if [[ -n "$val" ]]; then 107 | has_threshold=true 108 | break 109 | fi 110 | done 111 | 112 | if $has_threshold; then 113 | printf '%-40s %6s %6s %10s %12s\n' "Name" "Revs" "Its" "Avg(ms)" "Status" 114 | else 115 | printf '%-40s %6s %6s %10s\n' "Name" "Revs" "Its" "Avg(ms)" 116 | fi 117 | 118 | for i in "${!_BENCH_NAMES[@]}"; do 119 | local name="${_BENCH_NAMES[$i]}" 120 | local revs="${_BENCH_REVS[$i]}" 121 | local its="${_BENCH_ITS[$i]}" 122 | local avg="${_BENCH_AVERAGES[$i]}" 123 | local max_ms="${_BENCH_MAX_MILLIS[$i]}" 124 | 125 | if [[ -z "$max_ms" ]]; then 126 | printf '%-40s %6s %6s %10s\n' "$name" "$revs" "$its" "$avg" 127 | continue 128 | fi 129 | 130 | if (( $(echo "$avg <= $max_ms" | bc -l) )); then 131 | local raw="≤ ${max_ms}" 132 | printf -v padded "%14s" "$raw" 133 | printf '%-40s %6s %6s %10s %12s\n' "$name" "$revs" "$its" "$avg" "$padded" 134 | continue 135 | fi 136 | 137 | local raw="> ${max_ms}" 138 | printf -v padded "%12s" "$raw" 139 | printf '%-40s %6s %6s %10s %s%s%s\n' \ 140 | "$name" "$revs" "$its" "$avg" \ 141 | "$_COLOR_FAILED" "$padded" "${_COLOR_DEFAULT}" 142 | done 143 | 144 | console_results::print_execution_time 145 | } 146 | -------------------------------------------------------------------------------- /src/check_os.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # shellcheck disable=SC2034 4 | _OS="Unknown" 5 | _DISTRO="Unknown" 6 | 7 | function check_os::init() { 8 | if check_os::is_linux; then 9 | _OS="Linux" 10 | if check_os::is_ubuntu; then 11 | _DISTRO="Ubuntu" 12 | elif check_os::is_alpine; then 13 | _DISTRO="Alpine" 14 | else 15 | _DISTRO="Other" 16 | fi 17 | elif check_os::is_macos; then 18 | _OS="OSX" 19 | elif check_os::is_windows; then 20 | _OS="Windows" 21 | else 22 | _OS="Unknown" 23 | _DISTRO="Unknown" 24 | fi 25 | } 26 | 27 | function check_os::is_ubuntu() { 28 | command -v apt > /dev/null 29 | } 30 | 31 | function check_os::is_alpine() { 32 | command -v apk > /dev/null 33 | } 34 | 35 | function check_os::is_linux() { 36 | [[ "$(uname)" == "Linux" ]] 37 | } 38 | 39 | function check_os::is_macos() { 40 | [[ "$(uname)" == "Darwin" ]] 41 | } 42 | 43 | function check_os::is_windows() { 44 | case "$(uname)" in 45 | *MINGW*|*MSYS*|*CYGWIN*) 46 | return 0 47 | ;; 48 | *) 49 | return 1 50 | ;; 51 | esac 52 | } 53 | 54 | function check_os::is_busybox() { 55 | 56 | case "$_DISTRO" in 
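# Alpine is the only distro this helper maps to BusyBox userland; any other value falls through to the default branch below.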
57 | 58 | "Alpine") 59 | return 0 60 | ;; 61 | *) 62 | return 1 63 | ;; 64 | esac 65 | } 66 | 67 | check_os::init 68 | 69 | export _OS 70 | export _DISTRO 71 | export -f check_os::is_alpine 72 | export -f check_os::is_busybox 73 | export -f check_os::is_ubuntu 74 | -------------------------------------------------------------------------------- /src/clock.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function clock::now() { 4 | local shell_time 5 | local attempts=() 6 | 7 | # 1. Try using native shell EPOCHREALTIME (if available) 8 | attempts+=("EPOCHREALTIME") 9 | if shell_time="$(clock::shell_time)"; then 10 | local seconds="${shell_time%%.*}" 11 | local microseconds="${shell_time#*.}" 12 | math::calculate "($seconds * 1000000000) + ($microseconds * 1000)" 13 | return 0 14 | fi 15 | 16 | # 2. Try Perl with Time::HiRes 17 | attempts+=("Perl") 18 | if dependencies::has_perl && perl -MTime::HiRes -e "" &>/dev/null; then 19 | perl -MTime::HiRes -e 'printf("%.0f\n", Time::HiRes::time() * 1000000000)' && return 0 20 | fi 21 | 22 | # 3. Try Python 3 with time module 23 | attempts+=("Python") 24 | if dependencies::has_python; then 25 | python - <<'EOF' 26 | import time, sys 27 | sys.stdout.write(str(int(time.time() * 1_000_000_000))) 28 | EOF 29 | return 0 30 | fi 31 | 32 | # 4. Try Node.js 33 | attempts+=("Node") 34 | if dependencies::has_node; then 35 | node -e 'process.stdout.write((BigInt(Date.now()) * 1000000n).toString())' && return 0 36 | fi 37 | # 5. Windows fallback with PowerShell 38 | attempts+=("PowerShell") 39 | if check_os::is_windows && dependencies::has_powershell; then 40 | powershell -Command " 41 | \$unixEpoch = [DateTime]'1970-01-01 00:00:00'; 42 | \$now = [DateTime]::UtcNow; 43 | \$ticksSinceEpoch = (\$now - \$unixEpoch).Ticks; 44 | \$nanosecondsSinceEpoch = \$ticksSinceEpoch * 100; 45 | Write-Output \$nanosecondsSinceEpoch 46 | " && return 0 47 | fi 48 | 49 | # 6. Unix fallback using `date +%s%N` (if not macOS or Alpine) 50 | attempts+=("date") 51 | if ! check_os::is_macos && ! check_os::is_alpine; then 52 | local result 53 | result=$(date +%s%N 2>/dev/null) 54 | if [[ "$result" != *N && "$result" =~ ^[0-9]+$ ]]; then 55 | echo "$result" 56 | return 0 57 | fi 58 | fi 59 | 60 | # 7. All methods failed 61 | printf "clock::now implementations tried: %s\n" "${attempts[*]}" >&2 62 | echo "" 63 | return 1 64 | } 65 | 66 | function clock::shell_time() { 67 | # Get time directly from the shell variable EPOCHREALTIME (Bash 5+) 68 | [[ -n ${EPOCHREALTIME+x} && -n "$EPOCHREALTIME" ]] && LC_ALL=C echo "$EPOCHREALTIME" 69 | } 70 | 71 | function clock::total_runtime_in_milliseconds() { 72 | local end_time 73 | end_time=$(clock::now) 74 | if [[ -n $end_time ]]; then 75 | math::calculate "($end_time - $_START_TIME) / 1000000" 76 | else 77 | echo "" 78 | fi 79 | } 80 | 81 | function clock::total_runtime_in_nanoseconds() { 82 | local end_time 83 | end_time=$(clock::now) 84 | if [[ -n $end_time ]]; then 85 | math::calculate "$end_time - $_START_TIME" 86 | else 87 | echo "" 88 | fi 89 | } 90 | 91 | function clock::init() { 92 | _START_TIME=$(clock::now) 93 | } 94 | -------------------------------------------------------------------------------- /src/colors.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Pass in any number of ANSI SGR codes. 
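# For instance, `sgr 1 31` emits the sequence for bold red and `sgr 0` resets all attributes (illustrative usage; codes are joined with ';').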
4 | # 5 | # Code reference: 6 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_(Select_Graphic_Rendition)_parameters 7 | # Credit: 8 | # https://superuser.com/a/1119396 9 | sgr() { 10 | local codes=${1:-0} 11 | shift 12 | 13 | for c in "$@"; do 14 | codes="$codes;$c" 15 | done 16 | 17 | echo $'\e'"[${codes}m" 18 | } 19 | 20 | _COLOR_BOLD="$(sgr 1)" 21 | _COLOR_FAINT="$(sgr 2)" 22 | _COLOR_BLACK="$(sgr 30)" 23 | _COLOR_FAILED="$(sgr 31)" 24 | _COLOR_PASSED="$(sgr 32)" 25 | _COLOR_SKIPPED="$(sgr 33)" 26 | _COLOR_INCOMPLETE="$(sgr 36)" 27 | _COLOR_SNAPSHOT="$(sgr 34)" 28 | _COLOR_RETURN_ERROR="$(sgr 41)$_COLOR_BLACK$_COLOR_BOLD" 29 | _COLOR_RETURN_SUCCESS="$(sgr 42)$_COLOR_BLACK$_COLOR_BOLD" 30 | _COLOR_RETURN_SKIPPED="$(sgr 43)$_COLOR_BLACK$_COLOR_BOLD" 31 | _COLOR_RETURN_INCOMPLETE="$(sgr 46)$_COLOR_BLACK$_COLOR_BOLD" 32 | _COLOR_RETURN_SNAPSHOT="$(sgr 44)$_COLOR_BLACK$_COLOR_BOLD" 33 | _COLOR_DEFAULT="$(sgr 0)" 34 | -------------------------------------------------------------------------------- /src/console_header.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function console_header::print_version_with_env() { 4 | local filter=${1:-} 5 | local files=("${@:2}") 6 | 7 | if ! env::is_show_header_enabled; then 8 | return 9 | fi 10 | 11 | console_header::print_version "$filter" "${files[@]}" 12 | } 13 | 14 | function console_header::print_version() { 15 | local filter=${1:-} 16 | if [[ -n "$filter" ]]; then 17 | shift 18 | fi 19 | 20 | local files=("$@") 21 | local total_tests 22 | if [[ ${#files[@]} -eq 0 ]]; then 23 | total_tests=0 24 | else 25 | total_tests=$(helpers::find_total_tests "$filter" "${files[@]}") 26 | fi 27 | 28 | if env::is_header_ascii_art_enabled; then 29 | cat < 64 | Run a core assert function standalone without a test context. 65 | 66 | -e, --env, --boot 67 | Load a custom file, overriding the existing .env variables, or load a file with global functions. 68 | 69 | -f, --filter 70 | Filters the tests to run based on the test name. 71 | 72 | -l, --log-junit 73 | Create a JUnit XML report file that contains information about the test results. 74 | 75 | -p, --parallel || --no-parallel [default] 76 | Run each test in a child process, randomizing the test execution order. 77 | 78 | -r, --report-html 79 | Create an HTML report file that contains information about the test results. 80 | 81 | -s, --simple || --detailed [default] 82 | Enables simple or detailed output to the console. 83 | 84 | -S, --stop-on-failure 85 | Force the runner to stop right after encountering one failing test. 86 | 87 | --debug 88 | Print all executed shell commands to the terminal. 89 | If a file path is passed, the output is redirected to that file. 90 | 91 | -vvv, --verbose 92 | Display internal details for each test. 93 | 94 | --version 95 | Displays the current version of bashunit. 96 | 97 | --upgrade 98 | Upgrade to the latest version of bashunit. 99 | 100 | -h, --help 101 | This message. 
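Examples (illustrative):
  ./bashunit tests
  ./bashunit tests --filter "snapshot"
  ./bashunit -a same "foo" "foo"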
102 | 103 | See more: https://bashunit.typeddevs.com/command-line 104 | EOF 105 | } 106 | -------------------------------------------------------------------------------- /src/console_results.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=SC2155 3 | 4 | _TOTAL_TESTS_COUNT=0 5 | 6 | function console_results::render_result() { 7 | if [[ "$(state::is_duplicated_test_functions_found)" == true ]]; then 8 | console_results::print_execution_time 9 | printf "%s%s%s\n" "${_COLOR_RETURN_ERROR}" "Duplicate test functions found" "${_COLOR_DEFAULT}" 10 | printf "File with duplicate functions: %s\n" "$(state::get_file_with_duplicated_function_names)" 11 | printf "Duplicate functions: %s\n" "$(state::get_duplicated_function_names)" 12 | return 1 13 | fi 14 | 15 | if env::is_simple_output_enabled; then 16 | printf "\n\n" 17 | fi 18 | 19 | local total_tests=0 20 | ((total_tests += $(state::get_tests_passed))) || true 21 | ((total_tests += $(state::get_tests_skipped))) || true 22 | ((total_tests += $(state::get_tests_incomplete))) || true 23 | ((total_tests += $(state::get_tests_snapshot))) || true 24 | ((total_tests += $(state::get_tests_failed))) || true 25 | 26 | local total_assertions=0 27 | ((total_assertions += $(state::get_assertions_passed))) || true 28 | ((total_assertions += $(state::get_assertions_skipped))) || true 29 | ((total_assertions += $(state::get_assertions_incomplete))) || true 30 | ((total_assertions += $(state::get_assertions_snapshot))) || true 31 | ((total_assertions += $(state::get_assertions_failed))) || true 32 | 33 | printf "%sTests: %s" "$_COLOR_FAINT" "$_COLOR_DEFAULT" 34 | if [[ "$(state::get_tests_passed)" -gt 0 ]] || [[ "$(state::get_assertions_passed)" -gt 0 ]]; then 35 | printf " %s%s passed%s," "$_COLOR_PASSED" "$(state::get_tests_passed)" "$_COLOR_DEFAULT" 36 | fi 37 | if [[ "$(state::get_tests_skipped)" -gt 0 ]] || [[ "$(state::get_assertions_skipped)" -gt 0 ]]; then 38 | printf " %s%s skipped%s," "$_COLOR_SKIPPED" "$(state::get_tests_skipped)" "$_COLOR_DEFAULT" 39 | fi 40 | if [[ "$(state::get_tests_incomplete)" -gt 0 ]] || [[ "$(state::get_assertions_incomplete)" -gt 0 ]]; then 41 | printf " %s%s incomplete%s," "$_COLOR_INCOMPLETE" "$(state::get_tests_incomplete)" "$_COLOR_DEFAULT" 42 | fi 43 | if [[ "$(state::get_tests_snapshot)" -gt 0 ]] || [[ "$(state::get_assertions_snapshot)" -gt 0 ]]; then 44 | printf " %s%s snapshot%s," "$_COLOR_SNAPSHOT" "$(state::get_tests_snapshot)" "$_COLOR_DEFAULT" 45 | fi 46 | if [[ "$(state::get_tests_failed)" -gt 0 ]] || [[ "$(state::get_assertions_failed)" -gt 0 ]]; then 47 | printf " %s%s failed%s," "$_COLOR_FAILED" "$(state::get_tests_failed)" "$_COLOR_DEFAULT" 48 | fi 49 | printf " %s total\n" "$total_tests" 50 | 51 | printf "%sAssertions:%s" "$_COLOR_FAINT" "$_COLOR_DEFAULT" 52 | if [[ "$(state::get_tests_passed)" -gt 0 ]] || [[ "$(state::get_assertions_passed)" -gt 0 ]]; then 53 | printf " %s%s passed%s," "$_COLOR_PASSED" "$(state::get_assertions_passed)" "$_COLOR_DEFAULT" 54 | fi 55 | if [[ "$(state::get_tests_skipped)" -gt 0 ]] || [[ "$(state::get_assertions_skipped)" -gt 0 ]]; then 56 | printf " %s%s skipped%s," "$_COLOR_SKIPPED" "$(state::get_assertions_skipped)" "$_COLOR_DEFAULT" 57 | fi 58 | if [[ "$(state::get_tests_incomplete)" -gt 0 ]] || [[ "$(state::get_assertions_incomplete)" -gt 0 ]]; then 59 | printf " %s%s incomplete%s," "$_COLOR_INCOMPLETE" "$(state::get_assertions_incomplete)" "$_COLOR_DEFAULT" 60 | fi 61 | if [[ 
"$(state::get_tests_snapshot)" -gt 0 ]] || [[ "$(state::get_assertions_snapshot)" -gt 0 ]]; then 62 | printf " %s%s snapshot%s," "$_COLOR_SNAPSHOT" "$(state::get_assertions_snapshot)" "$_COLOR_DEFAULT" 63 | fi 64 | if [[ "$(state::get_tests_failed)" -gt 0 ]] || [[ "$(state::get_assertions_failed)" -gt 0 ]]; then 65 | printf " %s%s failed%s," "$_COLOR_FAILED" "$(state::get_assertions_failed)" "$_COLOR_DEFAULT" 66 | fi 67 | printf " %s total\n" "$total_assertions" 68 | 69 | if [[ "$(state::get_tests_failed)" -gt 0 ]]; then 70 | printf "\n%s%s%s\n" "$_COLOR_RETURN_ERROR" " Some tests failed " "$_COLOR_DEFAULT" 71 | console_results::print_execution_time 72 | return 1 73 | fi 74 | 75 | if [[ "$(state::get_tests_incomplete)" -gt 0 ]]; then 76 | printf "\n%s%s%s\n" "$_COLOR_RETURN_INCOMPLETE" " Some tests incomplete " "$_COLOR_DEFAULT" 77 | console_results::print_execution_time 78 | return 0 79 | fi 80 | 81 | if [[ "$(state::get_tests_skipped)" -gt 0 ]]; then 82 | printf "\n%s%s%s\n" "$_COLOR_RETURN_SKIPPED" " Some tests skipped " "$_COLOR_DEFAULT" 83 | console_results::print_execution_time 84 | return 0 85 | fi 86 | 87 | if [[ "$(state::get_tests_snapshot)" -gt 0 ]]; then 88 | printf "\n%s%s%s\n" "$_COLOR_RETURN_SNAPSHOT" " Some snapshots created " "$_COLOR_DEFAULT" 89 | console_results::print_execution_time 90 | return 0 91 | fi 92 | 93 | if [[ $total_tests -eq 0 ]]; then 94 | printf "\n%s%s%s\n" "$_COLOR_RETURN_ERROR" " No tests found " "$_COLOR_DEFAULT" 95 | console_results::print_execution_time 96 | return 1 97 | fi 98 | 99 | printf "\n%s%s%s\n" "$_COLOR_RETURN_SUCCESS" " All tests passed " "$_COLOR_DEFAULT" 100 | console_results::print_execution_time 101 | return 0 102 | } 103 | 104 | function console_results::print_execution_time() { 105 | if ! env::is_show_execution_time_enabled; then 106 | return 107 | fi 108 | 109 | local time=$(clock::total_runtime_in_milliseconds | awk '{printf "%.0f", $1}') 110 | 111 | if [[ "$time" -lt 1000 ]]; then 112 | printf "${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" \ 113 | "Time taken: $time ms" 114 | return 115 | fi 116 | 117 | local time_in_seconds=$(( time / 1000 )) 118 | local remainder_ms=$(( time % 1000 )) 119 | local formatted_seconds=$(echo "$time_in_seconds.$remainder_ms" | awk '{printf "%.0f", $1}') 120 | 121 | printf "${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" \ 122 | "Time taken: $formatted_seconds s" 123 | } 124 | 125 | function console_results::print_successful_test() { 126 | local test_name=$1 127 | shift 128 | local duration=${1:-"0"} 129 | shift 130 | 131 | local line 132 | if [[ -z "$*" ]]; then 133 | line=$(printf "%s✓ Passed%s: %s" "$_COLOR_PASSED" "$_COLOR_DEFAULT" "$test_name") 134 | else 135 | line=$(printf "%s✓ Passed%s: %s (%s)" "$_COLOR_PASSED" "$_COLOR_DEFAULT" "$test_name" "$*") 136 | fi 137 | 138 | local full_line=$line 139 | if env::is_show_execution_time_enabled; then 140 | full_line="$(printf "%s\n" "$(str::rpad "$line" "$duration ms")")" 141 | fi 142 | 143 | state::print_line "successful" "$full_line" 144 | } 145 | 146 | function console_results::print_failure_message() { 147 | local test_name=$1 148 | local failure_message=$2 149 | 150 | local line 151 | line="$(printf "\ 152 | ${_COLOR_FAILED}✗ Failed${_COLOR_DEFAULT}: %s 153 | ${_COLOR_FAINT}Message:${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}\n"\ 154 | "${test_name}" "${failure_message}")" 155 | 156 | state::print_line "failure" "$line" 157 | } 158 | 159 | function console_results::print_failed_test() { 160 | local function_name=$1 161 | local expected=$2 162 | local 
failure_condition_message=$3 163 | local actual=$4 164 | local extra_key=${5-} 165 | local extra_value=${6-} 166 | 167 | local line 168 | line="$(printf "\ 169 | ${_COLOR_FAILED}✗ Failed${_COLOR_DEFAULT}: %s 170 | ${_COLOR_FAINT}Expected${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT} 171 | ${_COLOR_FAINT}%s${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}\n" \ 172 | "${function_name}" "${expected}" "${failure_condition_message}" "${actual}")" 173 | 174 | if [ -n "$extra_key" ]; then 175 | line+="$(printf "\ 176 | 177 | ${_COLOR_FAINT}%s${_COLOR_DEFAULT} ${_COLOR_BOLD}'%s'${_COLOR_DEFAULT}\n" \ 178 | "${extra_key}" "${extra_value}")" 179 | fi 180 | 181 | state::print_line "failed" "$line" 182 | } 183 | 184 | 185 | function console_results::print_failed_snapshot_test() { 186 | local function_name=$1 187 | local snapshot_file=$2 188 | 189 | local line 190 | line="$(printf "${_COLOR_FAILED}✗ Failed${_COLOR_DEFAULT}: %s 191 | ${_COLOR_FAINT}Expected to match the snapshot${_COLOR_DEFAULT}\n" "$function_name")" 192 | 193 | if dependencies::has_git; then 194 | local actual_file="${snapshot_file}.tmp" 195 | echo "$actual" > "$actual_file" 196 | 197 | local git_diff_output 198 | git_diff_output="$(git diff --no-index --word-diff --color=always \ 199 | "$snapshot_file" "$actual_file" 2>/dev/null \ 200 | | tail -n +6 | sed "s/^/ /")" 201 | 202 | line+="$git_diff_output" 203 | rm "$actual_file" 204 | fi 205 | 206 | state::print_line "failed_snapshot" "$line" 207 | } 208 | 209 | function console_results::print_skipped_test() { 210 | local function_name=$1 211 | local reason=${2-} 212 | 213 | local line 214 | line="$(printf "${_COLOR_SKIPPED}↷ Skipped${_COLOR_DEFAULT}: %s\n" "${function_name}")" 215 | 216 | if [[ -n "$reason" ]]; then 217 | line+="$(printf "${_COLOR_FAINT} %s${_COLOR_DEFAULT}\n" "${reason}")" 218 | fi 219 | 220 | state::print_line "skipped" "$line" 221 | } 222 | 223 | function console_results::print_incomplete_test() { 224 | local function_name=$1 225 | local pending=${2-} 226 | 227 | local line 228 | line="$(printf "${_COLOR_INCOMPLETE}✒ Incomplete${_COLOR_DEFAULT}: %s\n" "${function_name}")" 229 | 230 | if [[ -n "$pending" ]]; then 231 | line+="$(printf "${_COLOR_FAINT} %s${_COLOR_DEFAULT}\n" "${pending}")" 232 | fi 233 | 234 | state::print_line "incomplete" "$line" 235 | } 236 | 237 | function console_results::print_snapshot_test() { 238 | local function_name=$1 239 | local test_name 240 | test_name=$(helper::normalize_test_function_name "$function_name") 241 | 242 | local line 243 | line="$(printf "${_COLOR_SNAPSHOT}✎ Snapshot${_COLOR_DEFAULT}: %s\n" "${test_name}")" 244 | 245 | state::print_line "snapshot" "$line" 246 | } 247 | 248 | function console_results::print_error_test() { 249 | local function_name=$1 250 | local error="$2" 251 | 252 | local test_name 253 | test_name=$(helper::normalize_test_function_name "$function_name") 254 | 255 | local line 256 | line="$(printf "${_COLOR_FAILED}✗ Error${_COLOR_DEFAULT}: %s 257 | ${_COLOR_FAINT}%s${_COLOR_DEFAULT}\n" "${test_name}" "${error}")" 258 | 259 | state::print_line "error" "$line" 260 | } 261 | 262 | function console_results::print_failing_tests_and_reset() { 263 | if [[ -s "$FAILURES_OUTPUT_PATH" ]]; then 264 | local total_failed 265 | total_failed=$(state::get_tests_failed) 266 | 267 | if env::is_simple_output_enabled; then 268 | printf "\n\n" 269 | fi 270 | 271 | if [[ "$total_failed" -eq 1 ]]; then 272 | echo -e "${_COLOR_BOLD}There was 1 failure:${_COLOR_DEFAULT}\n" 273 | else 274 | echo -e "${_COLOR_BOLD}There 
were $total_failed failures:${_COLOR_DEFAULT}\n" 275 | fi 276 | 277 | sed '${/^$/d;}' "$FAILURES_OUTPUT_PATH" | sed 's/^/|/' 278 | rm "$FAILURES_OUTPUT_PATH" 279 | 280 | echo "" 281 | fi 282 | } 283 | -------------------------------------------------------------------------------- /src/dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | function dependencies::has_perl() { 5 | command -v perl >/dev/null 2>&1 6 | } 7 | 8 | function dependencies::has_powershell() { 9 | command -v powershell > /dev/null 2>&1 10 | } 11 | 12 | function dependencies::has_adjtimex() { 13 | command -v adjtimex >/dev/null 2>&1 14 | } 15 | 16 | function dependencies::has_bc() { 17 | command -v bc >/dev/null 2>&1 18 | } 19 | 20 | function dependencies::has_awk() { 21 | command -v awk >/dev/null 2>&1 22 | } 23 | 24 | function dependencies::has_git() { 25 | command -v git >/dev/null 2>&1 26 | } 27 | 28 | function dependencies::has_curl() { 29 | command -v curl >/dev/null 2>&1 30 | } 31 | 32 | function dependencies::has_wget() { 33 | command -v wget >/dev/null 2>&1 34 | } 35 | 36 | function dependencies::has_python() { 37 | command -v python >/dev/null 2>&1 38 | } 39 | 40 | function dependencies::has_node() { 41 | command -v node >/dev/null 2>&1 42 | } 43 | -------------------------------------------------------------------------------- /src/dev/debug.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # An alternative to echo when debugging. 4 | # This is a debug function; do not use it in prod! 5 | function dump() { 6 | printf "[%s] %s: %s\n" "${_COLOR_SKIPPED}DUMP${_COLOR_DEFAULT}" \ 7 | "${_COLOR_PASSED}${BASH_SOURCE[1]}:${BASH_LINENO[0]}" \ 8 | "${_COLOR_DEFAULT}$*" 9 | } 10 | 11 | # Dump and Die. 
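# Usage sketch (editorial example; `my_var` is a hypothetical variable):
#   dump "my_var:" "$my_var"   # print a labelled value and keep running
#   dd "my_var:" "$my_var"     # print the value, then kill the current run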
12 | function dd() { 13 | printf "[%s] %s: %s\n" "${_COLOR_FAILED}DUMP${_COLOR_DEFAULT}" \ 14 | "${_COLOR_PASSED}${BASH_SOURCE[1]}:${BASH_LINENO[0]}" \ 15 | "${_COLOR_DEFAULT}$*" 16 | 17 | kill -9 $$ 18 | } 19 | -------------------------------------------------------------------------------- /src/env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # shellcheck disable=SC2034 4 | 5 | set -o allexport 6 | # shellcheck source=/dev/null 7 | [[ -f ".env" ]] && source .env set 8 | set +o allexport 9 | 10 | _DEFAULT_DEFAULT_PATH="tests" 11 | _DEFAULT_BOOTSTRAP="tests/bootstrap.sh" 12 | _DEFAULT_DEV_LOG="" 13 | _DEFAULT_LOG_JUNIT="" 14 | _DEFAULT_REPORT_HTML="" 15 | 16 | : "${BASHUNIT_DEFAULT_PATH:=${DEFAULT_PATH:=$_DEFAULT_DEFAULT_PATH}}" 17 | : "${BASHUNIT_DEV_LOG:=${DEV_LOG:=$_DEFAULT_DEV_LOG}}" 18 | : "${BASHUNIT_BOOTSTRAP:=${BOOTSTRAP:=$_DEFAULT_BOOTSTRAP}}" 19 | : "${BASHUNIT_LOG_JUNIT:=${LOG_JUNIT:=$_DEFAULT_LOG_JUNIT}}" 20 | : "${BASHUNIT_REPORT_HTML:=${REPORT_HTML:=$_DEFAULT_REPORT_HTML}}" 21 | 22 | # Booleans 23 | _DEFAULT_PARALLEL_RUN="false" 24 | _DEFAULT_SHOW_HEADER="true" 25 | _DEFAULT_HEADER_ASCII_ART="false" 26 | _DEFAULT_SIMPLE_OUTPUT="false" 27 | _DEFAULT_STOP_ON_FAILURE="false" 28 | _DEFAULT_SHOW_EXECUTION_TIME="true" 29 | _DEFAULT_VERBOSE="false" 30 | _DEFAULT_BENCH_MODE="false" 31 | 32 | : "${BASHUNIT_PARALLEL_RUN:=${PARALLEL_RUN:=$_DEFAULT_PARALLEL_RUN}}" 33 | : "${BASHUNIT_SHOW_HEADER:=${SHOW_HEADER:=$_DEFAULT_SHOW_HEADER}}" 34 | : "${BASHUNIT_HEADER_ASCII_ART:=${HEADER_ASCII_ART:=$_DEFAULT_HEADER_ASCII_ART}}" 35 | : "${BASHUNIT_SIMPLE_OUTPUT:=${SIMPLE_OUTPUT:=$_DEFAULT_SIMPLE_OUTPUT}}" 36 | : "${BASHUNIT_STOP_ON_FAILURE:=${STOP_ON_FAILURE:=$_DEFAULT_STOP_ON_FAILURE}}" 37 | : "${BASHUNIT_SHOW_EXECUTION_TIME:=${SHOW_EXECUTION_TIME:=$_DEFAULT_SHOW_EXECUTION_TIME}}" 38 | : "${BASHUNIT_VERBOSE:=${VERBOSE:=$_DEFAULT_VERBOSE}}" 39 | : "${BASHUNIT_BENCH_MODE:=${BENCH_MODE:=$_DEFAULT_BENCH_MODE}}" 40 | 41 | function env::is_parallel_run_enabled() { 42 | [[ "$BASHUNIT_PARALLEL_RUN" == "true" ]] 43 | } 44 | 45 | function env::is_show_header_enabled() { 46 | [[ "$BASHUNIT_SHOW_HEADER" == "true" ]] 47 | } 48 | 49 | function env::is_header_ascii_art_enabled() { 50 | [[ "$BASHUNIT_HEADER_ASCII_ART" == "true" ]] 51 | } 52 | 53 | function env::is_simple_output_enabled() { 54 | [[ "$BASHUNIT_SIMPLE_OUTPUT" == "true" ]] 55 | } 56 | 57 | function env::is_stop_on_failure_enabled() { 58 | [[ "$BASHUNIT_STOP_ON_FAILURE" == "true" ]] 59 | } 60 | 61 | function env::is_show_execution_time_enabled() { 62 | [[ "$BASHUNIT_SHOW_EXECUTION_TIME" == "true" ]] 63 | } 64 | 65 | function env::is_dev_mode_enabled() { 66 | [[ -n "$BASHUNIT_DEV_LOG" ]] 67 | } 68 | 69 | function env::is_verbose_enabled() { 70 | [[ "$BASHUNIT_VERBOSE" == "true" ]] 71 | } 72 | 73 | function env::is_bench_mode_enabled() { 74 | [[ "$BASHUNIT_BENCH_MODE" == "true" ]] 75 | } 76 | 77 | function env::active_internet_connection() { 78 | if ping -c 1 -W 3 google.com &> /dev/null; then 79 | return 0 80 | fi 81 | 82 | return 1 83 | } 84 | 85 | function env::find_terminal_width() { 86 | local cols="" 87 | 88 | if [[ -n "$TERM" ]] && command -v tput > /dev/null; then 89 | cols=$(tput cols 2>/dev/null) 90 | fi 91 | if [[ -z "$cols" ]] && command -v stty > /dev/null; then 92 | cols=$(stty size 2>/dev/null | cut -d' ' -f2) 93 | fi 94 | 95 | # Directly echo the value with fallback 96 | echo "${cols:-100}" 97 | } 98 | 99 | function env::print_verbose() { 100 | local keys=( 101 | 
"BASHUNIT_DEFAULT_PATH" 102 | "BASHUNIT_DEV_LOG" 103 | "BASHUNIT_BOOTSTRAP" 104 | "BASHUNIT_LOG_JUNIT" 105 | "BASHUNIT_REPORT_HTML" 106 | "BASHUNIT_PARALLEL_RUN" 107 | "BASHUNIT_SHOW_HEADER" 108 | "BASHUNIT_HEADER_ASCII_ART" 109 | "BASHUNIT_SIMPLE_OUTPUT" 110 | "BASHUNIT_STOP_ON_FAILURE" 111 | "BASHUNIT_SHOW_EXECUTION_TIME" 112 | "BASHUNIT_VERBOSE" 113 | ) 114 | 115 | local max_length=0 116 | 117 | for key in "${keys[@]}"; do 118 | if (( ${#key} > max_length )); then 119 | max_length=${#key} 120 | fi 121 | done 122 | 123 | for key in "${keys[@]}"; do 124 | printf "%s:%*s%s\n" "$key" $((max_length - ${#key} + 1)) "" "${!key}" 125 | done 126 | } 127 | 128 | EXIT_CODE_STOP_ON_FAILURE=4 129 | # Use a unique directory per run to avoid conflicts when bashunit is invoked 130 | # recursively or multiple instances are executed in parallel. 131 | TEMP_DIR_PARALLEL_TEST_SUITE="/tmp/bashunit/parallel/${_OS:-Unknown}/$(random_str 8)" 132 | TEMP_FILE_PARALLEL_STOP_ON_FAILURE="$TEMP_DIR_PARALLEL_TEST_SUITE/.stop-on-failure" 133 | TERMINAL_WIDTH="$(env::find_terminal_width)" 134 | FAILURES_OUTPUT_PATH=$(mktemp) 135 | CAT="$(which cat)" 136 | -------------------------------------------------------------------------------- /src/globals.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # This file provides a set of global functions to developers. 5 | 6 | function current_dir() { 7 | dirname "${BASH_SOURCE[1]}" 8 | } 9 | 10 | function current_filename() { 11 | basename "${BASH_SOURCE[1]}" 12 | } 13 | 14 | function caller_filename() { 15 | dirname "${BASH_SOURCE[2]}" 16 | } 17 | 18 | function caller_line() { 19 | echo "${BASH_LINENO[1]}" 20 | } 21 | 22 | function current_timestamp() { 23 | date +"%Y-%m-%d %H:%M:%S" 24 | } 25 | 26 | function is_command_available() { 27 | command -v "$1" >/dev/null 2>&1 28 | } 29 | 30 | function random_str() { 31 | local length=${1:-6} 32 | local chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' 33 | local str='' 34 | for (( i=0; i> "$BASHUNIT_DEV_LOG" 89 | } 90 | 91 | function print_line() { 92 | local length="${1:-70}" # Default to 70 if not passed 93 | local char="${2:--}" # Default to '-' if not passed 94 | printf '%*s\n' "$length" '' | tr ' ' "$char" 95 | } 96 | -------------------------------------------------------------------------------- /src/helpers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | declare -r BASHUNIT_GIT_REPO="https://github.com/TypedDevs/bashunit" 4 | 5 | # 6 | # @param $1 string Eg: "test_some_logic_camelCase" 7 | # 8 | # @return string Eg: "Some logic camelCase" 9 | # 10 | function helper::normalize_test_function_name() { 11 | local original_fn_name="${1-}" 12 | local interpolated_fn_name="${2-}" 13 | 14 | if [[ -n "${interpolated_fn_name-}" ]]; then 15 | original_fn_name="$interpolated_fn_name" 16 | fi 17 | 18 | local result 19 | 20 | # Remove the first "test_" prefix, if present 21 | result="${original_fn_name#test_}" 22 | # If no "test_" was removed (e.g., "testFoo"), remove the "test" prefix 23 | if [[ "$result" == "$original_fn_name" ]]; then 24 | result="${original_fn_name#test}" 25 | fi 26 | # Replace underscores with spaces 27 | result="${result//_/ }" 28 | # Capitalize the first letter 29 | result="$(tr '[:lower:]' '[:upper:]' <<< "${result:0:1}")${result:1}" 30 | 31 | echo "$result" 32 | } 33 | 34 | function helper::escape_single_quotes() { 35 | local value="$1" 36 
| # shellcheck disable=SC1003 37 | echo "${value//\'/'\'\\''\'}" 38 | } 39 | 40 | function helper::interpolate_function_name() { 41 | local function_name="$1" 42 | shift 43 | local args=("$@") 44 | local result="$function_name" 45 | 46 | for ((i=0; i<${#args[@]}; i++)); do 47 | local placeholder="::$((i+1))::" 48 | # shellcheck disable=SC2155 49 | local value="$(helper::escape_single_quotes "${args[$i]}")" 50 | value="'$value'" 51 | result="${result//${placeholder}/${value}}" 52 | done 53 | 54 | echo "$result" 55 | } 56 | 57 | function helper::check_duplicate_functions() { 58 | local script="$1" 59 | 60 | local filtered_lines 61 | filtered_lines=$(grep -E '^[[:space:]]*(function[[:space:]]+)?test[a-zA-Z_][a-zA-Z0-9_]*\s*\(\)\s*\{' "$script") 62 | 63 | local function_names 64 | function_names=$(echo "$filtered_lines" | awk '{ 65 | for (i=1; i<=NF; i++) { 66 | if ($i ~ /^test[a-zA-Z_][a-zA-Z0-9_]*\(\)$/) { 67 | gsub(/\(\)/, "", $i) 68 | print $i 69 | break 70 | } 71 | } 72 | }') 73 | 74 | local duplicates 75 | duplicates=$(echo "$function_names" | sort | uniq -d) 76 | if [ -n "$duplicates" ]; then 77 | state::set_duplicated_functions_merged "$script" "$duplicates" 78 | return 1 79 | fi 80 | return 0 81 | } 82 | 83 | # 84 | # @param $1 string Eg: "prefix" 85 | # @param $2 string Eg: "filter" 86 | # @param $3 array Eg: "[fn1, fn2, prefix_filter_fn3, fn4, ...]" 87 | # 88 | # @return array Eg: "[prefix_filter_fn3, ...]" The filtered functions with prefix 89 | # 90 | function helper::get_functions_to_run() { 91 | local prefix=$1 92 | local filter=${2/test_/} 93 | local function_names=$3 94 | 95 | local filtered_functions="" 96 | 97 | for fn in $function_names; do 98 | if [[ $fn == ${prefix}_*${filter}* ]]; then 99 | if [[ $filtered_functions == *" $fn"* ]]; then 100 | return 1 101 | fi 102 | filtered_functions+=" $fn" 103 | fi 104 | done 105 | 106 | echo "${filtered_functions# }" 107 | } 108 | 109 | # 110 | # @param $1 string Eg: "do_something" 111 | # 112 | function helper::execute_function_if_exists() { 113 | if [[ "$(type -t "$1")" == "function" ]]; then 114 | "$1" 2>/dev/null 115 | fi 116 | } 117 | 118 | # 119 | # @param $1 string Eg: "do_something" 120 | # 121 | function helper::unset_if_exists() { 122 | unset "$1" 2>/dev/null 123 | } 124 | 125 | function helper::find_files_recursive() { 126 | ## Remove trailing slash using parameter expansion 127 | local path="${1%%/}" 128 | local pattern="${2:-*[tT]est.sh}" 129 | 130 | if [[ "$path" == *"*"* ]]; then 131 | eval find "$path" -type f -name "$pattern" | sort -u 132 | elif [[ -d "$path" ]]; then 133 | find "$path" -type f -name "$pattern" | sort -u 134 | else 135 | echo "$path" 136 | fi 137 | } 138 | 139 | function helper::normalize_variable_name() { 140 | local input_string="$1" 141 | local normalized_string 142 | 143 | normalized_string="${input_string//[^a-zA-Z0-9_]/_}" 144 | 145 | if [[ ! $normalized_string =~ ^[a-zA-Z_] ]]; then 146 | normalized_string="_$normalized_string" 147 | fi 148 | 149 | echo "$normalized_string" 150 | } 151 | 152 | function helper::get_provider_data() { 153 | local function_name="$1" 154 | local script="$2" 155 | 156 | if [[ ! 
-f "$script" ]]; then 157 | return 158 | fi 159 | 160 | local data_provider_function 161 | data_provider_function=$( 162 | # shellcheck disable=SC1087 163 | grep -B 2 -E "function[[:space:]]+$function_name[[:space:]]*\(\)" "$script" 2>/dev/null | \ 164 | grep -E "^[[:space:]]*# *@?data_provider[[:space:]]+" | \ 165 | sed -E 's/^[[:space:]]*# *@?data_provider[[:space:]]+//' || true 166 | ) 167 | 168 | if [[ -n "$data_provider_function" ]]; then 169 | helper::execute_function_if_exists "$data_provider_function" 170 | fi 171 | } 172 | 173 | function helper::trim() { 174 | local input_string="$1" 175 | local trimmed_string 176 | 177 | trimmed_string="${input_string#"${input_string%%[![:space:]]*}"}" 178 | trimmed_string="${trimmed_string%"${trimmed_string##*[![:space:]]}"}" 179 | 180 | echo "$trimmed_string" 181 | } 182 | 183 | function helpers::get_latest_tag() { 184 | git ls-remote --tags "$BASHUNIT_GIT_REPO" | 185 | awk '{print $2}' | 186 | sed 's|^refs/tags/||' | 187 | sort -Vr | 188 | head -n 1 189 | } 190 | 191 | function helpers::find_total_tests() { 192 | local filter=${1:-} 193 | local files=("${@:2}") 194 | 195 | if [[ ${#files[@]} -eq 0 ]]; then 196 | echo 0 197 | return 198 | fi 199 | 200 | local total_count=0 201 | local file 202 | 203 | for file in "${files[@]}"; do 204 | if [[ ! -f "$file" ]]; then 205 | continue 206 | fi 207 | 208 | local file_count 209 | file_count=$( ( 210 | # shellcheck source=/dev/null 211 | source "$file" 212 | local all_fn_names 213 | all_fn_names=$(declare -F | awk '{print $3}') 214 | local filtered_functions 215 | filtered_functions=$(helper::get_functions_to_run "test" "$filter" "$all_fn_names") || true 216 | 217 | local count=0 218 | if [[ -n "$filtered_functions" ]]; then 219 | # shellcheck disable=SC2206 220 | # shellcheck disable=SC2207 221 | local functions_to_run=($filtered_functions) 222 | for fn_name in "${functions_to_run[@]}"; do 223 | local provider_data=() 224 | while IFS=" " read -r line; do 225 | provider_data+=("$line") 226 | done <<< "$(helper::get_provider_data "$fn_name" "$file")" 227 | 228 | if [[ "${#provider_data[@]}" -eq 0 ]]; then 229 | count=$((count + 1)) 230 | else 231 | count=$((count + ${#provider_data[@]})) 232 | fi 233 | done 234 | fi 235 | 236 | echo "$count" 237 | ) ) 238 | 239 | total_count=$((total_count + file_count)) 240 | done 241 | 242 | echo "$total_count" 243 | } 244 | 245 | function helper::load_test_files() { 246 | local filter=$1 247 | local files=("${@:2}") 248 | 249 | local test_files=() 250 | 251 | if [[ "${#files[@]}" -eq 0 ]]; then 252 | if [[ -n "${BASHUNIT_DEFAULT_PATH}" ]]; then 253 | while IFS='' read -r line; do 254 | test_files+=("$line") 255 | done < <(helper::find_files_recursive "$BASHUNIT_DEFAULT_PATH") 256 | fi 257 | else 258 | test_files=("${files[@]}") 259 | fi 260 | 261 | printf "%s\n" "${test_files[@]}" 262 | } 263 | 264 | function helper::load_bench_files() { 265 | local filter=$1 266 | local files=("${@:2}") 267 | 268 | local bench_files=() 269 | 270 | if [[ "${#files[@]}" -eq 0 ]]; then 271 | if [[ -n "${BASHUNIT_DEFAULT_PATH}" ]]; then 272 | while IFS='' read -r line; do 273 | bench_files+=("$line") 274 | done < <(helper::find_files_recursive "$BASHUNIT_DEFAULT_PATH" '*[bB]ench.sh') 275 | fi 276 | else 277 | bench_files=("${files[@]}") 278 | fi 279 | 280 | printf "%s\n" "${bench_files[@]}" 281 | } 282 | -------------------------------------------------------------------------------- /src/io.sh: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env bash 2 | 3 | function io::download_to() { 4 | local url="$1" 5 | local output="$2" 6 | if dependencies::has_curl; then 7 | curl -L -J -o "$output" "$url" 2>/dev/null 8 | elif dependencies::has_wget; then 9 | wget -q -O "$output" "$url" 2>/dev/null 10 | else 11 | return 1 12 | fi 13 | } 14 | -------------------------------------------------------------------------------- /src/main.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function main::exec_tests() { 4 | local filter=$1 5 | local files=("${@:2}") 6 | 7 | local test_files=() 8 | while IFS= read -r line; do 9 | test_files+=("$line") 10 | done < <(helper::load_test_files "$filter" "${files[@]}") 11 | 12 | if [[ ${#test_files[@]} -eq 0 || -z "${test_files[0]}" ]]; then 13 | printf "%sError: At least one file path is required.%s\n" "${_COLOR_FAILED}" "${_COLOR_DEFAULT}" 14 | console_header::print_help 15 | exit 1 16 | fi 17 | 18 | # Trap SIGINT (Ctrl-C) and call the cleanup function 19 | trap 'main::cleanup' SIGINT 20 | trap '[[ $? -eq $EXIT_CODE_STOP_ON_FAILURE ]] && main::handle_stop_on_failure_sync' EXIT 21 | 22 | if env::is_parallel_run_enabled && ! parallel::is_enabled; then 23 | printf "%sWarning: Parallel tests are supported on macOS, Ubuntu and Windows.\n" "${_COLOR_INCOMPLETE}" 24 | printf "For other OS (like Alpine), --parallel is not enabled due to inconsistent results,\n" 25 | printf "particularly involving race conditions.%s " "${_COLOR_DEFAULT}" 26 | printf "%sFallback using --no-parallel%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}" 27 | fi 28 | 29 | if parallel::is_enabled; then 30 | parallel::reset 31 | fi 32 | 33 | console_header::print_version_with_env "$filter" "${test_files[@]}" 34 | 35 | if env::is_verbose_enabled; then 36 | if env::is_simple_output_enabled; then 37 | echo "" 38 | fi 39 | printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '#' 40 | printf "%s\n" "Filter: ${filter:-None}" 41 | printf "%s\n" "Total files: ${#test_files[@]}" 42 | printf "%s\n" "Test files:" 43 | printf -- "- %s\n" "${test_files[@]}" 44 | printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '.' 45 | env::print_verbose 46 | printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '#' 47 | fi 48 | 49 | runner::load_test_files "$filter" "${test_files[@]}" 50 | 51 | if parallel::is_enabled; then 52 | wait 53 | fi 54 | 55 | if parallel::is_enabled && parallel::must_stop_on_failure; then 56 | printf "\r%sStop on failure enabled...%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}" 57 | fi 58 | 59 | console_results::print_failing_tests_and_reset 60 | console_results::render_result 61 | exit_code=$? 
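  # render_result returns non-zero when tests failed; capture the status here so the JUnit/HTML reports below are still generated before the final exit.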
62 | 63 | if [[ -n "$BASHUNIT_LOG_JUNIT" ]]; then 64 | reports::generate_junit_xml "$BASHUNIT_LOG_JUNIT" 65 | fi 66 | 67 | if [[ -n "$BASHUNIT_REPORT_HTML" ]]; then 68 | reports::generate_report_html "$BASHUNIT_REPORT_HTML" 69 | fi 70 | 71 | cleanup_temp_files 72 | exit $exit_code 73 | } 74 | 75 | function main::exec_benchmarks() { 76 | local filter=$1 77 | local files=("${@:2}") 78 | 79 | local bench_files=() 80 | while IFS= read -r line; do 81 | bench_files+=("$line") 82 | done < <(helper::load_bench_files "$filter" "${files[@]}") 83 | 84 | if [[ ${#bench_files[@]} -eq 0 || -z "${bench_files[0]}" ]]; then 85 | printf "%sError: At least one file path is required.%s\n" "${_COLOR_FAILED}" "${_COLOR_DEFAULT}" 86 | console_header::print_help 87 | exit 1 88 | fi 89 | 90 | console_header::print_version_with_env "$filter" "${bench_files[@]}" 91 | 92 | runner::load_bench_files "$filter" "${bench_files[@]}" 93 | 94 | benchmark::print_results 95 | 96 | cleanup_temp_files 97 | } 98 | 99 | function main::cleanup() { 100 | printf "%sCaught Ctrl-C, killing all child processes...%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}" 101 | # Kill all child processes of this script 102 | pkill -P $$ 103 | cleanup_temp_files 104 | exit 1 105 | } 106 | 107 | function main::handle_stop_on_failure_sync() { 108 | printf "\n%sStop on failure enabled...%s\n" "${_COLOR_SKIPPED}" "${_COLOR_DEFAULT}" 109 | console_results::print_failing_tests_and_reset 110 | console_results::render_result 111 | cleanup_temp_files 112 | exit 1 113 | } 114 | 115 | function main::exec_assert() { 116 | local original_assert_fn=$1 117 | local args=("${@:2}") 118 | 119 | local assert_fn=$original_assert_fn 120 | 121 | # Check if the function exists 122 | if ! type "$assert_fn" > /dev/null 2>&1; then 123 | assert_fn="assert_$assert_fn" 124 | if ! type "$assert_fn" > /dev/null 2>&1; then 125 | echo "Function $original_assert_fn does not exist." 1>&2 126 | exit 127 127 | fi 128 | fi 129 | 130 | # Get the last argument safely by calculating the array length 131 | local last_index=$((${#args[@]} - 1)) 132 | local last_arg="${args[$last_index]}" 133 | local output="" 134 | local inner_exit_code=0 135 | local bashunit_exit_code=0 136 | 137 | # Handle different assert_* functions 138 | case "$assert_fn" in 139 | assert_exit_code) 140 | output=$(main::handle_assert_exit_code "$last_arg") 141 | inner_exit_code=$? 142 | # Remove the last argument and append the exit code 143 | args=("${args[@]:0:last_index}") 144 | args+=("$inner_exit_code") 145 | ;; 146 | *) 147 | # Add more cases here for other assert_* handlers if needed 148 | ;; 149 | esac 150 | 151 | if [[ -n "$output" ]]; then 152 | echo "$output" 1>&1 153 | assert_fn="assert_same" 154 | fi 155 | 156 | # Run the assertion function and write into stderr 157 | "$assert_fn" "${args[@]}" 1>&2 158 | bashunit_exit_code=$? 159 | 160 | if [[ "$(state::get_tests_failed)" -gt 0 ]] || [[ "$(state::get_assertions_failed)" -gt 0 ]]; then 161 | return 1 162 | fi 163 | 164 | return "$bashunit_exit_code" 165 | } 166 | 167 | function main::handle_assert_exit_code() { 168 | local cmd="$1" 169 | local output 170 | local inner_exit_code=0 171 | 172 | if [[ $(command -v "${cmd%% *}") ]]; then 173 | output=$(eval "$cmd" 2>&1 || echo "inner_exit_code:$?") 174 | local last_line 175 | last_line=$(echo "$output" | tail -n 1) 176 | if echo "$last_line" | grep -q 'inner_exit_code:[0-9]*'; then 177 | inner_exit_code=$(echo "$last_line" | grep -o 'inner_exit_code:[0-9]*' | cut -d':' -f2) 178 | if ! 
/src/math.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

if dependencies::has_bc; then
  # bc is preferred over awk because it performs arbitrary-precision
  # arithmetic and therefore has no integer limits.
  function math::calculate() {
    echo "$*" | bc
  }
elif dependencies::has_awk; then
  function math::calculate() {
    awk "BEGIN { print ""$*"" }"
  }
fi
--------------------------------------------------------------------------------
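A quick illustration of why the bc branch comes first (illustrative; run with both tools installed):

# bc works with arbitrary precision and prints the exact 40-digit product:
echo "99999999999999999999 * 99999999999999999999" | bc
# -> 9999999999999999999800000000000000000001

# awk uses IEEE-754 doubles, so the same product is rounded:
awk "BEGIN { print 99999999999999999999 * 99999999999999999999 }"
# -> 1e+40  (the low-order digits are lost)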
"$TEMP_FILE_PARALLEL_STOP_ON_FAILURE" ]] 89 | } 90 | 91 | function parallel::reset() { 92 | # shellcheck disable=SC2153 93 | rm -rf "$TEMP_DIR_PARALLEL_TEST_SUITE" 94 | mkdir -p "$TEMP_DIR_PARALLEL_TEST_SUITE" 95 | [ -f "$TEMP_FILE_PARALLEL_STOP_ON_FAILURE" ] && rm "$TEMP_FILE_PARALLEL_STOP_ON_FAILURE" 96 | } 97 | 98 | function parallel::is_enabled() { 99 | if env::is_parallel_run_enabled && \ 100 | (check_os::is_macos || check_os::is_ubuntu || check_os::is_windows); then 101 | return 0 102 | fi 103 | return 1 104 | } 105 | -------------------------------------------------------------------------------- /src/reports.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=SC2155 3 | 4 | _REPORTS_TEST_FILES=() 5 | _REPORTS_TEST_NAMES=() 6 | _REPORTS_TEST_STATUSES=() 7 | _REPORTS_TEST_DURATIONS=() 8 | _REPORTS_TEST_ASSERTIONS=() 9 | 10 | function reports::add_test_snapshot() { 11 | reports::add_test "$1" "$2" "$3" "$4" "snapshot" 12 | } 13 | 14 | function reports::add_test_incomplete() { 15 | reports::add_test "$1" "$2" "$3" "$4" "incomplete" 16 | } 17 | 18 | function reports::add_test_skipped() { 19 | reports::add_test "$1" "$2" "$3" "$4" "skipped" 20 | } 21 | 22 | function reports::add_test_passed() { 23 | reports::add_test "$1" "$2" "$3" "$4" "passed" 24 | } 25 | 26 | function reports::add_test_failed() { 27 | reports::add_test "$1" "$2" "$3" "$4" "failed" 28 | } 29 | 30 | function reports::add_test() { 31 | local file="$1" 32 | local test_name="$2" 33 | local duration="$3" 34 | local assertions="$4" 35 | local status="$5" 36 | 37 | _REPORTS_TEST_FILES+=("$file") 38 | _REPORTS_TEST_NAMES+=("$test_name") 39 | _REPORTS_TEST_STATUSES+=("$status") 40 | _REPORTS_TEST_ASSERTIONS+=("$assertions") 41 | _REPORTS_TEST_DURATIONS+=("$duration") 42 | } 43 | 44 | function reports::generate_junit_xml() { 45 | local output_file="$1" 46 | 47 | local test_passed=$(state::get_tests_passed) 48 | local tests_skipped=$(state::get_tests_skipped) 49 | local tests_incomplete=$(state::get_tests_incomplete) 50 | local tests_snapshot=$(state::get_tests_snapshot) 51 | local tests_failed=$(state::get_tests_failed) 52 | local time=$(clock::total_runtime_in_milliseconds) 53 | 54 | { 55 | echo "" 56 | echo "" 57 | echo " " 61 | 62 | for i in "${!_REPORTS_TEST_NAMES[@]}"; do 63 | local file="${_REPORTS_TEST_FILES[$i]}" 64 | local name="${_REPORTS_TEST_NAMES[$i]}" 65 | local assertions="${_REPORTS_TEST_ASSERTIONS[$i]}" 66 | local status="${_REPORTS_TEST_STATUSES[$i]}" 67 | local test_time="${_REPORTS_TEST_DURATIONS[$i]}" 68 | 69 | echo " " 74 | echo " " 75 | done 76 | 77 | echo " " 78 | echo "" 79 | } > "$output_file" 80 | } 81 | 82 | function reports::generate_report_html() { 83 | local output_file="$1" 84 | 85 | local test_passed=$(state::get_tests_passed) 86 | local tests_skipped=$(state::get_tests_skipped) 87 | local tests_incomplete=$(state::get_tests_incomplete) 88 | local tests_snapshot=$(state::get_tests_snapshot) 89 | local tests_failed=$(state::get_tests_failed) 90 | local time=$(clock::total_runtime_in_milliseconds) 91 | 92 | # Temporary file to store test cases by file 93 | local temp_file="temp_test_cases.txt" 94 | 95 | # Collect test cases by file 96 | : > "$temp_file" # Clear temp file if it exists 97 | for i in "${!_REPORTS_TEST_NAMES[@]}"; do 98 | local file="${_REPORTS_TEST_FILES[$i]}" 99 | local name="${_REPORTS_TEST_NAMES[$i]}" 100 | local status="${_REPORTS_TEST_STATUSES[$i]}" 101 | local 
test_time="${_REPORTS_TEST_DURATIONS[$i]}" 102 | local test_case="$file|$name|$status|$test_time" 103 | 104 | echo "$test_case" >> "$temp_file" 105 | done 106 | 107 | { 108 | echo "" 109 | echo "" 110 | echo "" 111 | echo " " 112 | echo " " 113 | echo " Test Report" 114 | echo " " 125 | echo "" 126 | echo "" 127 | echo "

Test Report

" 128 | echo " " 129 | echo " " 130 | echo " " 131 | echo " " 132 | echo " " 133 | echo " " 134 | echo " " 135 | echo " " 136 | echo " " 137 | echo " " 138 | echo " " 139 | echo " " 140 | echo " " 141 | echo " " 142 | echo " " 143 | echo " " 144 | echo " " 145 | echo " " 146 | echo " " 147 | echo " " 148 | echo " " 149 | echo " " 150 | echo " " 151 | echo "
Total TestsPassedFailedIncompleteSkippedSnapshotTime (ms)
${#_REPORTS_TEST_NAMES[@]}$test_passed$tests_failed$tests_incomplete$tests_skipped$tests_snapshot$time
" 152 | echo "

Time: $time ms

" 153 | 154 | # Read the temporary file and group by file 155 | local current_file="" 156 | while IFS='|' read -r file name status test_time; do 157 | if [ "$file" != "$current_file" ]; then 158 | if [ -n "$current_file" ]; then 159 | echo " " 160 | echo " " 161 | fi 162 | echo "

File: $file

" 163 | echo " " 164 | echo " " 165 | echo " " 166 | echo " " 167 | echo " " 168 | echo " " 169 | echo " " 170 | echo " " 171 | echo " " 172 | current_file="$file" 173 | fi 174 | echo " " 175 | echo " " 176 | echo " " 177 | echo " " 178 | echo " " 179 | done < "$temp_file" 180 | 181 | # Close the last table 182 | if [ -n "$current_file" ]; then 183 | echo " " 184 | echo "
Test NameStatusTime (ms)
$name$status$test_time
" 185 | fi 186 | 187 | echo "" 188 | echo "" 189 | } > "$output_file" 190 | 191 | # Clean up temporary file 192 | rm -f "$temp_file" 193 | } 194 | -------------------------------------------------------------------------------- /src/runner.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # shellcheck disable=SC2155 3 | 4 | function runner::load_test_files() { 5 | local filter=$1 6 | shift 7 | local files=("${@}") 8 | 9 | for test_file in "${files[@]}"; do 10 | if [[ ! -f $test_file ]]; then 11 | continue 12 | fi 13 | # shellcheck source=/dev/null 14 | source "$test_file" 15 | runner::run_set_up_before_script 16 | if parallel::is_enabled; then 17 | runner::call_test_functions "$test_file" "$filter" 2>/dev/null & 18 | else 19 | runner::call_test_functions "$test_file" "$filter" 20 | fi 21 | runner::run_tear_down_after_script 22 | runner::clean_set_up_and_tear_down_after_script 23 | done 24 | 25 | if parallel::is_enabled; then 26 | wait 27 | runner::spinner & 28 | local spinner_pid=$! 29 | parallel::aggregate_test_results "$TEMP_DIR_PARALLEL_TEST_SUITE" 30 | # Kill the spinner once the aggregation finishes 31 | disown "$spinner_pid" && kill "$spinner_pid" &>/dev/null 32 | printf "\r " # Clear the spinner output 33 | fi 34 | } 35 | 36 | function runner::load_bench_files() { 37 | local filter=$1 38 | shift 39 | local files=("${@}") 40 | 41 | for bench_file in "${files[@]}"; do 42 | [[ -f $bench_file ]] || continue 43 | # shellcheck source=/dev/null 44 | source "$bench_file" 45 | runner::run_set_up_before_script 46 | runner::call_bench_functions "$bench_file" "$filter" 47 | runner::run_tear_down_after_script 48 | runner::clean_set_up_and_tear_down_after_script 49 | done 50 | } 51 | 52 | function runner::spinner() { 53 | if env::is_simple_output_enabled; then 54 | printf "\n" 55 | fi 56 | 57 | local delay=0.1 58 | local spin_chars="|/-\\" 59 | while true; do 60 | for ((i=0; i<${#spin_chars}; i++)); do 61 | printf "\r%s" "${spin_chars:$i:1}" 62 | sleep "$delay" 63 | done 64 | done 65 | } 66 | 67 | function runner::functions_for_script() { 68 | local script="$1" 69 | local all_fn_names="$2" 70 | 71 | # Filter the names down to the ones defined in the script, sort them by line number 72 | shopt -s extdebug 73 | # shellcheck disable=SC2086 74 | declare -F $all_fn_names | 75 | awk -v s="$script" '$3 == s {print $1" " $2}' | 76 | sort -k2 -n | 77 | awk '{print $1}' 78 | shopt -u extdebug 79 | } 80 | 81 | function runner::call_test_functions() { 82 | local script="$1" 83 | local filter="$2" 84 | local prefix="test" 85 | # Use declare -F to list all function names 86 | local all_fn_names=$(declare -F | awk '{print $3}') 87 | local filtered_functions=$(helper::get_functions_to_run "$prefix" "$filter" "$all_fn_names") 88 | # shellcheck disable=SC2207 89 | local functions_to_run=($(runner::functions_for_script "$script" "$filtered_functions")) 90 | 91 | if [[ "${#functions_to_run[@]}" -le 0 ]]; then 92 | return 93 | fi 94 | 95 | runner::render_running_file_header 96 | helper::check_duplicate_functions "$script" || true 97 | 98 | for fn_name in "${functions_to_run[@]}"; do 99 | if parallel::is_enabled && parallel::must_stop_on_failure; then 100 | break 101 | fi 102 | 103 | local provider_data=() 104 | while IFS=" " read -r line; do 105 | provider_data+=("$line") 106 | done <<< "$(helper::get_provider_data "$fn_name" "$script")" 107 | 108 | # No data provider found 109 | if [[ "${#provider_data[@]}" -eq 0 ]]; then 110 | runner::run_test "$script" 
"$fn_name" 111 | unset fn_name 112 | continue 113 | fi 114 | 115 | # Execute the test function for each line of data 116 | for data in "${provider_data[@]}"; do 117 | IFS=" " read -r -a args <<< "$data" 118 | if [ "${#args[@]}" -gt 1 ]; then 119 | runner::run_test "$script" "$fn_name" "${args[@]}" 120 | else 121 | runner::run_test "$script" "$fn_name" "$data" 122 | fi 123 | done 124 | unset fn_name 125 | done 126 | 127 | if ! env::is_simple_output_enabled; then 128 | echo "" 129 | fi 130 | } 131 | 132 | function runner::call_bench_functions() { 133 | local script="$1" 134 | local filter="$2" 135 | local prefix="bench" 136 | 137 | local all_fn_names=$(declare -F | awk '{print $3}') 138 | local filtered_functions=$(helper::get_functions_to_run "$prefix" "$filter" "$all_fn_names") 139 | # shellcheck disable=SC2207 140 | local functions_to_run=($(runner::functions_for_script "$script" "$filtered_functions")) 141 | 142 | if [[ "${#functions_to_run[@]}" -le 0 ]]; then 143 | return 144 | fi 145 | 146 | if env::is_bench_mode_enabled; then 147 | runner::render_running_file_header "$script" 148 | fi 149 | 150 | for fn_name in "${functions_to_run[@]}"; do 151 | read -r revs its max_ms <<< "$(benchmark::parse_annotations "$fn_name" "$script")" 152 | benchmark::run_function "$fn_name" "$revs" "$its" "$max_ms" 153 | unset fn_name 154 | done 155 | 156 | if ! env::is_simple_output_enabled; then 157 | echo "" 158 | fi 159 | } 160 | 161 | function runner::render_running_file_header() { 162 | if parallel::is_enabled; then 163 | return 164 | fi 165 | 166 | if ! env::is_simple_output_enabled; then 167 | if env::is_verbose_enabled; then 168 | printf "\n${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" "Running $script" 169 | else 170 | printf "${_COLOR_BOLD}%s${_COLOR_DEFAULT}\n" "Running $script" 171 | fi 172 | elif env::is_verbose_enabled; then 173 | printf "\n\n${_COLOR_BOLD}%s${_COLOR_DEFAULT}" "Running $script" 174 | fi 175 | } 176 | 177 | function runner::run_test() { 178 | local start_time 179 | start_time=$(clock::now) 180 | 181 | local test_file="$1" 182 | shift 183 | local fn_name="$1" 184 | shift 185 | 186 | # Export a unique test identifier so that test doubles can 187 | # create temporary files scoped per test run. This prevents 188 | # race conditions when running tests in parallel. 189 | local sanitized_fn_name 190 | sanitized_fn_name="$(helper::normalize_variable_name "$fn_name")" 191 | if env::is_parallel_run_enabled; then 192 | export BASHUNIT_CURRENT_TEST_ID="${sanitized_fn_name}_$$_$(random_str 6)" 193 | else 194 | export BASHUNIT_CURRENT_TEST_ID="${sanitized_fn_name}_$$" 195 | fi 196 | 197 | local interpolated_fn_name="$(helper::interpolate_function_name "$fn_name" "$@")" 198 | local current_assertions_failed="$(state::get_assertions_failed)" 199 | local current_assertions_snapshot="$(state::get_assertions_snapshot)" 200 | local current_assertions_incomplete="$(state::get_assertions_incomplete)" 201 | local current_assertions_skipped="$(state::get_assertions_skipped)" 202 | 203 | # (FD = File Descriptor) 204 | # Duplicate the current std-output (FD 1) and assigns it to FD 3. 205 | # This means that FD 3 now points to wherever the std-output was pointing. 206 | exec 3>&1 207 | 208 | local test_execution_result=$( 209 | trap ' 210 | state::set_test_exit_code $? 211 | runner::run_tear_down 212 | runner::clear_mocks 213 | state::export_subshell_context 214 | ' EXIT 215 | state::initialize_assertions_count 216 | runner::run_set_up 217 | 218 | # 2>&1: Redirects the std-error (FD 2) to the std-output (FD 1). 
function runner::run_test() {
  local start_time
  start_time=$(clock::now)

  local test_file="$1"
  shift
  local fn_name="$1"
  shift

  # Export a unique test identifier so that test doubles can
  # create temporary files scoped per test run. This prevents
  # race conditions when running tests in parallel.
  local sanitized_fn_name
  sanitized_fn_name="$(helper::normalize_variable_name "$fn_name")"
  if env::is_parallel_run_enabled; then
    export BASHUNIT_CURRENT_TEST_ID="${sanitized_fn_name}_$$_$(random_str 6)"
  else
    export BASHUNIT_CURRENT_TEST_ID="${sanitized_fn_name}_$$"
  fi

  local interpolated_fn_name="$(helper::interpolate_function_name "$fn_name" "$@")"
  local current_assertions_failed="$(state::get_assertions_failed)"
  local current_assertions_snapshot="$(state::get_assertions_snapshot)"
  local current_assertions_incomplete="$(state::get_assertions_incomplete)"
  local current_assertions_skipped="$(state::get_assertions_skipped)"

  # (FD = File Descriptor)
  # Duplicates the current std-output (FD 1) and assigns it to FD 3.
  # This means that FD 3 now points to wherever the std-output was pointing.
  exec 3>&1

  local test_execution_result=$(
    trap '
      state::set_test_exit_code $?
      runner::run_tear_down
      runner::clear_mocks
      state::export_subshell_context
    ' EXIT
    state::initialize_assertions_count
    runner::run_set_up

    # 2>&1: Redirects the std-error (FD 2) into the std-output (FD 1), so the
    # test's error output is captured as well; FD 3 still points to the
    # original std-output.
    "$fn_name" "$@" 2>&1
  )

  # Closes FD 3, which was used temporarily to hold the original stdout.
  exec 3>&-

  local end_time=$(clock::now)
  local duration_ns=$(math::calculate "($end_time - $start_time)")
  local duration=$(math::calculate "$duration_ns / 1000000")

  if env::is_verbose_enabled; then
    if env::is_simple_output_enabled; then
      echo ""
    fi

    printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '='
    printf "%s\n" "File: $test_file"
    printf "%s\n" "Function: $fn_name"
    printf "%s\n" "Duration: $duration ms"
    local raw_text=${test_execution_result%%##ASSERTIONS_*}
    [[ -n $raw_text ]] && printf "%s" "Raw text: $raw_text"
    printf "%s\n" "##ASSERTIONS_${test_execution_result#*##ASSERTIONS_}"
    printf '%*s\n' "$TERMINAL_WIDTH" '' | tr ' ' '-'
  fi

  local subshell_output=$(runner::decode_subshell_output "$test_execution_result")

  if [[ -n "$subshell_output" ]]; then
    # Formatted as "[type]line" @see `state::print_line()`
    local type="${subshell_output%%]*}" # Remove everything after "]"
    type="${type#[}"                    # Remove the leading "["
    local line="${subshell_output#*]}"  # Remove everything before and including "]"

    # Replace [type] with a newline to split the messages
    line=$(echo "$line" | sed -e 's/\[failed\]/\n/g' \
      -e 's/\[skipped\]/\n/g' \
      -e 's/\[incomplete\]/\n/g')

    state::print_line "$type" "$line"

    subshell_output=$line
  fi

  local runtime_output="${test_execution_result%%##ASSERTIONS_*}"

  local runtime_error=""
  for error in "command not found" "unbound variable" "permission denied" \
    "no such file or directory" "syntax error" "bad substitution" \
    "division by 0" "cannot allocate memory" "bad file descriptor" \
    "segmentation fault" "illegal option" "argument list too long" \
    "readonly variable" "missing keyword" "killed" \
    "cannot execute binary file" "invalid arithmetic operator"; do
    if [[ "$runtime_output" == *"$error"* ]]; then
      runtime_error=$(echo "${runtime_output#*: }" | tr -d '\n')
      break
    fi
  done

  runner::parse_result "$fn_name" "$test_execution_result" "$@"

  local total_assertions="$(state::calculate_total_assertions "$test_execution_result")"
  local test_exit_code="$(state::get_test_exit_code)"

  if [[ -n $runtime_error || $test_exit_code -ne 0 ]]; then
    state::add_tests_failed
    console_results::print_error_test "$fn_name" "$runtime_error"
    reports::add_test_failed "$test_file" "$fn_name" "$duration" "$total_assertions"
    runner::write_failure_result_output "$test_file" "$runtime_error"
    return
  fi

  if [[ "$current_assertions_failed" != "$(state::get_assertions_failed)" ]]; then
    state::add_tests_failed
    reports::add_test_failed "$test_file" "$fn_name" "$duration" "$total_assertions"
    runner::write_failure_result_output "$test_file" "$subshell_output"

    if env::is_stop_on_failure_enabled; then
      if parallel::is_enabled; then
        parallel::mark_stop_on_failure
      else
        exit "$EXIT_CODE_STOP_ON_FAILURE"
      fi
    fi
    return
  fi

  if [[ "$current_assertions_snapshot" != "$(state::get_assertions_snapshot)" ]]; then
    state::add_tests_snapshot
    console_results::print_snapshot_test "$fn_name"
    reports::add_test_snapshot "$test_file" "$fn_name" "$duration" "$total_assertions"
    return
  fi

  if [[ "$current_assertions_incomplete" != "$(state::get_assertions_incomplete)" ]]; then
    state::add_tests_incomplete
    reports::add_test_incomplete "$test_file" "$fn_name" "$duration" "$total_assertions"
    return
  fi

  if [[ "$current_assertions_skipped" != "$(state::get_assertions_skipped)" ]]; then
    state::add_tests_skipped
    reports::add_test_skipped "$test_file" "$fn_name" "$duration" "$total_assertions"
    return
  fi

  local label="$(helper::normalize_test_function_name "$fn_name" "$interpolated_fn_name")"

  if [[ "$fn_name" == "$interpolated_fn_name" ]]; then
    console_results::print_successful_test "${label}" "$duration" "$@"
  else
    console_results::print_successful_test "${label}" "$duration"
  fi
  state::add_tests_passed
  reports::add_test_passed "$test_file" "$fn_name" "$duration" "$total_assertions"
}
function runner::decode_subshell_output() {
  local test_execution_result="$1"

  local test_output_base64="${test_execution_result##*##TEST_OUTPUT=}"
  test_output_base64="${test_output_base64%%##*}"

  if command -v base64 >/dev/null; then
    echo "$test_output_base64" | base64 -d
  else
    echo "$test_output_base64" | openssl enc -d -base64
  fi
}

function runner::parse_result() {
  local fn_name=$1
  shift
  local execution_result=$1
  shift
  local args=("$@")

  if parallel::is_enabled; then
    runner::parse_result_parallel "$fn_name" "$execution_result" "${args[@]}"
  else
    runner::parse_result_sync "$fn_name" "$execution_result"
  fi
}

function runner::parse_result_parallel() {
  local fn_name=$1
  shift
  local execution_result=$1
  shift
  local args=("$@")

  local test_suite_dir="${TEMP_DIR_PARALLEL_TEST_SUITE}/$(basename "$test_file" .sh)"
  mkdir -p "$test_suite_dir"

  local sanitized_args
  sanitized_args=$(echo "${args[*]}" | tr '[:upper:]' '[:lower:]' | sed -E 's/[^a-z0-9]+/-/g; s/^-|-$//')
  local template
  if [[ -z "$sanitized_args" ]]; then
    template="${fn_name}.XXXXXX"
  else
    template="${fn_name}-${sanitized_args}.XXXXXX"
  fi

  local unique_test_result_file
  unique_test_result_file=$(mktemp -p "$test_suite_dir" "$template")
  mv "$unique_test_result_file" "${unique_test_result_file}.result"
  unique_test_result_file="${unique_test_result_file}.result"

  log "debug" "[PARA]" "fn_name:$fn_name" "execution_result:$execution_result"

  runner::parse_result_sync "$fn_name" "$execution_result"

  echo "$execution_result" > "$unique_test_result_file"
}

# shellcheck disable=SC2295
function runner::parse_result_sync() {
  local fn_name=$1
  local execution_result=$2

  local result_line
  result_line=$(echo "$execution_result" | tail -n 1)

  local assertions_failed=0
  local assertions_passed=0
  local assertions_skipped=0
  local assertions_incomplete=0
  local assertions_snapshot=0
  local test_exit_code=0

  local regex
  regex='ASSERTIONS_FAILED=([0-9]*)##'
  regex+='ASSERTIONS_PASSED=([0-9]*)##'
  regex+='ASSERTIONS_SKIPPED=([0-9]*)##'
  regex+='ASSERTIONS_INCOMPLETE=([0-9]*)##'
  regex+='ASSERTIONS_SNAPSHOT=([0-9]*)##'
  regex+='TEST_EXIT_CODE=([0-9]*)'

  if [[ $result_line =~ $regex ]]; then
    assertions_failed="${BASH_REMATCH[1]}"
    assertions_passed="${BASH_REMATCH[2]}"
    assertions_skipped="${BASH_REMATCH[3]}"
    assertions_incomplete="${BASH_REMATCH[4]}"
    assertions_snapshot="${BASH_REMATCH[5]}"
    test_exit_code="${BASH_REMATCH[6]}"
  fi

  log "debug" "[SYNC]" "fn_name:$fn_name" "execution_result:$execution_result"

  ((_ASSERTIONS_PASSED += assertions_passed)) || true
  ((_ASSERTIONS_FAILED += assertions_failed)) || true
  ((_ASSERTIONS_SKIPPED += assertions_skipped)) || true
  ((_ASSERTIONS_INCOMPLETE += assertions_incomplete)) || true
  ((_ASSERTIONS_SNAPSHOT += assertions_snapshot)) || true
  ((_TEST_EXIT_CODE += test_exit_code)) || true
}

function runner::write_failure_result_output() {
  local test_file=$1
  local error_msg=$2

  local test_nr="*"
  if ! parallel::is_enabled; then
    test_nr=$(state::get_tests_failed)
  fi

  echo -e "$test_nr) $test_file\n$error_msg" >> "$FAILURES_OUTPUT_PATH"
}

function runner::run_set_up() {
  helper::execute_function_if_exists 'set_up'
}

function runner::run_set_up_before_script() {
  helper::execute_function_if_exists 'set_up_before_script'
}

function runner::run_tear_down() {
  helper::execute_function_if_exists 'tear_down'
}

function runner::clear_mocks() {
  for i in "${!MOCKED_FUNCTIONS[@]}"; do
    unmock "${MOCKED_FUNCTIONS[$i]}"
  done
}

function runner::run_tear_down_after_script() {
  helper::execute_function_if_exists 'tear_down_after_script'
}

function runner::clean_set_up_and_tear_down_after_script() {
  helper::unset_if_exists 'set_up'
  helper::unset_if_exists 'tear_down'
  helper::unset_if_exists 'set_up_before_script'
  helper::unset_if_exists 'tear_down_after_script'
}
--------------------------------------------------------------------------------
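The lifecycle hooks the runner looks for, shown in a minimal test file (illustrative; assert_file_exists is assumed to come from src/assert_files.sh):

#!/usr/bin/env bash

function set_up_before_script() { TMP_WORKDIR=$(mktemp -d); }   # once per file
function set_up()               { cd "$TMP_WORKDIR" || exit 1; } # before each test
function tear_down()            { rm -f ./*.tmp; }               # after each test
function tear_down_after_script() { rm -rf "$TMP_WORKDIR"; }     # once per file

function test_creates_a_tmp_file() {
  touch result.tmp
  assert_file_exists result.tmp
}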
/src/skip_todo.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

function skip() {
  local reason=${1-}
  local label
  label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"

  console_results::print_skipped_test "${label}" "${reason}"

  state::add_assertions_skipped
}

function todo() {
  local pending=${1-}
  local label
  label="$(helper::normalize_test_function_name "${FUNCNAME[1]}")"

  console_results::print_incomplete_test "${label}" "${pending}"

  state::add_assertions_incomplete
}
--------------------------------------------------------------------------------
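Both helpers are called from inside a test function; a short sketch:

function test_only_runs_on_linux() {
  if [[ "$(uname)" != "Linux" ]]; then
    skip "requires Linux"   # marks the test as skipped with a reason
    return
  fi
  assert_same "Linux" "$(uname)"
}

function test_pending_edge_case() {
  todo "cover empty input"  # marks the test as incomplete
}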
/src/state.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

_TESTS_PASSED=0
_TESTS_FAILED=0
_TESTS_SKIPPED=0
_TESTS_INCOMPLETE=0
_TESTS_SNAPSHOT=0
_ASSERTIONS_PASSED=0
_ASSERTIONS_FAILED=0
_ASSERTIONS_SKIPPED=0
_ASSERTIONS_INCOMPLETE=0
_ASSERTIONS_SNAPSHOT=0
_DUPLICATED_FUNCTION_NAMES=""
_FILE_WITH_DUPLICATED_FUNCTION_NAMES=""
_DUPLICATED_TEST_FUNCTIONS_FOUND=false
_TEST_OUTPUT=""
_TEST_EXIT_CODE=0

function state::get_tests_passed() {
  echo "$_TESTS_PASSED"
}

function state::add_tests_passed() {
  ((_TESTS_PASSED++)) || true
}

function state::get_tests_failed() {
  echo "$_TESTS_FAILED"
}

function state::add_tests_failed() {
  ((_TESTS_FAILED++)) || true
}

function state::get_tests_skipped() {
  echo "$_TESTS_SKIPPED"
}

function state::add_tests_skipped() {
  ((_TESTS_SKIPPED++)) || true
}

function state::get_tests_incomplete() {
  echo "$_TESTS_INCOMPLETE"
}

function state::add_tests_incomplete() {
  ((_TESTS_INCOMPLETE++)) || true
}

function state::get_tests_snapshot() {
  echo "$_TESTS_SNAPSHOT"
}

function state::add_tests_snapshot() {
  ((_TESTS_SNAPSHOT++)) || true
}

function state::get_assertions_passed() {
  echo "$_ASSERTIONS_PASSED"
}

function state::add_assertions_passed() {
  ((_ASSERTIONS_PASSED++)) || true
}

function state::get_assertions_failed() {
  echo "$_ASSERTIONS_FAILED"
}

function state::add_assertions_failed() {
  ((_ASSERTIONS_FAILED++)) || true
}

function state::get_assertions_skipped() {
  echo "$_ASSERTIONS_SKIPPED"
}

function state::add_assertions_skipped() {
  ((_ASSERTIONS_SKIPPED++)) || true
}

function state::get_assertions_incomplete() {
  echo "$_ASSERTIONS_INCOMPLETE"
}

function state::add_assertions_incomplete() {
  ((_ASSERTIONS_INCOMPLETE++)) || true
}

function state::get_assertions_snapshot() {
  echo "$_ASSERTIONS_SNAPSHOT"
}

function state::add_assertions_snapshot() {
  ((_ASSERTIONS_SNAPSHOT++)) || true
}

function state::is_duplicated_test_functions_found() {
  echo "$_DUPLICATED_TEST_FUNCTIONS_FOUND"
}

function state::set_duplicated_test_functions_found() {
  _DUPLICATED_TEST_FUNCTIONS_FOUND=true
}

function state::get_duplicated_function_names() {
  echo "$_DUPLICATED_FUNCTION_NAMES"
}

function state::set_duplicated_function_names() {
  _DUPLICATED_FUNCTION_NAMES="$1"
}

function state::get_file_with_duplicated_function_names() {
  echo "$_FILE_WITH_DUPLICATED_FUNCTION_NAMES"
}

function state::set_file_with_duplicated_function_names() {
  _FILE_WITH_DUPLICATED_FUNCTION_NAMES="$1"
}

function state::add_test_output() {
  _TEST_OUTPUT+="$1"
}

function state::get_test_exit_code() {
  echo "$_TEST_EXIT_CODE"
}

function state::set_test_exit_code() {
  _TEST_EXIT_CODE="$1"
}

function state::set_duplicated_functions_merged() {
  state::set_duplicated_test_functions_found
  state::set_file_with_duplicated_function_names "$1"
  state::set_duplicated_function_names "$2"
}

function state::initialize_assertions_count() {
  _ASSERTIONS_PASSED=0
  _ASSERTIONS_FAILED=0
  _ASSERTIONS_SKIPPED=0
  _ASSERTIONS_INCOMPLETE=0
  _ASSERTIONS_SNAPSHOT=0
  _TEST_OUTPUT=""
}
function state::export_subshell_context() {
  local encoded_test_output

  if base64 --help 2>&1 | grep -q -- "-w"; then
    # Alpine requires the -w 0 option to avoid wrapping
    encoded_test_output=$(echo -n "$_TEST_OUTPUT" | base64 -w 0)
  else
    # macOS and others: default base64 without wrapping
    encoded_test_output=$(echo -n "$_TEST_OUTPUT" | base64)
  fi

  # Emit all counters on a single line so the runner can parse it
  # (see the regex in runner::parse_result_sync).
  cat <<EOF
##ASSERTIONS_FAILED=$_ASSERTIONS_FAILED##ASSERTIONS_PASSED=$_ASSERTIONS_PASSED##ASSERTIONS_SKIPPED=$_ASSERTIONS_SKIPPED##ASSERTIONS_INCOMPLETE=$_ASSERTIONS_INCOMPLETE##ASSERTIONS_SNAPSHOT=$_ASSERTIONS_SNAPSHOT##TEST_EXIT_CODE=$_TEST_EXIT_CODE##TEST_OUTPUT=$encoded_test_output
EOF
}
--------------------------------------------------------------------------------
/src/test_doubles.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

function mock() {
  local command=$1
  shift

  if [[ $# -gt 0 ]]; then
    eval "function $command() { $* ; }"
  else
    eval "function $command() { echo \"$(cat)\" ; }"
  fi

  export -f "${command?}"

  MOCKED_FUNCTIONS+=("$command")
}

function unmock() {
  local command=$1

  unset -f "$command"

  local remaining=()
  for fn in "${MOCKED_FUNCTIONS[@]}"; do
    [[ "$fn" != "$command" ]] && remaining+=("$fn")
  done
  MOCKED_FUNCTIONS=("${remaining[@]}")
}

function spy() {
  local command=$1
  local variable
  variable="$(helper::normalize_variable_name "$command")"

  local times_file
  times_file=$(mktemp)
  local params_file
  params_file=$(mktemp)

  echo 0 > "$times_file"
  : > "$params_file"
  export "${variable}_times_file"="$times_file"
  export "${variable}_params_file"="$params_file"

  eval "function $command() {
    echo \"\$*\" >> '$params_file'
    local _c=\$(cat '$times_file')
    _c=\$((_c+1))
    echo \"\$_c\" > '$times_file'
  }"

  export -f "${command?}"

  MOCKED_FUNCTIONS+=("$command")
}

function assert_have_been_called() {
  local command=$1
  local variable
  variable="$(helper::normalize_variable_name "$command")"
  local file_var="${variable}_times_file"
  local times=0
  if [[ -f "${!file_var-}" ]]; then
    times=$(cat "${!file_var}")
  fi
  local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"

  if [[ $times -eq 0 ]]; then
    state::add_assertions_failed
    console_results::print_failed_test "${label}" "${command}" "to have been called" "once"
    return
  fi

  state::add_assertions_passed
}

function assert_have_been_called_with() {
  local expected=$1
  local command=$2
  local third_arg="${3:-}"
  local fourth_arg="${4:-}"

  local index=""
  local label=""
  if [[ -n $third_arg && $third_arg =~ ^[0-9]+$ ]]; then
    index=$third_arg
    label="${fourth_arg:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
  else
    label="${third_arg:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
    index="$fourth_arg"
  fi

  local variable
  variable="$(helper::normalize_variable_name "$command")"
  local file_var="${variable}_params_file"
  local params=""
  if [[ -f "${!file_var-}" ]]; then
    if [[ -n $index ]]; then
      params=$(sed -n "${index}p" "${!file_var}")
    else
      params=$(tail -n 1 "${!file_var}")
    fi
  fi

  if [[ "$expected" != "$params" ]]; then
    state::add_assertions_failed
    console_results::print_failed_test "${label}" "${expected}" "but got " "$params"
    return
  fi

  state::add_assertions_passed
}

function assert_have_been_called_times() {
  local expected=$1
  local command=$2
  local variable
  variable="$(helper::normalize_variable_name "$command")"
  local file_var="${variable}_times_file"
  local times=0
  if [[ -f "${!file_var-}" ]]; then
    times=$(cat "${!file_var}")
  fi
  local label="${3:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
  if [[ $times -ne $expected ]]; then
    state::add_assertions_failed
    console_results::print_failed_test "${label}" "${command}" \
      "to have been called" "${expected} times" \
      "actual" "${times} times"
    return
  fi

  state::add_assertions_passed
}

function assert_not_called() {
  local command=$1
  local label="${2:-$(helper::normalize_test_function_name "${FUNCNAME[1]}")}"
  assert_have_been_called_times 0 "$command" "$label"
}
--------------------------------------------------------------------------------
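A sketch of the spy workflow inside a test (illustrative; the URL and helper function are made up):

function test_notifies_the_deploy_hook() {
  spy curl   # replace curl with a stub that records each call

  notify_hook() { curl -s https://example.com/hook; }
  notify_hook

  assert_have_been_called curl
  assert_have_been_called_times 1 curl
  assert_have_been_called_with "-s https://example.com/hook" curl
}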
/src/upgrade.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

function upgrade::upgrade() {
  local script_path
  script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
  local latest_tag
  latest_tag="$(helpers::get_latest_tag)"

  if [[ "$BASHUNIT_VERSION" == "$latest_tag" ]]; then
    echo "> You are already on the latest version"
    return
  fi

  echo "> Upgrading bashunit to the latest version"
  cd "$script_path" || exit

  if ! io::download_to "https://github.com/TypedDevs/bashunit/releases/download/$latest_tag/bashunit" "bashunit"; then
    echo "Failed to download bashunit" 1>&2
    return 1
  fi

  chmod u+x "bashunit"

  echo "> bashunit upgraded successfully to the latest version $latest_tag"
}
--------------------------------------------------------------------------------
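How the self-upgrade is reached from the command line (assuming the CLI wires upgrade::upgrade to an --upgrade flag, per bashunit's docs; the flag name may vary by version):

./bashunit --version   # compare the installed version against the latest tag
./bashunit --upgrade   # download the latest release binary over the current one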