├── .github └── workflows │ └── tag.yml ├── .gitignore ├── MIT-LICENSE ├── Projectfile ├── README.md ├── bin └── ts ├── dockerfiles ├── alpine │ └── Dockerfile ├── centos │ └── Dockerfile ├── debian │ └── Dockerfile ├── fedora │ └── Dockerfile ├── opensuse │ └── Dockerfile ├── shell │ └── Dockerfile └── ubuntu │ └── Dockerfile ├── man └── man1 │ └── ts.1 └── test ├── benchmark ├── examples ├── abc ├── change_dirs ├── exit_fail_in_setup ├── exit_fail_in_teardown ├── exit_fail_in_test ├── exit_fail_in_test_after_untrap ├── fail ├── include ├── include a ├── include b ├── include_c ├── no_tests ├── options ├── pass ├── public_function_conflict ├── public_function_conflict.d │ ├── setup │ └── teardown ├── return_fail_in_setup ├── return_fail_in_teardown ├── set_u ├── skip ├── test_detection ├── undeclared_variable ├── xyz_one └── xyz_two ├── fail ├── helper ├── nox ├── pass ├── readme ├── background ├── common_tests ├── example ├── test_grep_abc ├── test_sed_abc ├── troubleshoot_fail ├── troubleshoot_pass └── usage └── suite /.github/workflows/tag.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | on: 3 | push: 4 | tags: 5 | - v* 6 | jobs: 7 | test: 8 | name: Test All 9 | runs-on: ubuntu-latest 10 | timeout-minutes: 10 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Run 14 | run: ./Projectfile test-all 15 | - uses: actions/upload-artifact@v1 16 | with: 17 | name: results 18 | path: tmp/artifacts 19 | 20 | release: 21 | name: Create Release 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v2 25 | - name: Create Release 26 | uses: actions/create-release@latest 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | with: 30 | tag_name: ${{ github.ref }} 31 | release_name: Release ${{ github.ref }} 32 | body: | 33 | TODO - replace with changes 34 | draft: true 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | /tmp 3 | -------------------------------------------------------------------------------- /MIT-LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2011-2012, Simon Chiang. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
-------------------------------------------------------------------------------- /Projectfile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # usage: ./Projectfile [command] 3 | ############################################################################# 4 | 5 | images () { 6 | for os in $(ls dockerfiles/) 7 | do image "$os" 8 | done 9 | } 10 | 11 | image () { 12 | os="${1:-shell}" 13 | docker build -f dockerfiles/"$os"/Dockerfile -t "ts:$os" . >&2 14 | printf "%s\n" "ts:$os" 15 | } 16 | 17 | shell () { 18 | os="${1:-shell}" 19 | docker run -it --rm -v "$PWD:/app" "ts:$os" /bin/bash 20 | } 21 | 22 | test () { 23 | image="${1:-ts:shell}" 24 | shell="${2:-/bin/sh}" 25 | 26 | # Switch /bin/sh so that all scripts will pick up the target shell as a part 27 | # of shebang usage. Assumes image runs as root user. 28 | docker run --rm -i "$image" /bin/sh <"$buildlog")" 49 | 50 | for shell in "$@" 51 | do 52 | printf "# %s %s\t" "$os" "$shell" 53 | outfile="$os_dir/$(basename "$shell")" 54 | test "$image" "$shell" >"$outfile" 2>&1 55 | tail -n 1 "$outfile" 56 | awk -v os="$os" -v shell="$shell" ' 57 | /^\[/ { summary=$0 }; 58 | /^F/ { print $1 " " os " " shell "\t" summary } 59 | ' "$outfile" 60 | done 61 | } 62 | 63 | test-all () { 64 | test-os alpine /bin/bash /bin/zsh 65 | test-os centos /bin/bash /bin/zsh /bin/ksh 66 | test-os debian /bin/bash /bin/zsh /bin/ksh 67 | test-os fedora /bin/bash /bin/zsh /bin/ksh 68 | test-os opensuse /bin/bash /bin/zsh /bin/ksh 69 | test-os ubuntu /bin/dash /bin/bash /bin/zsh /bin/ksh 70 | } 71 | 72 | # 73 | # Release 74 | # 75 | 76 | prepare-release () { 77 | ts_version="$1" 78 | ts_release_date="$(date +"%Y-%m-%d")" 79 | 80 | if [ -z "$ts_version" ] 81 | then 82 | printf "no version specified\nusage: ./Bakefile prepare-release \n" >&2 83 | exit 1 84 | fi 85 | 86 | if ! ./test/suite 87 | then 88 | printf "cannot release (failing tests)\n" >&2 89 | exit 1 90 | fi 91 | 92 | # set new version 93 | sed -i "" \ 94 | -e "s/ts_version=.*/ts_version=\"$ts_version\"/" \ 95 | -e "s/ts_release_date=.*/ts_release_date=\"$ts_release_date\"/" \ 96 | ./bin/ts 97 | 98 | # update the manpages 99 | docker run --rm -i -v "$PWD:/app" "$(image shell)" /bin/bash < man/man1/ts.1 102 | DOC 103 | 104 | cat >&2 </dev/null 127 | } 128 | 129 | if list | grep -qFx -- "${1:-}" 130 | then "$@" 131 | else 132 | if [ -z "$1" ] 133 | then printf "no command specified (try 'list')\n" >&2 134 | else printf "unknown command: %s\n" "$1" >&2 135 | fi 136 | exit 1 137 | fi 138 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ts(1) -- test script 2 | ============================================= 3 | 4 | ## SYNOPSIS 5 | 6 | `ts` [options] TEST_SCRIPT... 7 | 8 | `[./test_script]` [options] TESTS... 9 | 10 | ## DESCRIPTION 11 | 12 | **ts** provides functions for writing tests in shell. The test scripts can be 13 | run individually or in a batch format using `ts` as a command. 14 | 15 | **ts** makes a test directory available on a per-test basis so it's easy to 16 | sandbox tests that write or manipulate files. **ts** tries to use POSIX 17 | exclusively and so should (hopefully) work with any POSIX-compliant shell. 18 | 19 | ## TEST SCRIPTS 20 | 21 | The `ts` command expects script files that define test cases. 
Test scripts 22 | have the following form: 23 | 24 | [./example] 25 | #!/bin/sh 26 | # pick a shell, any (POSIX) shell 27 | 28 | setup () { # optional setup 29 | mkdir -p "$ts_test_dir" 30 | } 31 | 32 | teardown () { # optional teardown 33 | rm -r "$ts_test_dir" 34 | } 35 | 36 | test_true () { # write tests named like "test_" 37 | true # return 0 to pass. 38 | } 39 | 40 | . ts # source ts to run the tests 41 | 42 | To run, use any of: 43 | 44 | ts example # run multiple test scripts 45 | ./example # run a single test script 46 | ./example test_a_thing # run a single test 47 | 48 | To debug, try using -e to execute the test function in isolation. 49 | 50 | ./example -e test_a_thing # a most useful pattern 51 | 52 | See the FUNCTIONS, EXAMPLES, and TROUBLESHOOT sections for more details. 53 | 54 | ## OPTIONS 55 | 56 | These options control how `ts` operates. 57 | 58 | * `-a`: 59 | Show passing outputs, which are normally filtered. 60 | 61 | * `-c`: 62 | Colorize output. (green/red/yellow - pass/fail/not-executable) 63 | 64 | * `-d`: 65 | Debug mode. Turns on xtrace (set -x) for the tests and enables -v. 66 | 67 | * `-e`: 68 | Exec mode. Runs a test without processing the output and exits. 69 | 70 | * `-h`: 71 | Prints help. 72 | 73 | * `-m`: 74 | Monitor output. Provide a ticker indicating the progress of tests and 75 | print a summary. Monitor is the default. 76 | 77 | * `-q`: 78 | Quiet output. Shows only stdout, hiding stderr. 79 | 80 | * `-r`: 81 | Remove the tmp dir on complete. Removal is done using `rm -r`. 82 | 83 | * `-s`: 84 | Stream output. Show test progress as it happens. No summary is printed. 85 | 86 | * `-t`: 87 | Set the test tmp dir (default tmp). The test-specific directories are 88 | be located under this directory. 89 | 90 | * `-v`: 91 | Verbose output. Display both stdout and stderr for the tests (enabled 92 | by default). 93 | 94 | ## FUNCTIONS 95 | 96 | Functions provided by **ts**. 97 | 98 | * `setup`: 99 | 100 | A setup function run before each test. 101 | 102 | * `teardown`: 103 | 104 | A teardown function run after each test. 105 | 106 | **ts** ensures teardown runs by setting a trap for EXIT signals during setup 107 | and the actual test. As a result, EXIT traps in tests can prevent teardown. 108 | 109 | * `assert_status EXPECTED ACTUAL [MESSAGE]`: 110 | 111 | Exit 1 unless the numbers EXPECTED and ACTUAL are the same. Use this to make 112 | assertions in the middle of a test. 113 | 114 | * `assert_output EXPECTED ACTUAL`: 115 | 116 | Return 1 unless the variables EXPECTED and ACTUAL are the same. Reads from 117 | stdin for '-'. Also reads ACTUAL from stdin if ACTUAL is unspecified. 118 | 119 | Using assert_output in a pipeline is often convenient but remember this 120 | assertion only returns, it does not exit. As a result you should either use 121 | it as the very last command in a test, or follow it with assert_status in a 122 | multipart test. See the section on my 'tests aren't failing' for more. 123 | 124 | * `skip [MESSAGE]`: 125 | 126 | Skip a test. Exits 0 but counts as a skip and not a pass. 127 | 128 | **ts** reserves all function names starting with 'ts_' for internal use. Note 129 | that `setup` and `teardown` commands on PATH will be ignored because tests 130 | will shadow them with the corresponding **ts** functions. 131 | 132 | ## VARIABLES 133 | 134 | Variables provided by **ts** at runtime. Feel free to use any of them but 135 | treat them as read-only. 136 | 137 | * `ts_test_file`: 138 | The name of the current test script being run. 
139 | 140 | * `ts_test_case`: 141 | The basename of the test file, minus the extname. 142 | 143 | * `ts_test_lineno`: 144 | The line number where the current test is defined. 145 | 146 | * `ts_test_name`: 147 | The name of the current test. 148 | 149 | * `ts_test_dir`: 150 | The test-specific directory. 151 | 152 | The test dir is 'tmp\_dir/test\_case'. **ts** does not create this directory 153 | automatically. Add that functionality in the setup function as needed. 154 | 155 | **ts** reserves all variables starting with 'ts\_' for internal use. 156 | 157 | ## ENVIRONMENT 158 | 159 | The behavior of **ts** can be modified via environment variables. Many of 160 | these may be set using options. 161 | 162 | * `TS_USR_DIR` (pwd): 163 | The user dir. Used to determine the ts tmp dir. 164 | 165 | * `TS_TMP_DIR` ($TS\_USR\_DIR/tmp): 166 | The base tmp dir. 167 | 168 | * `TS_COLOR` (false): 169 | Set to "true" to enable color. 170 | 171 | * `TS_DIFF` (diff): 172 | The diff command used by assert_output. 173 | 174 | * `TS_DEBUG` (false): 175 | Set to "true" to enable debug mode. 176 | 177 | * `TS_REMOVE_TMP_DIR` (false): 178 | Set to "true" to remove tmp dir. 179 | 180 | In addition these variables adjust the color output. 181 | 182 | * `TS_PASS` (green): 183 | Passing tests. 184 | 185 | * `TS_FAIL` (red): 186 | Failing tests. 187 | 188 | * `TS_SKIP` (yellow): 189 | Skipped tests. 190 | 191 | * `TS_NORM` (normal): 192 | The normal output color. 193 | 194 | For example to turn failures blue: 195 | 196 | export TS_FAIL=$(printf "%b" "\033[0;34m") 197 | 198 | **ts** reserves all variables starting with 'TS\_' for internal use. 199 | 200 | ## EXAMPLES 201 | 202 | Basic usage: 203 | 204 | [./example] 205 | #!/bin/sh 206 | 207 | test_arbitrary_function () { 208 | echo abc | grep -q b 209 | } 210 | 211 | test_assert_status () { 212 | false 213 | assert_status 1 $? 214 | } 215 | 216 | test_assert_output_style_one () { 217 | out=$(printf "hello world") 218 | assert_output "hello world" "$out" 219 | } 220 | 221 | test_assert_output_style_two () { 222 | printf "hello world" | assert_output "hello world" 223 | } 224 | 225 | test_assert_output_style_three () { 226 | printf "hello world\n" | assert_output "\ 227 | hello world 228 | " 229 | } 230 | 231 | test_skip_test () { 232 | skip "skipping this one" 233 | false 234 | } 235 | 236 | . ts 237 | 238 | Run like: 239 | 240 | chmod +x example 241 | ts example 242 | 243 | Shared examples: 244 | 245 | [./common_tests] 246 | test_it_should_pick_lines_with_abc () { 247 | printf "%s\n" "1 abc" "2 xyz" "3 abc" | 248 | ${picker} | assert_output "\ 249 | 1 abc 250 | 3 abc 251 | " 252 | } 253 | 254 | [./test_grep_abc] 255 | #!/bin/sh 256 | picker="grep abc" 257 | . ts . ./common_tests 258 | . ts 259 | 260 | [./test_sed_abc] 261 | #!/bin/sh 262 | picker="sed -ne /abc/p" 263 | . ts . ./common_tests 264 | . ts 265 | 266 | Run like: 267 | 268 | chmod +x test_grep_abc test_sed_abc 269 | ts test_grep_abc test_sed_abc 270 | 271 | Background jobs work fine, just be sure to cleanup: 272 | 273 | [./background] 274 | #!/bin/sh 275 | 276 | teardown () { 277 | jobs -p | xargs kill -9 278 | true 279 | } 280 | 281 | test_background_job () { 282 | sleep 3 & 283 | true 284 | } 285 | 286 | . ts 287 | 288 | ## TROUBLESHOOT 289 | 290 | **My tests aren't running** 291 | 292 | Be sure you added `. ts` at the end of your script. 293 | 294 | **My tests are failing** 295 | 296 | **1)** Are you incrementing a variable in a loop in a pipeline? 297 | 298 | See http://mywiki.wooledge.org/BashFAQ/024. 
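A minimal illustration (a hypothetical test, not one of this project's examples): in shells that run the last element of a pipeline in a subshell, the increment below is lost and the test fails even though every line is read.

    test_count_lines_in_a_pipeline () {
      count=0
      printf "%s\n" a b c | while read line
      do count=$(($count + 1))
      done
      assert_output "3" "$count"
    }

The FAQ linked above describes several workarounds, such as restructuring the loop so the counting happens in the current shell.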
299 | 300 | **2)** Is a newline missing from a variable? 301 | 302 | Subshells chomp the last newline off of a command. 303 | 304 | test_newline_is_missing_so_this_fails () { 305 | out=$(echo abc) 306 | 307 | assert_output "\ 308 | abc 309 | " "$out" 310 | } 311 | 312 | One way around this is to print a sacrificial non-newline character. 313 | 314 | test_newline_is_now_accounted_for () { 315 | out=$(echo abc; printf x) 316 | 317 | assert_output "\ 318 | abc 319 | " "${out%x}" 320 | } 321 | 322 | Another way is to pipe into assert_output. 323 | 324 | test_another_newline_strategy () { 325 | echo abc | assert_output "\ 326 | abc 327 | " 328 | } 329 | 330 | **My tests aren't failing** 331 | 332 | **1)** Are you using assert_output in a pipeline? 333 | 334 | **ts** assert methods return failure (rather than exit) so this will pass. 335 | 336 | test_multiple_asserts_not_failing_as_intended () { 337 | assert_output "1" "0" 338 | assert_output "0" "0" 339 | } 340 | 341 | The reason is that exit within a pipeline has shell-specific behavior. For 342 | instance if you run this with different values of shell you will get 0 for 343 | bash and dash, and 1 for zsh and ksh. 344 | 345 | $shell < 450 | 451 | Report bugs here: http://github.com/thinkerbot/ts/issues. 452 | 453 | ## CONTRIBUTORS 454 | 455 | Thanks for the help! 456 | 457 | * Angelo Lakra (github.com/alakra) 458 | * Thomas Adam (github.com/ThomasAdam) 459 | * David Alfonso (github.com/davidag) 460 | 461 | ## COPYRIGHT 462 | 463 | TS is Copyright (C) 2011 Simon Chiang 464 | -------------------------------------------------------------------------------- /bin/ts: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | ############################################################################ 3 | ts_progname="${0##*/}" 4 | ts_author="Simon Chiang" 5 | ts_version="2.0.4" 6 | ts_release_date="2020-05-21" 7 | ts_url="http://github.com/thinkerbot/ts" 8 | 9 | ts_usage () { 10 | if [ ts = "$ts_progname" ] 11 | then ts_argstr="[TEST_FILES...]" 12 | else ts_argstr="[TEST_NAMES...]" 13 | fi 14 | printf "\ 15 | usage: ts [-acdehmrsvw] [-t TS_TMP_DIR] %s 16 | 17 | Tests shell scripts. Options control the output format. Artifacts are 18 | written to the tmp dir and can be removed with the -r flag. To debug 19 | a failing test try running it with -e to see how it would look if you 20 | executed the script in a shell manually (after which you can check exit 21 | status as normal), or by using the -d flag to trace execution. 22 | 23 | ts test/script* # run multiple test scripts 24 | ./test/script # run a single test script 25 | ./test/script test_a # run a single test 26 | ./test/script -e test_a # execute a test as a script 27 | 28 | See \`man ts\` for writing test scripts. 29 | 30 | options: 31 | 32 | -a show all tests 33 | -c color output 34 | -d debugger output 35 | -e exec test 36 | -h prints this help 37 | -m monitor output 38 | -q quiet output (hide stderr) 39 | -r remove tmp dir (rm -r) 40 | -s stream output (implies -a) 41 | -t set the tmp dir (default tmp) 42 | -v verbose output (show stderr, default on) 43 | -w raw output 44 | 45 | version: %s - %s 46 | " "$ts_argstr" "$ts_version" "$ts_url" 47 | } 48 | 49 | OPTIND=1 50 | ts_args="" 51 | while [ $OPTIND -le $# ] 52 | do 53 | ts_optcur="$OPTIND" 54 | ts_option="-" 55 | getopts "acdehmqrst:vw" ts_option 56 | if [ "$ts_option" != "-" ] && [ "$ts_option" != "?" 
] 57 | then 58 | case "$ts_option" in 59 | (a) TS_FILTER=false ;; 60 | (c) TS_COLOR=true ;; 61 | (d) TS_DEBUG=true; TS_MODE=verbose ;; 62 | (e) TS_REPORT=execute ;; 63 | (h) ts_usage 64 | exit 0 ;; 65 | (m) TS_REPORT=monitor ;; 66 | (q) TS_MODE=quiet ;; 67 | (r) TS_REMOVE_TMP_DIR=true ;; 68 | (s) TS_REPORT=stream; TS_FILTER=false ;; 69 | (t) TS_TMP_DIR="$OPTARG" ;; 70 | (v) TS_MODE=verbose ;; 71 | (w) TS_REPORT=raw ;; 72 | (*) ts_usage | head -n 1 73 | exit 2 ;; 74 | esac 75 | else 76 | if [ "$(eval printf "%s" "\${$ts_optcur}")" = "--" ] 77 | then ts_optcur="$#" 78 | fi 79 | while [ $OPTIND -le $ts_optcur ] 80 | do 81 | ts_args="$ts_args \"\${$OPTIND}\"" 82 | OPTIND=$(($OPTIND + 1)) 83 | done 84 | fi 85 | done 86 | eval set -- $ts_args 87 | 88 | TS_USR_DIR="${TS_USR_DIR:-$(pwd)}" 89 | TS_TMP_DIR="${TS_TMP_DIR:-$TS_USR_DIR/tmp}" 90 | 91 | TS_COLOR=${TS_COLOR:-false} 92 | TS_DEBUG=${TS_DEBUG:-false} 93 | TS_FILTER=${TS_FILTER:-true} 94 | TS_REPORT=${TS_REPORT:-monitor} 95 | TS_MODE=${TS_MODE:-verbose} 96 | TS_REMOVE_TMP_DIR=${TS_REMOVE_TMP_DIR:-false} 97 | 98 | TS_PASS="${TS_PASS:-}" 99 | TS_FAIL="${TS_FAIL:-}" 100 | TS_NORM="${TS_NORM:-}" 101 | TS_SKIP="${TS_SKIP:-}" 102 | 103 | mkdir -p "$TS_TMP_DIR" 104 | ts_status_file="$TS_TMP_DIR/status" 105 | ts_monitor_file="$TS_TMP_DIR/monitor" 106 | ts_skip_file="$TS_TMP_DIR/skips" 107 | 108 | if [ execute = "$TS_REPORT" ] && [ ts = "$ts_progname" ] 109 | then 110 | printf -- "error: -e can only be used when executing test scripts directly\n" >&2 111 | exit 1 112 | fi 113 | ############################################################################ 114 | # private functions 115 | 116 | # On Ubuntu awk is actually mawk, which does not fflush output but will 117 | # unbuffer with an option. Detect when that option must be used here. 118 | # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=593504 119 | ts_guess_awk_opts () { 120 | if awk -Winteractive <&- >/dev/null 2>&1 121 | then printf -- "%s\n" -Winteractive 122 | fi 123 | } 124 | export ts_awk_opts="${ts_awk_opts:-$(ts_guess_awk_opts)}" 125 | 126 | # Creates a pattern for use by ts_list. Example: 127 | # 128 | # ts_pattern a b c # => "a|b|c" 129 | # 130 | ts_pattern () { 131 | if [ 0 -ne $# ] 132 | then 133 | printf "%s" "$1" 134 | shift 1 135 | 136 | if [ 0 -ne $# ] 137 | then printf "|%s" "$@" 138 | fi 139 | fi 140 | } 141 | 142 | # Prints all functions in a test file starting with 'test_' or the pattern 143 | # given by ts_test_pattern. Recurses into sourced files if TS_TESTS_IN_SOURCE 144 | # is set to true. 145 | ts_list () { 146 | ts_file="$1" 147 | shift 1 148 | 149 | if [ $# -eq 0 ] 150 | then 151 | grep -onE "^[[:space:]]*(${ts_test_pattern:-test_\w+})[[:space:]]*\(\)" /dev/null "$ts_file" | 152 | sed -e 's/^\([^:]*\):\([0-9]\{1,\}\):[[:space:]]*\([^ (]\{1,\}\).*/\3 \1:\2/' 153 | else 154 | ts_list "$@" | awk -v file="$ts_file" '{ $2=file " -> " $2; print }' 155 | fi 156 | } 157 | 158 | # Converts the ts raw format into the ts stream format. 
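#
# The raw input is a sequence of records, one per test (see ts_run_test_suite
# and ts_run_test_files):
#
#   [descr]         a line starting with '[' identifying the test
#   STATUS NBYTES   P (pass), F (fail), or S (skip), plus the output length
#   ...             NBYTES bytes of captured test output
#
# The stream output prints "STATUS [descr]", the captured output indented
# beneath it, and a blank line; pass/fail/skip counts are appended to the
# status file when the input ends.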
159 | ts_stream () { 160 | rm "$ts_status_file" 2>/dev/null 161 | awk $ts_awk_opts -v status_file="$ts_status_file" ' 162 | function readbytes (bytes) { 163 | # ignore current line 164 | $0="" 165 | 166 | # play a trick so bytes does not reset the var upon 167 | # getline (issue with alpine + gawk) 168 | bytes=bytes 169 | 170 | # read bytes 171 | while(getline == 1 && (bytes -= length($0) + 1) > 0) { 172 | print " " $0 173 | } 174 | 175 | if(length($0) > 0) { 176 | print " " substr($0, 1, bytes + length($0) + 1) 177 | $0="" 178 | } 179 | } 180 | function format (status, descr, bytes) { 181 | printf("%s %s\n", status, descr) 182 | readbytes(bytes) 183 | printf("\n") 184 | } 185 | BEGIN { npass=nskip=nfail=0 } 186 | /^\[/ { descr=$0 } 187 | /^P/ { format("P", descr, $2); fflush(); npass++; } 188 | /^F/ { format("F", descr, $2); fflush(); nfail++; } 189 | /^S/ { format("S", descr, $2); fflush(); nskip++; } 190 | END { printf("%d %d %d\n", npass, nfail, nskip) >> status_file } 191 | ' 192 | } 193 | 194 | # Converts the ts stream format into the monitor format. 195 | ts_monitor () { 196 | rm "$ts_monitor_file" 2> /dev/null 197 | touch "$ts_monitor_file" 198 | 199 | awk $ts_awk_opts -v "monitor_file=$ts_monitor_file" ' 200 | BEGIN { ORS = "" }; 201 | { print $0 "\n" >> monitor_file }; 202 | /^P/ { print "."; fflush() }; 203 | /^F/ { print "F"; fflush() }; 204 | /^S/ { print "-"; fflush() }; 205 | ' >&2 206 | printf "\n\n" >&2 207 | 208 | cat "$ts_monitor_file" 209 | } 210 | 211 | # Filters passing tests from the ts stream format. 212 | ts_filter () { 213 | if [ true = "$TS_FILTER" ] 214 | then awk $ts_awk_opts '/^[FS] /,/^$/ { print $0; fflush(); }' 215 | else cat 216 | fi 217 | } 218 | 219 | # Adds color to the ts stream format. 220 | ts_color () { 221 | if [ true = "$TS_COLOR" ] 222 | then awk $ts_awk_opts -v norm="$TS_NORM" -v pass="$TS_PASS" -v skip="$TS_SKIP" -v fail="$TS_FAIL" ' 223 | /^P / { sub("P [[]", "[" pass); sub("]", norm "]"); } 224 | /^F / { sub("F [[]", "[" fail); sub("]", norm "]"); } 225 | /^S / { sub("S [[]", "[" skip); sub("]", norm "]"); } 226 | { print $0; fflush(); }' 227 | else cat 228 | fi 229 | } 230 | 231 | # Converts ts stream to color (if specified) or to the report format. 232 | ts_format () { 233 | if [ true = "$TS_COLOR" ] 234 | then ts_color 235 | else awk $ts_awk_opts ' 236 | /^[PFS] / { status=$1; sub(". ", ""); } 237 | /^$/ { print status; } 238 | { print $0; fflush(); }' 239 | fi 240 | } 241 | 242 | # Prints the summary for the tests and returns with the correct status. 243 | ts_print_status () { 244 | awk -v nsec="$1" ' 245 | BEGIN { npass=nskip=nfail=0 } 246 | { npass += $1; nfail+= $2; nskip += $3; } 247 | END { printf("%d pass %d fail %d skip %d s\n", npass, nfail, nskip, nsec); if(nfail > 0) exit 1 } 248 | ' < "$ts_status_file" 249 | 250 | ts_exitstatus=$? 251 | 252 | if [ "true" = "$TS_REMOVE_TMP_DIR" ] 253 | then rm -r "$TS_TMP_DIR" 254 | fi 255 | 256 | return $ts_exitstatus 257 | } 258 | 259 | ts_report () { 260 | case "$TS_REPORT" in 261 | (execute) 262 | cat 263 | exit $(cat "$ts_status_file") 264 | ;; 265 | (monitor) 266 | ts_start_time=${SECONDS:-1} 267 | ts_stream | ts_monitor | ts_filter | ts_format 268 | ts_end_time=${SECONDS:-0} 269 | ts_print_status "$(($ts_end_time - $ts_start_time))" >&2 270 | ;; 271 | (stream) 272 | ts_stream | ts_filter | ts_color 273 | ts_print_status > /dev/null 274 | ;; 275 | (raw) 276 | cat 277 | ;; 278 | esac 279 | } 280 | 281 | # Runs a specific test. In that case the test output is not filtered or 282 | # changed. 
the exit status is the exit status of the specific test. 283 | ts_run_test () { 284 | if [ -e "$ts_test_dir" ] && ! rm -r "$ts_test_dir" 285 | then 286 | printf "could not remove existing test dir: %s\n" "$ts_test_dir" 287 | exit 1 288 | fi 289 | 290 | if [ -e "$ts_skip_file" ] && ! rm "$ts_skip_file" 291 | then 292 | printf "could not remove skip file: %s\n" "$ts_skip_file" 293 | exit 1 294 | fi 295 | 296 | # input /dev/null so that tests which read from stdin will not hang 297 | exec "$ts_status_file" 334 | else 335 | 336 | # run the test by calling back into the test file 337 | # * use stdout for progress, stderr for debug information 338 | # * a zero exit status is considered a pass, otherwise fail 339 | # * capture as a variable to calculate length 340 | # * use a subprocess to prevent leakage (ex set -x) 341 | if ts_stdout=$( 342 | if [ verbose = "$TS_MODE" ] 343 | then (ts_run_test 2>&1) 344 | else (ts_run_test 2>/dev/null) 345 | fi 346 | ) 347 | then 348 | if [ -e "$ts_skip_file" ] 349 | then ts_status=S 350 | else ts_status=P 351 | fi 352 | else 353 | ts_status=F 354 | fi 355 | 356 | printf "%s %d\n%s\n" "$ts_status" "${#ts_stdout}" "$ts_stdout" 357 | fi 358 | done | ts_report 359 | } 360 | 361 | ts_src_test_files () { 362 | shift 1 363 | for ts_test_file in "$@" 364 | do 365 | ts_prev="${ts_curr:-$0}" 366 | ts_curr="$ts_prev:$ts_test_file" 367 | ts_test_files="${ts_test_files:-$0} 368 | $ts_curr 369 | " 370 | . "$ts_test_file" 371 | ts_curr="$ts_prev" 372 | done 373 | } 374 | 375 | ts_run_test_files () { 376 | export TS_USR_DIR 377 | export TS_TMP_DIR 378 | 379 | export TS_COLOR 380 | export TS_FILTER 381 | export TS_MODE 382 | export TS_DEBUG 383 | 384 | for ts_test_file in "$@" 385 | do 386 | if [ -f "$ts_test_file" ] 387 | then 388 | if [ -x "$ts_test_file" ] 389 | then 390 | if [ x"${ts_test_file}" = x"${ts_test_file#*/}" ] 391 | then ./"$ts_test_file" -w 392 | else "$ts_test_file" -w 393 | fi 394 | else printf "[%s] not executable\nS 0\n\n" "$ts_test_file" 395 | fi 396 | elif [ x- = x"$ts_test_file" ] 397 | then cat 398 | else printf "[%s] not a file\nS 0\n\n" "$ts_test_file" 399 | fi 400 | done | ts_report 401 | } 402 | 403 | ts_function_exists () { 404 | [ x"$(command -v "$1" 2>/dev/null)" = x"$1" ] 405 | } 406 | 407 | ############################################################################ 408 | # public functions 409 | 410 | if ! ts_function_exists setup 411 | then 412 | setup () { 413 | true 414 | } 415 | fi 416 | 417 | if ! ts_function_exists teardown 418 | then 419 | teardown () { 420 | true 421 | } 422 | fi 423 | 424 | # Flunks unless the numbers $1 (expected) and $2 (actual) are the same. 425 | assert_status () { 426 | ts_expected=$1; ts_actual=$2 427 | 428 | if ! [ $ts_actual -eq $ts_expected ] 429 | then 430 | shift 2 431 | if [ $# -ne 0 ] 432 | then ts_msg=" ($*)" 433 | fi 434 | printf "expected status %s but was %s%s\n" "$ts_expected" "$ts_actual" "${ts_msg:-}" 435 | exit 1 436 | fi 437 | } 438 | 439 | # Flunks unless the variables $1 (expected) and $2 (actual) are the same. 440 | # Reads from stdin for '-'. '-' is assumed if $2 is not specified. 
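#
# Typical usage, as shown in the EXAMPLES section of the README:
#
#   assert_output "hello world" "$out"                    # compare two values
#   printf "hello world" | assert_output "hello world"    # read ACTUAL from stdin
#
# Note this assertion returns (rather than exits) on failure, so use it as the
# last command of a test or follow it with assert_status.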
441 | assert_output () { 442 | ts_expected="$1"; ts_actual="${2:--}" 443 | 444 | if [ x- = x"$ts_expected" ] 445 | then 446 | ts_expected=$(cat; printf x) 447 | ts_expected=${ts_expected%x} 448 | fi 449 | 450 | if [ x- = x"$ts_actual" ] 451 | then 452 | ts_actual=$(cat; printf x) 453 | ts_actual=${ts_actual%x} 454 | fi 455 | 456 | if [ x"$ts_actual" != x"$ts_expected" ] 457 | then 458 | mkdir -p "$(dirname "$ts_test_dir")" 459 | printf "%s" "$ts_expected" > "$ts_test_dir.e.txt" 460 | printf "%s" "$ts_actual" > "$ts_test_dir.a.txt" 461 | printf "unequal output:\n" 462 | 463 | ${TS_DIFF:-diff} "$ts_test_dir.e.txt" "$ts_test_dir.a.txt" 464 | 465 | return 1 466 | fi 467 | } 468 | 469 | skip () { 470 | ts_reason="${1:-no reason given}" 471 | 472 | touch "$ts_skip_file" 473 | printf "%s\n" "$ts_reason" 474 | 475 | exit 0 476 | } 477 | 478 | ############################################################################ 479 | 480 | # Run the test files if this script is executed directly. 481 | if [ ts = "$ts_progname" ] 482 | then ts_run_test_files "$@" 483 | elif [ "." = "${1:-}" ] 484 | then ts_src_test_files "$@" 485 | else ts_run_test_suite "$@" 486 | fi 487 | -------------------------------------------------------------------------------- /dockerfiles/alpine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:latest 2 | 3 | # needed to get ts output working 4 | RUN apk add --no-cache diffutils gawk 5 | 6 | # add more shells for development/testing 7 | RUN apk add --no-cache bash zsh 8 | 9 | WORKDIR /app 10 | COPY . /app 11 | -------------------------------------------------------------------------------- /dockerfiles/centos/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:latest 2 | RUN yum install -y bash zsh ksh 3 | WORKDIR /app 4 | COPY . /app 5 | -------------------------------------------------------------------------------- /dockerfiles/debian/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:latest 2 | RUN apt-get update && \ 3 | apt-get install -y bash zsh ksh 4 | WORKDIR /app 5 | COPY . /app 6 | -------------------------------------------------------------------------------- /dockerfiles/fedora/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fedora:latest 2 | RUN dnf install -y bash zsh ksh 3 | WORKDIR /app 4 | COPY . /app 5 | -------------------------------------------------------------------------------- /dockerfiles/opensuse/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM opensuse/leap:latest 2 | RUN zypper install -y bash zsh ksh 3 | WORKDIR /app 4 | COPY . /app 5 | -------------------------------------------------------------------------------- /dockerfiles/shell/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch-slim 2 | RUN apt-get update && \ 3 | apt-get install -y bash zsh ksh ruby ruby-dev git build-essential && \ 4 | git clone git://github.com/thinkerbot/ronn.git /tmp/ronn && \ 5 | cd /tmp/ronn && \ 6 | rake install 7 | WORKDIR /app 8 | COPY . 
/app 9 | -------------------------------------------------------------------------------- /dockerfiles/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | RUN apt-get update && \ 3 | apt-get install -y bash zsh ksh 4 | WORKDIR /app 5 | COPY . /app 6 | -------------------------------------------------------------------------------- /man/man1/ts.1: -------------------------------------------------------------------------------- 1 | .\" generated with Ronn/v0.7.3 2 | .\" http://github.com/rtomayko/ronn/tree/0.7.3 3 | . 4 | .TH "TS" "1" "May 2020" "2.0.4" "" 5 | . 6 | .SH "NAME" 7 | \fBts\fR \- test script 8 | . 9 | .SH "SYNOPSIS" 10 | \fBts\fR \fIoptions\fR TEST_SCRIPT\.\.\. 11 | . 12 | .P 13 | \fB[\./test_script]\fR \fIoptions\fR TESTS\.\.\. 14 | . 15 | .SH "DESCRIPTION" 16 | \fBts\fR provides functions for writing tests in shell\. The test scripts can be run individually or in a batch format using \fBts\fR as a command\. 17 | . 18 | .P 19 | \fBts\fR makes a test directory available on a per\-test basis so it\'s easy to sandbox tests that write or manipulate files\. \fBts\fR tries to use POSIX exclusively and so should (hopefully) work with any POSIX\-compliant shell\. 20 | . 21 | .SH "TEST SCRIPTS" 22 | The \fBts\fR command expects script files that define test cases\. Test scripts have the following form: 23 | . 24 | .IP "" 4 25 | . 26 | .nf 27 | 28 | [\./example] 29 | #!/bin/sh 30 | # pick a shell, any (POSIX) shell 31 | 32 | setup () { # optional setup 33 | mkdir \-p "$ts_test_dir" 34 | } 35 | 36 | teardown () { # optional teardown 37 | rm \-r "$ts_test_dir" 38 | } 39 | 40 | test_true () { # write tests named like "test_" 41 | true # return 0 to pass\. 42 | } 43 | 44 | \[char46] ts # source ts to run the tests 45 | . 46 | .fi 47 | . 48 | .IP "" 0 49 | . 50 | .P 51 | To run, use any of: 52 | . 53 | .IP "" 4 54 | . 55 | .nf 56 | 57 | ts example # run multiple test scripts 58 | \[char46]/example # run a single test script 59 | \[char46]/example test_a_thing # run a single test 60 | . 61 | .fi 62 | . 63 | .IP "" 0 64 | . 65 | .P 66 | To debug, try using \-e to execute the test function in isolation\. 67 | . 68 | .IP "" 4 69 | . 70 | .nf 71 | 72 | \[char46]/example \-e test_a_thing # a most useful pattern 73 | . 74 | .fi 75 | . 76 | .IP "" 0 77 | . 78 | .P 79 | See the FUNCTIONS, EXAMPLES, and TROUBLESHOOT sections for more details\. 80 | . 81 | .SH "OPTIONS" 82 | These options control how \fBts\fR operates\. 83 | . 84 | .TP 85 | \fB\-a\fR 86 | Show passing outputs, which are normally filtered\. 87 | . 88 | .TP 89 | \fB\-c\fR 90 | Colorize output\. (green/red/yellow \- pass/fail/not\-executable) 91 | . 92 | .TP 93 | \fB\-d\fR 94 | Debug mode\. Turns on xtrace (set \-x) for the tests and enables \-v\. 95 | . 96 | .TP 97 | \fB\-e\fR 98 | Exec mode\. Runs a test without processing the output and exits\. 99 | . 100 | .TP 101 | \fB\-h\fR 102 | Prints help\. 103 | . 104 | .TP 105 | \fB\-m\fR 106 | Monitor output\. Provide a ticker indicating the progress of tests and print a summary\. Monitor is the default\. 107 | . 108 | .TP 109 | \fB\-q\fR 110 | Quiet output\. Shows only stdout, hiding stderr\. 111 | . 112 | .TP 113 | \fB\-r\fR 114 | Remove the tmp dir on complete\. Removal is done using \fBrm \-r\fR\. 115 | . 116 | .TP 117 | \fB\-s\fR 118 | Stream output\. Show test progress as it happens\. No summary is printed\. 119 | . 120 | .TP 121 | \fB\-t\fR 122 | Set the test tmp dir (default tmp)\. 
The test\-specific directories are be located under this directory\. 123 | . 124 | .TP 125 | \fB\-v\fR 126 | Verbose output\. Display both stdout and stderr for the tests (enabled by default)\. 127 | . 128 | .SH "FUNCTIONS" 129 | Functions provided by \fBts\fR\. 130 | . 131 | .TP 132 | \fBsetup\fR: 133 | . 134 | .IP 135 | A setup function run before each test\. 136 | . 137 | .TP 138 | \fBteardown\fR: 139 | . 140 | .IP 141 | A teardown function run after each test\. 142 | . 143 | .IP 144 | \fBts\fR ensures teardown runs by setting a trap for EXIT signals during setup and the actual test\. As a result, EXIT traps in tests can prevent teardown\. 145 | . 146 | .TP 147 | \fBassert_status EXPECTED ACTUAL [MESSAGE]\fR: 148 | . 149 | .IP 150 | Exit 1 unless the numbers EXPECTED and ACTUAL are the same\. Use this to make assertions in the middle of a test\. 151 | . 152 | .TP 153 | \fBassert_output EXPECTED ACTUAL\fR: 154 | . 155 | .IP 156 | Return 1 unless the variables EXPECTED and ACTUAL are the same\. Reads from stdin for \'\-\'\. Also reads ACTUAL from stdin if ACTUAL is unspecified\. 157 | . 158 | .IP 159 | Using assert_output in a pipeline is often convenient but remember this assertion only returns, it does not exit\. As a result you should either use it as the very last command in a test, or follow it with assert_status in a multipart test\. See the section on my \'tests aren\'t failing\' for more\. 160 | . 161 | .TP 162 | \fBskip [MESSAGE]\fR: 163 | . 164 | .IP 165 | Skip a test\. Exits 0 but counts as a skip and not a pass\. 166 | . 167 | .P 168 | \fBts\fR reserves all function names starting with \'ts_\' for internal use\. Note that \fBsetup\fR and \fBteardown\fR commands on PATH will be ignored because tests will shadow them with the corresponding \fBts\fR functions\. 169 | . 170 | .SH "VARIABLES" 171 | Variables provided by \fBts\fR at runtime\. Feel free to use any of them but treat them as read\-only\. 172 | . 173 | .TP 174 | \fBts_test_file\fR 175 | The name of the current test script being run\. 176 | . 177 | .TP 178 | \fBts_test_case\fR 179 | The basename of the test file, minus the extname\. 180 | . 181 | .TP 182 | \fBts_test_lineno\fR 183 | The line number where the current test is defined\. 184 | . 185 | .TP 186 | \fBts_test_name\fR 187 | The name of the current test\. 188 | . 189 | .TP 190 | \fBts_test_dir\fR 191 | The test\-specific directory\. 192 | . 193 | .IP 194 | The test dir is \'tmp_dir/test_case\'\. \fBts\fR does not create this directory automatically\. Add that functionality in the setup function as needed\. 195 | . 196 | .P 197 | \fBts\fR reserves all variables starting with \'ts_\' for internal use\. 198 | . 199 | .SH "ENVIRONMENT" 200 | The behavior of \fBts\fR can be modified via environment variables\. Many of these may be set using options\. 201 | . 202 | .TP 203 | \fBTS_USR_DIR\fR (pwd) 204 | The user dir\. Used to determine the ts tmp dir\. 205 | . 206 | .TP 207 | \fBTS_TMP_DIR\fR ($TS_USR_DIR/tmp) 208 | The base tmp dir\. 209 | . 210 | .TP 211 | \fBTS_COLOR\fR (false) 212 | Set to "true" to enable color\. 213 | . 214 | .TP 215 | \fBTS_DIFF\fR (diff) 216 | The diff command used by assert_output\. 217 | . 218 | .TP 219 | \fBTS_DEBUG\fR (false) 220 | Set to "true" to enable debug mode\. 221 | . 222 | .TP 223 | \fBTS_REMOVE_TMP_DIR\fR (false) 224 | Set to "true" to remove tmp dir\. 225 | . 226 | .P 227 | In addition these variables adjust the color output\. 228 | . 229 | .TP 230 | \fBTS_PASS\fR (green) 231 | Passing tests\. 232 | . 
233 | .TP 234 | \fBTS_FAIL\fR (red) 235 | Failing tests\. 236 | . 237 | .TP 238 | \fBTS_SKIP\fR (yellow) 239 | Skipped tests\. 240 | . 241 | .TP 242 | \fBTS_NORM\fR (normal) 243 | The normal output color\. 244 | . 245 | .P 246 | For example to turn failures blue: 247 | . 248 | .IP "" 4 249 | . 250 | .nf 251 | 252 | export TS_FAIL=$(printf "%b" "\e033[0;34m") 253 | . 254 | .fi 255 | . 256 | .IP "" 0 257 | . 258 | .P 259 | \fBts\fR reserves all variables starting with \'TS_\' for internal use\. 260 | . 261 | .SH "EXAMPLES" 262 | Basic usage: 263 | . 264 | .IP "" 4 265 | . 266 | .nf 267 | 268 | [\./example] 269 | #!/bin/sh 270 | 271 | test_arbitrary_function () { 272 | echo abc | grep \-q b 273 | } 274 | 275 | test_assert_status () { 276 | false 277 | assert_status 1 $? 278 | } 279 | 280 | test_assert_output_style_one () { 281 | out=$(printf "hello world") 282 | assert_output "hello world" "$out" 283 | } 284 | 285 | test_assert_output_style_two () { 286 | printf "hello world" | assert_output "hello world" 287 | } 288 | 289 | test_assert_output_style_three () { 290 | printf "hello world\en" | assert_output "\e 291 | hello world 292 | " 293 | } 294 | 295 | test_skip_test () { 296 | skip "skipping this one" 297 | false 298 | } 299 | 300 | \[char46] ts 301 | . 302 | .fi 303 | . 304 | .IP "" 0 305 | . 306 | .P 307 | Run like: 308 | . 309 | .IP "" 4 310 | . 311 | .nf 312 | 313 | chmod +x example 314 | ts example 315 | . 316 | .fi 317 | . 318 | .IP "" 0 319 | . 320 | .P 321 | Shared examples: 322 | . 323 | .IP "" 4 324 | . 325 | .nf 326 | 327 | [\./common_tests] 328 | test_it_should_pick_lines_with_abc () { 329 | printf "%s\en" "1 abc" "2 xyz" "3 abc" | 330 | ${picker} | assert_output "\e 331 | 1 abc 332 | 3 abc 333 | " 334 | } 335 | 336 | [\./test_grep_abc] 337 | #!/bin/sh 338 | picker="grep abc" 339 | \[char46] ts \. \./common_tests 340 | \[char46] ts 341 | 342 | [\./test_sed_abc] 343 | #!/bin/sh 344 | picker="sed \-ne /abc/p" 345 | \[char46] ts \. \./common_tests 346 | \[char46] ts 347 | . 348 | .fi 349 | . 350 | .IP "" 0 351 | . 352 | .P 353 | Run like: 354 | . 355 | .IP "" 4 356 | . 357 | .nf 358 | 359 | chmod +x test_grep_abc test_sed_abc 360 | ts test_grep_abc test_sed_abc 361 | . 362 | .fi 363 | . 364 | .IP "" 0 365 | . 366 | .P 367 | Background jobs work fine, just be sure to cleanup: 368 | . 369 | .IP "" 4 370 | . 371 | .nf 372 | 373 | [\./background] 374 | #!/bin/sh 375 | 376 | teardown () { 377 | jobs \-p | xargs kill \-9 378 | true 379 | } 380 | 381 | test_background_job () { 382 | sleep 3 & 383 | true 384 | } 385 | 386 | \[char46] ts 387 | . 388 | .fi 389 | . 390 | .IP "" 0 391 | . 392 | .SH "TROUBLESHOOT" 393 | \fBMy tests aren\'t running\fR 394 | . 395 | .P 396 | Be sure you added \fB\. ts\fR at the end of your script\. 397 | . 398 | .P 399 | \fBMy tests are failing\fR 400 | . 401 | .P 402 | \fB1)\fR Are you incrementing a variable in a loop in a pipeline? 403 | . 404 | .P 405 | See http://mywiki\.wooledge\.org/BashFAQ/024\. 406 | . 407 | .P 408 | \fB2)\fR Is a newline missing from a variable? 409 | . 410 | .P 411 | Subshells chomp the last newline off of a command\. 412 | . 413 | .IP "" 4 414 | . 415 | .nf 416 | 417 | test_newline_is_missing_so_this_fails () { 418 | out=$(echo abc) 419 | 420 | assert_output "\e 421 | abc 422 | " "$out" 423 | } 424 | . 425 | .fi 426 | . 427 | .IP "" 0 428 | . 429 | .P 430 | One way around this is to print a sacrificial non\-newline character\. 431 | . 432 | .IP "" 4 433 | . 
434 | .nf 435 | 436 | test_newline_is_now_accounted_for () { 437 | out=$(echo abc; printf x) 438 | 439 | assert_output "\e 440 | abc 441 | " "${out%x}" 442 | } 443 | . 444 | .fi 445 | . 446 | .IP "" 0 447 | . 448 | .P 449 | Another way is to pipe into assert_output\. 450 | . 451 | .IP "" 4 452 | . 453 | .nf 454 | 455 | test_another_newline_strategy () { 456 | echo abc | assert_output "\e 457 | abc 458 | " 459 | } 460 | . 461 | .fi 462 | . 463 | .IP "" 0 464 | . 465 | .P 466 | \fBMy tests aren\'t failing\fR 467 | . 468 | .P 469 | \fB1)\fR Are you using assert_output in a pipeline? 470 | . 471 | .P 472 | \fBts\fR assert methods return failure (rather than exit) so this will pass\. 473 | . 474 | .IP "" 4 475 | . 476 | .nf 477 | 478 | test_multiple_asserts_not_failing_as_intended () { 479 | assert_output "1" "0" 480 | assert_output "0" "0" 481 | } 482 | . 483 | .fi 484 | . 485 | .IP "" 0 486 | . 487 | .P 488 | The reason is that exit within a pipeline has shell\-specific behavior\. For instance if you run this with different values of shell you will get 0 for bash and dash, and 1 for zsh and ksh\. 489 | . 490 | .IP "" 4 491 | . 492 | .nf 493 | 494 | $shell < 660 | . 661 | .fi 662 | . 663 | .IP "" 0 664 | . 665 | .P 666 | Report bugs here: http://github\.com/thinkerbot/ts/issues\. 667 | . 668 | .SH "CONTRIBUTORS" 669 | Thanks for the help! 670 | . 671 | .IP "\(bu" 4 672 | Angelo Lakra (github\.com/alakra) 673 | . 674 | .IP "\(bu" 4 675 | Thomas Adam (github\.com/ThomasAdam) 676 | . 677 | .IP "\(bu" 4 678 | David Alfonso (https://github\.com/davidag) 679 | . 680 | .IP "" 0 681 | . 682 | .SH "COPYRIGHT" 683 | TS is Copyright (C) 2011 Simon Chiang \fIhttp://github\.com/thinkerbot\fR 684 | -------------------------------------------------------------------------------- /test/benchmark: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | test_0 () { 3 | true 4 | } 5 | test_1 () { 6 | true 7 | } 8 | test_2 () { 9 | true 10 | } 11 | test_3 () { 12 | true 13 | } 14 | test_4 () { 15 | true 16 | } 17 | test_5 () { 18 | true 19 | } 20 | test_6 () { 21 | true 22 | } 23 | test_7 () { 24 | true 25 | } 26 | test_8 () { 27 | true 28 | } 29 | test_9 () { 30 | true 31 | } 32 | test_10 () { 33 | true 34 | } 35 | test_11 () { 36 | true 37 | } 38 | test_12 () { 39 | true 40 | } 41 | test_13 () { 42 | true 43 | } 44 | test_14 () { 45 | true 46 | } 47 | test_15 () { 48 | true 49 | } 50 | test_16 () { 51 | true 52 | } 53 | test_17 () { 54 | true 55 | } 56 | test_18 () { 57 | true 58 | } 59 | test_19 () { 60 | true 61 | } 62 | test_20 () { 63 | true 64 | } 65 | test_21 () { 66 | true 67 | } 68 | test_22 () { 69 | true 70 | } 71 | test_23 () { 72 | true 73 | } 74 | test_24 () { 75 | true 76 | } 77 | test_25 () { 78 | true 79 | } 80 | test_26 () { 81 | true 82 | } 83 | test_27 () { 84 | true 85 | } 86 | test_28 () { 87 | true 88 | } 89 | test_29 () { 90 | true 91 | } 92 | test_30 () { 93 | true 94 | } 95 | test_31 () { 96 | true 97 | } 98 | test_32 () { 99 | true 100 | } 101 | test_33 () { 102 | true 103 | } 104 | test_34 () { 105 | true 106 | } 107 | test_35 () { 108 | true 109 | } 110 | test_36 () { 111 | true 112 | } 113 | test_37 () { 114 | true 115 | } 116 | test_38 () { 117 | true 118 | } 119 | test_39 () { 120 | true 121 | } 122 | test_40 () { 123 | true 124 | } 125 | test_41 () { 126 | true 127 | } 128 | test_42 () { 129 | true 130 | } 131 | test_43 () { 132 | true 133 | } 134 | test_44 () { 135 | true 136 | } 137 | test_45 () { 138 | true 139 | } 140 | test_46 () { 
141 | true 142 | } 143 | test_47 () { 144 | true 145 | } 146 | test_48 () { 147 | true 148 | } 149 | test_49 () { 150 | true 151 | } 152 | test_50 () { 153 | true 154 | } 155 | test_51 () { 156 | true 157 | } 158 | test_52 () { 159 | true 160 | } 161 | test_53 () { 162 | true 163 | } 164 | test_54 () { 165 | true 166 | } 167 | test_55 () { 168 | true 169 | } 170 | test_56 () { 171 | true 172 | } 173 | test_57 () { 174 | true 175 | } 176 | test_58 () { 177 | true 178 | } 179 | test_59 () { 180 | true 181 | } 182 | test_60 () { 183 | true 184 | } 185 | test_61 () { 186 | true 187 | } 188 | test_62 () { 189 | true 190 | } 191 | test_63 () { 192 | true 193 | } 194 | test_64 () { 195 | true 196 | } 197 | test_65 () { 198 | true 199 | } 200 | test_66 () { 201 | true 202 | } 203 | test_67 () { 204 | true 205 | } 206 | test_68 () { 207 | true 208 | } 209 | test_69 () { 210 | true 211 | } 212 | test_70 () { 213 | true 214 | } 215 | test_71 () { 216 | true 217 | } 218 | test_72 () { 219 | true 220 | } 221 | test_73 () { 222 | true 223 | } 224 | test_74 () { 225 | true 226 | } 227 | test_75 () { 228 | true 229 | } 230 | test_76 () { 231 | true 232 | } 233 | test_77 () { 234 | true 235 | } 236 | test_78 () { 237 | true 238 | } 239 | test_79 () { 240 | true 241 | } 242 | test_80 () { 243 | true 244 | } 245 | test_81 () { 246 | true 247 | } 248 | test_82 () { 249 | true 250 | } 251 | test_83 () { 252 | true 253 | } 254 | test_84 () { 255 | true 256 | } 257 | test_85 () { 258 | true 259 | } 260 | test_86 () { 261 | true 262 | } 263 | test_87 () { 264 | true 265 | } 266 | test_88 () { 267 | true 268 | } 269 | test_89 () { 270 | true 271 | } 272 | test_90 () { 273 | true 274 | } 275 | test_91 () { 276 | true 277 | } 278 | test_92 () { 279 | true 280 | } 281 | test_93 () { 282 | true 283 | } 284 | test_94 () { 285 | true 286 | } 287 | test_95 () { 288 | true 289 | } 290 | test_96 () { 291 | true 292 | } 293 | test_97 () { 294 | true 295 | } 296 | test_98 () { 297 | true 298 | } 299 | test_99 () { 300 | true 301 | } 302 | . test/helper 303 | . ts 304 | -------------------------------------------------------------------------------- /test/examples/abc: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_a () { 4 | printf "a" 5 | printf "A" >&2 6 | true 7 | } 8 | 9 | test_b () { 10 | printf "b" 11 | printf "B" >&2 12 | true 13 | } 14 | 15 | test_c () { 16 | printf "c" 17 | printf "C" >&2 18 | false 19 | } 20 | 21 | test_s () { 22 | skip "s" 23 | false 24 | } 25 | 26 | . test/helper 27 | . ts 28 | -------------------------------------------------------------------------------- /test/examples/change_dirs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | 4 | # PASS 5 | test_a () { 6 | cd "$TS_USR_DIR/bin" 7 | assert_output "$TS_USR_DIR/bin" "$(pwd)" 8 | } 9 | 10 | # PASS 11 | test_b () { 12 | assert_output "$TS_USR_DIR" "$(pwd)" 13 | } 14 | 15 | . ts 16 | -------------------------------------------------------------------------------- /test/examples/exit_fail_in_setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FAIL 4 | # in setup 5 | # in teardown 6 | # 7 | . test/helper 8 | 9 | setup () { 10 | printf "in setup\n" 11 | exit 1 12 | } 13 | 14 | teardown () { 15 | printf "in teardown\n" 16 | } 17 | 18 | test_exit_fail_in_setup () { 19 | printf "in test\n" 20 | } 21 | 22 | . 
ts 23 | -------------------------------------------------------------------------------- /test/examples/exit_fail_in_teardown: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FAIL 4 | # in setup 5 | # in test 6 | # in teardown 7 | # 8 | . test/helper 9 | 10 | setup () { 11 | printf "in setup\n" 12 | } 13 | 14 | teardown () { 15 | printf "in teardown\n" 16 | exit 1 17 | } 18 | 19 | test_exit_fail_in_teardown () { 20 | printf "in test\n" 21 | } 22 | 23 | . ts 24 | -------------------------------------------------------------------------------- /test/examples/exit_fail_in_test: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FAIL 4 | # in setup 5 | # in test 6 | # in teardown 7 | # 8 | . test/helper 9 | 10 | setup () { 11 | printf "in setup\n" 12 | } 13 | 14 | teardown () { 15 | printf "in teardown\n" 16 | } 17 | 18 | test_exit_fail_in_test () { 19 | printf "in test\n" 20 | exit 1 21 | } 22 | 23 | . ts 24 | -------------------------------------------------------------------------------- /test/examples/exit_fail_in_test_after_untrap: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FAIL 4 | # in setup 5 | # in test 6 | # 7 | . test/helper 8 | 9 | setup () { 10 | printf "in setup\n" 11 | } 12 | 13 | teardown () { 14 | printf "in teardown\n" 15 | } 16 | 17 | test_exit_fail_in_test_after_untrap () { 18 | printf "in test\n" 19 | trap - EXIT 20 | exit 1 21 | } 22 | 23 | . ts 24 | -------------------------------------------------------------------------------- /test/examples/fail: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | 4 | # FAIL 5 | test_exit_fail () { 6 | exit 8 7 | } 8 | 9 | # FAIL 10 | test_return_fail () { 11 | false 12 | } 13 | 14 | # FAIL 15 | # expected status 0 but was 1 16 | test_assert_status_fail () { 17 | assert_status 0 1 18 | } 19 | 20 | # FAIL 21 | # expected status 0 but was 1 (message) 22 | test_assert_status_fail_with_message () { 23 | assert_status 0 1 "message" 24 | } 25 | 26 | # FAIL 27 | # unequal output: 28 | # 1c1 29 | # < hello world 30 | # \ No newline at end of file 31 | # --- 32 | # > hell0 world 33 | # \ No newline at end of file 34 | test_assert_output_fail () { 35 | assert_output "hello world" "hell0 world" 36 | } 37 | 38 | # FAIL 39 | # unequal output: 40 | # 1c1 41 | # < hello world 42 | # \ No newline at end of file 43 | # --- 44 | # > hell0 world 45 | # \ No newline at end of file 46 | test_assert_output_expected_from_stdin_fail () { 47 | printf "hello world" | assert_output - "hell0 world" 48 | } 49 | 50 | # FAIL 51 | # unequal output: 52 | # 1c1 53 | # < hello world 54 | # \ No newline at end of file 55 | # --- 56 | # > hello w0rld 57 | # \ No newline at end of file 58 | test_assert_output_actual_from_stdin_fail () { 59 | printf "hello w0rld" | assert_output "hello world" - 60 | } 61 | 62 | # FAIL 63 | # unequal output: 64 | # 1c1 65 | # < hello world 66 | # \ No newline at end of file 67 | # --- 68 | # > hello w0rld 69 | # \ No newline at end of file 70 | test_assert_output_with_implicit_stdin_fail () { 71 | printf "hello w0rld" | assert_output "hello world" 72 | } 73 | 74 | # FAIL 75 | # expected status 0 but was 1 76 | test_multiple_asserts_fail_early () { 77 | assert_status 0 1 78 | assert_output "0" "0" 79 | } 80 | 81 | # FAIL 82 | # unequal output: 83 | # 1c1 84 | # < 0 85 | # \ No newline at end of file 86 
| # --- 87 | # > 1 88 | # \ No newline at end of file 89 | test_multiple_asserts_fail_late () { 90 | assert_status 0 0 91 | assert_output "0" "1" 92 | } 93 | 94 | . ts 95 | -------------------------------------------------------------------------------- /test/examples/include: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | test_include () { 4 | true 5 | } 6 | 7 | . ts . "test/examples/include a" 8 | . ts . test/examples/include\ b 9 | . ts 10 | -------------------------------------------------------------------------------- /test/examples/include a: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_include_a () { 4 | true 5 | } 6 | -------------------------------------------------------------------------------- /test/examples/include b: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_include_b () { 4 | true 5 | } 6 | 7 | . ts . test/examples/include_c 8 | -------------------------------------------------------------------------------- /test/examples/include_c: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_include_c () { 4 | true 5 | } 6 | -------------------------------------------------------------------------------- /test/examples/no_tests: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . ts 4 | -------------------------------------------------------------------------------- /test/examples/options: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | 4 | parse_options () { 5 | OPTIND=1 6 | args="" 7 | while [ $OPTIND -le $# ] 8 | do 9 | OPTCUR="$OPTIND" 10 | OPTNAME="-" 11 | getopts "xy:" OPTNAME 12 | if [ "$OPTNAME" != "-" ] && [ "$OPTNAME" != "?" ] 13 | then 14 | case "$OPTNAME" in 15 | (x) printf "x." ;; 16 | (y) printf "y.%s." "$OPTARG" ;; 17 | (*) exit 1 18 | esac 19 | else 20 | if [ "$(eval printf "%s" "\${$OPTCUR}")" = "--" ] 21 | then OPTCUR="$#" 22 | fi 23 | while [ $OPTIND -le $OPTCUR ] 24 | do 25 | args="$args \"\${$OPTIND}\"" 26 | OPTIND=$(($OPTIND + 1)) 27 | done 28 | fi 29 | done 30 | eval set -- $args 31 | unset OPTNAME OPTCUR 32 | 33 | printf "%s." "$@" 34 | printf "\n" 35 | } 36 | 37 | # 38 | # argument handling 39 | # 40 | 41 | # PASS 42 | test_options_without_options () { 43 | parse_options a b c | assert_output "\ 44 | a.b.c. 45 | " 46 | } 47 | 48 | # PASS 49 | test_options_with_args_with_sensitive_chars () { 50 | parse_options "a b c" "x;y;z" | assert_output "\ 51 | a b c.x;y;z. 52 | " 53 | } 54 | 55 | # 56 | # option location 57 | # 58 | 59 | # PASS 60 | test_options_with_leading_options () { 61 | parse_options -x -y z a b c | assert_output "\ 62 | x.y.z.a.b.c. 63 | " 64 | } 65 | 66 | # PASS 67 | test_options_with_leading_options_yx () { 68 | parse_options -y z -x a b c | assert_output "\ 69 | y.z.x.a.b.c. 70 | " 71 | } 72 | 73 | # PASS 74 | test_options_with_trailing_options () { 75 | parse_options a b c -x -y z | assert_output "\ 76 | x.y.z.a.b.c. 77 | " 78 | } 79 | 80 | # PASS 81 | test_options_with_trailing_options_yx () { 82 | parse_options a b c -y z -x | assert_output "\ 83 | y.z.x.a.b.c. 84 | " 85 | } 86 | 87 | # PASS 88 | test_options_with_inside_options () { 89 | parse_options a -x b -y z c| assert_output "\ 90 | x.y.z.a.b.c. 
91 | " 92 | } 93 | 94 | # PASS 95 | test_options_with_inside_options_yx () { 96 | parse_options a -y z b -x c | assert_output "\ 97 | y.z.x.a.b.c. 98 | " 99 | } 100 | 101 | # 102 | # option values 103 | # 104 | 105 | # PASS 106 | test_options_with_option_values () { 107 | parse_options a -y -z b c | assert_output "\ 108 | y.-z.a.b.c. 109 | " 110 | } 111 | 112 | # PASS 113 | test_options_with_whitespace_values () { 114 | parse_options a -y 'z z' b c | assert_output "\ 115 | y.z z.a.b.c. 116 | " 117 | } 118 | 119 | # PASS 120 | test_options_with_sensitive_values () { 121 | parse_options a -y 'z;z' b c | assert_output "\ 122 | y.z;z.a.b.c. 123 | " 124 | } 125 | 126 | # 127 | # special case 128 | # 129 | 130 | # PASS 131 | test_options_with_multiple_calls () { 132 | parse_options a b c >/dev/null 133 | parse_options d e f | assert_output "\ 134 | d.e.f. 135 | " 136 | } 137 | 138 | # PASS 139 | test_options_with_option_break () { 140 | parse_options a -x b -- c -y z | assert_output "\ 141 | x.a.b.c.-y.z. 142 | " 143 | } 144 | 145 | # PASS 146 | test_options_with_option_break_yx () { 147 | parse_options a -y z b -- c -x | assert_output "\ 148 | y.z.a.b.c.-x. 149 | " 150 | } 151 | 152 | . ts 153 | -------------------------------------------------------------------------------- /test/examples/pass: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | 4 | # PASS 5 | test_exit_pass () { 6 | exit 7 | } 8 | 9 | # PASS 10 | test_return_pass () { 11 | true 12 | } 13 | 14 | # PASS 15 | test_assert_status_pass () { 16 | assert_status 0 0 17 | } 18 | 19 | # PASS 20 | test_assert_status_pass_with_message () { 21 | assert_status 0 0 "message" 22 | } 23 | 24 | # PASS 25 | test_assert_output_pass () { 26 | assert_output "hello world" "hello world" 27 | } 28 | 29 | # PASS 30 | test_assert_output_from_stdin_pass () { 31 | printf "hello world" | assert_output - "hello world" 32 | } 33 | 34 | # PASS 35 | test_assert_output_actual_from_stdin_pass () { 36 | printf "hello world" | assert_output "hello world" - 37 | } 38 | 39 | # PASS 40 | test_assert_output_with_implicit_stdin_pass () { 41 | printf "hello world" | assert_output "hello world" 42 | } 43 | 44 | # PASS 45 | test_multiple_asserts_pass () { 46 | assert_status 0 0 47 | assert_output "0" "0" 48 | } 49 | 50 | . ts 51 | -------------------------------------------------------------------------------- /test/examples/public_function_conflict: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # PASS 4 | # in test 5 | # 6 | . test/helper 7 | 8 | export PATH="$PWD/test/examples/public_function_conflict.d:$PATH" 9 | 10 | test_setup_and_teardown_commands_on_PATH_are_ignored () { 11 | printf "in test\n" 12 | } 13 | 14 | . 
ts 15 | -------------------------------------------------------------------------------- /test/examples/public_function_conflict.d/setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | false 3 | -------------------------------------------------------------------------------- /test/examples/public_function_conflict.d/teardown: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | false 3 | -------------------------------------------------------------------------------- /test/examples/return_fail_in_setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FAIL 4 | # in setup 5 | # in teardown 6 | # 7 | . test/helper 8 | 9 | setup () { 10 | printf "in setup\n" 11 | false 12 | } 13 | 14 | teardown () { 15 | printf "in teardown\n" 16 | } 17 | 18 | test_return_fail_in_setup () { 19 | printf "in test\n" 20 | } 21 | 22 | . ts 23 | -------------------------------------------------------------------------------- /test/examples/return_fail_in_teardown: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FAIL 4 | # in setup 5 | # in test 6 | # in teardown 7 | # 8 | . test/helper 9 | 10 | setup () { 11 | printf "in setup\n" 12 | } 13 | 14 | teardown () { 15 | printf "in teardown\n" 16 | false 17 | } 18 | 19 | test_return_fail_in_teardown () { 20 | printf "in test\n" 21 | } 22 | 23 | . ts 24 | -------------------------------------------------------------------------------- /test/examples/set_u: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -u 4 | 5 | . test/helper 6 | 7 | # PASS 8 | test_set_u() { 9 | true 10 | } 11 | 12 | . ts 13 | -------------------------------------------------------------------------------- /test/examples/skip: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . test/helper 4 | 5 | test_skip_no_reason () { 6 | skip 7 | } 8 | 9 | test_skip_with_reason () { 10 | skip "Skipping..." 11 | } 12 | 13 | . ts 14 | -------------------------------------------------------------------------------- /test/examples/test_detection: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | 4 | # PASS 5 | test_common () { 6 | true 7 | } 8 | 9 | # PASS 10 | test_with_whitespace () { 11 | true 12 | } 13 | 14 | # PASS 15 | test_without_whitespace() { 16 | true 17 | } 18 | 19 | # PASS 20 | test_leading_whitespace () { 21 | true 22 | } 23 | 24 | # PASS 25 | test_oneline () { true; } 26 | 27 | # PASS 28 | test_subshell () ( 29 | true 30 | ) 31 | 32 | # PASS 33 | test_subshell_oneline () ( true ) 34 | 35 | . ts 36 | -------------------------------------------------------------------------------- /test/examples/undeclared_variable: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . test/helper 3 | 4 | # FAIL 5 | # test/examples/undeclared_variable: line 8: var: unbound variable 6 | # 7 | # (note the output above is shell-dependent -- see test_undeclared_variable) 8 | test_ts_fails_for_use_of_undeclared_variable () { 9 | unset var 10 | assert_output "" "$var" 11 | } 12 | 13 | # PASS 14 | test_ts_can_use_undeclared_variables_with_set_u () { 15 | set +u 16 | unset var 17 | assert_output "" "$var" 18 | } 19 | 20 | . 
ts 21 | -------------------------------------------------------------------------------- /test/examples/xyz_one: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_xyz () { 4 | true 5 | } 6 | 7 | . test/helper 8 | . ts 9 | -------------------------------------------------------------------------------- /test/examples/xyz_two: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_xyz () { 4 | false 5 | } 6 | 7 | . test/helper 8 | . ts 9 | -------------------------------------------------------------------------------- /test/fail: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_fail () { 4 | false 5 | } 6 | 7 | . test/helper 8 | . ts 9 | -------------------------------------------------------------------------------- /test/helper: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -e bin/ts ] 4 | then 5 | export PATH="$PWD/bin:$PATH" 6 | else 7 | printf "Tests must be run from ts project root\n" 8 | exit 1 9 | fi 10 | 11 | # special case adjustments 12 | ############################################################################# 13 | # BusyBox diff only outputs unified diffs so these tests cannot pass. 14 | # * https://busybox.net/downloads/BusyBox.html 15 | if ! (diff test/pass test/fail 2>&1 | grep -q '^---$') 16 | then 17 | skip_if_non_compliant_diff () { 18 | skip "diff does not conform to POSIX definition" 19 | } 20 | else 21 | skip_if_non_compliant_diff () { 22 | : # noop 23 | } 24 | fi 25 | ############################################################################# 26 | # `.` doesn't set args in all shells (ex DASH) and as a result tests using 27 | # imports cannot pass. As needed source this file with a "check_imports" 28 | # arg; if it does not show up as $1 then imports do not work and should be 29 | # skipped. 30 | if [ "$1" != "check_imports" ] 31 | then 32 | skip_if_imports_do_not_work () { 33 | skip "imports do not work in this shell" 34 | } 35 | else 36 | skip_if_imports_do_not_work () { 37 | : # noop 38 | } 39 | fi 40 | ############################################################################# 41 | -------------------------------------------------------------------------------- /test/nox: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_not_run () { 4 | false 5 | } 6 | 7 | . test/helper 8 | . ts 9 | -------------------------------------------------------------------------------- /test/pass: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | test_pass () { 4 | true 5 | } 6 | 7 | . test/helper 8 | . ts 9 | -------------------------------------------------------------------------------- /test/readme/background: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | teardown () { 4 | jobs -p | xargs kill -9 5 | true 6 | } 7 | 8 | # PASS 9 | test_background_job () { 10 | sleep 3 & 11 | true 12 | } 13 | 14 | . 
ts 15 | -------------------------------------------------------------------------------- /test/readme/common_tests: -------------------------------------------------------------------------------- 1 | test_it_should_pick_lines_with_abc () { 2 | printf "%s\n" "1 abc" "2 xyz" "3 abc" | 3 | ${picker} | assert_output "\ 4 | 1 abc 5 | 3 abc 6 | " 7 | } 8 | -------------------------------------------------------------------------------- /test/readme/example: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # PASS 4 | test_arbitrary_function () { 5 | echo abc | grep -q b 6 | } 7 | 8 | # PASS 9 | test_assert_status () { 10 | false 11 | assert_status 1 $? 12 | } 13 | 14 | # PASS 15 | test_assert_output_style_one () { 16 | out=$(printf "hello world") 17 | assert_output "hello world" "$out" 18 | } 19 | 20 | # PASS 21 | test_assert_output_style_two () { 22 | printf "hello world" | assert_output "hello world" 23 | } 24 | 25 | # PASS 26 | test_assert_output_style_three () { 27 | printf "hello world\n" | assert_output "\ 28 | hello world 29 | " 30 | } 31 | 32 | # SKIP 33 | # skipping this one 34 | test_skip_test () { 35 | skip "skipping this one" 36 | false 37 | } 38 | 39 | . test/helper 40 | . ts 41 | -------------------------------------------------------------------------------- /test/readme/test_grep_abc: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | picker="grep abc" 3 | . ts . ./common_tests 4 | . ts 5 | -------------------------------------------------------------------------------- /test/readme/test_sed_abc: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | picker="sed -ne /abc/p" 3 | . ts . ./common_tests 4 | . ts 5 | -------------------------------------------------------------------------------- /test/readme/troubleshoot_fail: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # FAIL 4 | # unequal output: 5 | # 1c1 6 | # < abc 7 | # --- 8 | # > abc 9 | # \ No newline at end of file 10 | test_newline_is_missing_so_this_fails () { 11 | out=$(echo abc) 12 | 13 | assert_output "\ 14 | abc 15 | " "$out" 16 | } 17 | 18 | # PASS 19 | # unequal output: 20 | # 1c1 21 | # < 1 22 | # \ No newline at end of file 23 | # --- 24 | # > 0 25 | # \ No newline at end of file 26 | test_multiple_asserts_not_failing_as_intended () { 27 | assert_output "1" "0" 28 | assert_output "0" "0" 29 | } 30 | 31 | # FAIL 32 | # unequal output: 33 | # 1c1 34 | # < 1 35 | # \ No newline at end of file 36 | # --- 37 | # > 0 38 | # \ No newline at end of file 39 | test_this_fails_as_expected () { 40 | printf "0" | assert_output "1" && 41 | assert_output "0" "0" 42 | } 43 | 44 | # FAIL 45 | # unequal output: 46 | # 1c1 47 | # < 1 48 | # \ No newline at end of file 49 | # --- 50 | # > 0 51 | # \ No newline at end of file 52 | # expected status 0 but was 1 (checking the pipeline) 53 | test_this_also_fails_as_expected () { 54 | printf "0" | assert_output "1" 55 | assert_status "0" $? "checking the pipeline" 56 | assert_output "0" "0" 57 | } 58 | 59 | . test/helper 60 | . 
ts 61 | -------------------------------------------------------------------------------- /test/readme/troubleshoot_pass: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # PASS 4 | test_newline_is_now_accounted_for () { 5 | out=$(echo abc; printf x) 6 | 7 | assert_output "\ 8 | abc 9 | " "${out%x}" 10 | } 11 | 12 | # PASS 13 | test_another_newline_strategy () { 14 | echo abc | assert_output "\ 15 | abc 16 | " 17 | } 18 | 19 | # PASS 20 | # unequal output: 21 | # 1c1 22 | # < 1 23 | # \ No newline at end of file 24 | # --- 25 | # > 0 26 | # \ No newline at end of file 27 | test_this_has_a_bug_and_does_not_fail () { 28 | printf "0" | assert_output "1" 29 | assert_output "0" "0" 30 | } 31 | 32 | . test/helper 33 | . ts 34 | -------------------------------------------------------------------------------- /test/readme/usage: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # pick a shell, any (POSIX) shell 3 | 4 | setup () { # optional setup 5 | mkdir -p "$ts_test_dir" 6 | } 7 | 8 | teardown () { # optional teardown 9 | rm -r "$ts_test_dir" 10 | } 11 | 12 | test_pass () { # write tests named like "test_" 13 | true # return 0 to pass. 14 | } 15 | 16 | . test/helper 17 | . ts 18 | -------------------------------------------------------------------------------- /test/suite: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | ############################################################################ 3 | . test/helper check_imports 4 | 5 | setup () { 6 | unset $(set | grep '^TS_' | sed -e 's/=.*//' | tr "\n" " ") 7 | export TS_TMP_DIR="$ts_test_dir" 8 | } 9 | 10 | assert_example () { 11 | skip_if_non_compliant_diff 12 | example_file="$1" 13 | options="${2:-}" 14 | 15 | ts -s $options "$example_file" | 16 | normalize_stream | 17 | assert_output "$( 18 | sed -ne ' 19 | /^# PASS/,/^[^#]/s/# //p 20 | /^# FAIL/,/^[^#]/s/# //p 21 | /^# SKIP/,/^[^#]/s/# //p 22 | ' "$example_file" | 23 | normalize_stream 24 | ) 25 | " 26 | } 27 | 28 | normalize_stream () { 29 | sed -e ' 30 | /^P/s/.*/PASS/ 31 | /^F/s/.*/FAIL/ 32 | /^S/s/.*/SKIP/ 33 | /^$/d 34 | ' 35 | } 36 | 37 | # 38 | # ts test 39 | # 40 | 41 | test_ts_runs_file () { 42 | ts -s test/pass | assert_output "\ 43 | P [test/pass:3] test_pass 44 | 45 | " 46 | } 47 | 48 | test_ts_runs_file_in_pwd () { 49 | mkdir -p "$ts_test_dir" 50 | sed -e '/helper/d' test/pass > "$ts_test_dir/pass" 51 | chmod +x "$ts_test_dir/pass" 52 | cd "$ts_test_dir" 53 | 54 | ts -s pass | assert_output "\ 55 | P [./pass:3] test_pass 56 | 57 | " 58 | } 59 | 60 | test_ts_runs_multiple_files () { 61 | ts -s test/pass test/fail | assert_output "\ 62 | P [test/pass:3] test_pass 63 | 64 | F [test/fail:3] test_fail 65 | 66 | " 67 | } 68 | 69 | test_ts_runs_tests_with_same_name_in_different_files () { 70 | ts test/examples/xyz_one test/examples/xyz_two -s | assert_output "\ 71 | P [test/examples/xyz_one:3] test_xyz 72 | 73 | F [test/examples/xyz_two:3] test_xyz 74 | 75 | " 76 | } 77 | 78 | test_ts_reads_raw_input_for_dash () { 79 | ( ts test/pass -ar 80 | ts test/fail -ar 81 | ) | ts - -s | assert_output "\ 82 | P [test/pass:3] test_pass 83 | 84 | F [test/fail:3] test_fail 85 | 86 | " 87 | } 88 | 89 | test_ts_allows_whitespace_between_raw_inputs () { 90 | printf "%s" "\ 91 | [test/examples/abc:3] test_a 92 | P 1 93 | a 94 | 95 | [test/examples/abc:9] test_b 96 | P 1 97 | b 98 | 99 | 100 | [test/examples/abc:15] test_c 101 | F 1 102 | c 103 | 104 
| " | ts -s - | assert_output "\ 105 | P [test/examples/abc:3] test_a 106 | a 107 | 108 | P [test/examples/abc:9] test_b 109 | b 110 | 111 | F [test/examples/abc:15] test_c 112 | c 113 | 114 | " 115 | } 116 | 117 | test_ts_preserves_whitespace_identified_in_raw_inputs () { 118 | printf "%s" "\ 119 | [test/examples/abc:3] test_a 120 | P 3 121 | 122 | 123 | " | ts -s - | assert_output "\ 124 | P [test/examples/abc:3] test_a 125 | 126 | 127 | " 128 | } 129 | 130 | test_ts_reports_non_executable_files () { 131 | ts -s test/nox | assert_output "\ 132 | S [test/nox] not executable 133 | 134 | " 135 | } 136 | 137 | test_ts_reports_non_files () { 138 | ts -s test test/miss | assert_output "\ 139 | S [test] not a file 140 | 141 | S [test/miss] not a file 142 | 143 | " 144 | } 145 | 146 | test_script_runs_tests () { 147 | ./test/examples/abc -s | assert_output "\ 148 | P [./test/examples/abc:3] test_a 149 | aA 150 | 151 | P [./test/examples/abc:9] test_b 152 | bB 153 | 154 | F [./test/examples/abc:15] test_c 155 | cC 156 | 157 | S [./test/examples/abc:21] test_s 158 | s 159 | 160 | " 161 | } 162 | 163 | test_script_runs_named_tests () { 164 | ./test/examples/abc -s test_a test_c | assert_output "\ 165 | P [./test/examples/abc:3] test_a 166 | aA 167 | 168 | F [./test/examples/abc:15] test_c 169 | cC 170 | 171 | " 172 | } 173 | 174 | test_script_runs_tests_matching_patterns () { 175 | ./test/examples/abc -s 'test_[ac]' | assert_output "\ 176 | P [./test/examples/abc:3] test_a 177 | aA 178 | 179 | F [./test/examples/abc:15] test_c 180 | cC 181 | 182 | " 183 | } 184 | 185 | test_ts_runs_cleanly_with_no_tests () { 186 | # some shell (ex dash) don't support $SECONDS and so the timing shows 187 | # up as '-1 s' -- normalize this away as it doesn't change the test 188 | ./test/examples/no_tests 2>&1 | sed -e 's/-1/0/' | assert_output "\ 189 | 190 | 191 | 0 pass 0 fail 0 skip 0 s 192 | " 193 | } 194 | 195 | # 196 | # options test 197 | # 198 | 199 | test_ts_accepts_options_anywhere () { 200 | ts test/pass -a test/fail -s 2>/dev/null | 201 | assert_output "\ 202 | P [test/pass:3] test_pass 203 | 204 | F [test/fail:3] test_fail 205 | 206 | " 207 | } 208 | 209 | test_script_accepts_options_anywhere () { 210 | ./test/examples/abc test_a -a test_c -s | assert_output "\ 211 | P [./test/examples/abc:3] test_a 212 | aA 213 | 214 | F [./test/examples/abc:15] test_c 215 | cC 216 | 217 | " 218 | } 219 | 220 | # 221 | # -a option 222 | # 223 | 224 | test_ts_does_not_print_passing_tests_by_default () { 225 | ts test/examples/abc 2>&1 | 226 | sed -e '$ s/[-0-9]\{1,\} s$/# s/' | 227 | assert_output "\ 228 | ..F- 229 | 230 | [test/examples/abc:15] test_c 231 | cC 232 | F 233 | 234 | [test/examples/abc:21] test_s 235 | s 236 | S 237 | 238 | 2 pass 1 fail 1 skip # s 239 | " 240 | } 241 | 242 | test_ts_a_option_prints_all_tests () { 243 | ts -a test/examples/abc 2>&1 | 244 | sed -e '$ s/[-0-9]\{1,\} s$/# s/' | 245 | assert_output "\ 246 | ..F- 247 | 248 | [test/examples/abc:3] test_a 249 | aA 250 | P 251 | 252 | [test/examples/abc:9] test_b 253 | bB 254 | P 255 | 256 | [test/examples/abc:15] test_c 257 | cC 258 | F 259 | 260 | [test/examples/abc:21] test_s 261 | s 262 | S 263 | 264 | 2 pass 1 fail 1 skip # s 265 | " 266 | } 267 | 268 | test_script_does_not_print_passing_tests_by_default () { 269 | ./test/examples/abc 2>&1 | 270 | sed -e '$ s/[-0-9]\{1,\} s$/# s/' | 271 | assert_output "\ 272 | ..F- 273 | 274 | [./test/examples/abc:15] test_c 275 | cC 276 | F 277 | 278 | [./test/examples/abc:21] test_s 279 | s 280 | S 281 | 282 
| 2 pass 1 fail 1 skip # s 283 | " 284 | } 285 | 286 | test_script_a_option_prints_all_tests () { 287 | ./test/examples/abc -a 2>&1 | 288 | sed -e '$ s/[-0-9]\{1,\} s$/# s/' | 289 | assert_output "\ 290 | ..F- 291 | 292 | [./test/examples/abc:3] test_a 293 | aA 294 | P 295 | 296 | [./test/examples/abc:9] test_b 297 | bB 298 | P 299 | 300 | [./test/examples/abc:15] test_c 301 | cC 302 | F 303 | 304 | [./test/examples/abc:21] test_s 305 | s 306 | S 307 | 308 | 2 pass 1 fail 1 skip # s 309 | " 310 | } 311 | 312 | # 313 | # -c option 314 | # 315 | 316 | test_ts_c_option_prints_green_red_color_output () { 317 | ts -sc test/examples/abc | assert_output "\ 318 | [test/examples/abc:3] test_a 319 | aA 320 | 321 | [test/examples/abc:9] test_b 322 | bB 323 | 324 | [test/examples/abc:15] test_c 325 | cC 326 | 327 | [test/examples/abc:21] test_s 328 | s 329 | 330 | " 331 | } 332 | 333 | test_script_c_option_prints_color_output () { 334 | ./test/examples/abc -s -c | assert_output "\ 335 | [./test/examples/abc:3] test_a 336 | aA 337 | 338 | [./test/examples/abc:9] test_b 339 | bB 340 | 341 | [./test/examples/abc:15] test_c 342 | cC 343 | 344 | [./test/examples/abc:21] test_s 345 | s 346 | 347 | " 348 | } 349 | 350 | # 351 | # -d option 352 | # 353 | 354 | # bash does '+++' 355 | # dash does '+' 356 | # zsh adds trailing whitespace to ts_exitstatus 357 | normalize_debug_output () { 358 | sed -e ' 359 | s/+\{1,\}/+++/ 360 | s/ts_exitstatus=0 /ts_exitstatus=0/ 361 | ' 362 | } 363 | 364 | test_ts_d_option_turns_on_xtrace_and_verbose () { 365 | ts -ds test/pass | normalize_debug_output | assert_output "\ 366 | P [test/pass:3] test_pass 367 | +++ trap teardown EXIT 368 | +++ setup 369 | +++ true 370 | +++ test_pass 371 | +++ true 372 | +++ ts_exitstatus=0 373 | +++ trap - EXIT 374 | +++ teardown 375 | +++ true 376 | +++ return 0 377 | 378 | " 379 | } 380 | 381 | test_script_d_option_turns_on_xtrace_and_verbose () { 382 | ./test/pass -d -s | normalize_debug_output | assert_output "\ 383 | P [./test/pass:3] test_pass 384 | +++ trap teardown EXIT 385 | +++ setup 386 | +++ true 387 | +++ test_pass 388 | +++ true 389 | +++ ts_exitstatus=0 390 | +++ trap - EXIT 391 | +++ teardown 392 | +++ true 393 | +++ return 0 394 | 395 | " 396 | } 397 | 398 | # 399 | # -e 400 | # 401 | 402 | test_script_e_option_executes_test_in_shell () { 403 | skip_if_non_compliant_diff 404 | ./test/examples/fail test_assert_output_fail -e | assert_output "\ 405 | [./test/examples/fail:34] test_assert_output_fail 406 | unequal output: 407 | 1c1 408 | < hello world 409 | \ No newline at end of file 410 | --- 411 | > hell0 world 412 | \ No newline at end of file 413 | " 414 | } 415 | 416 | test_script_e_option_exits_with_test_exit_status () { 417 | ./test/examples/fail test_exit_fail -e 418 | assert_status 8 $? 
419 | } 420 | 421 | # 422 | # -m option 423 | # 424 | 425 | test_ts_m_option_monitors_output () { 426 | ts -am test/examples/abc 2>&1 | 427 | sed -e '$ s/[-0-9]\{1,\} s$/# s/' | 428 | assert_output "\ 429 | ..F- 430 | 431 | [test/examples/abc:3] test_a 432 | aA 433 | P 434 | 435 | [test/examples/abc:9] test_b 436 | bB 437 | P 438 | 439 | [test/examples/abc:15] test_c 440 | cC 441 | F 442 | 443 | [test/examples/abc:21] test_s 444 | s 445 | S 446 | 447 | 2 pass 1 fail 1 skip # s 448 | " 449 | } 450 | 451 | test_script_m_option_monitors_output () { 452 | ./test/examples/abc -a -m 2>&1 | 453 | sed -e "s/[-0-9]\{1,\} s$/# s/" | 454 | assert_output "\ 455 | ..F- 456 | 457 | [./test/examples/abc:3] test_a 458 | aA 459 | P 460 | 461 | [./test/examples/abc:9] test_b 462 | bB 463 | P 464 | 465 | [./test/examples/abc:15] test_c 466 | cC 467 | F 468 | 469 | [./test/examples/abc:21] test_s 470 | s 471 | S 472 | 473 | 2 pass 1 fail 1 skip # s 474 | " 475 | } 476 | 477 | # 478 | # -h option 479 | # 480 | 481 | test_ts_h_prints_help () { 482 | ts -h | grep -q "usage:" 483 | } 484 | 485 | test_script_h_prints_help () { 486 | ./test/pass -h | grep -q "usage:" 487 | } 488 | 489 | # 490 | # -q option 491 | # 492 | 493 | test_ts_displays_stdout_and_stderr () { 494 | ts -s test/examples/abc | assert_output "\ 495 | P [test/examples/abc:3] test_a 496 | aA 497 | 498 | P [test/examples/abc:9] test_b 499 | bB 500 | 501 | F [test/examples/abc:15] test_c 502 | cC 503 | 504 | S [test/examples/abc:21] test_s 505 | s 506 | 507 | " 508 | } 509 | 510 | test_ts_q_option_hide_stderr () { 511 | ts -s -q test/examples/abc | assert_output "\ 512 | P [test/examples/abc:3] test_a 513 | a 514 | 515 | P [test/examples/abc:9] test_b 516 | b 517 | 518 | F [test/examples/abc:15] test_c 519 | c 520 | 521 | S [test/examples/abc:21] test_s 522 | s 523 | 524 | " 525 | } 526 | 527 | test_script_displays_stdout_and_stderr () { 528 | ./test/examples/abc -s | assert_output "\ 529 | P [./test/examples/abc:3] test_a 530 | aA 531 | 532 | P [./test/examples/abc:9] test_b 533 | bB 534 | 535 | F [./test/examples/abc:15] test_c 536 | cC 537 | 538 | S [./test/examples/abc:21] test_s 539 | s 540 | 541 | " 542 | } 543 | 544 | test_script_q_hides_stderr () { 545 | ./test/examples/abc -s -q | assert_output "\ 546 | P [./test/examples/abc:3] test_a 547 | a 548 | 549 | P [./test/examples/abc:9] test_b 550 | b 551 | 552 | F [./test/examples/abc:15] test_c 553 | c 554 | 555 | S [./test/examples/abc:21] test_s 556 | s 557 | 558 | " 559 | } 560 | 561 | # 562 | # -r option 563 | # 564 | 565 | test_ts_r_option_removes_tmp_dir () { 566 | tmpdir="$ts_test_dir/tmp" 567 | ts -r -t "$tmpdir" test/pass 568 | [ ! -e "$tmpdir" ] 569 | } 570 | 571 | test_script_r_option_removes_tmp_dir () { 572 | tmpdir="$ts_test_dir/tmp" 573 | ./test/pass -r -t "$tmpdir" 574 | [ ! 
-e "$tmpdir" ] 575 | } 576 | 577 | # 578 | # -s option 579 | # 580 | 581 | test_ts_s_option_prints_structured_output () { 582 | ts -s test/examples/abc | assert_output "\ 583 | P [test/examples/abc:3] test_a 584 | aA 585 | 586 | P [test/examples/abc:9] test_b 587 | bB 588 | 589 | F [test/examples/abc:15] test_c 590 | cC 591 | 592 | S [test/examples/abc:21] test_s 593 | s 594 | 595 | " 596 | } 597 | 598 | test_script_s_option_prints_structured_output () { 599 | ./test/examples/abc -s | assert_output "\ 600 | P [./test/examples/abc:3] test_a 601 | aA 602 | 603 | P [./test/examples/abc:9] test_b 604 | bB 605 | 606 | F [./test/examples/abc:15] test_c 607 | cC 608 | 609 | S [./test/examples/abc:21] test_s 610 | s 611 | 612 | " 613 | } 614 | 615 | # 616 | # -v option 617 | # 618 | 619 | test_ts_v_option_displays_stderr () { 620 | ts -sv test/examples/abc | assert_output "\ 621 | P [test/examples/abc:3] test_a 622 | aA 623 | 624 | P [test/examples/abc:9] test_b 625 | bB 626 | 627 | F [test/examples/abc:15] test_c 628 | cC 629 | 630 | S [test/examples/abc:21] test_s 631 | s 632 | 633 | " 634 | } 635 | 636 | test_script_v_option_displays_stderr () { 637 | ./test/examples/abc -s -v | assert_output "\ 638 | P [./test/examples/abc:3] test_a 639 | aA 640 | 641 | P [./test/examples/abc:9] test_b 642 | bB 643 | 644 | F [./test/examples/abc:15] test_c 645 | cC 646 | 647 | S [./test/examples/abc:21] test_s 648 | s 649 | 650 | " 651 | } 652 | 653 | # 654 | # -w option 655 | # 656 | 657 | test_ts_w_option_prints_raw_output () { 658 | ts -w test/examples/abc | assert_output "\ 659 | [test/examples/abc:3] test_a 660 | P 2 661 | aA 662 | [test/examples/abc:9] test_b 663 | P 2 664 | bB 665 | [test/examples/abc:15] test_c 666 | F 2 667 | cC 668 | [test/examples/abc:21] test_s 669 | S 1 670 | s 671 | " 672 | } 673 | 674 | test_script_w_option_prints_raw_output () { 675 | ./test/examples/abc -w | assert_output "\ 676 | [./test/examples/abc:3] test_a 677 | P 2 678 | aA 679 | [./test/examples/abc:9] test_b 680 | P 2 681 | bB 682 | [./test/examples/abc:15] test_c 683 | F 2 684 | cC 685 | [./test/examples/abc:21] test_s 686 | S 1 687 | s 688 | " 689 | } 690 | 691 | # 692 | # TS_DIFF test 693 | # 694 | 695 | test_ts_uses_TS_DIFF_to_diff_outputs () { 696 | skip_if_non_compliant_diff 697 | export TS_DIFF="diff -e" 698 | ./test/examples/fail test_assert_output_fail | assert_output "\ 699 | [./test/examples/fail:34] test_assert_output_fail 700 | unequal output: 701 | 1c 702 | hell0 world 703 | . 704 | diff: $ts_test_dir/fail/test_assert_output_fail.e.txt: No newline at end of file 705 | 706 | diff: $ts_test_dir/fail/test_assert_output_fail.a.txt: No newline at end of file 707 | F 708 | 709 | " 710 | } 711 | 712 | # 713 | # exit status test 714 | # 715 | 716 | test_ts_exits_0_for_passing_test () { 717 | ts test/pass 718 | assert_status 0 $? 719 | } 720 | 721 | test_ts_exits_1_for_failing_test () { 722 | ts test/fail >&2 723 | assert_status 1 $? 724 | } 725 | 726 | test_script_exits_0_for_passing_test () { 727 | ./test/pass 728 | assert_status 0 $? 729 | } 730 | 731 | test_script_exits_1_for_failing_test () { 732 | ./test/fail >&2 733 | assert_status 1 $? 
734 | } 735 | 736 | # 737 | # testcases 738 | # 739 | 740 | test_ts_returns_to_TS_USR_DIR_for_each_test () { 741 | assert_example test/examples/change_dirs 742 | } 743 | 744 | test_ts_exit_fail_in_setup () { 745 | assert_example test/examples/exit_fail_in_setup 746 | } 747 | 748 | test_ts_exit_fail_in_test () { 749 | assert_example test/examples/exit_fail_in_test 750 | } 751 | 752 | test_ts_exit_fail_in_teardown () { 753 | assert_example test/examples/exit_fail_in_teardown 754 | } 755 | 756 | test_ts_exit_fail_after_untrap_EXIT () { 757 | assert_example test/examples/exit_fail_in_test_after_untrap 758 | } 759 | 760 | test_ts_fail_examples () { 761 | assert_example test/examples/fail 762 | } 763 | 764 | test_ts_options () { 765 | assert_example test/examples/options 766 | } 767 | 768 | # test this manually to see the nesting 769 | test_include () { 770 | skip_if_imports_do_not_work 771 | ts -s test/examples/include | assert_output "\ 772 | P [test/examples/include:3] test_include 773 | 774 | P [test/examples/include -> test/examples/include a:3] test_include_a 775 | 776 | P [test/examples/include -> test/examples/include b:3] test_include_b 777 | 778 | P [test/examples/include -> test/examples/include b -> test/examples/include_c:3] test_include_c 779 | 780 | " 781 | } 782 | 783 | test_ts_pass_examples () { 784 | assert_example test/examples/pass 785 | } 786 | 787 | test_ts_public_function_conflict () { 788 | assert_example test/examples/public_function_conflict 789 | } 790 | 791 | test_ts_return_fail_in_setup () { 792 | assert_example test/examples/return_fail_in_setup 793 | } 794 | 795 | test_ts_return_fail_in_teardown () { 796 | assert_example test/examples/return_fail_in_teardown 797 | } 798 | 799 | test_ts_test_detection () { 800 | assert_example test/examples/test_detection 801 | } 802 | 803 | test_undeclared_variable () { 804 | # The output message for an undeclared variable is shell-dependent. Because 805 | # assert_example expects fixed output we manually check this example in a 806 | # shell-independent way. 807 | 808 | ts -s test/examples/undeclared_variable | 809 | sed -e '/^ /s|test/examples/undeclared_variable:.*10:.*var:.*|...|' | # bash/dash 810 | sed -e '/^ /s|test_ts_fails_for_use_of_undeclared_variable:2: var: parameter not set|...|' | # zsh 811 | sed -e '/^ /s|test/examples/undeclared_variable.*line 10: var: parameter not set|...|' | # ksh 812 | assert_output "\ 813 | F [test/examples/undeclared_variable:8] test_ts_fails_for_use_of_undeclared_variable 814 | ... 815 | 816 | P [test/examples/undeclared_variable:14] test_ts_can_use_undeclared_variables_with_set_u 817 | 818 | " 819 | } 820 | 821 | # 822 | # misc tests 823 | # 824 | 825 | test_ts_prevents_hang_by_stdin () { 826 | # cat hangs if stdin is waiting 827 | cat | assert_output "" 828 | } 829 | 830 | test_ts_allows_exit_trap () { 831 | trap "exit 0" EXIT 832 | exit 1 833 | } 834 | 835 | test_ts_allows_set_u_in_tests() { 836 | assert_example test/examples/set_u 837 | } 838 | 839 | # 840 | # skip test 841 | # 842 | 843 | test_ts_skip () { 844 | ts -s test/examples/skip | assert_output "\ 845 | S [test/examples/skip:5] test_skip_no_reason 846 | no reason given 847 | 848 | S [test/examples/skip:9] test_skip_with_reason 849 | Skipping... 
850 | 851 | " 852 | } 853 | 854 | # 855 | # readme test 856 | # 857 | 858 | test_ts_background_job_example () { 859 | # stderr message couldn't be generalized so use -q 860 | assert_example test/readme/background -q 861 | } 862 | 863 | test_ts_readme_example () { 864 | assert_example test/readme/example 865 | } 866 | 867 | test_ts_readme_common_tests () { 868 | skip_if_imports_do_not_work 869 | cd test/readme 870 | ts -s test_grep_abc test_sed_abc | assert_output "\ 871 | P [./test_grep_abc -> ./common_tests:1] test_it_should_pick_lines_with_abc 872 | 873 | P [./test_sed_abc -> ./common_tests:1] test_it_should_pick_lines_with_abc 874 | 875 | " 876 | } 877 | 878 | test_ts_readme_troubleshoot_fail () { 879 | assert_example test/readme/troubleshoot_fail 880 | } 881 | 882 | test_ts_readme_troubleshoot_pass () { 883 | assert_example test/readme/troubleshoot_pass 884 | } 885 | 886 | # test this one literally to ensure the example is 100% correct 887 | test_ts_readme_usage () { 888 | ts -s test/readme/usage | assert_output "\ 889 | P [test/readme/usage:12] test_pass 890 | 891 | " 892 | } 893 | 894 | . ts 895 | --------------------------------------------------------------------------------
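A note on the example convention used above (added commentary, not part of the repository listing): the scripts under test/examples encode their expected results in comments. `assert_example` in test/suite extracts any comment block beginning with `# PASS`, `# FAIL`, or `# SKIP` with sed, strips the leading `# `, and compares the result against the normalized output of running the script via `ts -s`. Below is a minimal sketch of a new example written in that style; the path test/examples/hello and the test names are hypothetical and do not exist in the repository.

    #!/bin/sh
    # Hypothetical example file (test/examples/hello), following the same
    # conventions as test/examples/pass and test/examples/return_fail_in_setup:
    # the comment before each test declares the result ts is expected to
    # report, plus any output the test prints.
    . test/helper

    # PASS
    test_hello_passes () {
        printf "hello world" | assert_output "hello world"
    }

    # FAIL
    # in test
    test_hello_fails () {
        printf "in test\n"
        false
    }

    . ts

If such a file existed, the suite could exercise it the same way as the other examples, e.g. `assert_example test/examples/hello`; since assert_example calls skip_if_non_compliant_diff first, the check would be skipped automatically on systems (such as BusyBox) whose diff does not follow the POSIX output format.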