├── README.md ├── bash_test_tools ├── documentation ├── docs │ ├── Assert_API.md │ ├── custom.css │ ├── index.md │ └── screenshot1.jpg └── mkdocs.yml ├── examples ├── test_find.sh └── test_netcat.sh └── tests ├── arg_is_foobar.py └── test_asserts.sh /README.md: -------------------------------------------------------------------------------- 1 | # bash_test_tools 2 | 3 | ```bash 4 | source bash_test_tools 5 | 6 | WORK="/tmp/work" 7 | 8 | function setup 9 | { 10 | mkdir -p "$WORK" 11 | cd "$WORK" 12 | touch some_file.txt 13 | } 14 | 15 | function teardown 16 | { 17 | cd 18 | rm -rf "$WORK" 19 | } 20 | 21 | function test_find_local_directory 22 | { 23 | # Run 24 | run "find ./" 25 | # Assert 26 | assert_success 27 | assert_output_contains "some_file.txt" 28 | } 29 | 30 | testrunner 31 | ``` 32 | Bash Test Tools is intended to be a simple to use framework for testing executables inside the 33 | shell environment. The framework allows extraction and assert operations on parameters 34 | such as standard output, standard error, exit code, execution time, file system and network services. 35 | Please read the full documentation located here [bash_test_tools docs](https://thorsteinssonh.github.io/bash_test_tools). 36 | 37 | -------------------------------------------------------------------------------- /bash_test_tools: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | # -*- coding: utf-8 -*- 3 | # --------------------------------------------------------------------------- 4 | # bash_test_tools - command line and utility testing tools for bash 5 | 6 | # Copyright 2016, Videntifier Technologies 7 | # Developer: Hrobjartur Thorsteinsson 8 | 9 | # This program is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 
13 | 14 | # This program is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU General Public License at for 18 | # more details. 19 | 20 | # --------------------------------------------------------------------------- 21 | 22 | 23 | # Codes 24 | RED='\033[1;31m' 25 | GREEN='\033[1;32m' 26 | BLUE='\033[1;34m' 27 | PURPLE='\033[1;36m' 28 | NC='\033[0m' 29 | ASSERT_STR="${BLUE}Assert:${NC}" 30 | RUNNING_STR="${BLUE}Running:${NC}" 31 | OK_STR="${GREEN}OK${NC}" 32 | FAIL_STR="${RED}OK${NC}" 33 | FAIL_STR="${RED}FAIL${NC}" 34 | THIS_SCRIPT_NAME=$( basename "$0" ) 35 | 36 | MAX_WIDTH=100 37 | 38 | function bash_test_tools_help 39 | { 40 | printf "$THIS_SCRIPT_NAME - tests built on bash_test_tools\n\n" 41 | printf "Usage: $THIS_SCRIPT_NAME [OPTIONS]...\n\n" 42 | printf " -l list all available tests\n" 43 | printf " -t TESTNAME run only tests ending in TESTNAME\n" 44 | printf " -o TAP_FILE write test results to TAP (test anything) file\n" 45 | printf " -x disable teardown (for debugging)\n" 46 | printf " -h print this help\n\n" 47 | } 48 | 49 | # parser options 50 | while getopts t:o:hlx opt; do 51 | case $opt in 52 | t) 53 | TEST_FUNCTION="$OPTARG" 54 | ;; 55 | o) 56 | TAP_FILE="$OPTARG" 57 | ;; 58 | h) 59 | bash_test_tools_help 60 | exit 0 61 | ;; 62 | l) 63 | LIST_TESTS_ONLY="1" 64 | ;; 65 | x) 66 | NO_TEARDOWN="1" 67 | ;; 68 | \?) 
69 | echo "Unknown option" 70 | bash_test_tools_help 71 | exit 1 72 | esac 73 | done 74 | 75 | # default empty setup/teardown functions 76 | function setup 77 | { 78 | : 79 | } 80 | 81 | function teardown 82 | { 83 | : 84 | } 85 | 86 | function get_current_line 87 | { 88 | IFS=';' read -sdR -p $'\E[6n' ROW COL;echo "${ROW#*[}" 89 | } 90 | 91 | function move_cursor_right 92 | { 93 | local cur_line=$( get_current_line ) 94 | local cols=$(tput cols) 95 | ((cur_line=cur_line-1)) 96 | [ "$cols" -gt "$MAX_WIDTH" ] && cols=$(($MAX_WIDTH-5)) || cols=$((cols-5)) 97 | tput cup $cur_line $cols 98 | } 99 | 100 | function print_line 101 | { 102 | local cols=$(tput cols) 103 | [ "$cols" -gt "$MAX_WIDTH" ] && printf "%${MAX_WIDTH}s\n" | tr " " "$1" || printf "%${cols}s\n" | tr " " "$1" 104 | } 105 | 106 | function print_ok 107 | { 108 | move_cursor_right 109 | printf " ${OK_STR}\n" 110 | } 111 | 112 | function print_fail 113 | { 114 | move_cursor_right 115 | printf "${FAIL_STR}\n" 116 | } 117 | 118 | function count_files 119 | { 120 | # count files in argument directory - skipps hidden .- files 121 | shopt -s nullglob # to count 0 in empty dir / no match 122 | local fn=("$1"/*) 123 | echo ${#fn[@]} 124 | shopt -u nullglob 125 | } 126 | 127 | # Assert functions 128 | _assert_condition=0 # this var tracks assert status during testing 129 | 130 | function assert 131 | { 132 | # run an external custom executable or function as assert, 133 | # exit code 0 for success, !=0 for fail. 
134 | 135 | # strip down full path from command - for neater printing 136 | local execname=$(basename "${1%% *}") 137 | if [[ "$1" =~ ^[^[:space:]]+[[:space:]]+[^[:space:]]+ ]]; then 138 | # checking if cmd has arguments 139 | local stripped_cmd="${execname} ${1#* }" 140 | else 141 | local stripped_cmd="${execname}" 142 | fi 143 | 144 | printf "${ASSERT_STR} $stripped_cmd" 145 | if $1 1>_assert_output 2>_assert_output; then 146 | local assert_output=$(<_assert_output) 147 | rm -rf _assert_output 148 | print_ok 149 | if [ "$assert_output" != "" ]; then 150 | printf " ++ $execname says:\n" 151 | printf "$assert_output\n" 152 | fi 153 | return 0 154 | else 155 | local assert_output=$(<_assert_output) 156 | rm -rf _assert_output 157 | print_fail 158 | ((_assert_condition++)) 159 | rm -rf _assert_output 160 | if [ "$assert_output" != "" ]; then 161 | printf " ++ $execname says:\n" 162 | printf "$assert_output\n" 163 | fi 164 | return 1 165 | fi 166 | } 167 | 168 | function assert_matches_regex 169 | { 170 | [[ $3 ]] && printf "${ASSERT_STR} '$3' matches '$2' " || printf "${ASSERT_STR} matches '$2' " 171 | if echo "$1" | grep -E "$2" >> /dev/null; then 172 | print_ok 173 | return 0 174 | else 175 | print_fail 176 | ((_assert_condition++)) 177 | return 1 178 | fi 179 | } 180 | 181 | function assert_not_empty 182 | { 183 | [[ "$2" ]] && printf "${ASSERT_STR} '$2' not empty " || printf "${ASSERT_STR} variable not empty " 184 | if [[ "$1" != "" ]] ;then 185 | print_ok 186 | return 0 187 | else 188 | print_fail 189 | ((_assert_condition++)) 190 | return 1 191 | fi 192 | } 193 | 194 | function assert_empty 195 | { 196 | [[ "$2" ]] && printf "${ASSERT_STR} '$2' is empty " || printf "${ASSERT_STR} variable is empty " 197 | if [[ "$1" == "" ]] ;then 198 | print_ok 199 | return 0 200 | else 201 | print_fail 202 | ((_assert_condition++)) 203 | return 1 204 | fi 205 | } 206 | 207 | function assert_exit_success 208 | { 209 | assert_equal "$returnval" 0 "exit status" 210 | } 211 | 212 
| function assert_exit_fail 213 | { 214 | assert_not_equal "$returnval" 0 "exit status" 215 | } 216 | 217 | function assert_no_error 218 | { 219 | # asserts std error ouput $error has no message 220 | assert_empty "$error" "stderror" 221 | } 222 | 223 | function assert_has_error 224 | { 225 | # asserts std error ouput $error has a message 226 | assert_not_empty "$error" "stderror" 227 | } 228 | 229 | function assert_no_output 230 | { 231 | # asserts std ouput $output has no message 232 | assert_empty "$output" "stdout" 233 | } 234 | 235 | function assert_has_output 236 | { 237 | # asserts std ouput $output has no message 238 | assert_not_empty "$output" "stdout" 239 | } 240 | 241 | function assert_contains 242 | { 243 | if [[ $4 ]]; then 244 | printf "${ASSERT_STR} '$3' contains '$4' " 245 | else 246 | [[ $3 ]] && printf "${ASSERT_STR} '$3' contains '$2' " || printf "${ASSERT_STR} contains '$2' " 247 | fi 248 | if [[ "$1" == *"$2"* ]]; then 249 | print_ok 250 | return 0 251 | else 252 | print_fail 253 | ((_assert_condition++)) 254 | return 1 255 | fi 256 | } 257 | 258 | function assert_output_contains 259 | { 260 | assert_contains "$output" "$1" "stdoutput" 261 | } 262 | 263 | function assert_output_not_contains 264 | { 265 | assert_not_contains "$output" "$1" "stdoutput" 266 | } 267 | 268 | function assert_error_contains 269 | { 270 | assert_contains "$error" "$1" "stderror" 271 | } 272 | 273 | function assert_not_contains 274 | { 275 | if [[ $4 ]]; then 276 | printf "${ASSERT_STR} '$3' does not contain '$4' " 277 | else 278 | [[ $3 ]] && printf "${ASSERT_STR} '$3' does not contain '$2' " || printf "${ASSERT_STR} does not contain '$2' " 279 | fi 280 | if [[ "$1" != *"$2"* ]]; then 281 | print_ok 282 | return 0 283 | else 284 | print_fail 285 | ((_assert_condition++)) 286 | return 1 287 | fi 288 | } 289 | 290 | function assert_file_exists 291 | { 292 | [[ "${#1}">40 ]] && local fn=$(basename $1) || local fn=$1 293 | printf "${ASSERT_STR} file '${fn}' exists " 294 | 
if [ -f $1 ]; then 295 | print_ok 296 | return 0 297 | else 298 | print_fail 299 | ((_assert_condition++)) 300 | return 1 301 | fi 302 | } 303 | 304 | function assert_file_not_exists 305 | { 306 | [[ "${#1}">40 ]] && local fn=$(basename $1) || local fn=$1 307 | printf "${ASSERT_STR} file '${fn}' does not exist " 308 | if [ ! -f $1 ]; then 309 | print_ok 310 | return 0 311 | else 312 | print_fail 313 | ((_assert_condition++)) 314 | return 1 315 | fi 316 | } 317 | 318 | function assert_dir_exists 319 | { 320 | local fn=$(basename $1) 321 | printf "${ASSERT_STR} directory '${fn}' exists " 322 | if [ -d $1 ]; then 323 | print_ok 324 | return 0 325 | else 326 | print_fail 327 | ((_assert_condition++)) 328 | return 1 329 | fi 330 | } 331 | 332 | function assert_equal 333 | { 334 | # parse input labels - if labels 3 or 4 provided then use in echo 335 | if [[ $4 ]]; then 336 | printf "${ASSERT_STR} '$3' equal to '$4' " 337 | else 338 | [[ $3 ]] && printf "${ASSERT_STR} '$3' equal to $2 " || printf "${ASSERT_STR} $1 equal to $2 " 339 | fi 340 | 341 | if [ "$1" = "$2" ]; then 342 | print_ok 343 | return 0 344 | else 345 | print_fail 346 | ((_assert_condition++)) 347 | # if comparing multiple lines 348 | # then show diff 349 | if [[ "$1" == *$'\n'* ]]; then 350 | printf "${RED} * Diff:\n" 351 | diff <(echo "$1") <(echo "$2") 352 | printf "${NC}" 353 | fi 354 | return 1 355 | fi 356 | } 357 | 358 | function assert_not_equal 359 | { 360 | if [[ $4 ]]; then 361 | printf "${ASSERT_STR} '$3' not equal to '$4' " 362 | else 363 | [[ $3 ]] && printf "${ASSERT_STR} '$3' not equal to $2 " || printf "${ASSERT_STR} $1 not equal to $2 " 364 | fi 365 | if [ "$1" != "$2" ]; then 366 | print_ok 367 | return 0 368 | else 369 | print_fail 370 | ((_assert_condition++)) 371 | return 1 372 | fi 373 | } 374 | 375 | function assert_tree_equal 376 | { 377 | tree1=$(tree "$1") 378 | tree2=$(tree "$2") 379 | assert_equal "${tree1//$1/basedir}" "${tree2//$2/basedir}" "dir tree $1" "dir tree $2" 380 | } 
381 | 382 | function assert_tree_not_equal 383 | { 384 | tree1=$(tree "$1") 385 | tree2=$(tree "$2") 386 | assert_not_equal "${tree1//$1/basedir}" "${tree2//$2/basedir}" "dir tree $1" "dir tree $2" 387 | } 388 | 389 | function assert_greater_than 390 | { 391 | [[ $3 ]] && printf "${ASSERT_STR} '$3' greater than $2 " || printf "${ASSERT_STR} $1 greater than $2 " 392 | if (( "$( echo "$1>$2"|bc )"==1 )); then 393 | print_ok 394 | return 0 395 | else 396 | print_fail 397 | ((_assert_condition++)) 398 | return 1 399 | fi 400 | } 401 | 402 | function assert_less_than 403 | { 404 | [[ $3 ]] && printf "${ASSERT_STR} '$3' less than $2 " || printf "${ASSERT_STR} $1 less than $2 " 405 | if (( "$( echo "$1<$2"|bc )"==1 )); then 406 | print_ok 407 | return 0 408 | else 409 | print_fail 410 | ((_assert_condition++)) 411 | return 1 412 | fi 413 | } 414 | 415 | function assert_terminated_normally 416 | { 417 | printf "${ASSERT_STR} process terminated normally" 418 | if [[ "$strace" != *"tgkill"* ]]; then 419 | print_ok 420 | return 0 421 | else 422 | print_fail 423 | ((_assert_condition++)) 424 | return 1 425 | fi 426 | } 427 | 428 | function assert_service_on_port 429 | { 430 | printf "${ASSERT_STR} service on port $1" 431 | if netstat -tnl | grep "$1" > /dev/null; then 432 | print_ok 433 | return 0 434 | else 435 | print_fail 436 | ((_assert_condition++)) 437 | return 1 438 | fi 439 | } 440 | 441 | function assert_success 442 | { 443 | # tests basic normal success behavior 444 | assert_terminated_normally 445 | assert_exit_success 446 | assert_no_error 447 | } 448 | 449 | function assert_fail 450 | { 451 | # tests basic normal fail behavior 452 | assert_terminated_normally 453 | assert_exit_fail 454 | assert_has_error 455 | } 456 | 457 | # Generic tests (e.g. 
shared behavior/conventions) 458 | function generic_has_unix_version 459 | { 460 | # run 461 | run "$1 --version" 462 | assert_success 463 | assert_has_output 464 | # assert 465 | run "$1 --help" 466 | assert_contains "$output" "--version" "help" 467 | } 468 | 469 | function generic_has_unix_help 470 | { 471 | # run 472 | run "$1 --help" 473 | # assert 474 | assert_success 475 | assert_has_output 476 | assert_contains "$output" "--help" "help" 477 | } 478 | 479 | function generic_has_unix_conventions 480 | { 481 | # assert common Single UNIX/POSIX/GNU conventions are met for command line utility 482 | generic_has_unix_help "$1" 483 | generic_has_unix_version "$1" 484 | } 485 | 486 | function generic 487 | { 488 | # generate generic tests on executable 489 | local test="$1" 490 | local executable="$2" 491 | eval "function test_$1 { generic_$test '$executable'; }" 492 | } 493 | 494 | 495 | # helper background routine monitorin process for number of threads 496 | function count_threads 497 | { 498 | _count_threads "$1" & 499 | count_threads_pid=$! 500 | } 501 | 502 | function _count_threads 503 | { 504 | procname=$1 505 | while [ 1=1 ]; do 506 | pid=$(pgrep $procname) 507 | if [[ $pid != "" ]]; then 508 | break 509 | fi 510 | sleep 1 511 | done 512 | n=0 513 | while [ 1=1 ]; do 514 | nnow=$( ps -T $pid | wc -l ) 515 | [[ "$nnow" > "$n" ]] && n="$nnow" 516 | echo "Thread monitoring: $nnow threads" 517 | sleep 1 518 | if ! pgrep $procname > /dev/null; then 519 | echo "Thread count done" 520 | break 521 | fi 522 | echo $n > _nthreads 523 | done 524 | } 525 | 526 | # returns result recorded by count_thread function 527 | # should be run in following order 528 | # $ count_threads processname & 529 | # $ processname 530 | # $ wait 531 | # $ get_n_threads 532 | function add_background_assert 533 | { 534 | # these assert statements are executed on 'run "cmd..." 
background' statements 535 | [[ "$background_assert" ]] && background_assert="${background_assert}; ${@}" || background_assert="${@}" 536 | } 537 | 538 | function get_n_threads 539 | { 540 | wait $count_threads_pid # wait for count_threads to finish 541 | nthreads=$(<_nthreads) # get result from file 542 | unset count_threads_pid 543 | } 544 | 545 | # Check if argument is a bash function 546 | function is_function 547 | { 548 | local tp=$( type $1 | head -1 ) 549 | if [[ "$tp" == *"is a function" ]]; then 550 | return 0; 551 | else 552 | return 1; 553 | fi 554 | } 555 | 556 | # Get total file size in Bytes 557 | function get_file_size 558 | { 559 | stat "$1" --format='%s' 560 | } 561 | 562 | # Run function collecting stdout stderr and ret value 563 | function run 564 | { 565 | output=""; returnval=""; error=""; exectime=""; strace="" 566 | # strip ugly dir path of executable 567 | local execname=$(basename "${1%% *}") 568 | if [[ "$1" =~ ^[^[:space:]]+[[:space:]]+[^[:space:]]+ ]]; then 569 | # checking if cmd has arguments 570 | local stripped_cmd="${execname} ${1#* }" 571 | else 572 | local stripped_cmd="${execname}" 573 | fi 574 | export TIMEFORMAT='%R' 575 | 576 | if [ "$2" == "background" ]; then 577 | printf "${RUNNING_STR} ${PURPLE}${stripped_cmd}${NC} (background $3 secs)\n" 578 | { { time { strace -e tgkill -o _strace $1 1>_out 2>_err;}; echo $?>_returnval;} 2>_time; } >/dev/null 2>&1 & 579 | sleep "$3" 580 | # execute during background assert if provided 581 | [[ $5 ]] && $5 582 | [[ $background_assert ]] && eval "${background_assert}" 583 | # finally shutdown background process 584 | pkill -u $USER -$4 "$execname" 585 | wait #wait for background process (strace and time) to complete after killing executable 586 | else 587 | printf "${RUNNING_STR} ${PURPLE}${stripped_cmd}${NC}\n" 588 | { { time { strace -e tgkill -o _strace $1 1>_out 2>_err;}; echo $?>_returnval;} 2>_time; } >/dev/null 2>&1 589 | fi 590 | [[ "$count_threads_pid" ]] && get_n_threads 591 | 
returnval=$(<_returnval) 592 | output=$(<_out) 593 | error=$(<_err) 594 | exectime=$(tail -1 _time) # NOTE this is a hack... _time can contain shell traceback on segfault need to FIX! 595 | strace=$(<_strace) 596 | rm -f _out _err _time _strace _returnval 597 | 598 | if [[ "$strace" == *"tgkill"* ]]; then 599 | printf "${RED}***WARNING: Process terminated abnormally${NC}\n"; 600 | fi 601 | printf "${BLUE}ExecTime:${NC} ${exectime} seconds\n" 602 | 603 | } 604 | 605 | # Count no of tests 606 | function count_tests 607 | { 608 | local count_tests=0 609 | for foo in $(declare -F); do 610 | if [[ "$foo" == "test_"*"${TEST_FUNCTION}" ]]; then 611 | ((count_tests++)) 612 | fi 613 | done 614 | echo $count_tests 615 | } 616 | 617 | # Test runner function 618 | function testrunner 619 | { 620 | # search through all declared functions and 621 | # and run those with names beginning with test_ 622 | local count_fail=0 623 | local count_tests=0 624 | 625 | # write TAP file test count header 626 | if [ $TAP_FILE ]; then 627 | local ntests=$(count_tests) 628 | TAP_FILE=$(realpath $TAP_FILE) 629 | rm -rf $TAP_FILE 630 | echo 1..$ntests >> $TAP_FILE 631 | fi 632 | 633 | for foo in $(declare -F); do 634 | if [[ "$foo" == "test_"*"${TEST_FUNCTION}" ]]; then 635 | [[ "$LIST_TESTS_ONLY" ]] && { echo "$foo"; continue; } 636 | _assert_condition=0 # assert condition reset before run 637 | print_line "-" 638 | title=$(echo $foo | tr '_[:lower:]' ' [:upper:]') 639 | printf "${title}\n" 640 | setup 641 | $foo; 642 | echo "" 643 | printf "Status - " 644 | [ "$_assert_condition" == "0" ] && printf "${GREEN}PASS${NC}\n" || printf "${FAIL_STR}\n" 645 | [ "$_assert_condition" == "0" ] || ((count_fail++)) 646 | ((count_tests++)) 647 | [ "$NO_TEARDOWN" ] || teardown 648 | # write test result to TAP file 649 | if [ "$TAP_FILE" ]; then 650 | [ "$_assert_condition" != "0" ] && echo -n "not " >> $TAP_FILE 651 | echo -n "ok $count_tests - $foo" >> $TAP_FILE 652 | echo "" >> $TAP_FILE 653 | fi 654 | fi; 
655 | done 656 | [[ "$LIST_TESTS_ONLY" ]] && exit 0 657 | # print summary 658 | print_line "=" 659 | echo "Ran ${count_tests} tests - Failed ${count_fail}" 660 | # return with correct exit code 661 | if ((${count_fail} == 0)); then 662 | return 0 663 | else 664 | return 1 665 | fi 666 | } 667 | -------------------------------------------------------------------------------- /documentation/docs/Assert_API.md: -------------------------------------------------------------------------------- 1 | 2 | # Assert API 3 | Currently the following assert functions have been implemented, Some useful asserts are probably still missing, so please contribute or ask for help. 4 | If you want to develop, please take a look at functions beginning with `assert_` 5 | in the `bash_test_tools` file. 6 | 7 | ## variables 8 | The most elementary assert calls operate on environment variables within the shell. 9 | Typically more specific assert statements, such as `assert_no_error` are composed of one or more of these 10 | more elementary asserts. 
11 | 12 | ####assert_contains 13 | ```bash 14 | # Asserts that $var contains sub-string $string 15 | # assert_contains "$var" "$string" [var-alias [string-alias]] 16 | # example: 17 | assert_contains "$output" "Hello World" 18 | # or 19 | assert_contains "$output" "Hello World" "standard output" 20 | ``` 21 | 22 | ####assert_not_contains 23 | ```bash 24 | # Asserts that $var does not contain a sub-string $string 25 | # assert_not_contains "$string" [var-alias [string-alias]] 26 | # example: 27 | assert_not_contains "$output" "Hello World" 28 | # or 29 | assert_not_contains "$output" "Hello World" "standard output" 30 | ``` 31 | 32 | ####assert_equal 33 | ```bash 34 | # Asserts that $var1 equals $var2 - works on both numeric and strings 35 | # assert_equal "$var1" "$var2" [var1-alias [var2-alias]] 36 | # example: 37 | assert_equal "$output" "Hello World" 38 | assert_equal "$output1" "$output2" "run 1 output" "run 2 output" 39 | ``` 40 | 41 | ####assert_not_equal 42 | ```bash 43 | # Asserts that $var1 does not equal $var2 - works on both numeric and strings 44 | # assert_not_equal "$var1" "$var2" [var1-alias [var2-alias]] 45 | # example: 46 | assert_not_equal "$output" "Hello World" 47 | # or 48 | assert_equal "$output1" "$output2" "run 1 output" "run 2 output" 49 | ``` 50 | 51 | ####assert_greater_than 52 | ```bash 53 | # Asserts that $var1 is greater than $var2 - works on numeric 54 | # assert_greater_than "$var1" "$var2" [var1-alias [var2-alias]] 55 | # example: 56 | assert_greater_than "$thread_count" "4" "thread count" 57 | ``` 58 | 59 | ####assert_less_than 60 | ```bash 61 | # Asserts that $var1 is less than $var2 - works on numeric 62 | # assert_less_than "$var1" "$var2" [var1-alias [var2-alias]] 63 | # example: 64 | assert_less_than "$thread_count" "4" "thread count" 65 | ``` 66 | 67 | ####assert_empty 68 | ```bash 69 | # Asserts that $var is "" or not set 70 | # assert_empty "$var" [var-alias] 71 | # example: 72 | assert_empty "$error" "std error" 73 | ``` 
74 | 75 | ####assert_not_empty 76 | ```bash 77 | # Asserts that $var is not "" 78 | # assert_not_empty "$var" [var-alias] 79 | # example: 80 | assert_not_empty "$output" "std output" 81 | ``` 82 | 83 | ####assert_matches_regex 84 | ```bash 85 | # Asserts that $var matches regular expression $regex 86 | # assert_matches "$var" "$regex" [var-alias] 87 | # example: 88 | assert_matches_regex "$output" "^[0-9]+\.[0-9]+\.[0-9]+" "output version number" 89 | # i.e. asserts that $output begins with ##.##.## version style numerics 90 | ``` 91 | 92 | ## output 93 | 94 | These asserts apply specifically to the **standard output**, variable `output`. 95 | 96 | ####assert_output_contains 97 | ```bash 98 | # Asserts that $output contains a sub-string $string [string-alias] 99 | # assert_output_contains "$string" 100 | # example: 101 | assert_output_contains "Hello World" 102 | ``` 103 | 104 | ####assert_output_not_contains 105 | ```bash 106 | # Asserts that $output does not contain a sub-string $string [string-alias] 107 | # assert_output_not_contains "$string" 108 | # example: 109 | assert_output_not_contains "Hello world" 110 | ``` 111 | 112 | ####assert_has_output 113 | ```bash 114 | # Asserts that $output is not empty != "" 115 | # assert_has_output 116 | # example: 117 | assert_has_output 118 | ``` 119 | 120 | ####assert_no_output 121 | ```bash 122 | # Asserts that $output is empty == "" 123 | # assert_no_output 124 | # example: 125 | assert_no_output 126 | ``` 127 | 128 | ## error 129 | 130 | These asserts apply specifically to the **standard error**, variable `error` 131 | 132 | ####assert_error_contains 133 | ```bash 134 | # Asserts that $error contains a sub-string $string [string-alias] 135 | # assert_error_contains "$string" 136 | # example: 137 | assert_error_contains "could not open file" 138 | ``` 139 | 140 | ####assert_has_error 141 | ```bash 142 | # Asserts that $error is not empty != "" 143 | # assert_has_error 144 | # example: 145 | assert_has_error 146 | ``` 
147 | 148 | ####assert_no_error 149 | ```bash 150 | # Asserts that $error is empty == "" 151 | # assert_no_error 152 | # example: 153 | assert_no_error 154 | ``` 155 | 156 | ## termination 157 | 158 | These asserts apply specifically to the variables `returnval`, `error` and `strace`. They evaluate the conditions by which the executable terminated. 159 | 160 | ####assert_exit_success 161 | ```bash 162 | # Asserts that $returnval is equal to 0 (SUCCESS) 163 | # assert_exit_success 164 | # example: 165 | assert_exit_success 166 | ``` 167 | 168 | ####assert_exit_fail 169 | ```bash 170 | # Asserts that $returnval is not equal to 0 (FAIL) 171 | # assert_exit_fail 172 | # example: 173 | assert_exit_fail 174 | ``` 175 | 176 | ####assert_terminated_normally 177 | ```bash 178 | # Essentially asserts that the executable exited without crashing. 179 | # Asserts that $strace does not include substring "tgkill" 180 | # assert_terminated_normally 181 | # example: 182 | assert_terminated_normally 183 | ``` 184 | 185 | ####assert_success 186 | ```bash 187 | # Asserts healthy success behavior 188 | # Calls assert_terminated_normally, assert_exit_success, assert_no_error 189 | # example: 190 | assert_success 191 | ``` 192 | 193 | ####assert_fail 194 | ```bash 195 | # Asserts healthy fail behavior 196 | # Calls assert_terminated_normally, assert_exit_fail, assert_has_error 197 | # example: 198 | assert_fail 199 | ``` 200 | 201 | ## file system 202 | 203 | These asserts apply to files and directories 204 | 205 | ####assert_file_exists 206 | ```bash 207 | # asserts that file exists 208 | # assert_file_exists "$file_path" 209 | # example: 210 | assert_file_exists "$file_path" 211 | ``` 212 | 213 | ####assert_file_not_exists 214 | ```bash 215 | # asserts that file does not exist 216 | # assert_file_not_exists "$file_path" 217 | # example: 218 | assert_file_not_exists "$file_path" 219 | ``` 220 | 221 | ####assert_dir_exists 222 | ```bash 223 | # asserts that a directory exists 224 | #
assert_dir_exists "$dir_path" 225 | # example: 226 | assert_dir_exists "$dir_path" 227 | ``` 228 | 229 | ####assert_tree_equal 230 | ```bash 231 | # asserts that two directory trees are the same 232 | # assert_tree_equal "$dir1" "$dir2" 233 | # example: 234 | assert_tree_equal "/some/dir1" "/some/other/dir2" 235 | ``` 236 | 237 | ####assert_tree_not_equal 238 | ```bash 239 | # asserts that two directory trees are not the same 240 | # assert_tree_not_equal "$dir1" "$dir2" 241 | # example: 242 | assert_tree_not_equal "/some/dir1" "/some/other/dir2" 243 | ``` 244 | 245 | ## services 246 | These asserts apply to services, typically network services. 247 | 248 | ####assert_service_on_port 249 | ```bash 250 | # asserts that a network service is listening on port $port 251 | # assert_service_on_port "$port" 252 | # example: 253 | assert_service_on_port "8000" 254 | ``` 255 | 256 | 257 | 258 | -------------------------------------------------------------------------------- /documentation/docs/custom.css: -------------------------------------------------------------------------------- 1 | .hljs-built_in 2 | { 3 | color: #333; 4 | } 5 | -------------------------------------------------------------------------------- /documentation/docs/index.md: -------------------------------------------------------------------------------- 1 | #**Bash Test Tools** 2 | **for testing executables in a shell environment - here's a quick look...** 3 | 4 | ```bash 5 | # A simple test for the "find" executable 6 | source bash_test_tools 7 | 8 | WORK="/tmp/work" 9 | 10 | function setup 11 | { 12 | mkdir -p "$WORK" 13 | cd "$WORK" 14 | touch some_file.txt 15 | } 16 | 17 | function teardown 18 | { 19 | cd 20 | rm -rf "$WORK" 21 | } 22 | 23 | function test_find_local_directory 24 | { 25 | # Run 26 | run "find ./" 27 | # Assert 28 | assert_success 29 | assert_output_contains "some_file.txt" 30 | } 31 | 32 | testrunner 33 | ``` 34 | 35 | ![one test](screenshot1.jpg) 36 | 37 | # Introduction 38 | 39 | 
Bash Test Tools is intended to be a simple to use framework for testing executables inside 40 | a shell environment. The framework allows extraction and assert operations on parameters 41 | such as **standard output**, **standard error**, **exit code**, **execution time**, **file system** and **network services**. 42 | 43 | The Bash Test Tools are suitable for performing high level tests on executables, i.e. **system tests**, treating an executable as 44 | a black box, examining only the output and state of the executable and its environment. Typical domains of use could be to: 45 | 46 | * verify complete **use cases** 47 | * identify simple but critical failures, aka **smoke testing** 48 | * verify that documented behavior and **--help** is correct 49 | * collect **performance metrics** such as **execution time** 50 | 51 | In this document we will collectively call these test scenarios **system tests**. 52 | 53 | # Prerequisites 54 | 55 | The test runner uses `strace` to track signalled exits or terminations of executables. 56 | To install on debian systems do, 57 | 58 | ```bash 59 | $ sudo apt-get install strace 60 | ``` 61 | 62 | Also get a copy of the latest **bash_test_tools** source file: 63 | 64 | ```bash 65 | $ wget https://raw.githubusercontent.com/thorsteinssonh/bash_test_tools/master/bash_test_tools 66 | ``` 67 | 68 | # Workflow 69 | The workflow for implementing tests is as follows, 70 | 71 | 1. create a bash script 72 | 2. source the `bash_test_tools` file 73 | 3. define a function called `setup` 74 | 4. define a function called `teardown` 75 | 5. implement a series of test function, all must be named beginning with `test_` (e.g. `test_foo`). 76 | * **must** contain a run call, e.g. `run "foo --some-opts args"` 77 | * followed by at least one assert call, e.g. `assert_success` 78 | 6. 
execute the `testrunner` function (it will magically run all test that have been defined) 79 | 80 | When the script is executed each test will be preceeded by a `setup` followed by a `teardown` call. 81 | This is sometimes inefficient but ensures that all tests run in isolation from each other. 82 | If any one assert statement within a test function fails, the whole test will fail. 83 | 84 | # Creating a Script 85 | For demonstration purposes we are going to create a test for the UNIX command line tool 86 | **find**, a tool that helps you list and search for files and directories. 87 | First we must fetch the source code and source the `bash_test_tools` file that is located in the root of the 88 | source code directory 89 | 90 | ```bash 91 | $ # get the source code 92 | $ git clone https://github.com/thorsteinssonh/bash_test_tools.git 93 | $ cd bash_test_tools 94 | $ # start editing a new test in you favorite editor (here we use nano) 95 | $ nano test_find.sh 96 | ``` 97 | 98 | At the top of your test script add your typical **shebang** and 99 | source the `bash_test_tools` file. 100 | 101 | ```bash 102 | #! /usr/bin/env bash 103 | # -*- coding: utf-8 -*- 104 | source bash_test_tools 105 | ``` 106 | 107 | Before implementing our tests we must first define the `setup` and `teardown` 108 | functions that take care of setting up and removing an environment for 109 | the tests. 110 | 111 | Typically a `setup` function will create a working directory and `cd` 112 | into it. The `setup` may also start necessary services and/or provide test files to 113 | operate on. 
Here is an example setup function that creates a work directory and 114 | adds an empty test file: 115 | 116 | ```bash 117 | function setup 118 | { 119 | mkdir -p work 120 | cd work 121 | touch some_file.txt 122 | } 123 | ``` 124 | 125 | The teardown function simply cleans up after the test has been performed and 126 | typically may look as follows: 127 | 128 | ```bash 129 | function teardown 130 | { 131 | cd .. 132 | rm -rf work 133 | } 134 | ``` 135 | 136 | We can now define our first test. In this example lets find files in 137 | the local directory, then assert that the operation was successfull and 138 | exited gracefully. Off course we also assert that **find** has discovered 139 | our test file as expected, 140 | 141 | ```bash 142 | function test_find_local_directory 143 | { 144 | # Run 145 | run "find ./" 146 | # Assert 147 | assert_success 148 | assert_output_contains "some_file.txt" 149 | } 150 | ``` 151 | 152 | Finally it is necessary to execute the `testrunner`, without it no tests will be processed. 153 | Add the following line at the bottom of the script, 154 | 155 | ```bash 156 | testrunner 157 | ``` 158 | 159 | The entire script now looks as follows, 160 | 161 | ```bash 162 | #! /usr/bin/env bash 163 | # -*- coding: utf-8 -*- 164 | source bash_test_tools 165 | 166 | function setup 167 | { 168 | mkdir -p work 169 | cd work 170 | touch some_file.txt 171 | } 172 | 173 | function teardown 174 | { 175 | cd .. 176 | rm -rf work 177 | } 178 | 179 | function test_find_local_directory 180 | { 181 | # Run 182 | run "find ./" 183 | # Assert 184 | assert_success 185 | assert_output_contains "some_file.txt" 186 | } 187 | 188 | testrunner 189 | ``` 190 | Execute the script 191 | ```bash 192 | chmod u+x test_find.sh 193 | ./test_find.sh 194 | ``` 195 | 196 | and the output should look as follows, 197 | 198 | ![one test](screenshot1.jpg) 199 | 200 | ## Review 201 | Lets look in a little more detail what our test did. 
202 | The test defined did three things: 203 | 204 | * First the `"find ./"` execution call was passed to the `bash_test_tools` `run` function. Note the quotes `"find ./"`, they are necessary. 205 | `Run` will collect various metrics into global shell variables called `output`, `error`, `exectime`, `returnval` and `strace` - these will be addressed in detail later. 206 | * The second function call `assert_success` is a generic assert for healthy program termination with success. In fact the single `assert_success` call consists of a series of more granular asserts, called `assert_terminated_normally`, `assert_exit_success` and `assert_no_error`. 207 | * `terminated normally` checks if executable exited normally (i.e. **without crashing** signals such as SIGENV). 208 | * `exit success` checks the exit status is 0 (SUCCESS). 209 | * `no error` will verify that nothing has been printed to standard error. 210 | * The third function call `assert_output_contains` simply verifies that the `find` has correctly reported to `standard output` that the test file `some_file.txt` was found. 211 | 212 | 213 | # Script Options 214 | The framework automatically embeds options to the test script. 215 | Help will be printed with optional argument `-h`. 216 | 217 | ```bash 218 | $ ./test_find.sh -h 219 | 220 | test_find.sh - tests built on bash_test_tools 221 | 222 | Usage: test_find.sh [OPTIONS]... 223 | 224 | -l list all available tests 225 | -t [TESTNAME] run only tests ending in TESTNAME 226 | -o [TAP FILE] write test results to TAP (test anything) file 227 | -x disable teardown (for debugging) 228 | -h print this help 229 | ``` 230 | 231 | For example, examine the test script provided inside the **examples** directory. 
232 | 233 | ```bash 234 | $ cd examples 235 | $ ./test_find.sh -l 236 | test_find_delete 237 | test_find_local_directory 238 | test_find_txt_files 239 | test_has_unix_conventions 240 | test_invalid_file_or_directory 241 | test_invalid_option 242 | test_new_feature 243 | ``` 244 | We can specifically run only tests ending with the name "_directory" 245 | 246 | ```bash 247 | $ ./test_find.sh -t _directory 248 | ---------------------------------------------------------------- 249 | TEST FIND LOCAL DIRECTORY 250 | Running: find ./ 251 | ExecTime: 0.004 seconds 252 | Assert: process terminated normally OK 253 | Assert: 'exit status' equal to 0 OK 254 | Assert: 'stderror' is empty OK 255 | Assert: 'stdoutput' contains 'some_file.txt' OK 256 | 257 | Status - PASS 258 | ---------------------------------------------------------------- 259 | TEST INVALID FILE OR DIRECTORY 260 | Running: find ./non_existing_path 261 | ExecTime: 0.004 seconds 262 | Assert: process terminated normally OK 263 | Assert: 'exit status' not equal to 0 OK 264 | Assert: 'stderror' not empty OK 265 | Assert: 'stderror' contains 'No such file or directory' OK 266 | 267 | Status - PASS 268 | ================================================================ 269 | Ran 2 tests - Failed 0 270 | ``` 271 | and we can output test results in a portable format using the [Test Anything Protocol](https://testanything.org/), 272 | ```bash 273 | $ ./test_find.sh -o result.tap 274 | $ cat result.tap 275 | 1..7 276 | ok 1 - test_find_delete 277 | ok 2 - test_find_local_directory 278 | ok 3 - test_find_txt_files 279 | ok 4 - test_has_unix_conventions 280 | ok 5 - test_invalid_file_or_directory 281 | ok 6 - test_invalid_option 282 | not ok 7 - test_new_feature 283 | ``` 284 | 285 | #Generic Tests 286 | `bash_test_tools` ships with a few **generic** tests that are appropriate for 287 | testing common features. 
Two very common features within UNIX environments 288 | are that executables typically accept `--version` and `--help` arguments. 289 | To test an executable with a generic test for `--version` and `--help` options add 290 | the following two lines to your script, 291 | ```bash 292 | generic has_unix_version "find" 293 | generic has_unix_help "find" 294 | ``` 295 | This will automatically construct tests on the executable **find** 296 | that check if the executable accepts version and help options. 297 | They will assert that the program terminates healthily and if it actually prints 298 | something to sandard out. 299 | ``` 300 | ------------------------------------------------------ 301 | TEST HAS UNIX HELP 302 | Running: find --help 303 | ExecTime: 0.004 seconds 304 | Assert: process terminated normally OK 305 | Assert: 'exit status' equal to 0 OK 306 | Assert: 'stderror' is empty OK 307 | Assert: 'stdout' not empty OK 308 | Assert: 'help' contains '--help' OK 309 | 310 | Status - PASS 311 | ------------------------------------------------------ 312 | TEST HAS UNIX VERSION 313 | Running: find --version 314 | ExecTime: 0.005 seconds 315 | Assert: process terminated normally OK 316 | Assert: 'exit status' equal to 0 OK 317 | Assert: 'stderror' is empty OK 318 | Assert: 'stdout' not empty OK 319 | Running: find --help 320 | ExecTime: 0.004 seconds 321 | Assert: 'help' contains '--version' OK 322 | 323 | Status - PASS 324 | ------------------------------------------------------ 325 | ``` 326 | Notice that `has_unix_version` also checks if it has been 327 | documented in `--help`. 328 | There is a third generic test that calls these two test, 329 | it is called `has_unix_convention`, hence you can replace 330 | the above two with a single line, 331 | ```bash 332 | generic has_unix_convention "find" 333 | ``` 334 | Not all command line tools do accept these options, we can take 335 | a look at how this test fails when no such option is available. 
336 | **strace** is a tool that doesn't, here is how it fails, 337 | ```bash 338 | ---------------------------------------------------------- 339 | TEST HAS UNIX VERSION 340 | Running: strace --version 341 | ExecTime: 0.003 seconds 342 | Assert: process terminated normally OK 343 | Assert: 'exit status' equal to 0 FAIL 344 | Assert: 'stderror' is empty FAIL 345 | Assert: 'stdout' not empty FAIL 346 | Running: strace --help 347 | ExecTime: 0.002 seconds 348 | Assert: 'help' contains '--version' FAIL 349 | 350 | Status - FAIL 351 | ========================================================== 352 | Ran 1 tests - Failed 1 353 | ``` 354 | Turns out **strace** terminates gracefully, but does indicate 355 | through **exit status** and **standard error** that the 356 | call is unsupported. Writing tests that fail in this way 357 | is in fact one of the main points of software testing, to track through 358 | *test driven development* if planned software features have been implemented 359 | or not. 360 | 361 | As the `bash_test_tools` codebase developes we expect to add more generic 362 | tests that help catch commonplace conventions. 363 | Tests for POSIX, Single UNIX and GNU protocols should be 364 | quite re-usable and ideal for writing generic tests. Please contribute some! 365 | Look for **"function generic_"** inside the `bash_test_tools` file to see how 366 | a generic test is developed. 367 | 368 | # External Asserts 369 | The shell environment is not ideal for performing complicated tests 370 | on text and binary files. The `assert` function allows you to call any 371 | executable, be it compiled, python, or ruby, for the purpose of performing more 372 | granular asserts. The requirement on the external assert executable is that it return an 373 | exit code of 0 if the assert is successful, and !=0 if the assert fails. 374 | For demonstration purposes we will define an assert executable in python, `is_foobar.py`. 
375 | It returns success if the argument is `"foobar"`, otherwise it returns fail. 376 | ```python 377 | import sys 378 | argument = sys.argv[1] 379 | if argument == "foobar": 380 | sys.exit(0) 381 | else: 382 | sys.exit(1) 383 | ``` 384 | In our hypothetical test we can now add an assert that calls this 385 | custom python test, 386 | ```bash 387 | assert "is_foobar.py foobar" 388 | ``` 389 | The output test should now include this assert step, 390 | ```bash 391 | Assert: arg_is_foobar.py foobar OK 392 | ``` 393 | Typically these custom assert functions will performe more useful 394 | and more detailed tasks than demonstrated here. 395 | It's useful to name the asserts informatively as above. 396 | For example, 397 | ```bash 398 | assert "is_jpeg_image.py some_image.file" 399 | # or 400 | assert "is_json_text.py some_text_file" 401 | ``` 402 | This will help to make the test output more readable. 403 | 404 | # Assert During Execution 405 | So far we have only dealt with asserting conditions after an executable 406 | has terminated. However, sometimes we need to test for conditions 407 | during execution. Some executables are for example designed to run as **services** or **daemons**. 408 | In such situations we may need to execute assert statements while an executable is running 409 | in the background. `bash_test_tools` allow you to do this adding a 410 | set of assert statements to a background_assert queue. The following statement 411 | adds a tcp service check on port 1234 to the queue, 412 | ```bash 413 | add_background_assert assert_service_on_port 1234 414 | ``` 415 | These asserts will then be executed during a backgrounded run statement. 416 | Here we start **netcat**, a network diagnostic tool, and listen on port 1234 417 | for 2 seconds before ending the process with a signalled SIGTERM. 
418 | ```bash 419 | run "nc -l 1234" background 2 SIGTERM 420 | ``` 421 | The queued assert statements are executed after the 2 second sleep, followed by 422 | the signalled termination of the process. Some amount of sleep before executing 423 | asserts is necessary to allow the process or service to boot up or initialize. 424 | Off course the necessary sleep length will depend on the software and conditions that are being tested. 425 | The whole test function that we have described looks like this, 426 | 427 | ```bash 428 | function test_nc_listen_on_port 429 | { 430 | add_background_assert assert_service_on_port 1234 431 | #run 432 | run "nc -l 1234" background 2 SIGTERM 433 | #assert 434 | assert_terminated_normally 435 | assert_no_error 436 | } 437 | ``` 438 | We have added a couple of more 'after execution' asserts to check for healthy termination of the software. 439 | When we execute the test we get the following output, 440 | ```bash 441 | ------------------------------------------------------------------ 442 | TEST NC LISTEN ON PORT 443 | Running: nc -l 1234 (background 2 secs) 444 | Assert: service on port 1234 OK 445 | ExecTime: 2.016 seconds 446 | Assert: process terminated normally OK 447 | Assert: 'stderror' is empty OK 448 | 449 | Status - PASS 450 | ================================================================== 451 | Ran 1 tests - Failed 0 452 | 453 | ``` 454 | -------------------------------------------------------------------------------- /documentation/docs/screenshot1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thorsteinssonh/bash_test_tools/8acd3af7b29b61dfc86e9b6c4d8c828665b0d0b1/documentation/docs/screenshot1.jpg -------------------------------------------------------------------------------- /documentation/mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Bash Test Tools 2 | #theme: readthedocs 3 | 4 | 
pages: 5 | - Introduction: 'index.md' 6 | - 'Assert_API.md' 7 | 8 | -------------------------------------------------------------------------------- /examples/test_find.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | # -*- coding: utf-8 -*- 3 | 4 | source ../bash_test_tools 5 | 6 | # setup / tear down 7 | function setup 8 | { 9 | mkdir -p work 10 | cd work 11 | touch some_file.txt 12 | touch other_file 13 | } 14 | 15 | function teardown 16 | { 17 | cd .. 18 | rm -rf work 19 | } 20 | 21 | # Set up generic predefined tests 22 | generic has_unix_conventions "find" 23 | 24 | # Add test functions here 25 | function test_find_local_directory 26 | { 27 | # Run 28 | run "find ./" 29 | # Assert 30 | assert_success 31 | assert_output_contains "some_file.txt" 32 | } 33 | 34 | function test_find_txt_files 35 | { 36 | # Run 37 | run "find ./ -name *.txt" 38 | # Assert 39 | assert_success 40 | assert_output_contains "some_file.txt" 41 | assert_output_not_contains "other_file" 42 | } 43 | 44 | function test_find_delete 45 | { 46 | # Run 47 | run "find ./ -name *.txt -delete" 48 | # Assert 49 | assert_success 50 | assert_file_not_exists "some_file.txt" 51 | assert_file_exists "other_file" 52 | } 53 | 54 | function test_invalid_option 55 | { 56 | # Run 57 | run "find --silly_bad_option" 58 | # Assert 59 | assert_fail 60 | assert_error_contains "unknown predicate" 61 | } 62 | 63 | function test_invalid_file_or_directory 64 | { 65 | # Run 66 | run "find ./non_existing_path" 67 | # Assert 68 | assert_fail 69 | assert_error_contains "No such file or directory" 70 | } 71 | 72 | function test_new_feature 73 | { 74 | ## In testdriven development we 75 | ## may implement a test for a feature yet to be implemented 76 | # Run 77 | run "find --new-feature" 78 | # Assert 79 | assert_success 80 | } 81 | 82 | # Run all test functions - optional argument passed to run specific tests only 83 | testrunner 84 | 
-------------------------------------------------------------------------------- /examples/test_netcat.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | # -*- coding: utf-8 -*- 3 | 4 | source ../bash_test_tools 5 | 6 | # setup / tear down 7 | function setup 8 | { 9 | mkdir -p work 10 | cd work 11 | } 12 | 13 | function teardown 14 | { 15 | cd .. 16 | rm -rf work 17 | } 18 | 19 | # Add test functions here 20 | function test_nc_listen_on_port 21 | { 22 | # queue an assert on backgrounded run 23 | add_background_assert "assert_service_on_port 1234" 24 | #run 25 | run "nc -l 1234" background 2 SIGTERM 26 | #assert 27 | assert_terminated_normally 28 | assert_no_error 29 | } 30 | 31 | # Run all test functions - optional argument passed to run specific tests only 32 | testrunner 33 | -------------------------------------------------------------------------------- /tests/arg_is_foobar.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | 3 | # These is a custom assert test written in python. 4 | # Posix shells are not ideal for performing granual detailed asserts 5 | # on output from executables. Therefore we have added this simple 6 | # option to develop a detailed assert in the language of your choice. 7 | # All that is necessary is that the test retun 0 for success or 8 | # !=0 for fail as exit code. 9 | 10 | import sys 11 | 12 | argument = sys.argv[1] 13 | 14 | if argument == "foobar": 15 | sys.exit(0) 16 | else: 17 | sys.exit(1) 18 | 19 | -------------------------------------------------------------------------------- /tests/test_asserts.sh: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env bash 2 | # -*- coding: utf-8 -*- 3 | 4 | source ../bash_test_tools 5 | 6 | echo "Preprocessing Assert Statmenents" 7 | # Testing the framework asserts requires some hacking 8 | echo "Running Asserts that should evaluate as OK" 9 | _assert_condition=0 10 | assert_equal 1234 1234 11 | assert_not_equal 1234 1235 12 | assert_equal "bla\notherline" "bla\notherline" "multiline1" "multiline2" 13 | error="something"; assert_has_error 14 | output="some output"; assert_has_output 15 | error=""; assert_no_error 16 | output=""; assert_no_output 17 | assert_contains "some text here" "text h" "string" "substring" 18 | assert_contains "some text\nhere" "text\nh" "string" "stubstring w newline" 19 | assert_not_contains "some text here" "text-h" "string" "substring" 20 | assert "./arg_is_foobar.py foobar" 21 | 22 | total_fail1=$_assert_condition 23 | 24 | echo "Running Asserts that should evaluate as FAIL" 25 | _assert_condition=0 26 | assert_equal 34 35 27 | assert_not_equal 12 12 28 | assert_equal "bla\notherline" "blabla\notherline" "multiline1" "multiline2" 29 | error=""; assert_has_error 30 | output=""; assert_has_output 31 | error="abc"; assert_no_error 32 | output="abc"; assert_no_output 33 | assert_contains "some text here" "texth" "string" "substring" 34 | assert_contains "some text here" "text\nh" "string" "stubstring w newline" 35 | assert_not_contains "some text here" "text h" "string" "substring" 36 | assert "./arg_is_foobar.py notfoobar" 37 | 38 | total_fail2=$_assert_condition 39 | 40 | function test_ok_asserts 41 | { 42 | # Assert 43 | assert_equal "$total_fail1" "0" "no. of ok asserts failed should be" 44 | } 45 | 46 | function test_fail_asserts 47 | { 48 | # Assert 49 | assert_equal "$total_fail2" "11" "no. of fail asserts failed should be" 50 | } 51 | 52 | # Run all test functions - optional argument passed to run specific tests only 53 | testrunner 54 | --------------------------------------------------------------------------------