From e5e57ab74e2c4908056991e26302110fd577b41e Mon Sep 17 00:00:00 2001
From: Tomasz Gromadzki
Date: Sat, 17 Jun 2023 10:22:52 +0200
Subject: [PATCH 1/3] common: add support for --force-enable option in
 RUNTESTS script

Add support for the --force-enable option in the RUNTESTS script
so that the bash tests can be invoked the same way as the Python
tests run by RUNTESTS.py, where the --force-enable option is
already available.

Signed-off-by: Tomasz Gromadzki
---
 src/test/README   |  3 ++-
 src/test/RUNTESTS | 58 +++++++++++++++++++++++++++++++----------------
 2 files changed, 40 insertions(+), 21 deletions(-)

diff --git a/src/test/README b/src/test/README
index 0decd633845..ca12b69d013 100644
--- a/src/test/README
+++ b/src/test/README
@@ -64,7 +64,8 @@ RUNTESTS takes options to limit what it runs. The usage is:
 
 	RUNTESTS [ -hnv ] [ -b build-type ] [ -t test-type ] [ -f fs-type ]
 		[ -o timeout ] [ -s test-file ] [ -k skip-dir ]
-		[ -m memcheck ] [-p pmemcheck ] [ -e helgrind ] [ -d drd ] [ -c ]
+		[[ -m memcheck ] [-p pmemcheck ] [ -e helgrind ] [ -d drd ] ||
+		[ --force-enable memcheck|pmemcheck|helgrind|drd ]] [ -c ]
 		[tests...]
 
 Build types are: debug, nondebug, static-debug, static-nondebug, all (default)
diff --git a/src/test/RUNTESTS b/src/test/RUNTESTS
index 29a2c23ec0b..28bdfaaaeae 100755
--- a/src/test/RUNTESTS
+++ b/src/test/RUNTESTS
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 # SPDX-License-Identifier: BSD-3-Clause
-# Copyright 2014-2022, Intel Corporation
+# Copyright 2014-2023, Intel Corporation
 #
 # RUNTESTS -- setup the environment and run each test
 
@@ -15,8 +15,9 @@ usage()
 	cat >&2 <

Date: Sat, 17 Jun 2023 11:49:30 +0200
Subject: [PATCH 2/3] common: enable --force-enable option in GHA

Signed-off-by: Tomasz Gromadzki
---
 .github/workflows/pmem_valgrind_sh.yml |   4 +-
 src/test/RUNTESTS.sh                   | 706 +++++++++++++++++++++++++
 2 files changed, 708 insertions(+), 2 deletions(-)
 create mode 100644 src/test/RUNTESTS.sh

diff --git a/.github/workflows/pmem_valgrind_sh.yml b/.github/workflows/pmem_valgrind_sh.yml
index a39e89caf04..3f0e16503f2 100644
--- a/.github/workflows/pmem_valgrind_sh.yml
+++ b/.github/workflows/pmem_valgrind_sh.yml
@@ -17,7 +17,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        config: ['-d', '-p', '-m', '-e']
+        config: ['drd', 'pmemcheck', 'memcheck', 'helgrind']
         build: ['debug', 'nondebug', 'static-debug', 'static-nondebug']
         os: [[self-hosted, rhel],[self-hosted, opensuse]]
 
@@ -38,4 +38,4 @@ jobs:
         run: ./$WORKDIR/create-testconfig.sh
 
       - name: Run tests
-        run: cd src/test/ && ./RUNTESTS -b ${{ matrix.build }} ${{ matrix.config }} force-enable
+        run: cd src/test/ && ./RUNTESTS --force-enable ${{ matrix.config }} -b ${{ matrix.build }}
diff --git a/src/test/RUNTESTS.sh b/src/test/RUNTESTS.sh
new file mode 100644
index 00000000000..31e21f42096
--- /dev/null
+++ b/src/test/RUNTESTS.sh
@@ -0,0 +1,706 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2014-2023, Intel Corporation
+
+#
+# RUNTESTS.sh -- setup the environment and run each test
+#
+
+#
+# usage -- print usage message and exit
+#
+usage()
+{
+	[ "$1" ] && echo Error: $1
+	cat >&2 </dev/null && errmsg="$(tput setaf 1)$errmsg$(tput sgr0)"
+	echo "RUNTESTS.sh: stopping: $RUNTEST_DIR/$RUNTEST_SCRIPT $errmsg, $RUNTEST_PARAMS" >&2
+	if [ "$keep_going" == "y" ]; then
+		keep_going_exit_code=1
+		keep_going_skip=y
+		fail_list="$fail_list $RUNTEST_DIR/$RUNTEST_SCRIPT"
+		((fail_count+=1))
+
+		if [ "$CLEAN_FAILED" == "y" ]; then
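+			# $TEMP_LOC points at a temp file (exported in
+			# runtest below) through which the failed test
+			# shares the path of its data directory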
+			dir_rm=$(<$TEMP_LOC)
+			rm -Rf $dir_rm
+			if [ $? -ne 0 ]; then
+				echo -e "Cannot remove directory with data: $dir_rm"
+			fi
+		fi
+	else
+		exit 1
+	fi
+	}
+	rm -f $TEMP_LOC
+
+	[ "$verbose_old" != "-1" ] && verbose=$verbose_old
+
+	return 0
+}
+
+#
+# load_default_global_test_configuration -- load a default global configuration
+#
+load_default_global_test_configuration() {
+	global_req_testtype=all
+	global_req_fstype=all
+	global_req_buildtype=all
+	global_req_timeout='3m'
+
+	return 0
+}
+
+# switch_hyphen -- substitute underscores for hyphens
+switch_hyphen() {
+	echo ${1//-/_}
+}
+
+#
+# read_global_test_configuration -- read the global configuration from a test
+# config file and overwrite the defaults with it
+#
+read_global_test_configuration() {
+	if [ ! -e "config.sh" ]; then
+		return
+	fi
+
+	# unset all global settings
+	unset CONF_GLOBAL_TEST_TYPE
+	unset CONF_GLOBAL_FS_TYPE
+	unset CONF_GLOBAL_BUILD_TYPE
+	unset CONF_GLOBAL_TIMEOUT
+
+	# unset all local settings
+	unset CONF_TEST_TYPE
+	unset CONF_FS_TYPE
+	unset CONF_BUILD_TYPE
+	unset CONF_TIMEOUT
+
+	. config.sh
+
+	[ -n "$CONF_GLOBAL_TEST_TYPE" ] && global_req_testtype=$CONF_GLOBAL_TEST_TYPE
+	[ -n "$CONF_GLOBAL_FS_TYPE" ] && global_req_fstype=$CONF_GLOBAL_FS_TYPE
+	[ -n "$CONF_GLOBAL_BUILD_TYPE" ] && global_req_buildtype=$CONF_GLOBAL_BUILD_TYPE
+	[ -n "$CONF_GLOBAL_TIMEOUT" ] && global_req_timeout=$CONF_GLOBAL_TIMEOUT
+
+	return 0
+}
+
+#
+# read_test_configuration -- generate a test configuration from a global
+# configuration and a test configuration read from a test config file
+# usage: read_test_configuration
+#
+read_test_configuration() {
+	req_testtype=$global_req_testtype
+	req_fstype=$global_req_fstype
+	req_buildtype=$global_req_buildtype
+	req_timeout=$global_req_timeout
+
+	[ -n "${CONF_TEST_TYPE[$1]}" ] && req_testtype=${CONF_TEST_TYPE[$1]}
+	[ -n "${CONF_FS_TYPE[$1]}" ] && req_fstype=${CONF_FS_TYPE[$1]}
+	[ -n "${CONF_BUILD_TYPE[$1]}" ] && req_buildtype=${CONF_BUILD_TYPE[$1]}
+	if [ -n "$runtest_timeout" ]; then
+		req_timeout="$runtest_timeout"
+	else
+		[ -n "${CONF_TIMEOUT[$1]}" ] && req_timeout=${CONF_TIMEOUT[$1]}
+	fi
+
+	special_params=
+	[ "$req_fstype" == "none" -o "$req_fstype" == "any" ] && \
+		special_params="req_fs_type=1"
+
+	return 0
+}
+
+#
+# intersection -- return the common elements of the collections of available
+# and required values
+# usage: intersection
+#
+intersection() {
+	collection=$1
+	[ "$collection" == "all" ] && collection=$3
+	[ "$2" == "all" ] && echo $collection && return
+	for e in $collection; do
+		for r in $2; do
+			[ "$e" == "$r" ] && {
+				subset="$subset $e"
+			}
+		done
+	done
+	echo $subset
+}
+
+#
+# runtest -- given the test directory name, run tests found inside it
+#
+runtest() {
+	[ "$UNITTEST_LOG_LEVEL" ] || UNITTEST_LOG_LEVEL=1
+	export UNITTEST_LOG_LEVEL
+
+	[ -f "$1/TEST0" ] || {
+		echo FAIL: $1: test not found. >&2
+		exit 1
+	}
+	[ -x "$1/TEST0" ] || {
+		echo FAIL: $1: test not executable. >&2
+		exit 1
+	}
+
+	cd $1
+
+	load_default_global_test_configuration
+	read_global_test_configuration
+
+	runscripts=$testfile
+	if [ "$runscripts" = all ]; then
+		if [ "$testseq" = all ]; then
+			runscripts=`ls -1 TEST* | grep '^TEST[0-9]\+$' | sort -V`
+		else
+			# generate test sequence
+			seqs=(${testseq//,/ })
+			runscripts=
+			for seq in ${seqs[@]}; do
+				limits=(${seq//-/ })
+				if [ "${#limits[@]}" -eq "2" ]; then
+					if [ ${limits[0]} -lt ${limits[1]} ]; then
+						nos="$(seq ${limits[0]} ${limits[1]})"
+					else
+						nos="$(seq ${limits[1]} ${limits[0]})"
+					fi
+				else
+					nos=${limits[0]}
+				fi
+				for no in $nos; do
+					runscripts="$runscripts TEST$no"
+				done
+			done
+		fi
+	fi
+
+	# for each TEST script found...
+	for runscript in $runscripts
+	do
+		UNITTEST_NAME="$1/$runscript"
+		local sid=${runscript#TEST}
+		read_test_configuration $sid
+
+		local _testtype="$testtype"
+		# expand the check test type to its subtypes
+		[ "$_testtype" == "check" ] && _testtype="short medium"
+		[ "$_testtype" == "all" ] && _testtype="short medium long"
+
+		ttype=$(intersection "$_testtype" "$req_testtype" "short medium long")
+		[ -z "$ttype" ] && {
+			echo "$UNITTEST_NAME: SKIP test-type $testtype ($req_testtype required)"
+			continue
+		}
+		# collapse the test type to check if it's a valid superset
+		[ "$ttype" == "short medium" ] && ttype="check"
+		[ "$ttype" == "short medium long" ] && ttype="all"
+
+		# check if the resulting test type is a single value
+		ttype_array=($ttype)
+		[ "${#ttype_array[@]}" -gt 1 ] && {
+			echo "$UNITTEST_NAME: multiple test types ($ttype)"
+			exit 1
+		}
+
+		fss=$(intersection "$fstype" "$req_fstype" "none pmem non-pmem any")
+		builds=$(intersection "$buildtype" "$req_buildtype" "debug nondebug static-debug static-nondebug")
+
+		# for each fs-type being tested...
+		for fs in $fss
+		do
+			# don't bother trying when fs-type isn't available...
+			if [ "$fs" == "pmem" ] && [ -z "$PMEM_FS_DIR" ] && [ "$fstype" == "all" ]; then
+				pmem_skip=1
+				continue
+			fi
+
+			if [ "$fs" == "non-pmem" ] && [ -z "$NON_PMEM_FS_DIR" ] && [ "$fstype" == "all" ]; then
+				non_pmem_skip=1
+				continue
+			fi
+
+			if [ "$fs" == "any" ] && [ -z "$PMEM_FS_DIR" ] && [ -z "$NON_PMEM_FS_DIR" ] && [ "$fstype" == "all" ]; then
+				continue
+			fi
+			# for each build-type being tested...
+			for build in $builds
+			do
+				export RUNTEST_DIR=$1
+				export RUNTEST_PARAMS="TEST=$ttype FS=$fs BUILD=$build"
+				export RUNTEST_EXTRA="CHECK_TYPE=$checktype CHECK_POOL=$check_pool \
+					$special_params"
+				export RUNTEST_SCRIPT="$runscript"
+				export RUNTEST_TIMEOUT="$req_timeout"
+
+				if [ "$KEEP_GOING" == "y" ] && [ "$CLEAN_FAILED" == "y" ]; then
+					# temporary file used for sharing data
+					# between RUNTESTS.sh and tests processes
+					temp_loc=$(mktemp /tmp/data-location.XXXXXXXX)
+					export TEMP_LOC=$temp_loc
+				fi
+				# to avoid overwriting logs, skip the remaining tests
+				# in the group if KEEP_GOING=y and a test fails
+				if [ "$keep_going_skip" == "n" ]; then
+					runtest_local
+				fi
+			done
+		done
+		keep_going_skip=n
+	done
+
+	cd ..
+}
+
+[ -f testconfig.sh ] || {
+	cat >&2 </dev/null
+if [ $? != 0 ]; then
+	unset killopt
+fi
+
+# check if timeout can be run in the foreground
+timeout --foreground 1s true &>/dev/null
+if [ $? 
!= 0 ]; then + unset use_timeout +fi + +if [ -n "$TRACE" ]; then + unset use_timeout +fi + +if [ "$1" ]; then + for test in $* + do + [ -d "$test" ] || echo "RUNTESTS.sh: Test does not exist: $test" + [ -f "$test/TEST0" ] && runtest $test + done +else + # no arguments means run them all + for testfile0 in */TEST0 + do + testdir=`dirname $testfile0` + if [[ "$skip_dir" =~ "$testdir" ]]; then + echo "RUNTESTS.sh: Skipping: $testdir" + continue + fi + runtest $testdir + done +fi + +[ "$pmem_skip" ] && echo "SKIPPED fs-type \"pmem\" runs: testconfig.sh doesn't set PMEM_FS_DIR" +[ "$non_pmem_skip" ] && echo "SKIPPED fs-type \"non-pmem\" runs: testconfig.sh doesn't set NON_PMEM_FS_DIR" + +if [ "$fail_count" != "0" ]; then + echo "$(tput setaf 1)$fail_count tests failed:$(tput sgr0)" + # remove duplicates and print each test name in a new line + echo $fail_list | xargs -n1 | uniq + exit $keep_going_exit_code +else + exit 0 +fi From d13f3697da08a4510d6205154583ee7520a9e98b Mon Sep 17 00:00:00 2001 From: Tomasz Gromadzki Date: Sat, 17 Jun 2023 12:01:58 +0200 Subject: [PATCH 3/3] common: add proper bash suffix to RUNTESTS script name Signed-off-by: Tomasz Gromadzki --- .github/workflows/pmem_long.yml | 8 +- .github/workflows/pmem_valgrind_sh.yml | 2 +- src/test/Makefile | 2 +- src/test/Makefile.inc | 4 +- src/test/README | 24 +- src/test/RUNTESTS | 706 ------------------------- src/test/RUNTESTS.sh | 0 src/test/unittest/unittest.sh | 6 +- utils/check_whitespace | 2 +- utils/docker/run-build-package.sh | 2 +- 10 files changed, 25 insertions(+), 731 deletions(-) delete mode 100755 src/test/RUNTESTS mode change 100644 => 100755 src/test/RUNTESTS.sh diff --git a/.github/workflows/pmem_long.yml b/.github/workflows/pmem_long.yml index 5cbbeb7f988..1235022defd 100644 --- a/.github/workflows/pmem_long.yml +++ b/.github/workflows/pmem_long.yml @@ -18,10 +18,10 @@ jobs: fail-fast: false matrix: os: [[self-hosted, rhel],[self-hosted, opensuse]] - config: ['RUNTESTS -t long -b debug', - 'RUNTESTS -t long -b nondebug', - 'RUNTESTS -t long -b static-debug', - 'RUNTESTS -t long -b static-nondebug', + config: ['RUNTESTS.sh -t long -b debug', + 'RUNTESTS.sh -t long -b nondebug', + 'RUNTESTS.sh -t long -b static-debug', + 'RUNTESTS.sh -t long -b static-nondebug', 'RUNTESTS.py -t long -b debug', 'RUNTESTS.py -t long -b release', 'RUNTESTS.py -t long -b static_debug', diff --git a/.github/workflows/pmem_valgrind_sh.yml b/.github/workflows/pmem_valgrind_sh.yml index 3f0e16503f2..2b1be3d923f 100644 --- a/.github/workflows/pmem_valgrind_sh.yml +++ b/.github/workflows/pmem_valgrind_sh.yml @@ -38,4 +38,4 @@ jobs: run: ./$WORKDIR/create-testconfig.sh - name: Run tests - run: cd src/test/ && ./RUNTESTS --force-enable ${{ matrix.config }} -b ${{ matrix.build }} + run: cd src/test/ && ./RUNTESTS.sh --force-enable ${{ matrix.config }} -b ${{ matrix.build }} diff --git a/src/test/Makefile b/src/test/Makefile index f85d41d978d..6ceb5f2a783 100644 --- a/src/test/Makefile +++ b/src/test/Makefile @@ -365,7 +365,7 @@ memcheck-summary-leaks: check: @[ -z "$(BLACKLIST_TESTS)" ] || echo "Blacklisted tests: $(BLACKLIST_TESTS)" - @./RUNTESTS $(RUNTEST_OPTIONS) $(LOCAL_TESTS) + @./RUNTESTS.sh $(RUNTEST_OPTIONS) $(LOCAL_TESTS) $(MAKE) @echo "No failures." diff --git a/src/test/Makefile.inc b/src/test/Makefile.inc index 6b1861fac9a..e38f5d0bb6b 100644 --- a/src/test/Makefile.inc +++ b/src/test/Makefile.inc @@ -629,10 +629,10 @@ $(TSTCHECKSPY): @cd .. 
&& ./RUNTESTS.py $(subst nondebug,release,$(RUNTEST_OPTIONS)) -u $(shell echo $@ | sed 's/^py\/[^0-9]*\([0-9]*\)$$/\1/') -- ${TST} $(TSTCHECKS): sync-test - @cd .. && ./RUNTESTS ${TST} $(RUNTEST_OPTIONS) -s $@ + @cd .. && ./RUNTESTS.sh ${TST} $(RUNTEST_OPTIONS) -s $@ check: sync-test - @cd .. && ./RUNTESTS ${TST} $(RUNTEST_OPTIONS) + @cd .. && ./RUNTESTS.sh ${TST} $(RUNTEST_OPTIONS) pcheck: export NOTTY=1 diff --git a/src/test/README b/src/test/README index ca12b69d013..e53f2f5403e 100644 --- a/src/test/README +++ b/src/test/README @@ -9,7 +9,7 @@ That file describes the local machine configuration (where to find persistent memory, for example) and must be created by hand in each repo as it makes no sense to check in that configuration description to the main repo. -testconfig.sh.example provides more detail. The script RUNTESTS, when run with +testconfig.sh.example provides more detail. The script RUNTESTS.sh, when run with no arguments, will run all unit tests through all the combinations of fs-types and build-types, running the "check" level test. @@ -29,16 +29,16 @@ A testconfig.sh must exist to run these tests! $ cp testconfig.sh.example testconfig.sh $ ...edit testconfig.sh and modify as appropriate... -Tests may be run using the RUNTESTS script: - $ RUNTESTS (runs them all) - $ RUNTESTS testname (runs just the named test) +Tests may be run using the RUNTESTS.sh script: + $ RUNTESTS.sh (runs them all) + $ RUNTESTS.sh testname (runs just the named test) Each test script (named something like "TEST0") is potentially run multiple times with a different set of environment variables so run the test with different target file systems or different versions of the libraries. To see -how RUNTESTS will run a test, use the -n option. For example: +how RUNTESTS.sh will run a test, use the -n option. For example: - $ RUNTESTS -n blk_nblock -s TEST0 + $ RUNTESTS.sh -n blk_nblock -s TEST0 (in ./blk_nblock) TEST=check FS=none BUILD=debug ./TEST0 (in ./blk_nblock) TEST=check FS=none BUILD=nondebug ./TEST0 (in ./blk_nblock) TEST=check FS=none BUILD=static-debug ./TEST0 @@ -60,9 +60,9 @@ Notice how the TEST0 script is run repeatedly with different settings for the three environment variables TEST, FS, and BUILD, providing the test type, file system type, and build type to test. -RUNTESTS takes options to limit what it runs. The usage is: +RUNTESTS.sh takes options to limit what it runs. The usage is: - RUNTESTS [ -hnv ] [ -b build-type ] [ -t test-type ] [ -f fs-type ] + RUNTESTS.sh [ -hnv ] [ -b build-type ] [ -t test-type ] [ -f fs-type ] [ -o timeout ] [ -s test-file ] [ -k skip-dir ] [[ -m memcheck ] [-p pmemcheck ] [ -e helgrind ] [ -d drd ] || [ --force-enable memcheck|pmemcheck|helgrind|drd ]] [ -c ] @@ -84,12 +84,12 @@ RUNTESTS takes options to limit what it runs. The usage is: For example: - $ RUNTESTS -b debug blk_nblock -s TEST0 + $ RUNTESTS.sh -b debug blk_nblock -s TEST0 blk_nblock/TEST0: SETUP (check/pmem/debug) blk_nblock/TEST0: START: blk_nblock blk_nblock/TEST0: PASS -Since the "-b debug" option was given, the RUNTESTS run above only executes +Since the "-b debug" option was given, the RUNTESTS.sh run above only executes the test for the debug version of the library and skips the other variants. 
Running the TEST* scripts directly is also common, especially when debugging @@ -108,7 +108,7 @@ these defaults can be overridden on the command line: $ TEST=check FS=any BUILD=nondebug ./TEST0 The above example runs TEST0 with the nondebug library, just as using -RUNTESTS with "-b nondebug" would from the parent directory. +RUNTESTS.sh with "-b nondebug" would from the parent directory. In addition to overriding TEST, FS, and BUILD environment variables, the unit test framework also looks for several other variables: @@ -139,7 +139,7 @@ up and checking tests. Additionally, most unit tests build a local test program and call it from the TEST* scripts. In additional to TEST0, there can be as many TEST scripts as desired, and -RUNTESTS will execute them in numeric order for each of the test runs it +RUNTESTS.sh will execute them in numeric order for each of the test runs it executes. There are two ways of setting test requirements: diff --git a/src/test/RUNTESTS b/src/test/RUNTESTS deleted file mode 100755 index 28bdfaaaeae..00000000000 --- a/src/test/RUNTESTS +++ /dev/null @@ -1,706 +0,0 @@ -#!/usr/bin/env bash -# SPDX-License-Identifier: BSD-3-Clause -# Copyright 2014-2023, Intel Corporation - -# -# RUNTESTS -- setup the environment and run each test -# - -# -# usage -- print usage message and exit -# -usage() -{ - [ "$1" ] && echo Error: $1 - cat >&2 </dev/null && errmsg="$(tput setaf 1)$errmsg$(tput sgr0)" - echo "RUNTESTS: stopping: $RUNTEST_DIR/$RUNTEST_SCRIPT $errmsg, $RUNTEST_PARAMS" >&2 - if [ "$keep_going" == "y" ]; then - keep_going_exit_code=1 - keep_going_skip=y - fail_list="$fail_list $RUNTEST_DIR/$RUNTEST_SCRIPT" - ((fail_count+=1)) - - if [ "$CLEAN_FAILED" == "y" ]; then - dir_rm=$(<$TEMP_LOC) - rm -Rf $dir_rm - if [ $? -ne 0 ]; then - echo -e "Cannot remove directory with data: $dir_rm" - fi - fi - else - exit 1 - fi - } - rm -f $TEMP_LOC - - [ "$verbose_old" != "-1" ] && verbose=$verbose_old - - return 0 -} - -# -# load_default_global_test_configuration -- load a default global configuration -# -load_default_global_test_configuration() { - global_req_testtype=all - global_req_fstype=all - global_req_buildtype=all - global_req_timeout='3m' - - return 0 -} - -# switch_hyphen -- substitute hyphen for underscores -switch_hyphen() { - echo ${1//-/_} -} - -# -# read_global_test_configuration -- read a global configuration from a test -# config file and overwrite a global configuration -# -read_global_test_configuration() { - if [ ! -e "config.sh" ]; then - return - fi - - # unset all global settings - unset CONF_GLOBAL_TEST_TYPE - unset CONF_GLOBAL_FS_TYPE - unset CONF_GLOBAL_BUILD_TYPE - unset CONF_GLOBAL_TIMEOUT - - # unset all local settings - unset CONF_TEST_TYPE - unset CONF_FS_TYPE - unset CONF_BUILD_TYPE - unset CONF_TIMEOUT - - . 
config.sh - - [ -n "$CONF_GLOBAL_TEST_TYPE" ] && global_req_testtype=$CONF_GLOBAL_TEST_TYPE - [ -n "$CONF_GLOBAL_FS_TYPE" ] && global_req_fstype=$CONF_GLOBAL_FS_TYPE - [ -n "$CONF_GLOBAL_BUILD_TYPE" ] && global_req_buildtype=$CONF_GLOBAL_BUILD_TYPE - [ -n "$CONF_GLOBAL_TIMEOUT" ] && global_req_timeout=$CONF_GLOBAL_TIMEOUT - - return 0 -} - -# -# read_test_configuration -- generate a test configuration from a global -# configuration and a test configuration read from a test config file -# usage: read_test_configuration -# -read_test_configuration() { - req_testtype=$global_req_testtype - req_fstype=$global_req_fstype - req_buildtype=$global_req_buildtype - req_timeout=$global_req_timeout - - [ -n "${CONF_TEST_TYPE[$1]}" ] && req_testtype=${CONF_TEST_TYPE[$1]} - [ -n "${CONF_FS_TYPE[$1]}" ] && req_fstype=${CONF_FS_TYPE[$1]} - [ -n "${CONF_BUILD_TYPE[$1]}" ] && req_buildtype=${CONF_BUILD_TYPE[$1]} - if [ -n "$runtest_timeout" ]; then - req_timeout="$runtest_timeout" - else - [ -n "${CONF_TIMEOUT[$1]}" ] && req_timeout=${CONF_TIMEOUT[$1]} - fi - - special_params= - [ "$req_fstype" == "none" -o "$req_fstype" == "any" ] && \ - special_params="req_fs_type=1" - - return 0 -} - -# -# intersection -- return common elements of collection of available and required -# values -# usage: intersection -# -intersection() { - collection=$1 - [ "$collection" == "all" ] && collection=$3 - [ "$2" == "all" ] && echo $collection && return - for e in $collection; do - for r in $2; do - [ "$e" == "$r" ] && { - subset="$subset $e" - } - done - done - echo $subset -} - -# -# runtest -- given the test directory name, run tests found inside it -# -runtest() { - [ "$UNITTEST_LOG_LEVEL" ] || UNITTEST_LOG_LEVEL=1 - export UNITTEST_LOG_LEVEL - - [ -f "$1/TEST0" ] || { - echo FAIL: $1: test not found. >&2 - exit 1 - } - [ -x "$1/TEST0" ] || { - echo FAIL: $1: test not executable. >&2 - exit 1 - } - - cd $1 - - load_default_global_test_configuration - read_global_test_configuration - - runscripts=$testfile - if [ "$runscripts" = all ]; then - if [ "$testseq" = all ]; then - runscripts=`ls -1 TEST* | grep '^TEST[0-9]\+$' | sort -V` - else - # generate test sequence - seqs=(${testseq//,/ }) - runscripts= - for seq in ${seqs[@]}; do - limits=(${seq//-/ }) - if [ "${#limits[@]}" -eq "2" ]; then - if [ ${limits[0]} -lt ${limits[1]} ]; then - nos="$(seq ${limits[0]} ${limits[1]})" - else - nos="$(seq ${limits[1]} ${limits[0]})" - fi - else - nos=${limits[0]} - fi - for no in $nos; do - runscripts="$runscripts TEST$no" - done - done - fi - fi - - # for each TEST script found... 
- for runscript in $runscripts - do - UNITTEST_NAME="$1/$runscript" - local sid=${runscript#TEST} - read_test_configuration $sid - - local _testtype="$testtype" - # unwind check test type to its subtypes - [ "$_testtype" == "check" ] && _testtype="short medium" - [ "$_testtype" == "all" ] && _testtype="short medium long" - - ttype=$(intersection "$_testtype" "$req_testtype" "short medium long") - [ -z "$ttype" ] && { - echo "$UNITTEST_NAME: SKIP test-type $testtype ($req_testtype required)" - continue - } - # collapse test type to check if its valid superset - [ "$ttype" == "short medium" ] && ttype="check" - [ "$ttype" == "short medium long" ] && ttype="all" - - # check if output test type is single value - ttype_array=($ttype) - [ "${#ttype_array[@]}" -gt 1 ] && { - echo "$UNITTEST_NAME: multiple test types ($ttype)" - exit 1 - } - - fss=$(intersection "$fstype" "$req_fstype" "none pmem non-pmem any") - builds=$(intersection "$buildtype" "$req_buildtype" "debug nondebug static-debug static-nondebug") - - # for each fs-type being tested... - for fs in $fss - do - # don't bother trying when fs-type isn't available... - if [ "$fs" == "pmem" ] && [ -z "$PMEM_FS_DIR" ] && [ "$fstype" == "all" ]; then - pmem_skip=1 - continue - fi - - if [ "$fs" == "non-pmem" ] && [ -z "$NON_PMEM_FS_DIR" ] && [ "$fstype" == "all" ]; then - non_pmem_skip=1 - continue - fi - - if [ "$fs" == "any" ] && [ -z "$PMEM_FS_DIR" ] && [ -z "$NON_PMEM_FS_DIR" ] && [ "$fstype" == "all" ]; then - continue - fi - # for each build-type being tested... - for build in $builds - do - export RUNTEST_DIR=$1 - export RUNTEST_PARAMS="TEST=$ttype FS=$fs BUILD=$build" - export RUNTEST_EXTRA="CHECK_TYPE=$checktype CHECK_POOL=$check_pool \ - $special_params" - export RUNTEST_SCRIPT="$runscript" - export RUNTEST_TIMEOUT="$req_timeout" - - if [ "$KEEP_GOING" == "y" ] && [ "$CLEAN_FAILED" == "y" ]; then - # temporary file used for sharing data - # between RUNTESTS and tests processes - temp_loc=$(mktemp /tmp/data-location.XXXXXXXX) - export TEMP_LOC=$temp_loc - fi - # to not overwrite logs skip other tests from the group - # if KEEP_GOING=y and test fail - if [ "$keep_going_skip" == "n" ]; then - runtest_local - fi - done - done - keep_going_skip=n - done - - cd .. -} - -[ -f testconfig.sh ] || { - cat >&2 </dev/null -if [ $? != 0 ]; then - unset killopt -fi - -# check if timeout can be run in the foreground -timeout --foreground 1s true &>/dev/null -if [ $? 
!= 0 ]; then - unset use_timeout -fi - -if [ -n "$TRACE" ]; then - unset use_timeout -fi - -if [ "$1" ]; then - for test in $* - do - [ -d "$test" ] || echo "RUNTESTS: Test does not exist: $test" - [ -f "$test/TEST0" ] && runtest $test - done -else - # no arguments means run them all - for testfile0 in */TEST0 - do - testdir=`dirname $testfile0` - if [[ "$skip_dir" =~ "$testdir" ]]; then - echo "RUNTESTS: Skipping: $testdir" - continue - fi - runtest $testdir - done -fi - -[ "$pmem_skip" ] && echo "SKIPPED fs-type \"pmem\" runs: testconfig.sh doesn't set PMEM_FS_DIR" -[ "$non_pmem_skip" ] && echo "SKIPPED fs-type \"non-pmem\" runs: testconfig.sh doesn't set NON_PMEM_FS_DIR" - -if [ "$fail_count" != "0" ]; then - echo "$(tput setaf 1)$fail_count tests failed:$(tput sgr0)" - # remove duplicates and print each test name in a new line - echo $fail_list | xargs -n1 | uniq - exit $keep_going_exit_code -else - exit 0 -fi diff --git a/src/test/RUNTESTS.sh b/src/test/RUNTESTS.sh old mode 100644 new mode 100755 diff --git a/src/test/unittest/unittest.sh b/src/test/unittest/unittest.sh index f78de5f7e3c..6998f59e3ff 100644 --- a/src/test/unittest/unittest.sh +++ b/src/test/unittest/unittest.sh @@ -1409,13 +1409,13 @@ function configure_valgrind() { fi else if [ "$1" == "force-disable" ]; then - msg "$UNITTEST_NAME: SKIP RUNTESTS script parameter $CHECK_TYPE tries to enable valgrind test when all valgrind tests are disabled in TEST" + msg "$UNITTEST_NAME: SKIP RUNTESTS.sh script parameter $CHECK_TYPE tries to enable valgrind test when all valgrind tests are disabled in TEST" exit 0 elif [ "$CHECK_TYPE" != "$1" -a "$2" == "force-enable" ]; then - msg "$UNITTEST_NAME: SKIP RUNTESTS script parameter $CHECK_TYPE tries to enable different valgrind test than one defined in TEST" + msg "$UNITTEST_NAME: SKIP RUNTESTS.sh script parameter $CHECK_TYPE tries to enable different valgrind test than one defined in TEST" exit 0 elif [ "$CHECK_TYPE" == "$1" -a "$2" == "force-disable" ]; then - msg "$UNITTEST_NAME: SKIP RUNTESTS script parameter $CHECK_TYPE tries to enable test defined in TEST as force-disable" + msg "$UNITTEST_NAME: SKIP RUNTESTS.sh script parameter $CHECK_TYPE tries to enable test defined in TEST as force-disable" exit 0 fi require_valgrind_tool $CHECK_TYPE $3 diff --git a/utils/check_whitespace b/utils/check_whitespace index 84de387cc4b..6823217efc7 100755 --- a/utils/check_whitespace +++ b/utils/check_whitespace @@ -114,7 +114,7 @@ sub check_whitespace_with_exc { $_ = basename($full); - return 0 unless /^(README.*|LICENSE.*|Makefile.*|CMakeLists.txt|.gitignore|TEST.*|RUNTESTS|check_whitespace|.*\.([chp13s]|sh|map|cpp|hpp|inc|py|md|cmake))$/; + return 0 unless /^(README.*|LICENSE.*|Makefile.*|CMakeLists.txt|.gitignore|TEST.*|RUNTESTS.sh|check_whitespace|.*\.([chp13s]|sh|map|cpp|hpp|inc|py|md|cmake))$/; return 0 if -z; check_whitespace($full, $_); diff --git a/utils/docker/run-build-package.sh b/utils/docker/run-build-package.sh index c211441a524..4af76ef8328 100755 --- a/utils/docker/run-build-package.sh +++ b/utils/docker/run-build-package.sh @@ -66,7 +66,7 @@ make -j$(nproc) # Prepare test config once more. Now, with path to PMDK set in the OS # (rather than in the git tree) - for testing packages installed in the system. $SCRIPTSDIR/configure-tests.sh PKG -./RUNTESTS -t check +./RUNTESTS.sh -t check popd popd
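
For reference, a sketch of the interface change introduced by this series
(illustrative invocations only; the exact RUNTESTS.py spelling is inferred
from the commit message, and the memcheck/debug combination is just one
example):

    # existing form: each Valgrind tool has its own option taking a mode
    $ ./RUNTESTS.sh -b debug -m force-enable

    # new form: a single --force-enable option takes the tool name,
    # matching the interface already offered by RUNTESTS.py
    $ ./RUNTESTS.sh --force-enable memcheck -b debug
    $ ./RUNTESTS.py --force-enable memcheck -b debug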