diff --git a/src/test/obj_pmalloc_mt/TEST4 b/src/test/obj_pmalloc_mt/TEST4
new file mode 100755
index 00000000000..2b220e15f3f
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/TEST4
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2023, Intel Corporation
+
+#
+# src/test/obj_pmalloc_mt/TEST4 -- multithreaded allocator test
+# (long helgrind version) w/ statistics
+#
+
+. ../unittest/unittest.sh
+
+require_valgrind 3.10
+require_fs_type pmem non-pmem
+require_test_type long
+configure_valgrind helgrind force-enable
+setup
+
+PMEM_IS_PMEM_FORCE=1 expect_normal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1
+
+pass
diff --git a/src/test/obj_pmalloc_mt/TEST5 b/src/test/obj_pmalloc_mt/TEST5
new file mode 100755
index 00000000000..ddabbb01956
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/TEST5
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2023, Intel Corporation
+
+#
+# src/test/obj_pmalloc_mt/TEST5 -- multithreaded allocator test
+# (medium non-helgrind/drd version) w/ statistics
+#
+
+. ../unittest/unittest.sh
+
+require_fs_type pmem non-pmem
+require_test_type medium
+configure_valgrind drd force-disable
+configure_valgrind helgrind force-disable
+setup
+
+PMEM_IS_PMEM_FORCE=1 expect_normal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1
+
+pass
diff --git a/src/test/obj_pmalloc_mt/TEST6 b/src/test/obj_pmalloc_mt/TEST6
new file mode 100755
index 00000000000..bf924168bdd
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/TEST6
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2023, Intel Corporation
+
+#
+# src/test/obj_pmalloc_mt/TEST6 -- multithreaded allocator test
+# (medium helgrind version) w/ statistics
+#
+
+. ../unittest/unittest.sh
+
+require_valgrind 3.10
+require_fs_type pmem non-pmem
+require_test_type medium
+configure_valgrind helgrind force-enable
+setup
+
+PMEM_IS_PMEM_FORCE=1 expect_normal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1
+
+pass
diff --git a/src/test/obj_pmalloc_mt/TEST7 b/src/test/obj_pmalloc_mt/TEST7
new file mode 100755
index 00000000000..7f7a2fbb618
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/TEST7
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2023, Intel Corporation
+
+#
+# src/test/obj_pmalloc_mt/TEST7 -- multithreaded allocator test
+# (long drd version) w/ statistics
+#
+
+. ../unittest/unittest.sh
+
+require_valgrind 3.10
+require_fs_type pmem non-pmem
+require_test_type long
+configure_valgrind drd force-enable
+setup
+
+PMEM_IS_PMEM_FORCE=1 expect_normal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1
+
+pass
diff --git a/src/test/obj_pmalloc_mt/TEST8 b/src/test/obj_pmalloc_mt/TEST8
new file mode 100755
index 00000000000..9f512e332f4
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/TEST8
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2023, Intel Corporation
+
+#
+# src/test/obj_pmalloc_mt/TEST8 -- multithreaded allocator test
+# (short version, several iterations) w/ statistics
+#
+
+. ../unittest/unittest.sh
+
+#require_valgrind 3.10
+require_fs_type pmem
+require_test_type short
+#configure_valgrind helgrind force-enable
+setup
+
+if test -f "$DIR/testfile"; then
+	rm $DIR/testfile
+fi
+PMEM_IS_PMEM_FORCE=1 expect_normal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 0
+
+if test -f "$DIR/testfile"; then
+	rm $DIR/testfile
+fi
+
+PMEM_IS_PMEM_FORCE=1 expect_abnormal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 1
+
+check
+
+pass
diff --git a/src/test/obj_pmalloc_mt/TEST9 b/src/test/obj_pmalloc_mt/TEST9
new file mode 100755
index 00000000000..1db855e3249
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/TEST9
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2023, Intel Corporation
+
+#
+# src/test/obj_pmalloc_mt/TEST9 -- multithreaded allocator test
+# (short version, several iterations) w/o statistics
+#
+
+. ../unittest/unittest.sh
+
+#require_valgrind 3.10
+require_fs_type pmem
+require_test_type short
+#configure_valgrind helgrind force-enable
+setup
+
+if test -f "$DIR/testfile"; then
+	rm $DIR/testfile
+fi
+PMEM_IS_PMEM_FORCE=1 expect_normal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 0 0
+
+if test -f "$DIR/testfile"; then
+	rm $DIR/testfile
+fi
+PMEM_IS_PMEM_FORCE=1 expect_abnormal_exit\
+	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 0 1
+
+check
+
+pass
diff --git a/src/test/obj_pmalloc_mt/err8.log.match b/src/test/obj_pmalloc_mt/err8.log.match
new file mode 100644
index 00000000000..2f6920d5025
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/err8.log.match
@@ -0,0 +1 @@
+{obj_pmalloc_mt.c:68 realloc_worker} obj_pmalloc_mt/TEST8: Error: assertion failure: ret (0xffffffffffffffff) == 0 (0x0)
diff --git a/src/test/obj_pmalloc_mt/err9.log.match b/src/test/obj_pmalloc_mt/err9.log.match
new file mode 100644
index 00000000000..80b4f9f19a9
--- /dev/null
+++ b/src/test/obj_pmalloc_mt/err9.log.match
@@ -0,0 +1,2 @@
+{obj_pmalloc_mt.c:68 realloc_worker} obj_pmalloc_mt/TEST9: Error: assertion failure: ret (0xffffffffffffffff) == 0 (0x0)
+{obj_pmalloc_mt.c:68 realloc_worker} obj_pmalloc_mt/TEST9: Error: assertion failure: ret (0xffffffffffffffff) == 0 (0x0)
diff --git a/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c b/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
index c356d3a7c8c..9fd916ac4d1 100644
--- a/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
+++ b/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: BSD-3-Clause
-/* Copyright 2015-2020, Intel Corporation */
+/* Copyright 2015-2023, Intel Corporation */
 
 /*
  * obj_pmalloc_mt.c -- multithreaded test of allocator
@@ -59,10 +59,13 @@ static void *
 realloc_worker(void *arg)
 {
 	struct worker_args *a = arg;
+	int ret;
 
 	for (unsigned i = 0; i < Ops_per_thread; ++i) {
-		prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0);
+		ret = prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE,
+			0, 0);
 		UT_ASSERTne(a->r->offs[a->idx][i], 0);
+		UT_ASSERTeq(ret, 0);
 	}
 
 	return NULL;
@@ -110,6 +113,7 @@ static void *
 tx_worker(void *arg)
 {
 	struct worker_args *a = arg;
+	PMEMoid oid;
 
 	/*
 	 * Allocate objects until exhaustion, once that happens the transaction
@@ -117,7 +121,8 @@ tx_worker(void *arg)
 	 */
 	TX_BEGIN(a->pop) {
 		for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */
-			pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
+			oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
+			UT_ASSERT(!OID_IS_NULL(oid));
 			if (Ops_per_thread != MAX_OPS_PER_THREAD &&
 				n == Ops_per_thread) {
 				pmemobj_tx_abort(0);
@@ -132,6 +137,7 @@ static void *
 tx3_worker(void *arg)
 {
 	struct worker_args *a = arg;
+	PMEMoid oid;
 
 	/*
 	 * Allocate N objects, abort, repeat M times. Should reveal issues in
@@ -140,7 +146,8 @@ tx3_worker(void *arg)
 	for (unsigned n = 0; n < Tx_per_thread; ++n) {
 		TX_BEGIN(a->pop) {
 			for (unsigned i = 0; i < Ops_per_thread; ++i) {
-				pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
+				oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
+				UT_ASSERT(!OID_IS_NULL(oid));
 			}
 			pmemobj_tx_abort(EINVAL);
 		} TX_END
@@ -314,15 +321,28 @@ run_worker(void *(worker_func)(void *arg), struct worker_args args[])
 		THREAD_JOIN(&t[i], NULL);
 }
 
+static inline size_t
+allocation_inc(size_t base)
+{
+	size_t result = ((base / 128) + 1) * 128;
+	result *= Ops_per_thread;
+	result *= Threads;
+	return result;
+}
+
 int
 main(int argc, char *argv[])
 {
	START(argc, argv, "obj_pmalloc_mt");
 
-	if (argc != 5)
-		UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]);
+	if (argc < 5)
+		UT_FATAL(
+			"usage: %s <threads> <ops/t> <tx/t> <file> [enable stats]",
+			argv[0]);
 
 	PMEMobjpool *pop;
+	unsigned enable_stats = 0;
+	size_t allocPre, alloc, allocPost;
 
 	Threads = ATOU(argv[1]);
 	if (Threads > MAX_THREADS)
@@ -349,11 +369,21 @@ main(int argc, char *argv[])
 		if (pop == NULL)
 			UT_FATAL("!pmemobj_open");
 	}
+	if (argc > 5)
+		enable_stats = ATOU(argv[5]);
+
+	if (enable_stats) {
+		int ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats);
+		UT_ASSERTeq(ret, 0);
+	}
 
 	PMEMoid oid = pmemobj_root(pop, sizeof(struct root));
 	struct root *r = pmemobj_direct(oid);
 	UT_ASSERTne(r, NULL);
 
+	int ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPre);
+	UT_ASSERTeq(ret, 0);
+
 	struct worker_args args[MAX_THREADS];
 
 	for (unsigned i = 0; i < Threads; ++i) {
@@ -367,16 +397,56 @@ main(int argc, char *argv[])
 		}
 	}
 
+	alloc = allocPre;
+	if (enable_stats)
+		alloc += allocation_inc(ALLOC_SIZE);
 	run_worker(alloc_worker, args);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
+	if (enable_stats) {
+		alloc -= allocation_inc(ALLOC_SIZE);
+		alloc += allocation_inc(REALLOC_SIZE);
+	}
 	run_worker(realloc_worker, args);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
+	alloc = allocPre;
 	run_worker(free_worker, args);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
 	run_worker(mix_worker, args);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
 	run_worker(alloc_free_worker, args);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
 	run_worker(action_cancel_worker, args);
 	actions_clear(pop, r);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+	if (enable_stats && Threads > 1)
+		alloc += allocation_inc(ALLOC_SIZE) / 2;
 	run_worker(action_publish_worker, args);
 	actions_clear(pop, r);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
+	if (enable_stats && Threads > 1)
+		alloc += allocation_inc(ALLOC_SIZE) / 4;
 	run_worker(action_mix_worker, args);
+	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
+	UT_ASSERTeq(alloc, allocPost);
+
+	if (enable_stats) {
+		enable_stats = 0;
+		ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats);
+		UT_ASSERTeq(ret, 0);
+	}
 
 	/*
 	 * Reduce the number of lanes to a value smaller than the number of
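
Note: the statistics assertions above rely on two libpmemobj ctl entry points, "stats.enabled" (a writable flag that turns runtime statistics collection on or off) and "stats.heap.curr_allocated" (a read-only size_t reporting the number of bytes currently allocated from the heap). The expected value is rebuilt with allocation_inc(), which rounds the requested size up to the next larger multiple of 128 bytes (e.g. a hypothetical 104-byte request is accounted as 128 bytes) and multiplies it by Ops_per_thread and Threads. The fragment below is a minimal, self-contained sketch of the same enable/query pattern; the pool path, layout name and allocation size are illustrative placeholders, not values taken from the test.

#include <stdio.h>
#include <libpmemobj.h>

int
main(void)
{
	/* placeholder pool path and layout -- not part of obj_pmalloc_mt */
	PMEMobjpool *pop = pmemobj_create("/tmp/stats_demo.pool", "stats_demo",
			PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	/* enable runtime statistics collection */
	int enabled = 1;
	if (pmemobj_ctl_set(pop, "stats.enabled", &enabled) != 0)
		perror("pmemobj_ctl_set");

	/* allocate one object so the counter changes */
	PMEMoid oid;
	if (pmemobj_alloc(pop, &oid, 104, 0, NULL, NULL) != 0)
		perror("pmemobj_alloc");

	/* read back the number of bytes currently allocated from the heap */
	size_t curr = 0;
	if (pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &curr) != 0)
		perror("pmemobj_ctl_get");
	printf("stats.heap.curr_allocated = %zu\n", curr);

	pmemobj_free(&oid);
	pmemobj_close(pop);
	return 0;
}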