test: add statistics verification to obj_pmalloc_mt test
Enable statistics in pmalloc mt tests.
Add the missing diagnostics for pmalloc and prealloc
function calls.

Signed-off-by: Tomasz Gromadzki <tomasz.gromadzki@intel.com>
grom72 committed Mar 31, 2023
1 parent 0d2d76d commit 48ab8bf
Showing 7 changed files with 214 additions and 7 deletions.
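
The statistics interface exercised by these tests is the libpmemobj CTL API: setting "stats.enabled" turns runtime statistics on for a pool, and "stats.heap.curr_allocated" reports how many bytes are currently allocated from its heap. The following standalone sketch illustrates that pattern; the pool path, layout name and allocation size are illustrative assumptions, not part of this commit.

/* stats_sketch.c -- illustrative sketch of the pmemobj statistics CTLs */
#include <stdio.h>
#include <libpmemobj.h>

int
main(void)
{
	/* pool path and layout name are placeholders */
	PMEMobjpool *pop = pmemobj_create("/tmp/stats_demo.obj", "stats_demo",
			PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		fprintf(stderr, "pmemobj_create: %s\n", pmemobj_errormsg());
		return 1;
	}

	int enable = 1;
	/* enable runtime statistics for this pool */
	if (pmemobj_ctl_set(pop, "stats.enabled", &enable) != 0)
		return 1;

	PMEMoid oid;
	/* a single allocation from the pool's heap */
	if (pmemobj_alloc(pop, &oid, 128, 0, NULL, NULL) != 0)
		return 1;

	size_t cur = 0;
	/* bytes currently allocated from the pool's heap */
	if (pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &cur) != 0)
		return 1;
	printf("curr_allocated after one 128-byte alloc: %zu\n", cur);

	pmemobj_free(&oid);
	pmemobj_close(pop);
	return 0;
}

The new TEST4-TEST9 scripts pass an extra trailing argument so that obj_pmalloc_mt either enables these statistics (1) or leaves them disabled (0).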
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST4
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST4 -- multithreaded allocator test
# (long helgrind version) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem non-pmem
require_test_type long
configure_valgrind helgrind force-enable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1

pass
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST5
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST5 -- multithreaded allocator test
# (medium non-helgrind/drd version) w/ statistics
#

. ../unittest/unittest.sh

require_fs_type pmem non-pmem
require_test_type medium
configure_valgrind drd force-disable
configure_valgrind helgrind force-disable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1

pass
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST6
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST6 -- multithreaded allocator test
# (medium helgrind version) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem non-pmem
require_test_type medium
configure_valgrind helgrind force-enable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1

pass
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST7
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST7 -- multithreaded allocator test
# (long drd version) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem non-pmem
require_test_type long
configure_valgrind drd force-enable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1

pass
27 changes: 27 additions & 0 deletions src/test/obj_pmalloc_mt/TEST8
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST8 -- multithreaded allocator test
# (long helgrind version - several iterations) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem non-pmem
require_test_type long
configure_valgrind helgrind force-enable
setup

for n in {1..500}
do
	if test -f "$DIR/testfile"; then
		rm $DIR/testfile
	fi
	PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
		./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 $n
done

pass
27 changes: 27 additions & 0 deletions src/test/obj_pmalloc_mt/TEST9
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST9 -- multithreaded allocator test
# (long helgrind version - several iterations) w/o statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem non-pmem
require_test_type long
configure_valgrind helgrind force-enable
setup

for n in {1..50}
do
	if test -f "$DIR/testfile"; then
		rm $DIR/testfile
	fi
	PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
		./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 0 $n
done

pass
83 changes: 76 additions & 7 deletions src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/* Copyright 2015-2023, Intel Corporation */

/*
 * obj_pmalloc_mt.c -- multithreaded test of allocator
@@ -59,10 +59,13 @@ static void *
realloc_worker(void *arg)
{
	struct worker_args *a = arg;
	int ret;

	for (unsigned i = 0; i < Ops_per_thread; ++i) {
		prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0);
		ret = prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE,
			0, 0);
		UT_ASSERTne(a->r->offs[a->idx][i], 0);
		UT_ASSERTeq(ret, 0);
	}

	return NULL;
@@ -110,14 +113,16 @@ static void *
tx_worker(void *arg)
{
	struct worker_args *a = arg;
	PMEMoid oid;

	/*
	 * Allocate objects until exhaustion, once that happens the transaction
	 * will automatically abort and all of the objects will be freed.
	 */
	TX_BEGIN(a->pop) {
		for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */
			pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
			oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
			UT_ASSERT(!OID_IS_NULL(oid));
			if (Ops_per_thread != MAX_OPS_PER_THREAD &&
					n == Ops_per_thread) {
				pmemobj_tx_abort(0);
@@ -132,6 +137,7 @@ static void *
tx3_worker(void *arg)
{
	struct worker_args *a = arg;
	PMEMoid oid;

	/*
	 * Allocate N objects, abort, repeat M times. Should reveal issues in
@@ -140,7 +146,8 @@ tx3_worker(void *arg)
	for (unsigned n = 0; n < Tx_per_thread; ++n) {
		TX_BEGIN(a->pop) {
			for (unsigned i = 0; i < Ops_per_thread; ++i) {
				pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
				oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
				UT_ASSERT(!OID_IS_NULL(oid));
			}
			pmemobj_tx_abort(EINVAL);
		} TX_END
@@ -314,15 +321,28 @@ run_worker(void *(worker_func)(void *arg), struct worker_args args[])
		THREAD_JOIN(&t[i], NULL);
}

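/*
 * allocation_inc -- expected change of stats.heap.curr_allocated after one
 * full worker pass: the object size rounded up to the next 128-byte boundary
 * ((base / 128 + 1) * 128), multiplied by Ops_per_thread and by Threads.
 */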
static inline size_t
allocation_inc(size_t base)
{
	size_t result = ((base / 128) + 1) * 128;
	result *= Ops_per_thread;
	result *= Threads;
	return result;
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_pmalloc_mt");

	if (argc != 5)
		UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]);
	if (argc < 5)
		UT_FATAL(
			"usage: %s <threads> <ops/t> <tx/t> <file> [enable stats]",
			argv[0]);

	PMEMobjpool *pop;
	unsigned enable_stats = 0;
	size_t allocPre, alloc, allocPost;

	Threads = ATOU(argv[1]);
	if (Threads > MAX_THREADS)
@@ -349,11 +369,21 @@ main(int argc, char *argv[])
		if (pop == NULL)
			UT_FATAL("!pmemobj_open");
	}
	if (argc > 5)
		enable_stats = ATOU(argv[5]);

	if (enable_stats) {
		int ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats);
		UT_ASSERTeq(ret, 0);
	}

	PMEMoid oid = pmemobj_root(pop, sizeof(struct root));
	struct root *r = pmemobj_direct(oid);
	UT_ASSERTne(r, NULL);

	int ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPre);
	UT_ASSERTeq(ret, 0);

	struct worker_args args[MAX_THREADS];

	for (unsigned i = 0; i < Threads; ++i) {
@@ -367,16 +397,56 @@ main(int argc, char *argv[])
		}
	}

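	/*
	 * Each worker run below is followed by a read of
	 * stats.heap.curr_allocated, which is compared against the expected
	 * value tracked in 'alloc' (adjusted via allocation_inc() whenever
	 * statistics are enabled).
	 */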
	alloc = allocPre;
	if (enable_stats)
		alloc += allocation_inc(ALLOC_SIZE);
	run_worker(alloc_worker, args);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	if (enable_stats) {
		alloc -= allocation_inc(ALLOC_SIZE);
		alloc += allocation_inc(REALLOC_SIZE);
	}
	run_worker(realloc_worker, args);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	alloc = allocPre;
	run_worker(free_worker, args);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	run_worker(mix_worker, args);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	run_worker(alloc_free_worker, args);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	run_worker(action_cancel_worker, args);
	actions_clear(pop, r);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);
	if (enable_stats && Threads > 1)
		alloc += allocation_inc(ALLOC_SIZE) / 2;
	run_worker(action_publish_worker, args);
	actions_clear(pop, r);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	if (enable_stats && Threads > 1)
		alloc += allocation_inc(ALLOC_SIZE) / 4;
	run_worker(action_mix_worker, args);
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
	UT_ASSERTeq(alloc, allocPost);

	if (enable_stats) {
		enable_stats = 0;
		ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats);
		UT_ASSERTeq(ret, 0);
	}

	/*
	 * Reduce the number of lanes to a value smaller than the number of
@@ -395,7 +465,6 @@
	 */
	if (Threads == MAX_THREADS) /* don't run for short tests */
		run_worker(tx_worker, args);

	run_worker(tx3_worker, args);

	pmemobj_close(pop);
