diff --git a/src/test/obj_pmalloc_mt/TEST4 b/src/test/obj_pmalloc_mt/TEST4 new file mode 100755 index 00000000000..af6e14e5ab8 --- /dev/null +++ b/src/test/obj_pmalloc_mt/TEST4 @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023, Intel Corporation + +# +# src/test/obj_pmalloc_mt/TEST4 -- multithreaded allocator test +# (long helgrind version) w/ statistics +# + +. ../unittest/unittest.sh + +require_valgrind 3.10 +require_fs_type pmem +require_test_type long +configure_valgrind helgrind force-enable +setup + +PMEM_IS_PMEM_FORCE=1 expect_normal_exit\ + ./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 + +pass diff --git a/src/test/obj_pmalloc_mt/TEST5 b/src/test/obj_pmalloc_mt/TEST5 new file mode 100755 index 00000000000..8c5982af61d --- /dev/null +++ b/src/test/obj_pmalloc_mt/TEST5 @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023, Intel Corporation + +# +# src/test/obj_pmalloc_mt/TEST5 -- multithreaded allocator test +# (medium non-helgrind/drd version) w/ statistics +# + +. ../unittest/unittest.sh + +require_fs_type pmem +require_test_type medium +configure_valgrind drd force-disable +configure_valgrind helgrind force-disable +setup + +PMEM_IS_PMEM_FORCE=1 expect_normal_exit\ + ./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 + +pass diff --git a/src/test/obj_pmalloc_mt/TEST6 b/src/test/obj_pmalloc_mt/TEST6 new file mode 100755 index 00000000000..41eaf2814a9 --- /dev/null +++ b/src/test/obj_pmalloc_mt/TEST6 @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023, Intel Corporation + +# +# src/test/obj_pmalloc_mt/TEST6 -- multithreaded allocator test +# (medium helgrind version) w/ statistics +# + +. 
../unittest/unittest.sh + +require_valgrind 3.10 +require_fs_type pmem +require_test_type medium +configure_valgrind helgrind force-enable +setup + +PMEM_IS_PMEM_FORCE=1 expect_normal_exit\ + ./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1 + +pass diff --git a/src/test/obj_pmalloc_mt/TEST7 b/src/test/obj_pmalloc_mt/TEST7 new file mode 100755 index 00000000000..32bfa563160 --- /dev/null +++ b/src/test/obj_pmalloc_mt/TEST7 @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023, Intel Corporation + +# +# src/test/obj_pmalloc_mt/TEST7 -- multithreaded allocator test +# (long drd version) w/ statistics +# + +. ../unittest/unittest.sh + +require_valgrind 3.10 +require_fs_type pmem +require_test_type long +configure_valgrind drd force-enable +setup + +PMEM_IS_PMEM_FORCE=1 expect_normal_exit\ + ./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1 + +pass diff --git a/src/test/obj_pmalloc_mt/TEST8 b/src/test/obj_pmalloc_mt/TEST8 new file mode 100755 index 00000000000..27634791915 --- /dev/null +++ b/src/test/obj_pmalloc_mt/TEST8 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023, Intel Corporation + +# +# src/test/obj_pmalloc_mt/TEST8 -- multithreaded allocator test +# (long repeated-run version) w/ statistics +# + +. 
../unittest/unittest.sh + +require_valgrind 3.10 +require_fs_type pmem +require_test_type long +#configure_valgrind drd force-enable +setup + +for n in {1..50} +do +	if test -f "$DIR/testfile"; then +	rm $DIR/testfile +	fi +	PMEM_IS_PMEM_FORCE=1 expect_normal_exit\ +		./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 $n +done + +pass diff --git a/src/test/obj_pmalloc_mt/TEST9 b/src/test/obj_pmalloc_mt/TEST9 new file mode 100755 index 00000000000..2a118467df4 --- /dev/null +++ b/src/test/obj_pmalloc_mt/TEST9 @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2023, Intel Corporation + +# +# src/test/obj_pmalloc_mt/TEST9 -- multithreaded allocator test +# (long repeated-run version) w/o statistics +# + +. ../unittest/unittest.sh + +require_valgrind 3.10 +require_fs_type pmem +require_test_type long +#configure_valgrind drd force-enable +setup + +for n in {1..50} +do +	if test -f "$DIR/testfile"; then +	rm $DIR/testfile +	fi +	PMEM_IS_PMEM_FORCE=1 expect_normal_exit\ +		./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 0 $n +done + +pass diff --git a/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c b/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c index c356d3a7c8c..b664e1c6ca3 100644 --- a/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c +++ b/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: BSD-3-Clause -/* Copyright 2015-2020, Intel Corporation */ +/* Copyright 2015-2023, Intel Corporation */ /* * obj_pmalloc_mt.c -- multithreaded test of allocator @@ -59,10 +59,13 @@ static void * realloc_worker(void *arg) { struct worker_args *a = arg; + int ret; for (unsigned i = 0; i < Ops_per_thread; ++i) { - prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0); + ret = prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, + 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); + UT_ASSERTeq(ret, 0); } return NULL; @@ -110,6 +113,7 @@ static void * tx_worker(void *arg) { struct worker_args *a = arg; + PMEMoid oid; /* * 
Allocate objects until exhaustion, once that happens the transaction @@ -117,7 +121,8 @@ tx_worker(void *arg) */ TX_BEGIN(a->pop) { for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */ - pmemobj_tx_alloc(ALLOC_SIZE, a->idx); + oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx); + UT_ASSERT(!OID_IS_NULL(oid)); if (Ops_per_thread != MAX_OPS_PER_THREAD && n == Ops_per_thread) { pmemobj_tx_abort(0); @@ -132,6 +137,7 @@ static void * tx3_worker(void *arg) { struct worker_args *a = arg; + PMEMoid oid; /* * Allocate N objects, abort, repeat M times. Should reveal issues in @@ -140,7 +146,8 @@ tx3_worker(void *arg) for (unsigned n = 0; n < Tx_per_thread; ++n) { TX_BEGIN(a->pop) { for (unsigned i = 0; i < Ops_per_thread; ++i) { - pmemobj_tx_alloc(ALLOC_SIZE, a->idx); + oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx); + UT_ASSERT(!OID_IS_NULL(oid)); } pmemobj_tx_abort(EINVAL); } TX_END @@ -319,10 +326,14 @@ main(int argc, char *argv[]) { START(argc, argv, "obj_pmalloc_mt"); - if (argc != 5) - UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]); + if (argc < 5) + UT_FATAL( + "usage: %s <threads> <ops/t> <tx/t> <file> [enable stats]", + argv[0]); PMEMobjpool *pop; + unsigned enable_stats = 0; + size_t allocPre, alloc, allocPost; Threads = ATOU(argv[1]); if (Threads > MAX_THREADS) @@ -349,11 +360,21 @@ main(int argc, char *argv[]) if (pop == NULL) UT_FATAL("!pmemobj_open"); } + if (argc > 5) + enable_stats = ATOU(argv[5]); + + if (enable_stats) { + int ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats); + UT_ASSERTeq(ret, 0); + } PMEMoid oid = pmemobj_root(pop, sizeof(struct root)); struct root *r = pmemobj_direct(oid); UT_ASSERTne(r, NULL); + int ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPre); + UT_ASSERTeq(ret, 0); + struct worker_args args[MAX_THREADS]; for (unsigned i = 0; i < Threads; ++i) { @@ -367,16 +388,61 @@ main(int argc, char *argv[]) } } + alloc = allocPre; + if (enable_stats) + alloc += Ops_per_thread * Threads * ((ALLOC_SIZE / 128) + 1) * 128; run_worker(alloc_worker, args); + 
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + + if (enable_stats) { + alloc -= Ops_per_thread * Threads * ((ALLOC_SIZE / 128) + 1) + * 128; + alloc += Ops_per_thread * Threads * ((REALLOC_SIZE / 128) + 1) + * 128; + } run_worker(realloc_worker, args); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + + alloc = allocPre; run_worker(free_worker, args); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + run_worker(mix_worker, args); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + run_worker(alloc_free_worker, args); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + run_worker(action_cancel_worker, args); actions_clear(pop, r); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + if (enable_stats && Threads > 1) + alloc += Ops_per_thread / 2 * Threads + * ((ALLOC_SIZE / 128) + 1) * 128; run_worker(action_publish_worker, args); actions_clear(pop, r); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + + if (enable_stats && Threads > 1) + alloc += Ops_per_thread / 4 * Threads + * ((ALLOC_SIZE / 128) + 1) * 128; run_worker(action_mix_worker, args); + ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost); + UT_ASSERTeq(alloc, allocPost); + + if (enable_stats) { + enable_stats = 0; + ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats); + UT_ASSERTeq(ret, 0); + } /* * Reduce the number of lanes to a value smaller than the number of @@ -395,7 +461,6 @@ main(int argc, char *argv[]) */ if (Threads == MAX_THREADS) /* don't run for short tests */ run_worker(tx_worker, args); - run_worker(tx3_worker, args); pmemobj_close(pop);