test: add statistics verification to obj_pmalloc_mt test
Enable statistics in pmalloc mt tests.
Extend the diagnostics of some tests, e.g. verification of pmalloc and prealloc results.

Signed-off-by: Tomasz Gromadzki <[email protected]>
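For context, the verification added in this commit is built on the libpmemobj ctl interface: statistics are switched on with pmemobj_ctl_set(pop, "stats.enabled", ...) and the heap usage is read back with pmemobj_ctl_get(pop, "stats.heap.curr_allocated", ...). The stand-alone sketch below is not part of the commit — the helper name, the assert-based checks, and the 128-byte request size are illustrative only — but it follows the same enable-then-compare pattern the reworked main() applies around each worker.

/*
 * check_stats_after_alloc -- illustrative sketch (not from the commit) of
 * enabling pool statistics and verifying them after an allocation
 */
#include <assert.h>
#include <stddef.h>
#include <libpmemobj.h>

static void
check_stats_after_alloc(PMEMobjpool *pop)
{
	int enabled = 1;	/* same boolean semantics as the test's enable_stats */
	size_t before, after;
	PMEMoid oid;
	int ret;

	/* enable run-time statistics for this pool */
	ret = pmemobj_ctl_set(pop, "stats.enabled", &enabled);
	assert(ret == 0);

	/* snapshot the currently allocated byte count */
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &before);
	assert(ret == 0);

	/* one non-transactional 128-byte allocation, no constructor */
	ret = pmemobj_alloc(pop, &oid, 128, 0, NULL, NULL);
	assert(ret == 0);

	/* the counter must have grown by at least the requested size */
	ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &after);
	assert(ret == 0);
	assert(after >= before + 128);

	pmemobj_free(&oid);
}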
grom72 committed Mar 30, 2023
commit 4b74cbf (1 parent: 7c189da)
Showing 7 changed files with 210 additions and 7 deletions.
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST4
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST4 -- multithreaded allocator test
# (long helgrind version) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem
require_test_type long
configure_valgrind helgrind force-enable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1

pass
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST5
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST5 -- multithreaded allocator test
# (medium non-helgrind/drd version) w/ statistics
#

. ../unittest/unittest.sh

require_fs_type pmem
require_test_type medium
configure_valgrind drd force-disable
configure_valgrind helgrind force-disable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1

pass
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST6
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST6 -- multithreaded allocator test
# (medium helgrind version) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem
require_test_type medium
configure_valgrind helgrind force-enable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1

pass
21 changes: 21 additions & 0 deletions src/test/obj_pmalloc_mt/TEST7
@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST7 -- multithreaded allocator test
# (long drd version) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem
require_test_type long
configure_valgrind drd force-enable
setup

PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
	./obj_pmalloc_mt$EXESUFFIX 4 64 4 $DIR/testfile 1

pass
27 changes: 27 additions & 0 deletions src/test/obj_pmalloc_mt/TEST8
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST8 -- multithreaded allocator test
# (long version, 50 repeated runs) w/ statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem
require_test_type long
#configure_valgrind drd force-enable
setup

for n in {1..50}
do
	if test -f "$DIR/testfile"; then
		rm "$DIR/testfile"
	fi
	PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
		./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 1 $n
done

pass
27 changes: 27 additions & 0 deletions src/test/obj_pmalloc_mt/TEST9
@@ -0,0 +1,27 @@
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2023, Intel Corporation

#
# src/test/obj_pmalloc_mt/TEST9 -- multithreaded allocator test
# (long version, 50 repeated runs) w/o statistics
#

. ../unittest/unittest.sh

require_valgrind 3.10
require_fs_type pmem
require_test_type long
#configure_valgrind drd force-enable
setup

for n in {1..50}
do
	if test -f "$DIR/testfile"; then
		rm "$DIR/testfile"
	fi
	PMEM_IS_PMEM_FORCE=1 expect_normal_exit \
		./obj_pmalloc_mt$EXESUFFIX 32 1000 100 $DIR/testfile 0 $n
done

pass
79 changes: 72 additions & 7 deletions src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */
/* Copyright 2015-2023, Intel Corporation */

/*
* obj_pmalloc_mt.c -- multithreaded test of allocator
@@ -59,10 +59,13 @@ static void *
realloc_worker(void *arg)
{
struct worker_args *a = arg;
int ret;

for (unsigned i = 0; i < Ops_per_thread; ++i) {
prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0);
ret = prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE,
0, 0);
UT_ASSERTne(a->r->offs[a->idx][i], 0);
UT_ASSERTeq(ret, 0);
}

return NULL;
@@ -110,14 +113,16 @@ static void *
tx_worker(void *arg)
{
struct worker_args *a = arg;
PMEMoid oid;

/*
* Allocate objects until exhaustion, once that happens the transaction
* will automatically abort and all of the objects will be freed.
*/
TX_BEGIN(a->pop) {
for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */
pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
UT_ASSERT(!OID_IS_NULL(oid));
if (Ops_per_thread != MAX_OPS_PER_THREAD &&
n == Ops_per_thread) {
pmemobj_tx_abort(0);
@@ -132,6 +137,7 @@ static void *
tx3_worker(void *arg)
{
struct worker_args *a = arg;
PMEMoid oid;

/*
* Allocate N objects, abort, repeat M times. Should reveal issues in
@@ -140,7 +146,8 @@ tx3_worker(void *arg)
for (unsigned n = 0; n < Tx_per_thread; ++n) {
TX_BEGIN(a->pop) {
for (unsigned i = 0; i < Ops_per_thread; ++i) {
pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
oid = pmemobj_tx_alloc(ALLOC_SIZE, a->idx);
UT_ASSERT(!OID_IS_NULL(oid));
}
pmemobj_tx_abort(EINVAL);
} TX_END
@@ -319,10 +326,14 @@ main(int argc, char *argv[])
{
START(argc, argv, "obj_pmalloc_mt");

if (argc != 5)
UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]);
if (argc < 5)
UT_FATAL(
"usage: %s <threads> <ops/t> <tx/t> <file> [enable stats]",
argv[0]);

PMEMobjpool *pop;
unsigned enable_stats = 0;
size_t allocPre, alloc, allocPost;

Threads = ATOU(argv[1]);
if (Threads > MAX_THREADS)
@@ -349,11 +360,21 @@ main(int argc, char *argv[])
if (pop == NULL)
UT_FATAL("!pmemobj_open");
}
if (argc > 5)
enable_stats = ATOU(argv[5]);

if (enable_stats) {
int ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats);
UT_ASSERTeq(ret, 0);
}

PMEMoid oid = pmemobj_root(pop, sizeof(struct root));
struct root *r = pmemobj_direct(oid);
UT_ASSERTne(r, NULL);

int ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPre);
UT_ASSERTeq(ret, 0);

struct worker_args args[MAX_THREADS];

for (unsigned i = 0; i < Threads; ++i) {
@@ -367,16 +388,61 @@
}
}

alloc = allocPre;
if (enable_stats)
alloc += Ops_per_thread * Threads * ((ALLOC_SIZE / 128) + 1)
* 128;
run_worker(alloc_worker, args);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

if (enable_stats) {
alloc -= Ops_per_thread * Threads * ((ALLOC_SIZE / 128) + 1)
* 128;
alloc += Ops_per_thread * Threads * ((REALLOC_SIZE / 128) + 1)
* 128;
}
run_worker(realloc_worker, args);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

alloc = allocPre;
run_worker(free_worker, args);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

run_worker(mix_worker, args);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

run_worker(alloc_free_worker, args);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

run_worker(action_cancel_worker, args);
actions_clear(pop, r);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);
if (enable_stats && Threads > 1)
alloc += Ops_per_thread / 2 * Threads
* ((ALLOC_SIZE / 128) + 1) * 128;
run_worker(action_publish_worker, args);
actions_clear(pop, r);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

if (enable_stats && Threads > 1)
alloc += Ops_per_thread / 4 * Threads
* ((ALLOC_SIZE / 128) + 1) * 128;
run_worker(action_mix_worker, args);
ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocPost);
UT_ASSERTeq(alloc, allocPost);

if (enable_stats) {
enable_stats = 0;
ret = pmemobj_ctl_set(pop, "stats.enabled", &enable_stats);
UT_ASSERTeq(ret, 0);
}

/*
* Reduce the number of lanes to a value smaller than the number of
@@ -395,7 +461,6 @@
*/
if (Threads == MAX_THREADS) /* don't run for short tests */
run_worker(tx_worker, args);

run_worker(tx3_worker, args);

pmemobj_close(pop);
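
The expected values asserted above are all derived with the same arithmetic: each requested object is charged as ((size / 128) + 1) * 128 bytes, i.e. the request rounded up to the next 128-byte unit, presumably to leave room for per-object metadata within the allocation class the test assumes. A minimal sketch of that calculation (illustrative only — the helper names and the example numbers are not taken from the commit):

#include <stddef.h>
#include <stdio.h>

/* bytes the test expects a single object of the given size to consume */
static size_t
charged_bytes(size_t size)
{
	return ((size / 128) + 1) * 128;
}

/* expected growth of stats.heap.curr_allocated after one alloc worker pass */
static size_t
expected_growth(size_t threads, size_t ops_per_thread, size_t size)
{
	return threads * ops_per_thread * charged_bytes(size);
}

int
main(void)
{
	/* e.g. 32 threads x 1000 ops (as in TEST4/TEST5) and a 100-byte request */
	printf("%zu bytes\n", expected_growth(32, 1000, 100));	/* 4096000 */
	return 0;
}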
