11// SPDX-License-Identifier: BSD-3-Clause
2- /* Copyright 2015-2020 , Intel Corporation */
2+ /* Copyright 2015-2023 , Intel Corporation */
33
44/*
55 * obj_pmalloc_mt.c -- multithreaded test of allocator
@@ -59,10 +59,13 @@ static void *
5959realloc_worker (void * arg )
6060{
6161 struct worker_args * a = arg ;
62+ int ret ;
6263
6364 for (unsigned i = 0 ; i < Ops_per_thread ; ++ i ) {
64- prealloc (a -> pop , & a -> r -> offs [a -> idx ][i ], REALLOC_SIZE , 0 , 0 );
65+ ret = prealloc (a -> pop , & a -> r -> offs [a -> idx ][i ], REALLOC_SIZE ,
66+ 0 , 0 );
6567 UT_ASSERTne (a -> r -> offs [a -> idx ][i ], 0 );
68+ UT_ASSERTeq (ret , 0 );
6669 }
6770
6871 return NULL ;
@@ -110,14 +113,16 @@ static void *
110113tx_worker (void * arg )
111114{
112115 struct worker_args * a = arg ;
116+ PMEMoid oid ;
113117
114118 /*
115119 * Allocate objects until exhaustion, once that happens the transaction
116120 * will automatically abort and all of the objects will be freed.
117121 */
118122 TX_BEGIN (a -> pop ) {
119123 for (unsigned n = 0 ; ; ++ n ) { /* this is NOT an infinite loop */
120- pmemobj_tx_alloc (ALLOC_SIZE , a -> idx );
124+ oid = pmemobj_tx_alloc (ALLOC_SIZE , a -> idx );
125+ UT_ASSERT (!OID_IS_NULL (oid ));
121126 if (Ops_per_thread != MAX_OPS_PER_THREAD &&
122127 n == Ops_per_thread ) {
123128 pmemobj_tx_abort (0 );
@@ -132,6 +137,7 @@ static void *
132137tx3_worker (void * arg )
133138{
134139 struct worker_args * a = arg ;
140+ PMEMoid oid ;
135141
136142 /*
137143 * Allocate N objects, abort, repeat M times. Should reveal issues in
@@ -140,7 +146,8 @@ tx3_worker(void *arg)
140146 for (unsigned n = 0 ; n < Tx_per_thread ; ++ n ) {
141147 TX_BEGIN (a -> pop ) {
142148 for (unsigned i = 0 ; i < Ops_per_thread ; ++ i ) {
143- pmemobj_tx_alloc (ALLOC_SIZE , a -> idx );
149+ oid = pmemobj_tx_alloc (ALLOC_SIZE , a -> idx );
150+ UT_ASSERT (!OID_IS_NULL (oid ));
144151 }
145152 pmemobj_tx_abort (EINVAL );
146153 } TX_END
@@ -319,10 +326,14 @@ main(int argc, char *argv[])
319326{
320327 START (argc , argv , "obj_pmalloc_mt" );
321328
322- if (argc != 5 )
323- UT_FATAL ("usage: %s <threads> <ops/t> <tx/t> [file]" , argv [0 ]);
329+ if (argc < 5 )
330+ UT_FATAL (
331+ "usage: %s <threads> <ops/t> <tx/t> <file> [enable stats]" ,
332+ argv [0 ]);
324333
325334 PMEMobjpool * pop ;
335+ unsigned enable_stats = 0 ;
336+ size_t allocPre , alloc , allocPost ;
326337
327338 Threads = ATOU (argv [1 ]);
328339 if (Threads > MAX_THREADS )
@@ -349,11 +360,21 @@ main(int argc, char *argv[])
349360 if (pop == NULL )
350361 UT_FATAL ("!pmemobj_open" );
351362 }
363+ if (argc > 5 )
364+ enable_stats = ATOU (argv [5 ]);
365+
366+ if (enable_stats ) {
367+ int ret = pmemobj_ctl_set (pop , "stats.enabled" , & enable_stats );
368+ UT_ASSERTeq (ret , 0 );
369+ }
352370
353371 PMEMoid oid = pmemobj_root (pop , sizeof (struct root ));
354372 struct root * r = pmemobj_direct (oid );
355373 UT_ASSERTne (r , NULL );
356374
375+ int ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPre );
376+ UT_ASSERTeq (ret , 0 );
377+
357378 struct worker_args args [MAX_THREADS ];
358379
359380 for (unsigned i = 0 ; i < Threads ; ++ i ) {
@@ -367,16 +388,61 @@ main(int argc, char *argv[])
367388 }
368389 }
369390
391+ alloc = allocPre ;
392+ if (enable_stats )
393+ alloc += Ops_per_thread * Threads * ((ALLOC_SIZE / 128 ) + 1 )
394+ * 128 ;
370395 run_worker (alloc_worker , args );
396+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
397+ UT_ASSERTeq (alloc , allocPost );
398+
399+ if (enable_stats ) {
400+ alloc -= Ops_per_thread * Threads * ((ALLOC_SIZE / 128 ) + 1 )
401+ * 128 ;
402+ alloc += Ops_per_thread * Threads * ((REALLOC_SIZE / 128 ) + 1 )
403+ * 128 ;
404+ }
371405 run_worker (realloc_worker , args );
406+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
407+ UT_ASSERTeq (alloc , allocPost );
408+
409+ alloc = allocPre ;
372410 run_worker (free_worker , args );
411+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
412+ UT_ASSERTeq (alloc , allocPost );
413+
373414 run_worker (mix_worker , args );
415+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
416+ UT_ASSERTeq (alloc , allocPost );
417+
374418 run_worker (alloc_free_worker , args );
419+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
420+ UT_ASSERTeq (alloc , allocPost );
421+
375422 run_worker (action_cancel_worker , args );
376423 actions_clear (pop , r );
424+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
425+ UT_ASSERTeq (alloc , allocPost );
426+ if (enable_stats && Threads > 1 )
427+ alloc += Ops_per_thread / 2 * Threads
428+ * ((ALLOC_SIZE / 128 ) + 1 ) * 128 ;
377429 run_worker (action_publish_worker , args );
378430 actions_clear (pop , r );
431+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
432+ UT_ASSERTeq (alloc , allocPost );
433+
434+ if (enable_stats && Threads > 1 )
435+ alloc += Ops_per_thread / 4 * Threads
436+ * ((ALLOC_SIZE / 128 ) + 1 ) * 128 ;
379437 run_worker (action_mix_worker , args );
438+ ret = pmemobj_ctl_get (pop , "stats.heap.curr_allocated" , & allocPost );
439+ UT_ASSERTeq (alloc , allocPost );
440+
441+ if (enable_stats ) {
442+ enable_stats = 0 ;
443+ ret = pmemobj_ctl_set (pop , "stats.enabled" , & enable_stats );
444+ UT_ASSERTeq (ret , 0 );
445+ }
380446
381447 /*
382448 * Reduce the number of lanes to a value smaller than the number of
@@ -395,7 +461,6 @@ main(int argc, char *argv[])
395461 */
396462 if (Threads == MAX_THREADS ) /* don't run for short tests */
397463 run_worker (tx_worker , args );
398-
399464 run_worker (tx3_worker , args );
400465
401466 pmemobj_close (pop );