1/* 2 * Standalone mutex tester for Berkeley DB mutexes. 3 * 4 * $Id: test_mutex.c,v 12.24 2007/12/05 14:48:08 bostic Exp $ 5 */ 6 7#include "db_config.h" 8 9#include "db_int.h" 10 11#include <sys/wait.h> 12 13#ifdef DB_WIN32 14#define MUTEX_THREAD_TEST 1 15 16extern int getopt(int, char * const *, const char *); 17 18typedef HANDLE os_pid_t; 19typedef HANDLE os_thread_t; 20 21#define os_thread_create(thrp, attr, func, arg) \ 22 (((*(thrp) = CreateThread(NULL, 0, \ 23 (LPTHREAD_START_ROUTINE)(func), (arg), 0, NULL)) == NULL) ? -1 : 0) 24#define os_thread_join(thr, statusp) \ 25 ((WaitForSingleObject((thr), INFINITE) == WAIT_OBJECT_0) && \ 26 GetExitCodeThread((thr), (LPDWORD)(statusp)) ? 0 : -1) 27#define os_thread_self() GetCurrentThreadId() 28 29#else /* !DB_WIN32 */ 30 31typedef pid_t os_pid_t; 32 33/* 34 * There's only one mutex implementation that can't support thread-level 35 * locking: UNIX/fcntl mutexes. 36 * 37 * The general Berkeley DB library configuration doesn't look for the POSIX 38 * pthread functions, with one exception -- pthread_yield. 39 * 40 * Use these two facts to decide if we're going to build with or without 41 * threads. 42 */ 43#if !defined(HAVE_MUTEX_FCNTL) && defined(HAVE_PTHREAD_YIELD) 44#define MUTEX_THREAD_TEST 1 45 46#include <pthread.h> 47 48typedef pthread_t os_thread_t; 49 50#define os_thread_create(thrp, attr, func, arg) \ 51 pthread_create((thrp), (attr), (func), (arg)) 52#define os_thread_join(thr, statusp) pthread_join((thr), (statusp)) 53#define os_thread_self() pthread_self() 54#endif /* HAVE_PTHREAD_YIELD */ 55#endif /* !DB_WIN32 */ 56 57#define OS_BAD_PID ((os_pid_t)-1) 58 59#define TESTDIR "TESTDIR" /* Working area */ 60#define MT_FILE "TESTDIR/mutex.file" 61#define MT_FILE_QUIT "TESTDIR/mutex.file.quit" 62 63/* 64 * The backing data layout: 65 * TM[1] per-thread mutex array lock 66 * TM[nthreads] per-thread mutex array 67 * TM[maxlocks] per-lock mutex array 68 */ 69typedef struct { 70 db_mutex_t mutex; /* Mutex. */ 71 u_long id; /* Holder's ID. */ 72 u_int wakeme; /* Request to awake. */ 73} TM; 74 75DB_ENV *dbenv; /* Backing environment */ 76ENV *env; 77size_t len; /* Backing data chunk size. */ 78 79u_int8_t *gm_addr; /* Global mutex */ 80u_int8_t *lm_addr; /* Locker mutexes */ 81u_int8_t *tm_addr; /* Thread mutexes */ 82 83#ifdef MUTEX_THREAD_TEST 84os_thread_t *kidsp; /* Locker threads */ 85os_thread_t wakep; /* Wakeup thread */ 86#endif 87 88#ifndef HAVE_MMAP 89u_int nprocs = 1; /* -p: Processes. */ 90u_int nthreads = 20; /* -t: Threads. */ 91#elif MUTEX_THREAD_TEST 92u_int nprocs = 5; /* -p: Processes. */ 93u_int nthreads = 4; /* -t: Threads. */ 94#else 95u_int nprocs = 20; /* -p: Processes. */ 96u_int nthreads = 1; /* -t: Threads. */ 97#endif 98 99u_int maxlocks = 20; /* -l: Backing locks. */ 100u_int nlocks = 10000; /* -n: Locks per process. */ 101int verbose; /* -v: Verbosity. */ 102 103const char *progname; 104 105void data_off(u_int8_t *, DB_FH *); 106void data_on(u_int8_t **, u_int8_t **, u_int8_t **, DB_FH **, int); 107int locker_start(u_long); 108int locker_wait(void); 109os_pid_t os_spawn(const char *, char *const[]); 110int os_wait(os_pid_t *, u_int); 111void *run_lthread(void *); 112void *run_wthread(void *); 113os_pid_t spawn_proc(u_long, char *, char *); 114void tm_env_close(void); 115int tm_env_init(void); 116void tm_mutex_destroy(void); 117void tm_mutex_init(void); 118void tm_mutex_stats(void); 119int usage(void); 120int wakeup_start(u_long); 121int wakeup_wait(void); 122 123int 124main(argc, argv) 125 int argc; 126 char *argv[]; 127{ 128 enum {LOCKER, WAKEUP, PARENT} rtype; 129 extern int optind; 130 extern char *optarg; 131 os_pid_t wakeup_pid, *pids; 132 u_long id; 133 u_int i; 134 DB_FH *fhp, *map_fhp; 135 int ch, err; 136 char *p, *tmpath, cmd[1024]; 137 138 if ((progname = __db_rpath(argv[0])) == NULL) 139 progname = argv[0]; 140 else 141 ++progname; 142 143 rtype = PARENT; 144 id = 0; 145 tmpath = argv[0]; 146 while ((ch = getopt(argc, argv, "l:n:p:T:t:v")) != EOF) 147 switch (ch) { 148 case 'l': 149 maxlocks = (u_int)atoi(optarg); 150 break; 151 case 'n': 152 nlocks = (u_int)atoi(optarg); 153 break; 154 case 'p': 155 nprocs = (u_int)atoi(optarg); 156 break; 157 case 't': 158 if ((nthreads = (u_int)atoi(optarg)) == 0) 159 nthreads = 1; 160#if !defined(MUTEX_THREAD_TEST) 161 if (nthreads != 1) { 162 fprintf(stderr, 163 "%s: thread support not available or not compiled for this platform.\n", 164 progname); 165 return (EXIT_FAILURE); 166 } 167#endif 168 break; 169 case 'T': 170 if (!memcmp(optarg, "locker", sizeof("locker") - 1)) 171 rtype = LOCKER; 172 else if ( 173 !memcmp(optarg, "wakeup", sizeof("wakeup") - 1)) 174 rtype = WAKEUP; 175 else 176 return (usage()); 177 if ((p = strchr(optarg, '=')) == NULL) 178 return (usage()); 179 id = (u_long)atoi(p + 1); 180 break; 181 case 'v': 182 verbose = 1; 183 break; 184 case '?': 185 default: 186 return (usage()); 187 } 188 argc -= optind; 189 argv += optind; 190 191 /* 192 * If we're not running a multi-process test, we should be running 193 * a multi-thread test. 194 */ 195 if (nprocs == 1 && nthreads == 1) { 196 fprintf(stderr, 197 "%s: running in a single process requires multiple threads\n", 198 progname); 199 return (EXIT_FAILURE); 200 } 201 202 len = sizeof(TM) * (1 + nthreads * nprocs + maxlocks); 203 204 /* 205 * In the multi-process test, the parent spawns processes that exec 206 * the original binary, ending up here. Each process joins the DB 207 * environment separately and then calls the supporting function. 208 */ 209 if (rtype == LOCKER || rtype == WAKEUP) { 210 __os_yield(env, 3, 0); /* Let everyone catch up. */ 211 /* Initialize random numbers. */ 212 srand((u_int)time(NULL) % (u_int)getpid()); 213 214 if (tm_env_init() != 0) /* Join the environment. */ 215 exit(EXIT_FAILURE); 216 /* Join the backing data. */ 217 data_on(&gm_addr, &tm_addr, &lm_addr, &map_fhp, 0); 218 if (verbose) 219 printf( 220 "Backing file: global (%#lx), threads (%#lx), locks (%#lx)\n", 221 (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr); 222 223 if ((rtype == LOCKER ? 224 locker_start(id) : wakeup_start(id)) != 0) 225 exit(EXIT_FAILURE); 226 if ((rtype == LOCKER ? locker_wait() : wakeup_wait()) != 0) 227 exit(EXIT_FAILURE); 228 229 data_off(gm_addr, map_fhp); /* Detach from backing data. */ 230 231 tm_env_close(); /* Detach from environment. */ 232 233 exit(EXIT_SUCCESS); 234 } 235 236 /* 237 * The following code is only executed by the original parent process. 238 * 239 * Clean up from any previous runs. 240 */ 241 snprintf(cmd, sizeof(cmd), "rm -rf %s", TESTDIR); 242 (void)system(cmd); 243 snprintf(cmd, sizeof(cmd), "mkdir %s", TESTDIR); 244 (void)system(cmd); 245 246 printf( 247 "%s: %u processes, %u threads/process, %u lock requests from %u locks\n", 248 progname, nprocs, nthreads, nlocks, maxlocks); 249 printf("%s: backing data %lu bytes\n", progname, (u_long)len); 250 251 if (tm_env_init() != 0) /* Create the environment. */ 252 exit(EXIT_FAILURE); 253 /* Create the backing data. */ 254 data_on(&gm_addr, &tm_addr, &lm_addr, &map_fhp, 1); 255 if (verbose) 256 printf( 257 "backing data: global (%#lx), threads (%#lx), locks (%#lx)\n", 258 (u_long)gm_addr, (u_long)tm_addr, (u_long)lm_addr); 259 260 tm_mutex_init(); /* Initialize mutexes. */ 261 262 if (nprocs > 1) { /* Run the multi-process test. */ 263 /* Allocate array of locker process IDs. */ 264 if ((pids = calloc(nprocs, sizeof(os_pid_t))) == NULL) { 265 fprintf(stderr, "%s: %s\n", progname, strerror(errno)); 266 goto fail; 267 } 268 269 /* Spawn locker processes and threads. */ 270 for (i = 0; i < nprocs; ++i) { 271 if ((pids[i] = 272 spawn_proc(id, tmpath, "locker")) == OS_BAD_PID) { 273 fprintf(stderr, 274 "%s: failed to spawn a locker\n", progname); 275 goto fail; 276 } 277 id += nthreads; 278 } 279 280 /* Spawn wakeup process/thread. */ 281 if ((wakeup_pid = 282 spawn_proc(id, tmpath, "wakeup")) == OS_BAD_PID) { 283 fprintf(stderr, 284 "%s: failed to spawn waker\n", progname); 285 goto fail; 286 } 287 ++id; 288 289 /* Wait for all lockers to exit. */ 290 if ((err = os_wait(pids, nprocs)) != 0) { 291 fprintf(stderr, "%s: locker wait failed with %d\n", 292 progname, err); 293 goto fail; 294 } 295 296 /* Signal wakeup process to exit. */ 297 if ((err = __os_open( 298 env, MT_FILE_QUIT, 0, DB_OSO_CREATE, 0664, &fhp)) != 0) { 299 fprintf(stderr, 300 "%s: open %s\n", progname, db_strerror(err)); 301 goto fail; 302 } 303 (void)__os_closehandle(env, fhp); 304 305 /* Wait for wakeup process/thread. */ 306 if ((err = os_wait(&wakeup_pid, 1)) != 0) { 307 fprintf(stderr, "%s: %lu: exited %d\n", 308 progname, (u_long)wakeup_pid, err); 309 goto fail; 310 } 311 } else { /* Run the single-process test. */ 312 /* Spawn locker threads. */ 313 if (locker_start(0) != 0) 314 goto fail; 315 316 /* Spawn wakeup thread. */ 317 if (wakeup_start(nthreads) != 0) 318 goto fail; 319 320 /* Wait for all lockers to exit. */ 321 if (locker_wait() != 0) 322 goto fail; 323 324 /* Signal wakeup process to exit. */ 325 if ((err = __os_open( 326 env, MT_FILE_QUIT, 0, DB_OSO_CREATE, 0664, &fhp)) != 0) { 327 fprintf(stderr, 328 "%s: open %s\n", progname, db_strerror(err)); 329 goto fail; 330 } 331 (void)__os_closehandle(env, fhp); 332 333 /* Wait for wakeup thread. */ 334 if (wakeup_wait() != 0) 335 goto fail; 336 } 337 338 tm_mutex_stats(); /* Display run statistics. */ 339 tm_mutex_destroy(); /* Destroy mutexes. */ 340 341 data_off(gm_addr, map_fhp); /* Detach from backing data. */ 342 343 tm_env_close(); /* Detach from environment. */ 344 345 printf("%s: test succeeded\n", progname); 346 return (EXIT_SUCCESS); 347 348fail: printf("%s: FAILED!\n", progname); 349 return (EXIT_FAILURE); 350} 351 352int 353locker_start(id) 354 u_long id; 355{ 356#if defined(MUTEX_THREAD_TEST) 357 u_int i; 358 int err; 359 360 /* 361 * Spawn off threads. We have nthreads all locking and going to 362 * sleep, and one other thread cycling through and waking them up. 363 */ 364 if ((kidsp = 365 (os_thread_t *)calloc(sizeof(os_thread_t), nthreads)) == NULL) { 366 fprintf(stderr, "%s: %s\n", progname, strerror(errno)); 367 return (1); 368 } 369 for (i = 0; i < nthreads; i++) 370 if ((err = os_thread_create( 371 &kidsp[i], NULL, run_lthread, (void *)(id + i))) != 0) { 372 fprintf(stderr, "%s: failed spawning thread: %s\n", 373 progname, db_strerror(err)); 374 return (1); 375 } 376 return (0); 377#else 378 return (run_lthread((void *)id) == NULL ? 0 : 1); 379#endif 380} 381 382int 383locker_wait() 384{ 385#if defined(MUTEX_THREAD_TEST) 386 u_int i; 387 void *retp; 388 389 /* Wait for the threads to exit. */ 390 for (i = 0; i < nthreads; i++) { 391 (void)os_thread_join(kidsp[i], &retp); 392 if (retp != NULL) { 393 fprintf(stderr, 394 "%s: thread exited with error\n", progname); 395 return (1); 396 } 397 } 398 free(kidsp); 399#endif 400 return (0); 401} 402 403void * 404run_lthread(arg) 405 void *arg; 406{ 407 TM *gp, *mp, *tp; 408 u_long id, tid; 409 u_int lock, nl; 410 int err, i; 411 412 id = (uintptr_t)arg; 413#if defined(MUTEX_THREAD_TEST) 414 tid = (u_long)os_thread_self(); 415#else 416 tid = 0; 417#endif 418 printf("Locker: ID %03lu (PID: %lu; TID: %lx)\n", 419 id, (u_long)getpid(), tid); 420 421 gp = (TM *)gm_addr; 422 tp = (TM *)(tm_addr + id * sizeof(TM)); 423 424 for (nl = nlocks; nl > 0;) { 425 /* Select and acquire a data lock. */ 426 lock = (u_int)rand() % maxlocks; 427 mp = (TM *)(lm_addr + lock * sizeof(TM)); 428 if (verbose) 429 printf("%03lu: lock %d (mtx: %lu)\n", 430 id, lock, (u_long)mp->mutex); 431 432 if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) { 433 fprintf(stderr, "%s: %03lu: never got lock %d: %s\n", 434 progname, id, lock, db_strerror(err)); 435 return ((void *)1); 436 } 437 if (mp->id != 0) { 438 fprintf(stderr, 439 "%s: RACE! (%03lu granted lock %d held by %03lu)\n", 440 progname, id, lock, mp->id); 441 return ((void *)1); 442 } 443 mp->id = id; 444 445 /* 446 * Pretend to do some work, periodically checking to see if 447 * we still hold the mutex. 448 */ 449 for (i = 0; i < 3; ++i) { 450 __os_yield(env, 0, (u_long)rand() % 3); 451 if (mp->id != id) { 452 fprintf(stderr, 453 "%s: RACE! (%03lu stole lock %d from %03lu)\n", 454 progname, mp->id, lock, id); 455 return ((void *)1); 456 } 457 } 458 459 /* 460 * Test self-blocking and unlocking by other threads/processes: 461 * 462 * acquire the global lock 463 * set our wakeup flag 464 * release the global lock 465 * acquire our per-thread lock 466 * 467 * The wakeup thread will wake us up. 468 */ 469 if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) { 470 fprintf(stderr, "%s: %03lu: global lock: %s\n", 471 progname, id, db_strerror(err)); 472 return ((void *)1); 473 } 474 if (tp->id != 0 && tp->id != id) { 475 fprintf(stderr, 476 "%s: %03lu: per-thread mutex isn't mine, owned by %03lu\n", 477 progname, id, tp->id); 478 return ((void *)1); 479 } 480 tp->id = id; 481 if (verbose) 482 printf("%03lu: self-blocking (mtx: %lu)\n", 483 id, (u_long)tp->mutex); 484 if (tp->wakeme) { 485 fprintf(stderr, 486 "%s: %03lu: wakeup flag incorrectly set\n", 487 progname, id); 488 return ((void *)1); 489 } 490 tp->wakeme = 1; 491 if ((err = dbenv->mutex_unlock(dbenv, gp->mutex)) != 0) { 492 fprintf(stderr, 493 "%s: %03lu: global unlock: %s\n", 494 progname, id, db_strerror(err)); 495 return ((void *)1); 496 } 497 if ((err = dbenv->mutex_lock(dbenv, tp->mutex)) != 0) { 498 fprintf(stderr, "%s: %03lu: per-thread lock: %s\n", 499 progname, id, db_strerror(err)); 500 return ((void *)1); 501 } 502 /* Time passes... */ 503 if (tp->wakeme) { 504 fprintf(stderr, "%s: %03lu: wakeup flag not cleared\n", 505 progname, id); 506 return ((void *)1); 507 } 508 509 if (verbose) 510 printf("%03lu: release %d (mtx: %lu)\n", 511 id, lock, (u_long)mp->mutex); 512 513 /* Release the data lock. */ 514 mp->id = 0; 515 if ((err = dbenv->mutex_unlock(dbenv, mp->mutex)) != 0) { 516 fprintf(stderr, 517 "%s: %03lu: lock release: %s\n", 518 progname, id, db_strerror(err)); 519 return ((void *)1); 520 } 521 522 if (--nl % 100 == 0) 523 printf("%03lu: %d\n", id, nl); 524 } 525 526 return (NULL); 527} 528 529int 530wakeup_start(id) 531 u_long id; 532{ 533#if defined(MUTEX_THREAD_TEST) 534 int err; 535 536 /* 537 * Spawn off wakeup thread. 538 */ 539 if ((err = os_thread_create( 540 &wakep, NULL, run_wthread, (void *)id)) != 0) { 541 fprintf(stderr, "%s: failed spawning wakeup thread: %s\n", 542 progname, db_strerror(err)); 543 return (1); 544 } 545 return (0); 546#else 547 return (run_wthread((void *)id) == NULL ? 0 : 1); 548#endif 549} 550 551int 552wakeup_wait() 553{ 554#if defined(MUTEX_THREAD_TEST) 555 void *retp; 556 557 /* 558 * A file is created when the wakeup thread is no longer needed. 559 */ 560 (void)os_thread_join(wakep, &retp); 561 if (retp != NULL) { 562 fprintf(stderr, 563 "%s: wakeup thread exited with error\n", progname); 564 return (1); 565 } 566#endif 567 return (0); 568} 569 570/* 571 * run_wthread -- 572 * Thread to wake up other threads that are sleeping. 573 */ 574void * 575run_wthread(arg) 576 void *arg; 577{ 578 TM *gp, *tp; 579 u_long id, tid; 580 u_int check_id; 581 int err; 582 583 id = (uintptr_t)arg; 584#if defined(MUTEX_THREAD_TEST) 585 tid = (u_long)os_thread_self(); 586#else 587 tid = 0; 588#endif 589 printf("Wakeup: ID %03lu (PID: %lu; TID: %lx)\n", 590 id, (u_long)getpid(), tid); 591 592 gp = (TM *)gm_addr; 593 594 /* Loop, waking up sleepers and periodically sleeping ourselves. */ 595 for (check_id = 0;; ++check_id) { 596 /* Check to see if the locking threads have finished. */ 597 if (__os_exists(env, MT_FILE_QUIT, NULL) == 0) 598 break; 599 600 /* Check for ID wraparound. */ 601 if (check_id == nthreads * nprocs) 602 check_id = 0; 603 604 /* Check for a thread that needs a wakeup. */ 605 tp = (TM *)(tm_addr + check_id * sizeof(TM)); 606 if (!tp->wakeme) 607 continue; 608 609 if (verbose) { 610 printf("%03lu: wakeup thread %03lu (mtx: %lu)\n", 611 id, tp->id, (u_long)tp->mutex); 612 (void)fflush(stdout); 613 } 614 615 /* Acquire the global lock. */ 616 if ((err = dbenv->mutex_lock(dbenv, gp->mutex)) != 0) { 617 fprintf(stderr, "%s: wakeup: global lock: %s\n", 618 progname, db_strerror(err)); 619 return ((void *)1); 620 } 621 622 tp->wakeme = 0; 623 if ((err = dbenv->mutex_unlock(dbenv, tp->mutex)) != 0) { 624 fprintf(stderr, "%s: wakeup: unlock: %s\n", 625 progname, db_strerror(err)); 626 return ((void *)1); 627 } 628 629 if ((err = dbenv->mutex_unlock(dbenv, gp->mutex)) != 0) { 630 fprintf(stderr, "%s: wakeup: global unlock: %s\n", 631 progname, db_strerror(err)); 632 return ((void *)1); 633 } 634 635 __os_yield(env, 0, (u_long)rand() % 3); 636 } 637 return (NULL); 638} 639 640/* 641 * tm_env_init -- 642 * Create the backing database environment. 643 */ 644int 645tm_env_init() 646{ 647 u_int32_t flags; 648 int ret; 649 char *home; 650 651 /* 652 * Create an environment object and initialize it for error 653 * reporting. 654 */ 655 if ((ret = db_env_create(&dbenv, 0)) != 0) { 656 fprintf(stderr, "%s: %s\n", progname, db_strerror(ret)); 657 return (1); 658 } 659 env = dbenv->env; 660 dbenv->set_errfile(dbenv, stderr); 661 dbenv->set_errpfx(dbenv, progname); 662 663 /* Allocate enough mutexes. */ 664 if ((ret = dbenv->mutex_set_increment(dbenv, 665 1 + nthreads * nprocs + maxlocks)) != 0) { 666 dbenv->err(dbenv, ret, "dbenv->mutex_set_increment"); 667 return (1); 668 } 669 670 flags = DB_CREATE; 671 if (nprocs == 1) { 672 home = NULL; 673 flags |= DB_PRIVATE; 674 } else 675 home = TESTDIR; 676 if (nthreads != 1) 677 flags |= DB_THREAD; 678 if ((ret = dbenv->open(dbenv, home, flags, 0)) != 0) { 679 dbenv->err(dbenv, ret, "environment open: %s", home); 680 return (1); 681 } 682 683 return (0); 684} 685 686/* 687 * tm_env_close -- 688 * Close the backing database environment. 689 */ 690void 691tm_env_close() 692{ 693 (void)dbenv->close(dbenv, 0); 694} 695 696/* 697 * tm_mutex_init -- 698 * Initialize the mutexes. 699 */ 700void 701tm_mutex_init() 702{ 703 TM *mp; 704 u_int i; 705 int err; 706 707 if (verbose) 708 printf("Allocate the global mutex: "); 709 mp = (TM *)gm_addr; 710 if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) { 711 fprintf(stderr, "%s: DB_ENV->mutex_alloc (global): %s\n", 712 progname, db_strerror(err)); 713 exit(EXIT_FAILURE); 714 } 715 if (verbose) 716 printf("%lu\n", (u_long)mp->mutex); 717 718 if (verbose) 719 printf( 720 "Allocate %d per-thread, self-blocking mutexes: ", 721 nthreads * nprocs); 722 for (i = 0; i < nthreads * nprocs; ++i) { 723 mp = (TM *)(tm_addr + i * sizeof(TM)); 724 if ((err = dbenv->mutex_alloc( 725 dbenv, DB_MUTEX_SELF_BLOCK, &mp->mutex)) != 0) { 726 fprintf(stderr, 727 "%s: DB_ENV->mutex_alloc (per-thread %d): %s\n", 728 progname, i, db_strerror(err)); 729 exit(EXIT_FAILURE); 730 } 731 if ((err = dbenv->mutex_lock(dbenv, mp->mutex)) != 0) { 732 fprintf(stderr, 733 "%s: DB_ENV->mutex_lock (per-thread %d): %s\n", 734 progname, i, db_strerror(err)); 735 exit(EXIT_FAILURE); 736 } 737 if (verbose) 738 printf("%lu ", (u_long)mp->mutex); 739 } 740 if (verbose) 741 printf("\n"); 742 743 if (verbose) 744 printf("Allocate %d per-lock mutexes: ", maxlocks); 745 for (i = 0; i < maxlocks; ++i) { 746 mp = (TM *)(lm_addr + i * sizeof(TM)); 747 if ((err = dbenv->mutex_alloc(dbenv, 0, &mp->mutex)) != 0) { 748 fprintf(stderr, 749 "%s: DB_ENV->mutex_alloc (per-lock: %d): %s\n", 750 progname, i, db_strerror(err)); 751 exit(EXIT_FAILURE); 752 } 753 if (verbose) 754 printf("%lu ", (u_long)mp->mutex); 755 } 756 if (verbose) 757 printf("\n"); 758} 759 760/* 761 * tm_mutex_destroy -- 762 * Destroy the mutexes. 763 */ 764void 765tm_mutex_destroy() 766{ 767 TM *gp, *mp; 768 u_int i; 769 int err; 770 771 if (verbose) 772 printf("Destroy the global mutex.\n"); 773 gp = (TM *)gm_addr; 774 if ((err = dbenv->mutex_free(dbenv, gp->mutex)) != 0) { 775 fprintf(stderr, "%s: DB_ENV->mutex_free (global): %s\n", 776 progname, db_strerror(err)); 777 exit(EXIT_FAILURE); 778 } 779 780 if (verbose) 781 printf("Destroy the per-thread mutexes.\n"); 782 for (i = 0; i < nthreads * nprocs; ++i) { 783 mp = (TM *)(tm_addr + i * sizeof(TM)); 784 if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) { 785 fprintf(stderr, 786 "%s: DB_ENV->mutex_free (per-thread %d): %s\n", 787 progname, i, db_strerror(err)); 788 exit(EXIT_FAILURE); 789 } 790 } 791 792 if (verbose) 793 printf("Destroy the per-lock mutexes.\n"); 794 for (i = 0; i < maxlocks; ++i) { 795 mp = (TM *)(lm_addr + i * sizeof(TM)); 796 if ((err = dbenv->mutex_free(dbenv, mp->mutex)) != 0) { 797 fprintf(stderr, 798 "%s: DB_ENV->mutex_free (per-lock: %d): %s\n", 799 progname, i, db_strerror(err)); 800 exit(EXIT_FAILURE); 801 } 802 } 803} 804 805/* 806 * tm_mutex_stats -- 807 * Display mutex statistics. 808 */ 809void 810tm_mutex_stats() 811{ 812#ifdef HAVE_STATISTICS 813 TM *mp; 814 u_int32_t set_wait, set_nowait; 815 u_int i; 816 817 printf("Per-lock mutex statistics.\n"); 818 for (i = 0; i < maxlocks; ++i) { 819 mp = (TM *)(lm_addr + i * sizeof(TM)); 820 __mutex_set_wait_info(env, mp->mutex, &set_wait, &set_nowait); 821 printf("mutex %2d: wait: %lu; no wait %lu\n", i, 822 (u_long)set_wait, (u_long)set_nowait); 823 } 824#endif 825} 826 827/* 828 * data_on -- 829 * Map in or allocate the backing data space. 830 */ 831void 832data_on(gm_addrp, tm_addrp, lm_addrp, fhpp, init) 833 u_int8_t **gm_addrp, **tm_addrp, **lm_addrp; 834 DB_FH **fhpp; 835 int init; 836{ 837 DB_FH *fhp; 838 size_t nwrite; 839 int err; 840 void *addr; 841 842 fhp = NULL; 843 844 /* 845 * In a single process, use heap memory. 846 */ 847 if (nprocs == 1) { 848 if (init) { 849 if ((err = 850 __os_calloc(env, (size_t)len, 1, &addr)) != 0) 851 exit(EXIT_FAILURE); 852 } else { 853 fprintf(stderr, 854 "%s: init should be set for single process call\n", 855 progname); 856 exit(EXIT_FAILURE); 857 } 858 } else { 859 if (init) { 860 if (verbose) 861 printf("Create the backing file.\n"); 862 863 if ((err = __os_open(env, MT_FILE, 0, 864 DB_OSO_CREATE | DB_OSO_TRUNC, 0666, &fhp)) == -1) { 865 fprintf(stderr, "%s: %s: open: %s\n", 866 progname, MT_FILE, db_strerror(err)); 867 exit(EXIT_FAILURE); 868 } 869 870 if ((err = __os_seek(env, fhp, 0, 0, len)) != 0 || 871 (err = 872 __os_write(env, fhp, &err, 1, &nwrite)) != 0 || 873 nwrite != 1) { 874 fprintf(stderr, "%s: %s: seek/write: %s\n", 875 progname, MT_FILE, db_strerror(err)); 876 exit(EXIT_FAILURE); 877 } 878 } else 879 if ((err = __os_open(env, MT_FILE, 0, 0, 0, &fhp)) != 0) 880 exit(EXIT_FAILURE); 881 882 if ((err = 883 __os_mapfile(env, MT_FILE, fhp, len, 0, &addr)) != 0) 884 exit(EXIT_FAILURE); 885 } 886 887 *gm_addrp = (u_int8_t *)addr; 888 addr = (u_int8_t *)addr + sizeof(TM); 889 *tm_addrp = (u_int8_t *)addr; 890 addr = (u_int8_t *)addr + sizeof(TM) * (nthreads * nprocs); 891 *lm_addrp = (u_int8_t *)addr; 892 893 if (fhpp != NULL) 894 *fhpp = fhp; 895} 896 897/* 898 * data_off -- 899 * Discard or de-allocate the backing data space. 900 */ 901void 902data_off(addr, fhp) 903 u_int8_t *addr; 904 DB_FH *fhp; 905{ 906 if (nprocs == 1) 907 __os_free(env, addr); 908 else { 909 if (__os_unmapfile(env, addr, len) != 0) 910 exit(EXIT_FAILURE); 911 if (__os_closehandle(env, fhp) != 0) 912 exit(EXIT_FAILURE); 913 } 914} 915 916/* 917 * usage -- 918 * 919 */ 920int 921usage() 922{ 923 fprintf(stderr, "usage: %s %s\n\t%s\n", progname, 924 "[-v] [-l maxlocks]", 925 "[-n locks] [-p procs] [-T locker=ID|wakeup=ID] [-t threads]"); 926 return (EXIT_FAILURE); 927} 928 929/* 930 * os_wait -- 931 * Wait for an array of N procs. 932 */ 933int 934os_wait(procs, n) 935 os_pid_t *procs; 936 u_int n; 937{ 938 u_int i; 939 int status; 940#if defined(DB_WIN32) 941 DWORD ret; 942#endif 943 944 status = 0; 945 946#if defined(DB_WIN32) 947 do { 948 ret = WaitForMultipleObjects(n, procs, FALSE, INFINITE); 949 i = ret - WAIT_OBJECT_0; 950 if (i < 0 || i >= n) 951 return (__os_posix_err(__os_get_syserr())); 952 953 if ((GetExitCodeProcess(procs[i], &ret) == 0) || (ret != 0)) 954 return (ret); 955 956 /* remove the process handle from the list */ 957 while (++i < n) 958 procs[i - 1] = procs[i]; 959 } while (--n); 960#elif !defined(HAVE_VXWORKS) 961 do { 962 if (wait(&status) == -1) 963 return (__os_posix_err(__os_get_syserr())); 964 965 if (WIFEXITED(status) == 0 || WEXITSTATUS(status) != 0) { 966 for (i = 0; i < n; i++) 967 (void)kill(procs[i], SIGKILL); 968 return (WEXITSTATUS(status)); 969 } 970 } while (--n); 971#endif 972 973 return (0); 974} 975 976os_pid_t 977spawn_proc(id, tmpath, typearg) 978 u_long id; 979 char *tmpath, *typearg; 980{ 981 char *const vbuf = verbose ? "-v" : NULL; 982 char *args[13], lbuf[16], nbuf[16], pbuf[16], tbuf[16], Tbuf[256]; 983 984 args[0] = tmpath; 985 args[1] = "-l"; 986 snprintf(lbuf, sizeof(lbuf), "%d", maxlocks); 987 args[2] = lbuf; 988 args[3] = "-n"; 989 snprintf(nbuf, sizeof(nbuf), "%d", nlocks); 990 args[4] = nbuf; 991 args[5] = "-p"; 992 snprintf(pbuf, sizeof(pbuf), "%d", nprocs); 993 args[6] = pbuf; 994 args[7] = "-t"; 995 snprintf(tbuf, sizeof(tbuf), "%d", nthreads); 996 args[8] = tbuf; 997 args[9] = "-T"; 998 snprintf(Tbuf, sizeof(Tbuf), "%s=%lu", typearg, id); 999 args[10] = Tbuf; 1000 args[11] = vbuf; 1001 args[12] = NULL; 1002 1003 return (os_spawn(tmpath, args)); 1004} 1005 1006os_pid_t 1007os_spawn(path, argv) 1008 const char *path; 1009 char *const argv[]; 1010{ 1011 os_pid_t pid; 1012 int status; 1013 1014 COMPQUIET(pid, 0); 1015 COMPQUIET(status, 0); 1016 1017#ifdef HAVE_VXWORKS 1018 fprintf(stderr, "%s: os_spawn not supported for VxWorks.\n", progname); 1019 return (OS_BAD_PID); 1020#elif defined(HAVE_QNX) 1021 /* 1022 * For QNX, we cannot fork if we've ever used threads. So 1023 * we'll use their spawn function. We use 'spawnl' which 1024 * is NOT a POSIX function. 1025 * 1026 * The return value of spawnl is just what we want depending 1027 * on the value of the 'wait' arg. 1028 */ 1029 return (spawnv(P_NOWAIT, path, argv)); 1030#elif defined(DB_WIN32) 1031 return (os_pid_t)(_spawnv(P_NOWAIT, path, argv)); 1032#else 1033 if ((pid = fork()) != 0) { 1034 if (pid == -1) 1035 return (OS_BAD_PID); 1036 return (pid); 1037 } else { 1038 (void)execv(path, argv); 1039 exit(EXIT_FAILURE); 1040 } 1041#endif 1042} 1043