/*
 * See the file LICENSE for redistribution information.
 *
 * Copyright (c) 2002-2009 Oracle.  All rights reserved.
 *
 * $Id$
 */

#include "db_config.h"

#define	LOAD_ACTUAL_MUTEX_CODE
#include "db_int.h"

#include "dbinc/atomic.h"
/*
 * This is where we load in the actual test-and-set mutex code.
 */
#include "dbinc/mutex_int.h"

/* We don't want to run this code even in "ordinary" diagnostic mode. */
#undef MUTEX_DIAG

/*
 * Common code to get an event handle.  This is executed whenever a mutex
 * blocks, or when unlocking a mutex that a thread is waiting on.  We can't
 * keep these handles around, since the mutex structure is in shared memory,
 * and each process gets its own handle value.
 *
 * We pass security attributes so that the created event is accessible by all
 * users, in case a Windows service is sharing an environment with a local
 * process run as a different user.
 */
static _TCHAR hex_digits[] = _T("0123456789abcdef");
/* Process-local security objects, built once on first use (see below). */
static SECURITY_DESCRIPTOR null_sd;
static SECURITY_ATTRIBUTES all_sa;
/*
 * One-shot init flag.  NOTE(review): this flag is read and written without
 * synchronization; concurrent first callers may both run the init block.
 * That appears benign since both write identical values — confirm this is
 * the intended upstream behavior.
 */
static int security_initialized = 0;

/*
 * get_handle --
 *	Open (or create) the named auto-reset event used to wake waiters of
 *	a given mutex.
 *
 *	env:    environment handle, used only for error reporting.
 *	mutexp: the mutex whose per-mutex "id" field names the event.
 *	eventp: on success, set to the event HANDLE; caller must CloseHandle.
 *
 *	Returns 0 on success, or a Win32 system error code on failure.
 */
static __inline int get_handle(env, mutexp, eventp)
	ENV *env;
	DB_MUTEX *mutexp;
	HANDLE *eventp;
{
	/*
	 * Event name template: "db.m" followed by up to 8 hex digits.
	 * p starts just past the last digit slot (index 12, the NUL);
	 * digits are written backwards with pre-decrement, so leading
	 * zeros from the template remain for short ids.
	 */
	_TCHAR idbuf[] = _T("db.m00000000");
	_TCHAR *p = idbuf + 12;
	int ret = 0;
	u_int32_t id;

	for (id = (mutexp)->id; id != 0; id >>= 4)
		*--p = hex_digits[id & 0xf];

#ifndef DB_WINCE
	/*
	 * Build a security descriptor with a NULL DACL, which grants access
	 * to everyone — required so that processes running as different
	 * users (e.g. a service and a console app) can open the same event.
	 */
	if (!security_initialized) {
		InitializeSecurityDescriptor(&null_sd,
		    SECURITY_DESCRIPTOR_REVISION);
		SetSecurityDescriptorDacl(&null_sd, TRUE, 0, FALSE);
		all_sa.nLength = sizeof(SECURITY_ATTRIBUTES);
		all_sa.bInheritHandle = FALSE;
		all_sa.lpSecurityDescriptor = &null_sd;
		security_initialized = 1;
	}
#endif

	/*
	 * CreateEvent opens the existing event if another process/thread
	 * already created one with this name; auto-reset, initially
	 * unsignalled.
	 */
	if ((*eventp = CreateEvent(&all_sa, FALSE, FALSE, idbuf)) == NULL) {
		ret = __os_get_syserr();
		__db_syserr(env, ret, "Win32 create event failed");
	}

	return (ret);
}

/*
 * __db_win32_mutex_lock_int
 *	Internal function to lock a win32 mutex.
 *
 *	If the wait parameter is 0, this function will return
 *	DB_LOCK_NOTGRANTED rather than wait.
 *
 *	Returns 0 on success, DB_LOCK_NOTGRANTED for a failed try-lock,
 *	DB_RUNRECOVERY if failchk detects a dead lock holder, or panics
 *	the environment on a Win32 system error.
 */
static __inline int
__db_win32_mutex_lock_int(env, mutex, wait)
	ENV *env;
	db_mutex_t mutex;
	int wait;
{
	DB_ENV *dbenv;
	DB_MUTEX *mutexp;
	DB_MUTEXMGR *mtxmgr;
	DB_MUTEXREGION *mtxregion;
	DB_THREAD_INFO *ip;
	HANDLE event;
	u_int32_t nspins;
	int ms, ret;
#ifdef MUTEX_DIAG
	LARGE_INTEGER now;
#endif
	dbenv = env->dbenv;

	/* Locking is a no-op before the mutex region exists or in NOLOCKING mode. */
	if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	mtxmgr = env->mutex_handle;
	mtxregion = mtxmgr->reginfo.primary;
	mutexp = MUTEXP_SET(mtxmgr, mutex);

	CHECK_MTX_THREAD(env, mutexp);

	/*
	 * See WINCE_ATOMIC_MAGIC definition for details.
	 * Use sharecount, because the value just needs to be a db_atomic_t
	 * memory mapped onto the same page as those being Interlocked*.
	 */
	WINCE_ATOMIC_MAGIC(&mutexp->sharecount);

	event = NULL;		/* wake-up event, created lazily on first block */
	ms = 50;		/* initial wait, doubled up to MS_PER_SEC below */
	ret = 0;

	/*
	 * Only check the thread state once, by initializing the thread
	 * control block pointer to null.  If it is not the failchk
	 * thread, then ip will have a valid value subsequent times
	 * in the loop.
	 */
	ip = NULL;

loop:	/* Attempt to acquire the mutex mutex_tas_spins times, if waiting. */
	for (nspins =
	    mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) {
		/*
		 * We can avoid the (expensive) interlocked instructions if
		 * the mutex is already busy.
		 */
		if (MUTEXP_IS_BUSY(mutexp) || !MUTEXP_ACQUIRE(mutexp)) {
			/*
			 * Under failchk, if the recorded holder is dead and
			 * we are the failchk thread, give up so recovery can
			 * run instead of blocking forever on a dead owner.
			 */
			if (F_ISSET(dbenv, DB_ENV_FAILCHK) &&
			    ip == NULL && dbenv->is_alive(dbenv,
			    mutexp->pid, mutexp->tid, 0) == 0) {
				ret = __env_set_state(env, &ip, THREAD_VERIFY);
				if (ret != 0 ||
				    ip->dbth_state == THREAD_FAILCHK)
					return (DB_RUNRECOVERY);
			}
			if (!wait)
				return (DB_LOCK_NOTGRANTED);
			/*
			 * Some systems (notably those with newer Intel CPUs)
			 * need a small pause before retrying. [#6975]
			 */
			MUTEX_PAUSE
			continue;
		}

		/* Acquired: record ownership for diagnostics and failchk. */
#ifdef DIAGNOSTIC
		if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
			char buf[DB_THREADID_STRLEN];
			__db_errx(env,
			    "Win32 lock failed: mutex already locked by %s",
			    dbenv->thread_id_string(dbenv,
			    mutexp->pid, mutexp->tid, buf));
			return (__env_panic(env, EACCES));
		}
#endif
		F_SET(mutexp, DB_MUTEX_LOCKED);
		dbenv->thread_id(dbenv, &mutexp->pid, &mutexp->tid);

#ifdef HAVE_STATISTICS
		if (event == NULL)
			++mutexp->mutex_set_nowait;
		else
			++mutexp->mutex_set_wait;
#endif
		/* If we had blocked, drop our waiter registration and handle. */
		if (event != NULL) {
			CloseHandle(event);
			InterlockedDecrement(&mutexp->nwaiters);
#ifdef MUTEX_DIAG
			if (ret != WAIT_OBJECT_0) {
				QueryPerformanceCounter(&now);
				printf("[%I64d]: Lost signal on mutex %p, "
				    "id %d, ms %d\n",
				    now.QuadPart, mutexp, mutexp->id, ms);
			}
#endif
		}

#ifdef DIAGNOSTIC
		/*
		 * We want to switch threads as often as possible.  Yield
		 * every time we get a mutex to ensure contention.
		 */
		if (F_ISSET(dbenv, DB_ENV_YIELDCPU))
			__os_yield(env, 0, 0);
#endif

		return (0);
	}

	/*
	 * Yield the processor; wait 50 ms initially, up to 1 second.  This
	 * loop is needed to work around a race where the signal from the
	 * unlocking thread gets lost.  We start at 50 ms because it's unlikely
	 * to happen often and we want to avoid wasting CPU.
	 */
	if (event == NULL) {
#ifdef MUTEX_DIAG
		QueryPerformanceCounter(&now);
		printf("[%I64d]: Waiting on mutex %p, id %d\n",
		    now.QuadPart, mutexp, mutexp->id);
#endif
		/*
		 * Register as a waiter BEFORE opening the event, so the
		 * unlocker sees nwaiters > 0 and pulses the event.
		 */
		InterlockedIncrement(&mutexp->nwaiters);
		if ((ret = get_handle(env, mutexp, &event)) != 0)
			goto err;
	}
	/* Timed wait: a timeout just retries, covering lost PulseEvent wakeups. */
	if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) {
		ret = __os_get_syserr();
		goto err;
	}
	if ((ms <<= 1) > MS_PER_SEC)
		ms = MS_PER_SEC;

	PANIC_CHECK(env);
	goto loop;

err:	__db_syserr(env, ret, "Win32 lock failed");
	return (__env_panic(env, __os_posix_err(ret)));
}

/*
 * __db_win32_mutex_init --
 *	Initialize a Win32 mutex.
 *
 * PUBLIC: int __db_win32_mutex_init __P((ENV *, db_mutex_t, u_int32_t));
 */
int
__db_win32_mutex_init(env, mutex, flags)
	ENV *env;
	db_mutex_t mutex;
	u_int32_t flags;
{
	DB_MUTEX *mutexp;

	mutexp = MUTEXP_SET(env->mutex_handle, mutex);
	/*
	 * Derive a (mostly) unique 32-bit id from the creating pid and the
	 * mutex's address; get_handle() uses it to name the shared event.
	 */
	mutexp->id = ((getpid() & 0xffff) << 16) ^ P_TO_UINT32(mutexp);
	F_SET(mutexp, flags);

	return (0);
}

/*
 * __db_win32_mutex_lock
 *	Lock on a mutex, blocking if necessary.
 *
 * PUBLIC: int __db_win32_mutex_lock __P((ENV *, db_mutex_t));
 */
int
__db_win32_mutex_lock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	return (__db_win32_mutex_lock_int(env, mutex, 1));
}

/*
 * __db_win32_mutex_trylock
 *	Try to lock a mutex, returning without waiting if it is busy.
 *
 * PUBLIC: int __db_win32_mutex_trylock __P((ENV *, db_mutex_t));
 */
int
__db_win32_mutex_trylock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	return (__db_win32_mutex_lock_int(env, mutex, 0));
}

#if defined(HAVE_SHARED_LATCHES)
/*
 * __db_win32_mutex_readlock_int
 *	Try to lock a mutex, possibly waiting if requested and necessary.
279 */ 280int 281__db_win32_mutex_readlock_int(env, mutex, nowait) 282 ENV *env; 283 db_mutex_t mutex; 284 int nowait; 285{ 286 DB_ENV *dbenv; 287 DB_MUTEX *mutexp; 288 DB_MUTEXMGR *mtxmgr; 289 DB_MUTEXREGION *mtxregion; 290 HANDLE event; 291 u_int32_t nspins; 292 int ms, ret; 293 long exch_ret, mtx_val; 294#ifdef MUTEX_DIAG 295 LARGE_INTEGER now; 296#endif 297 dbenv = env->dbenv; 298 299 if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING)) 300 return (0); 301 302 mtxmgr = env->mutex_handle; 303 mtxregion = mtxmgr->reginfo.primary; 304 mutexp = MUTEXP_SET(mtxmgr, mutex); 305 306 CHECK_MTX_THREAD(env, mutexp); 307 308 /* 309 * See WINCE_ATOMIC_MAGIC definition for details. 310 * Use sharecount, because the value just needs to be a db_atomic_t 311 * memory mapped onto the same page as those being Interlocked*. 312 */ 313 WINCE_ATOMIC_MAGIC(&mutexp->sharecount); 314 315 event = NULL; 316 ms = 50; 317 ret = 0; 318 /* 319 * This needs to be initialized, since if mutexp->tas 320 * is write locked on the first pass, it needs a value. 321 */ 322 exch_ret = 0; 323 324loop: /* Attempt to acquire the resource for N spins. */ 325 for (nspins = 326 mtxregion->stat.st_mutex_tas_spins; nspins > 0; --nspins) { 327 /* 328 * We can avoid the (expensive) interlocked instructions if 329 * the mutex is already "set". 330 */ 331retry: mtx_val = atomic_read(&mutexp->sharecount); 332 if (mtx_val == MUTEX_SHARE_ISEXCLUSIVE) { 333 if (nowait) 334 return (DB_LOCK_NOTGRANTED); 335 336 continue; 337 } else if (!atomic_compare_exchange(env, &mutexp->sharecount, 338 mtx_val, mtx_val + 1)) { 339 /* 340 * Some systems (notably those with newer Intel CPUs) 341 * need a small pause here. 
[#6975] 342 */ 343 MUTEX_PAUSE 344 goto retry; 345 } 346 347#ifdef HAVE_STATISTICS 348 if (event == NULL) 349 ++mutexp->mutex_set_rd_nowait; 350 else 351 ++mutexp->mutex_set_rd_wait; 352#endif 353 if (event != NULL) { 354 CloseHandle(event); 355 InterlockedDecrement(&mutexp->nwaiters); 356#ifdef MUTEX_DIAG 357 if (ret != WAIT_OBJECT_0) { 358 QueryPerformanceCounter(&now); 359 printf("[%I64d]: Lost signal on mutex %p, " 360 "id %d, ms %d\n", 361 now.QuadPart, mutexp, mutexp->id, ms); 362 } 363#endif 364 } 365 366#ifdef DIAGNOSTIC 367 /* 368 * We want to switch threads as often as possible. Yield 369 * every time we get a mutex to ensure contention. 370 */ 371 if (F_ISSET(dbenv, DB_ENV_YIELDCPU)) 372 __os_yield(env, 0, 0); 373#endif 374 375 return (0); 376 } 377 378 /* 379 * Yield the processor; wait 50 ms initially, up to 1 second. This 380 * loop is needed to work around a race where the signal from the 381 * unlocking thread gets lost. We start at 50 ms because it's unlikely 382 * to happen often and we want to avoid wasting CPU. 
383 */ 384 if (event == NULL) { 385#ifdef MUTEX_DIAG 386 QueryPerformanceCounter(&now); 387 printf("[%I64d]: Waiting on mutex %p, id %d\n", 388 now.QuadPart, mutexp, mutexp->id); 389#endif 390 InterlockedIncrement(&mutexp->nwaiters); 391 if ((ret = get_handle(env, mutexp, &event)) != 0) 392 goto err; 393 } 394 if ((ret = WaitForSingleObject(event, ms)) == WAIT_FAILED) { 395 ret = __os_get_syserr(); 396 goto err; 397 } 398 if ((ms <<= 1) > MS_PER_SEC) 399 ms = MS_PER_SEC; 400 401 PANIC_CHECK(env); 402 goto loop; 403 404err: __db_syserr(env, ret, "Win32 read lock failed"); 405 return (__env_panic(env, __os_posix_err(ret))); 406} 407 408/* 409 * __db_win32_mutex_readlock 410 * Get a shared lock on a latch 411 * 412 * PUBLIC: #if defined(HAVE_SHARED_LATCHES) 413 * PUBLIC: int __db_win32_mutex_readlock __P((ENV *, db_mutex_t)); 414 * PUBLIC: #endif 415 */ 416int 417__db_win32_mutex_readlock(env, mutex) 418 ENV *env; 419 db_mutex_t mutex; 420{ 421 return (__db_win32_mutex_readlock_int(env, mutex, 0)); 422} 423 424/* 425 * __db_win32_mutex_tryreadlock 426 * Try to a shared lock on a latch 427 * 428 * PUBLIC: #if defined(HAVE_SHARED_LATCHES) 429 * PUBLIC: int __db_win32_mutex_tryreadlock __P((ENV *, db_mutex_t)); 430 * PUBLIC: #endif 431 */ 432int 433__db_win32_mutex_tryreadlock(env, mutex) 434 ENV *env; 435 db_mutex_t mutex; 436{ 437 return (__db_win32_mutex_readlock_int(env, mutex, 1)); 438} 439#endif 440 441/* 442 * __db_win32_mutex_unlock -- 443 * Release a mutex. 
 *
 * PUBLIC: int __db_win32_mutex_unlock __P((ENV *, db_mutex_t));
 */
int
__db_win32_mutex_unlock(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	DB_ENV *dbenv;
	DB_MUTEX *mutexp;
	DB_MUTEXMGR *mtxmgr;
	HANDLE event;
	int ret;
#ifdef MUTEX_DIAG
	LARGE_INTEGER now;
#endif
	dbenv = env->dbenv;

	/* Unlocking is a no-op before the mutex region exists or in NOLOCKING mode. */
	if (!MUTEX_ON(env) || F_ISSET(dbenv, DB_ENV_NOLOCKING))
		return (0);

	mtxmgr = env->mutex_handle;
	mutexp = MUTEXP_SET(mtxmgr, mutex);

#ifdef DIAGNOSTIC
	/* Catch double-unlock: the mutex must be busy and flagged held. */
	if (!MUTEXP_IS_BUSY(mutexp) || !(F_ISSET(mutexp, DB_MUTEX_SHARED) ||
	    F_ISSET(mutexp, DB_MUTEX_LOCKED))) {
		__db_errx(env,
	    "Win32 unlock failed: lock already unlocked: mutex %d busy %d",
		    mutex, MUTEXP_BUSY_FIELD(mutexp));
		return (__env_panic(env, EACCES));
	}
#endif
	/*
	 * If we have a shared latch, and a read lock (DB_MUTEX_LOCKED is only
	 * set for write locks), then decrement the latch.  If the readlock is
	 * still held by other threads, just return.  Otherwise go ahead and
	 * notify any waiting threads.
	 */
#ifdef HAVE_SHARED_LATCHES
	if (F_ISSET(mutexp, DB_MUTEX_SHARED)) {
		if (F_ISSET(mutexp, DB_MUTEX_LOCKED)) {
			F_CLR(mutexp, DB_MUTEX_LOCKED);
			/*
			 * Releasing a write lock: swap sharecount to 0.  The
			 * previous value must be MUTEX_SHARE_ISEXCLUSIVE; any
			 * other value means the latch state was corrupted,
			 * which is unrecoverable.
			 */
			if ((ret = InterlockedExchange(
			    (interlocked_val)(&atomic_read(
			    &mutexp->sharecount)), 0)) !=
			    MUTEX_SHARE_ISEXCLUSIVE) {
				ret = DB_RUNRECOVERY;
				goto err;
			}
		} else if (InterlockedDecrement(
		    (interlocked_val)(&atomic_read(&mutexp->sharecount))) > 0)
			/* Other readers still hold the latch; nothing to wake. */
			return (0);
	} else
#endif
	{
		/*
		 * Plain mutex: clear the ownership flag before releasing the
		 * test-and-set word, so a new acquirer never observes the
		 * LOCKED flag from the previous owner.
		 */
		F_CLR(mutexp, DB_MUTEX_LOCKED);
		MUTEX_UNSET(&mutexp->tas);
	}

	/*
	 * Wake any blocked threads.  PulseEvent releases threads currently
	 * waiting but can miss a thread that is between registering as a
	 * waiter and calling WaitForSingleObject; the lockers' bounded,
	 * escalating timed waits recover from such lost signals.
	 */
	if (mutexp->nwaiters > 0) {
		if ((ret = get_handle(env, mutexp, &event)) != 0)
			goto err;

#ifdef MUTEX_DIAG
		QueryPerformanceCounter(&now);
		printf("[%I64d]: Signalling mutex %p, id %d\n",
		    now.QuadPart, mutexp, mutexp->id);
#endif
		if (!PulseEvent(event)) {
			ret = __os_get_syserr();
			CloseHandle(event);
			goto err;
		}

		CloseHandle(event);
	}

	return (0);

err:	__db_syserr(env, ret, "Win32 unlock failed");
	return (__env_panic(env, __os_posix_err(ret)));
}

/*
 * __db_win32_mutex_destroy --
 *	Destroy a mutex.
 *
 *	A no-op on Win32: event handles are opened on demand and closed by
 *	each waiter, so there is no per-mutex OS resource to release here.
 *
 * PUBLIC: int __db_win32_mutex_destroy __P((ENV *, db_mutex_t));
 */
int
__db_win32_mutex_destroy(env, mutex)
	ENV *env;
	db_mutex_t mutex;
{
	return (0);
}