Lines Matching refs:lk

73 return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;
77 return lck->lk.depth_locked != -1;
85 kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
94 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
95 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
107 } while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
108 !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy));
135 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
136 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
158 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
184 lck->lk.poll = KMP_LOCK_FREE(tas);
187 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
207 lck->lk.depth_locked += 1;
212 lck->lk.depth_locked = 1;
232 retval = ++lck->lk.depth_locked;
237 retval = lck->lk.depth_locked = 1;
255 if (--(lck->lk.depth_locked) == 0) {
280 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
285 lck->lk.depth_locked = 0;
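
The block above (lines 73-285) is the test-and-set (TAS) lock: lk.poll is read with a relaxed load and claimed with an acquire compare-and-store, release is a plain release store, and lk.depth_locked carries the nesting count for the nestable variant. Below is a minimal sketch of the same acquire/release pattern using std::atomic; the names and the 0/1 encoding are assumptions for illustration, not the kmp_* API.

#include <atomic>

// Minimal TAS-style spin lock (assumed names; not the kmp_* API).
// 0 = free, 1 = busy; the runtime encodes these via KMP_LOCK_FREE/KMP_LOCK_BUSY.
struct tas_lock {
  std::atomic<int> poll{0};

  void acquire() {
    // Cheap relaxed read first, then an acquire CAS to claim the lock,
    // mirroring the KMP_ATOMIC_LD_RLX + __kmp_atomic_compare_store_acq pair above.
    int expected = 0;
    while (poll.load(std::memory_order_relaxed) != 0 ||
           !poll.compare_exchange_strong(expected, 1, std::memory_order_acquire)) {
      expected = 0; // compare_exchange_strong rewrites expected on failure
    }
  }

  bool try_acquire() {
    int expected = 0;
    return poll.load(std::memory_order_relaxed) == 0 &&
           poll.compare_exchange_strong(expected, 1, std::memory_order_acquire);
  }

  void release() {
    // Matches the KMP_ATOMIC_ST_REL store of KMP_LOCK_FREE(tas) at line 158.
    poll.store(0, std::memory_order_release);
  }
};

The relaxed pre-check before the CAS keeps the spin loop read-only while the lock is held, which is the point of the KMP_ATOMIC_LD_RLX tests at lines 94 and 107.
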
310 return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
314 return lck->lk.depth_locked != -1;
324 kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
332 lck, lck->lk.poll, gtid));
337 &(lck->lk.poll), KMP_LOCK_FREE(futex),
355 if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
360 lck, lck->lk.poll, gtid));
367 lck->lk.poll, gtid));
376 if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
395 lck->lk.poll, gtid));
419 if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
441 lck, lck->lk.poll, gtid));
446 kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
456 syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
463 lck->lk.poll, gtid));
488 TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
491 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
511 lck->lk.depth_locked += 1;
516 lck->lk.depth_locked = 1;
536 retval = ++lck->lk.depth_locked;
541 retval = lck->lk.depth_locked = 1;
559 if (--(lck->lk.depth_locked) == 0) {
584 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
589 lck->lk.depth_locked = 0;
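
Lines 310-589 are the futex lock: lk.poll encodes the owner plus a low contention bit, and contended waiters block in the kernel via syscall(__NR_futex, ..., FUTEX_WAIT, ...) while the releasing thread wakes one of them with FUTEX_WAKE (lines 376 and 456). Below is a simplified, Linux-only sketch of that wait/wake pattern (the classic three-state futex mutex, with hypothetical names; the real lock stores gtid-based values rather than 0/1/2).

#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// Three-state futex mutex sketch: 0 = free, 1 = locked, 2 = locked with waiters.
// The kmp lock instead packs KMP_LOCK_BUSY(gtid + 1, futex) plus a low
// contention bit into poll, but the FUTEX_WAIT / FUTEX_WAKE pattern is the same.
struct futex_lock {
  std::atomic<int> poll{0};

  static long futex(std::atomic<int> *addr, int op, int val) {
    return syscall(__NR_futex, reinterpret_cast<int *>(addr), op, val,
                   nullptr, nullptr, 0);
  }

  void acquire() {
    int c = 0;
    if (poll.compare_exchange_strong(c, 1, std::memory_order_acquire))
      return;                                  // uncontended fast path
    if (c != 2)
      c = poll.exchange(2, std::memory_order_acquire);
    while (c != 0) {                           // mark contended, then sleep
      futex(&poll, FUTEX_WAIT, 2);
      c = poll.exchange(2, std::memory_order_acquire);
    }
  }

  void release() {
    if (poll.exchange(0, std::memory_order_release) == 2)
      futex(&poll, FUTEX_WAKE, 1);             // wake one waiter, as at line 456
  }
};

Blocking in the kernel instead of spinning is the design choice here: the TAS lock above burns CPU while it waits, the futex lock sleeps.
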
609 return std::atomic_load_explicit(&lck->lk.owner_id,
615 return std::atomic_load_explicit(&lck->lk.depth_locked,
628 &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
631 if (std::atomic_load_explicit(&lck->lk.now_serving,
637 if (std::atomic_load_explicit(&lck->lk.now_serving,
641 KMP_WAIT_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
655 if (!std::atomic_load_explicit(&lck->lk.initialized,
659 if (lck->lk.self != lck) {
671 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
677 kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
680 if (std::atomic_load_explicit(&lck->lk.now_serving,
684 &lck->lk.next_ticket, &my_ticket, next_ticket,
696 if (!std::atomic_load_explicit(&lck->lk.initialized,
700 if (lck->lk.self != lck) {
710 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
717 kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
719 std::atomic_load_explicit(&lck->lk.now_serving,
723 std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
735 if (!std::atomic_load_explicit(&lck->lk.initialized,
739 if (lck->lk.self != lck) {
752 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
757 lck->lk.location = NULL;
758 lck->lk.self = lck;
759 std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
761 std::atomic_store_explicit(&lck->lk.now_serving, 0U,
764 &lck->lk.owner_id, 0,
767 &lck->lk.depth_locked, -1,
769 std::atomic_store_explicit(&lck->lk.initialized, true,
774 std::atomic_store_explicit(&lck->lk.initialized, false,
776 lck->lk.self = NULL;
777 lck->lk.location = NULL;
778 std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
780 std::atomic_store_explicit(&lck->lk.now_serving, 0U,
782 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
783 std::atomic_store_explicit(&lck->lk.depth_locked, -1,
790 if (!std::atomic_load_explicit(&lck->lk.initialized,
794 if (lck->lk.self != lck) {
812 std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
818 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
820 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
830 if (!std::atomic_load_explicit(&lck->lk.initialized,
834 if (lck->lk.self != lck) {
849 retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
855 std::atomic_store_explicit(&lck->lk.depth_locked, 1,
857 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
868 if (!std::atomic_load_explicit(&lck->lk.initialized,
872 if (lck->lk.self != lck) {
884 if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
887 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
898 if (!std::atomic_load_explicit(&lck->lk.initialized,
902 if (lck->lk.self != lck) {
919 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
926 std::atomic_store_explicit(&lck->lk.depth_locked, 0,
934 if (!std::atomic_load_explicit(&lck->lk.initialized,
938 if (lck->lk.self != lck) {
953 return lck->lk.location;
958 lck->lk.location = loc;
962 return lck->lk.flags;
967 lck->lk.flags = flags;
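
Lines 609-967 are the ticket lock built on C++11 atomics: acquire takes a ticket with fetch_add on lk.next_ticket and waits until lk.now_serving reaches it, test-lock succeeds only if it can advance next_ticket with a compare-exchange, and release bumps now_serving. Below is a minimal sketch with assumed names; owner_id, depth_locked, and the self/initialized consistency checks are left out.

#include <atomic>

// Minimal ticket lock in the spirit of lines 628-723 (assumed names).
struct ticket_lock {
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> now_serving{0};

  void acquire() {
    // Take a ticket, then wait until it is being served (lines 628-641).
    unsigned my_ticket = next_ticket.fetch_add(1U, std::memory_order_relaxed);
    while (now_serving.load(std::memory_order_acquire) != my_ticket) {
      // The runtime waits via KMP_WAIT_PTR/__kmp_bakery_check; plain spinning here.
    }
  }

  bool try_acquire() {
    // Succeed only if our ticket is being served and nobody beat us to it
    // (the compare-exchange on next_ticket at lines 677-684).
    unsigned my_ticket = next_ticket.load(std::memory_order_relaxed);
    if (now_serving.load(std::memory_order_acquire) != my_ticket)
      return false;
    return next_ticket.compare_exchange_strong(my_ticket, my_ticket + 1,
                                               std::memory_order_acquire);
  }

  void release() {
    // Hand the lock to the next ticket holder (line 723).
    now_serving.fetch_add(1U, std::memory_order_release);
  }
};

Fairness is the point of this lock: tickets are served strictly in FIFO order, unlike the TAS and futex locks above.
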
1060 __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
1062 if (lck->lk.head_id >= 1) {
1063 t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
1069 __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
1076 return TCR_4(lck->lk.owner_id) - 1;
1080 return lck->lk.depth_locked != -1;
1091 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1092 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1292 if (lck->lk.initialized != lck) {
1304 lck->lk.owner_id = gtid + 1;
1309 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1344 if (lck->lk.initialized != lck) {
1354 lck->lk.owner_id = gtid + 1;
1361 volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1362 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1506 if (lck->lk.initialized != lck) {
1518 lck->lk.owner_id = 0;
1523 lck->lk.location = NULL;
1524 lck->lk.head_id = 0;
1525 lck->lk.tail_id = 0;
1526 lck->lk.next_ticket = 0;
1527 lck->lk.now_serving = 0;
1528 lck->lk.owner_id = 0; // no thread owns the lock.
1529 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
1530 lck->lk.initialized = lck;
1536 lck->lk.initialized = NULL;
1537 lck->lk.location = NULL;
1538 lck->lk.head_id = 0;
1539 lck->lk.tail_id = 0;
1540 lck->lk.next_ticket = 0;
1541 lck->lk.now_serving = 0;
1542 lck->lk.owner_id = 0;
1543 lck->lk.depth_locked = -1;
1548 if (lck->lk.initialized != lck) {
1566 lck->lk.depth_locked += 1;
1572 lck->lk.depth_locked = 1;
1574 lck->lk.owner_id = gtid + 1;
1583 if (lck->lk.initialized != lck) {
1598 retval = ++lck->lk.depth_locked;
1603 retval = lck->lk.depth_locked = 1;
1605 lck->lk.owner_id = gtid + 1;
1613 if (lck->lk.initialized != lck) {
1626 if (--(lck->lk.depth_locked) == 0) {
1628 lck->lk.owner_id = 0;
1640 if (lck->lk.initialized != lck) {
1657 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
1662 lck->lk.depth_locked = 0;
1668 if (lck->lk.initialized != lck) {
1683 return lck->lk.location;
1688 lck->lk.location = loc;
1692 return lck->lk.flags;
1697 lck->lk.flags = flags;
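
Lines 1060-1697 are the queuing lock: lk.head_id and lk.tail_id hold 1-based global thread ids that form a waiter queue linked through each thread's th.th_next_waiting field, so each waiter spins on its own location rather than a shared word. Below is a rough sketch of the same idea in MCS style, with explicit per-waiter nodes instead of gtid indices; this is an assumed simplification, and the real enqueue/dequeue state transitions are considerably more involved.

#include <atomic>

// MCS-style queue lock: each waiter spins on its own node, and the holder
// hands off to its successor on release. The kmp queuing lock realizes the
// same queue with head_id/tail_id gtid values and th.th_next_waiting links
// instead of explicit node pointers.
struct queue_lock {
  struct node {
    std::atomic<node *> next{nullptr};
    std::atomic<bool> locked{false};
  };
  std::atomic<node *> tail{nullptr};

  void acquire(node *me) {
    me->next.store(nullptr, std::memory_order_relaxed);
    node *prev = tail.exchange(me, std::memory_order_acq_rel);
    if (prev != nullptr) {
      // Queue was non-empty: arm our flag, link ourselves in, and spin locally.
      me->locked.store(true, std::memory_order_relaxed);
      prev->next.store(me, std::memory_order_release);
      while (me->locked.load(std::memory_order_acquire)) {
      }
    }
  }

  void release(node *me) {
    node *succ = me->next.load(std::memory_order_acquire);
    if (succ == nullptr) {
      node *expected = me;
      if (tail.compare_exchange_strong(expected, nullptr,
                                       std::memory_order_acq_rel))
        return;                               // nobody queued behind us
      while ((succ = me->next.load(std::memory_order_acquire)) == nullptr) {
      }                                       // a successor is mid-enqueue
    }
    succ->locked.store(false, std::memory_order_release); // hand off the lock
  }
};

In the runtime the per-waiter node is effectively the thread descriptor itself, which is why the queue can be stored as head/tail gtid integers rather than pointers.
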
1961 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
1970 bool res = lck->lk.head_id == 0;
1987 lck->lk.adaptive.badness = 0;
1993 kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
1994 if (newBadness > lck->lk.adaptive.max_badness) {
1997 lck->lk.adaptive.badness = newBadness;
2004 kmp_uint32 badness = lck->lk.adaptive.badness;
2005 kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
2014 int retries = lck->lk.adaptive.max_soft_retries;
2068 lck->lk.adaptive.acquire_attempts++;
2082 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2089 lck->lk.qlk.owner_id = gtid + 1;
2130 lck->lk.adaptive.acquire_attempts++;
2141 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2150 lck->lk.qlk.owner_id = gtid + 1;
2172 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2181 lck->lk.qlk.owner_id = 0;
2188 lck->lk.adaptive.badness = 0;
2189 lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0;
2190 lck->lk.adaptive.max_soft_retries =
2192 lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
2194 __kmp_zero_speculative_stats(&lck->lk.adaptive);
2201 __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
2209 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
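
Lines 1961-2209 are the adaptive (speculative) lock layered on top of the queuing lock (lk.qlk): lk.adaptive.badness and max_badness implement an exponential back-off from hardware speculation, max_soft_retries bounds retries per acquire, and acquire_attempts feeds the decision of whether to try speculating at all. Below is a sketch of just that back-off bookkeeping, mirroring lines 1987-2005 with assumed names; the transactional begin/abort intrinsics and the queuing-lock fallback are omitted.

#include <cstdint>

// Back-off bookkeeping used by the adaptive lock (lines 1987-2005, assumed names).
// badness gains one set bit per speculation failure (capped at max_badness),
// and speculation is only attempted when acquire_attempts & badness == 0,
// so repeated failures make speculative attempts geometrically rarer.
struct adaptive_backoff {
  std::uint32_t badness = 0;
  std::uint32_t max_badness = 0;   // taken from __kmp_adaptive_backoff_params
  std::uint32_t acquire_attempts = 0;

  void on_speculation_success() { badness = 0; }            // line 1987

  void on_speculation_failure() {
    std::uint32_t new_badness = (badness << 1) | 1;          // line 1993
    if (new_badness <= max_badness)                          // line 1994: cap growth
      badness = new_badness;
  }

  bool should_speculate() const {
    return (acquire_attempts & badness) == 0;                // lines 2004-2005
  }
};

Each speculation failure shifts another 1 bit into badness, so the (attempts & badness) == 0 test passes less and less often and the lock degrades gracefully toward plain queued acquisition.
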
2225 return lck->lk.owner_id - 1;
2229 return lck->lk.depth_locked != -1;
2234 kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
2235 kmp_uint64 mask = lck->lk.mask; // atomic load
2236 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2263 mask = lck->lk.mask; // atomic load
2264 polls = lck->lk.polls; // atomic load
2271 lck->lk.now_serving = ticket; // non-volatile store
2278 if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2279 __kmp_free(lck->lk.old_polls);
2280 lck->lk.old_polls = NULL;
2281 lck->lk.cleanup_ticket = 0;
2287 if (lck->lk.old_polls == NULL) {
2290 kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2298 num_polls = TCR_4(lck->lk.num_polls);
2309 kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2345 lck->lk.old_polls = old_polls;
2346 lck->lk.polls = polls; // atomic store
2350 lck->lk.num_polls = num_polls;
2351 lck->lk.mask = mask; // atomic store
2359 lck->lk.cleanup_ticket = lck->lk.next_ticket;
2374 if (lck->lk.initialized != lck) {
2386 lck->lk.owner_id = gtid + 1;
2393 kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
2394 std::atomic<kmp_uint64> *polls = lck->lk.polls;
2395 kmp_uint64 mask = lck->lk.mask; // atomic load
2398 if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
2403 lck->lk.now_serving = ticket; // non-volatile store
2420 if (lck->lk.initialized != lck) {
2430 lck->lk.owner_id = gtid + 1;
2438 kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
2439 std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
2440 kmp_uint64 mask = lck->lk.mask; // atomic load
2453 if (lck->lk.initialized != lck) {
2466 lck->lk.owner_id = 0;
2471 lck->lk.location = NULL;
2472 lck->lk.mask = 0;
2473 lck->lk.num_polls = 1;
2474 lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
2475 lck->lk.num_polls * sizeof(*(lck->lk.polls)));
2476 lck->lk.cleanup_ticket = 0;
2477 lck->lk.old_polls = NULL;
2478 lck->lk.next_ticket = 0;
2479 lck->lk.now_serving = 0;
2480 lck->lk.owner_id = 0; // no thread owns the lock.
2481 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
2482 lck->lk.initialized = lck;
2488 lck->lk.initialized = NULL;
2489 lck->lk.location = NULL;
2490 if (lck->lk.polls.load() != NULL) {
2491 __kmp_free(lck->lk.polls.load());
2492 lck->lk.polls = NULL;
2494 if (lck->lk.old_polls != NULL) {
2495 __kmp_free(lck->lk.old_polls);
2496 lck->lk.old_polls = NULL;
2498 lck->lk.mask = 0;
2499 lck->lk.num_polls = 0;
2500 lck->lk.cleanup_ticket = 0;
2501 lck->lk.next_ticket = 0;
2502 lck->lk.now_serving = 0;
2503 lck->lk.owner_id = 0;
2504 lck->lk.depth_locked = -1;
2509 if (lck->lk.initialized != lck) {
2527 lck->lk.depth_locked += 1;
2533 lck->lk.depth_locked = 1;
2535 lck->lk.owner_id = gtid + 1;
2543 if (lck->lk.initialized != lck) {
2558 retval = ++lck->lk.depth_locked;
2563 retval = lck->lk.depth_locked = 1;
2565 lck->lk.owner_id = gtid + 1;
2573 if (lck->lk.initialized != lck) {
2586 if (--(lck->lk.depth_locked) == 0) {
2588 lck->lk.owner_id = 0;
2599 if (lck->lk.initialized != lck) {
2616 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
2621 lck->lk.depth_locked = 0;
2626 if (lck->lk.initialized != lck) {
2641 return lck->lk.location;
2646 lck->lk.location = loc;
2650 return lck->lk.flags;
2655 lck->lk.flags = flags;
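
Lines 2225-2655 are the DRDPA ticket lock: a ticket is taken from lk.next_ticket, mapped through lk.mask into the lk.polls array, and the waiter spins on its own poll slot; the poll area is grown under contention, with lk.old_polls and lk.cleanup_ticket deferring the free of the previous array. Below is a fixed-size sketch of the distributed-polling idea with assumed names; the dynamic resizing of lines 2287-2359 is omitted.

#include <atomic>
#include <cstdint>

// Fixed-size sketch of distributed-polling ticket acquisition (lines 2234-2271).
// A waiter with ticket T spins on polls[T & mask] until that slot holds T, so
// concurrent waiters usually spin on different cache lines. The kmp lock also
// grows the polls array under contention; that resizing is omitted here.
struct drdpa_lock_sketch {
  static const std::uint64_t kNumPolls = 8;   // fixed here, dynamic in the runtime
  std::atomic<std::uint64_t> next_ticket{0};
  std::atomic<std::uint64_t> polls[kNumPolls];
  std::uint64_t now_serving = 0;              // written only by the lock holder
  std::uint64_t mask = kNumPolls - 1;

  drdpa_lock_sketch() {
    for (auto &p : polls)
      p.store(0, std::memory_order_relaxed);  // slot 0 starts at 0, so ticket 0 runs
  }

  void acquire() {
    std::uint64_t ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    while (polls[ticket & mask].load(std::memory_order_acquire) != ticket) {
    }
    now_serving = ticket;                     // non-atomic store, as at line 2271
  }

  void release() {
    std::uint64_t ticket = now_serving + 1;   // as at line 2438
    polls[ticket & mask].store(ticket, std::memory_order_release);
  }
};

Spreading waiters across polls[ticket & mask] keeps them off a single shared cache line, which is what distinguishes this from the plain ticket lock above.
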
2857 #define expand1(lk, op) \
2858 static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) { \
2859 __kmp_##op##_##lk##_##lock(&lock->lk); \
2861 #define expand2(lk, op) \
2862 static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock, \
2864 return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \
2866 #define expand3(lk, op) \
2867 static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock, \
2869 __kmp_set_##lk##_lock_flags(&lock->lk, flags); \
2871 #define expand4(lk, op) \
2872 static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock, \
2874 __kmp_set_##lk##_lock_location(&lock->lk, loc); \
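
Lines 2857-2874 are the expand* helper macros that stamp out thin forwarding wrappers: each takes the generic kmp_user_lock_p, selects the typed member lock->lk, and calls the same-named routine that operates on the concrete lock type. As an illustration, an assumed instantiation such as expand2(ticket, acquire) would produce roughly:

// Roughly what an instantiation like expand2(ticket, acquire) would generate
// (an assumed example, not copied from the source):
static int __kmp_acquire_ticket_lock(kmp_user_lock_p lock, kmp_int32 gtid) {
  return __kmp_acquire_ticket_lock(&lock->lk, gtid);
}

The generated function and the callee differ only in parameter type, so overload resolution picks the typed routine for the &lock->lk argument.
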
3380 return lck == lck->lk.self;
3392 return lck == lck->lk.initialized;
3411 return lck == lck->lk.initialized;
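
Lines 3380-3411 are the initialization checks: a lock counts as initialized only when lk.self (or lk.initialized) points back at the lock object itself, so zero-filled or byte-copied lock memory fails the test. Below is a small sketch of that self-pointer idiom with a hypothetical lock type.

// The self-pointer validity idiom from lines 3380-3411, with a hypothetical type:
// the lock counts as initialized only when the stored pointer refers back to
// the enclosing object, so zeroed or byte-copied lock memory fails the check.
struct checked_lock {
  struct inner {
    checked_lock *self; // set to the enclosing lock during init, NULL on destroy
    // ... remaining lock state ...
  } lk;
};

static bool lock_is_initialized(checked_lock *lck) {
  return lck == lck->lk.self;
}
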