Lines Matching defs:lck

172 extern int lck_rw_grab_want(lck_rw_t *lck);
173 extern int lck_rw_grab_shared(lck_rw_t *lck);
174 extern int lck_rw_held_read_or_upgrade(lck_rw_t *lck);
182 lck_rw_t *lck);
185 lck_rw_t *lck);
188 lck_rw_t *lck);
191 lck_rw_t *lck,
195 lck_rw_t *lck,
199 lck_rw_t *lck,
212 lck_spin_t *lck;
214 if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0)
215 lck_spin_init(lck, grp, attr);
217 return(lck);
225 lck_spin_t *lck,
228 lck_spin_destroy(lck, grp);
229 kfree(lck, sizeof(lck_spin_t));
237 lck_spin_t *lck,
241 usimple_lock_init((usimple_lock_t) lck, 0);
251 lck_spin_t *lck,
254 if (lck->interlock == LCK_SPIN_TAG_DESTROYED)
256 lck->interlock = LCK_SPIN_TAG_DESTROYED;
267 lck_spin_t *lck)
269 usimple_lock((usimple_lock_t) lck);
277 lck_spin_t *lck)
279 usimple_unlock((usimple_lock_t) lck);
288 lck_spin_t *lck)
290 return((boolean_t)usimple_lock_try((usimple_lock_t) lck));
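The fragments at 212-290 are the spin-lock entry points: lck_spin_alloc_init kallocs the lock and calls lck_spin_init, which on this architecture simply wraps usimple_lock_init, lock/unlock/try_lock forward to usimple_lock, usimple_unlock and usimple_lock_try, and lck_spin_destroy tags the interlock with LCK_SPIN_TAG_DESTROYED. A minimal caller-side sketch against the public <kern/locks.h> KPI follows; the group name and helper names are illustrative, not part of this file.

#include <kern/locks.h>

/* Illustrative names only; the group ties the locks into lock-group accounting. */
static lck_grp_t  *my_grp;
static lck_spin_t *my_spin;

static void
spin_setup(void)
{
	lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
	lck_attr_t     *attr     = lck_attr_alloc_init();

	my_grp  = lck_grp_alloc_init("com.example.spinlocks", grp_attr);
	my_spin = lck_spin_alloc_init(my_grp, attr);   /* kalloc + lck_spin_init */

	lck_grp_attr_free(grp_attr);
	lck_attr_free(attr);
}

static void
spin_use(void)
{
	lck_spin_lock(my_spin);            /* usimple_lock underneath */
	/* ... short, non-blocking critical section only ... */
	lck_spin_unlock(my_spin);

	if (lck_spin_try_lock(my_spin)) {  /* usimple_lock_try, never blocks */
		lck_spin_unlock(my_spin);
	}
}

static void
spin_teardown(void)
{
	lck_spin_free(my_spin, my_grp);    /* lck_spin_destroy + kfree */
	lck_grp_free(my_grp);
}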
818 lck_rw_t *lck;
820 if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) {
821 bzero(lck, sizeof(lck_rw_t));
822 lck_rw_init(lck, grp, attr);
825 return(lck);
833 lck_rw_t *lck,
835 lck_rw_destroy(lck, grp);
836 kfree(lck, sizeof(lck_rw_t));
844 lck_rw_t *lck,
851 hw_lock_byte_init(&lck->lck_rw_interlock);
852 lck->lck_rw_want_write = FALSE;
853 lck->lck_rw_want_upgrade = FALSE;
854 lck->lck_rw_shared_count = 0;
855 lck->lck_rw_can_sleep = TRUE;
856 lck->lck_r_waiting = lck->lck_w_waiting = 0;
857 lck->lck_rw_tag = 0;
858 lck->lck_rw_priv_excl = ((lck_attr->lck_attr_val &
870 lck_rw_t *lck,
873 if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED)
876 lck_rw_assert(lck, LCK_RW_ASSERT_NOTHELD);
878 lck->lck_rw_tag = LCK_RW_TAG_DESTROYED;
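Lines 818-878 are the read/write lock lifecycle: lck_rw_alloc_init kallocs and bzeroes the lock before lck_rw_init, which resets the interlock byte, the want_write/want_upgrade flags, the shared count and the waiter bits, and derives lck_rw_priv_excl from the lock attribute (line 858), i.e. whether a waiting writer shuts out new readers; lck_rw_destroy asserts LCK_RW_ASSERT_NOTHELD and tags the lock LCK_RW_TAG_DESTROYED. A caller-side sketch, assuming the usual <kern/locks.h> KPI; lck_attr_rw_shared_priority() is my assumption for the attribute call that flips the bit tested on line 858, so verify it against the header before relying on it.

#include <kern/locks.h>

static lck_grp_t *grp;   /* assumed initialized elsewhere via lck_grp_alloc_init() */

static lck_rw_t *
make_rw_lock(void)
{
	lck_attr_t *attr = lck_attr_alloc_init();
	lck_rw_t   *rw;

	/*
	 * Assumption: lck_attr_rw_shared_priority() sets the attribute bit
	 * that lck_rw_init() tests on line 858, so lck_rw_priv_excl ends up
	 * FALSE and a waiting writer does not shut out new readers.
	 * Omit the call to keep the default writer-priority behaviour.
	 */
	lck_attr_rw_shared_priority(attr);

	rw = lck_rw_alloc_init(grp, attr);   /* kalloc + bzero + lck_rw_init */
	lck_attr_free(attr);                 /* the attribute is only read at init time */
	return rw;
}

static void
free_rw_lock(lck_rw_t *rw)
{
	/* lck_rw_destroy() asserts LCK_RW_ASSERT_NOTHELD and tags the lock destroyed. */
	lck_rw_free(rw, grp);
}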
904 lck_interlock_lock(lck_rw_t *lck)
909 hw_lock_byte_lock(&lck->lck_rw_interlock);
915 lck_interlock_unlock(lck_rw_t *lck, boolean_t istate)
917 hw_lock_byte_unlock(&lck->lck_rw_interlock);
941 lck_rw_deadline_for_spin(lck_rw_t *lck)
943 if (lck->lck_rw_can_sleep) {
944 if (lck->lck_r_waiting || lck->lck_w_waiting || lck->lck_rw_shared_count > machine_info.max_cpus) {
968 lck_rw_t *lck)
987 while ( !lck_rw_grab_want(lck)) {
1000 readers_at_sleep = lck->lck_rw_shared_count;
1008 deadline = lck_rw_deadline_for_spin(lck);
1010 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
1012 while (((gotlock = lck_rw_grab_want(lck)) == 0) && mach_absolute_time() < deadline)
1015 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_END, (int)lck, 0, 0, gotlock, 0);
1024 if (lck->lck_rw_can_sleep) {
1026 istate = lck_interlock_lock(lck);
1028 if (lck->lck_rw_want_write) {
1030 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
1032 lck->lck_w_waiting = TRUE;
1034 res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
1035 lck_interlock_unlock(lck, istate);
1041 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_END, (int)lck, res, slept, 0, 0);
1043 lck->lck_rw_want_write = TRUE;
1044 lck_interlock_unlock(lck, istate);
1060 while (lck_rw_held_read_or_upgrade(lck)) {
1079 readers_at_sleep = lck->lck_rw_shared_count;
1087 deadline = lck_rw_deadline_for_spin(lck);
1089 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
1091 while ((lockheld = lck_rw_held_read_or_upgrade(lck)) && mach_absolute_time() < deadline)
1094 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_END, (int)lck, 0, 0, lockheld, 0);
1103 if (lck->lck_rw_can_sleep) {
1105 istate = lck_interlock_lock(lck);
1107 if (lck->lck_rw_shared_count != 0 || lck->lck_rw_want_upgrade) {
1108 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
1110 lck->lck_w_waiting = TRUE;
1112 res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
1113 lck_interlock_unlock(lck, istate);
1119 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_WAIT_CODE) | DBG_FUNC_END, (int)lck, res, slept, 0, 0);
1121 lck_interlock_unlock(lck, istate);
1144 LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lck,
1153 LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lck,
1158 LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lck, 1);
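Lines 968-1158 are the exclusive-acquire slow path: the thread first spins on lck_rw_grab_want and then on lck_rw_held_read_or_upgrade up to the deadline computed by lck_rw_deadline_for_spin (lines 941-944, which effectively disables spinning once readers or writers are already waiting or the shared count exceeds machine_info.max_cpus), and only then sets lck_w_waiting and blocks via assert_wait on RW_LOCK_WRITER_EVENT, with LOCKSTAT probes recording the spin and block times. Callers never see any of this; a hedged usage sketch with a hypothetical data structure:

#include <kern/locks.h>

/* Hypothetical structure protected by an rw lock; not part of this file. */
struct table {
	lck_rw_t *rw;
	int       nentries;
};

static void
table_insert(struct table *t)
{
	/*
	 * May spin briefly (bounded by lck_rw_deadline_for_spin) and then
	 * sleep on RW_LOCK_WRITER_EVENT if readers or another writer are in.
	 */
	lck_rw_lock_exclusive(t->rw);
	t->nentries++;
	lck_rw_done(t->rw);   /* releases whichever mode the caller holds */
}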
1182 lck_rw_t *lck,
1212 thread_wakeup(RW_LOCK_WRITER_EVENT(lck));
1215 thread_wakeup(RW_LOCK_READER_EVENT(lck));
1223 LOCKSTAT_RECORD(LS_LCK_RW_DONE_RELEASE, lck, lock_type == LCK_RW_TYPE_SHARED ? 0 : 1);
1235 lck_rw_t *lck,
1239 lck_rw_unlock_shared(lck);
1241 lck_rw_unlock_exclusive(lck);
1252 lck_rw_t *lck)
1256 ret = lck_rw_done(lck);
1268 lck_rw_t *lck)
1272 ret = lck_rw_done(lck);
1284 lck_rw_t *lck,
1288 lck_rw_lock_shared(lck);
1290 lck_rw_lock_exclusive(lck);
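Lines 1182-1290 are the type-dispatched entry points: lck_rw_done_gen wakes RW_LOCK_WRITER_EVENT or RW_LOCK_READER_EVENT depending on who is waiting, and lck_rw_unlock, lck_rw_lock and their shared/exclusive wrappers simply switch on LCK_RW_TYPE_SHARED versus LCK_RW_TYPE_EXCLUSIVE. A small sketch of the type-based form; the helper name is illustrative:

#include <kern/locks.h>

/* 'rw' is an initialized lck_rw_t; the helper name is illustrative. */
static void
with_table_lock(lck_rw_t *rw, boolean_t write)
{
	lck_rw_type_t type = write ? LCK_RW_TYPE_EXCLUSIVE : LCK_RW_TYPE_SHARED;

	lck_rw_lock(rw, type);     /* dispatches to the shared or exclusive acquire */
	/* ... critical section ... */
	lck_rw_unlock(rw, type);   /* or lck_rw_done(rw), which infers the held mode */
}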
1305 lck_rw_t *lck)
1320 while ( !lck_rw_grab_shared(lck)) {
1333 readers_at_sleep = lck->lck_rw_shared_count;
1341 deadline = lck_rw_deadline_for_spin(lck);
1344 (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);
1346 while (((gotlock = lck_rw_grab_shared(lck)) == 0) && mach_absolute_time() < deadline)
1350 (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, gotlock, 0);
1359 if (lck->lck_rw_can_sleep) {
1361 istate = lck_interlock_lock(lck);
1363 if ((lck->lck_rw_want_write || lck->lck_rw_want_upgrade) &&
1364 ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl)) {
1367 (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0);
1369 lck->lck_r_waiting = TRUE;
1371 res = assert_wait(RW_LOCK_READER_EVENT(lck), THREAD_UNINT);
1372 lck_interlock_unlock(lck, istate);
1379 (int)lck, res, slept, 0, 0);
1381 lck->lck_rw_shared_count++;
1382 lck_interlock_unlock(lck, istate);
1391 LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0);
1393 LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck,
1398 LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lck, 0);
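Lines 1305-1398 are the shared-acquire slow path, the mirror image of the exclusive one: spin on lck_rw_grab_shared until the deadline, then sleep on RW_LOCK_READER_EVENT with lck_r_waiting set. Note from lines 1363-1364 that a reader only waits when a writer wants (or is upgrading) the lock and either no readers currently hold it or the lock was initialized writer-priority (lck_rw_priv_excl). A caller-side sketch, reusing the hypothetical structure from above:

#include <kern/locks.h>

struct table {            /* same hypothetical structure as in the earlier sketch */
	lck_rw_t *rw;
	int       nentries;
};

static int
table_count(struct table *t)
{
	int n;

	lck_rw_lock_shared(t->rw);   /* concurrent readers are admitted together */
	n = t->nentries;
	lck_rw_done(t->rw);
	return n;
}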
1413 lck_rw_t *lck,
1446 thread_wakeup(RW_LOCK_WRITER_EVENT(lck));
1449 (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);
1465 lck_rw_t *lck)
1480 while (lck->lck_rw_shared_count != 0) {
1493 readers_at_sleep = lck->lck_rw_shared_count;
1501 deadline = lck_rw_deadline_for_spin(lck);
1504 (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
1506 while ((still_shared = lck->lck_rw_shared_count) && mach_absolute_time() < deadline)
1510 (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
1519 if (lck->lck_rw_can_sleep) {
1521 istate = lck_interlock_lock(lck);
1523 if (lck->lck_rw_shared_count != 0) {
1525 (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
1527 lck->lck_w_waiting = TRUE;
1529 res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
1530 lck_interlock_unlock(lck, istate);
1537 (int)lck, res, slept, 0, 0);
1539 lck_interlock_unlock(lck, istate);
1550 LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0);
1552 LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck,
1557 LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lck, 1);
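Lines 1465-1557 wait out the remaining readers after an upgrade request: the upgrader spins and then sleeps until lck_rw_shared_count drains to zero. At the KPI level this is lck_rw_lock_shared_to_exclusive(), which returns a boolean_t; my understanding (an assumption worth checking against <kern/locks.h>) is that on failure the shared hold has already been dropped, so the caller must re-lock and re-validate, as in this sketch:

#include <kern/locks.h>

struct table {            /* hypothetical structure, as above */
	lck_rw_t *rw;
	int       nentries;
};

static void
table_update(struct table *t)
{
	lck_rw_lock_shared(t->rw);

	if (!lck_rw_lock_shared_to_exclusive(t->rw)) {
		/*
		 * Assumption: on failure the shared hold has already been
		 * released (another upgrader won the race), so take the lock
		 * exclusively from scratch and re-validate anything read
		 * under the shared hold.
		 */
		lck_rw_lock_exclusive(t->rw);
	}
	t->nentries++;
	lck_rw_done(t->rw);
}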
1573 lck_rw_t *lck,
1587 (int)lck, fake_lck->lck_rw_want_write, fake_lck->lck_rw_want_upgrade, 0, 0);
1598 thread_wakeup(RW_LOCK_READER_EVENT(lck));
1601 (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0);
1604 LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0);
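Lines 1573-1604 are the downgrade path, which converts the exclusive hold into a shared one and wakes waiting readers (thread_wakeup on RW_LOCK_READER_EVENT). A usage sketch with the same hypothetical structure:

#include <kern/locks.h>

struct table {            /* hypothetical structure, as above */
	lck_rw_t *rw;
	int       nentries;
};

static void
table_insert_then_scan(struct table *t)
{
	lck_rw_lock_exclusive(t->rw);
	t->nentries++;

	/*
	 * Drop to a shared hold without ever releasing the lock; waiting
	 * readers are woken (thread_wakeup on RW_LOCK_READER_EVENT).
	 */
	lck_rw_lock_exclusive_to_shared(t->rw);

	/* ... read-only work under the shared hold ... */
	lck_rw_done(t->rw);
}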
1614 lck_rw_t *lck,
1618 return(lck_rw_try_lock_shared(lck));
1620 return(lck_rw_try_lock_exclusive(lck));
1629 lck_rw_t *lck,
1634 if (lck->lck_rw_shared_count != 0) {
1639 if ((lck->lck_rw_want_write ||
1640 lck->lck_rw_want_upgrade) &&
1641 lck->lck_rw_shared_count == 0) {
1646 if (lck->lck_rw_want_write ||
1647 lck->lck_rw_want_upgrade ||
1648 lck->lck_rw_shared_count != 0) {
1653 if (!(lck->lck_rw_want_write ||
1654 lck->lck_rw_want_upgrade ||
1655 lck->lck_rw_shared_count != 0)) {
1663 panic("rw lock (%p)%s held (mode=%u), first word %08x\n", lck, (type == LCK_RW_ASSERT_NOTHELD ? "" : " not"), type, *(uint32_t *)lck);
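Lines 1614-1663 cover the type-dispatched try-lock and lck_rw_assert, whose modes verify a shared hold, an exclusive hold, any hold, or no hold at all, panicking with the lock's first word otherwise (line 1663). A sketch combining the two; the helper name is illustrative:

#include <kern/locks.h>

/* 'rw' is an initialized lck_rw_t; the helper name is illustrative. */
static boolean_t
try_read(lck_rw_t *rw)
{
	if (!lck_rw_try_lock(rw, LCK_RW_TYPE_SHARED))
		return FALSE;                        /* would have had to wait */

	lck_rw_assert(rw, LCK_RW_ASSERT_SHARED); /* sanity-check the hold mode */
	/* ... */
	lck_rw_done(rw);
	return TRUE;
}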
1692 lck_mtx_t *lck;
1694 if ((lck = (lck_mtx_t *)zalloc(lck_mtx_zone)) != 0)
1695 lck_mtx_init(lck, grp, attr);
1697 if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
1698 lck_mtx_init(lck, grp, attr);
1700 return(lck);
1708 lck_mtx_t *lck,
1711 lck_mtx_destroy(lck, grp);
1713 zfree(lck_mtx_zone, lck);
1715 kfree(lck, sizeof(lck_mtx_t));
1724 lck_mtx_ext_t *lck,
1728 bzero((void *)lck, sizeof(lck_mtx_ext_t));
1731 lck->lck_mtx_deb.type = MUTEX_TAG;
1732 lck->lck_mtx_attr |= LCK_MTX_ATTR_DEBUG;
1735 lck->lck_mtx_grp = grp;
1738 lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT;
1740 lck->lck_mtx.lck_mtx_is_ext = 1;
1741 lck->lck_mtx.lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
1749 lck_mtx_t *lck,
1764 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1765 lck->lck_mtx_ptr = lck_ext;
1768 lck->lck_mtx_owner = 0;
1769 lck->lck_mtx_state = 0;
1771 lck->lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
1781 lck_mtx_t *lck,
1795 lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT;
1796 lck->lck_mtx_ptr = lck_ext;
1798 lck->lck_mtx_owner = 0;
1799 lck->lck_mtx_state = 0;
1801 lck->lck_mtx_sw.lck_mtxd.lck_mtxd_pad32 = 0xFFFFFFFF;
1812 lck_mtx_t *lck,
1817 if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED)
1820 lck_mtx_assert(lck, LCK_MTX_ASSERT_NOTOWNED);
1822 lck_is_indirect = (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT);
1824 lck_mtx_lock_mark_destroyed(lck);
1827 kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t));
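The final group, 1692-1827, is the mutex lifecycle: lck_mtx_alloc_init draws from lck_mtx_zone when available (falling back to kalloc), the extended init paths set up the indirect, debug-tagged form behind LCK_MTX_TAG_INDIRECT, and lck_mtx_destroy asserts LCK_MTX_ASSERT_NOTOWNED, marks the lock destroyed and frees any lck_mtx_ext_t. A caller-side sketch; LCK_ATTR_NULL requests default attributes and the group is assumed to exist already:

#include <kern/locks.h>

static lck_grp_t *grp;   /* assumed initialized via lck_grp_alloc_init() */
static lck_mtx_t *mtx;

static void
mtx_setup(void)
{
	/* zalloc from lck_mtx_zone (or kalloc) + lck_mtx_init */
	mtx = lck_mtx_alloc_init(grp, LCK_ATTR_NULL);
}

static void
mtx_use(void)
{
	lck_mtx_lock(mtx);
	lck_mtx_assert(mtx, LCK_MTX_ASSERT_OWNED);
	/* ... blocking is allowed while a mutex is held, unlike a spin lock ... */
	lck_mtx_unlock(mtx);
}

static void
mtx_teardown(void)
{
	/* lck_mtx_destroy + zfree/kfree, depending on how it was allocated */
	lck_mtx_free(mtx, grp);
}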