Lines Matching refs:rnh_lock

152 * Because rnh_lock and rt_lock for the entry are held during those
158 * - Routing lock (rnh_lock)
210 * Insertion and removal from llinfo_nd6 must be done with rnh_lock held.
213 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \
222 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED); \
654 * therefore protected by rnh_lock. For obvious reasons, we cannot
655 * hold rnh_lock across calls that might lead to code paths which
656 * attempt to acquire rnh_lock, else we deadlock. Hence for such
657 * cases we drop rt_lock and rnh_lock, make the calls, and repeat the
664 lck_mtx_lock(rnh_lock);
673 /* ln_next/prev/rt is protected by rnh_lock */
740 lck_mtx_unlock(rnh_lock);
762 lck_mtx_unlock(rnh_lock);
767 lck_mtx_unlock(rnh_lock);
771 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
788 lck_mtx_unlock(rnh_lock);
790 lck_mtx_assert(rnh_lock,
806 lck_mtx_unlock(rnh_lock);
809 lck_mtx_assert(rnh_lock,
826 lck_mtx_unlock(rnh_lock);
832 lck_mtx_unlock(rnh_lock);
835 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
844 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
858 lck_mtx_unlock(rnh_lock);
1292 lck_mtx_lock(rnh_lock);
1305 lck_mtx_unlock(rnh_lock);
1316 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1323 lck_mtx_unlock(rnh_lock);
1329 * This routine does not require rnh_lock to be held by the caller,
1351 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
1402 lck_mtx_lock(rnh_lock);
1417 lck_mtx_unlock(rnh_lock);
1613 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);
1787 lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
3000 * calls nd6_lookup() and acquires rnh_lock.
3055 lck_mtx_lock(rnh_lock);
3076 lck_mtx_unlock(rnh_lock);
3087 lck_mtx_unlock(rnh_lock);
3159 * nd6_lookup() and acquires rnh_lock.
3240 lck_mtx_lock(rnh_lock);
3247 lck_mtx_unlock(rnh_lock);
3289 lck_mtx_lock(rnh_lock);
3304 lck_mtx_unlock(rnh_lock);
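
The matches at source lines 210-222 state the rule that insertion into and removal from the llinfo_nd6 list happen only with rnh_lock held, enforced up front by lck_mtx_assert(). Below is a minimal sketch of that pattern, assuming (as the comment at line 673 suggests) a doubly linked list threaded through ln_next/ln_prev with llinfo_nd6 as a sentinel head; nd6_llinfo_enqueue_head() is a hypothetical helper for illustration, not the macro the file actually uses.

#include <kern/locks.h>               /* lck_mtx_assert(), LCK_MTX_ASSERT_OWNED */
#include <netinet6/nd6.h>             /* struct llinfo_nd6 */

extern lck_mtx_t *rnh_lock;           /* routing lock, defined in net/route.c */
extern struct llinfo_nd6 llinfo_nd6;  /* list head kept in nd6.c */

/*
 * Hypothetical helper illustrating source lines 210-222: the
 * llinfo_nd6 list may only be modified while rnh_lock is held,
 * so the very first thing an insert/remove path does is assert it.
 */
static void
nd6_llinfo_enqueue_head(struct llinfo_nd6 *ln)
{
	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* splice at the head of the sentinel-based circular list */
	ln->ln_next = llinfo_nd6.ln_next;
	ln->ln_prev = &llinfo_nd6;
	llinfo_nd6.ln_next->ln_prev = ln;
	llinfo_nd6.ln_next = ln;
}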
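The comment at source lines 654-657 explains why the list walker cannot hold rnh_lock across calls that may themselves take rnh_lock, and the matches at 664-858 show the resulting drop/retake choreography. The sketch below illustrates that pattern under the same assumptions: needs_upcall() and do_upcall() are placeholders for whatever work forces the drop, the restart label is mine, and the RT_LOCK()/RT_ADDREF()/RT_REMREF() macros are the route-entry helpers from net/route.h.

#include <kern/locks.h>
#include <net/route.h>                /* struct rtentry, RT_LOCK()/RT_UNLOCK() */
#include <netinet6/nd6.h>

extern lck_mtx_t *rnh_lock;
extern struct llinfo_nd6 llinfo_nd6;

static int  needs_upcall(struct rtentry *);   /* hypothetical predicate */
static void do_upcall(struct rtentry *);      /* hypothetical; may take rnh_lock */

/*
 * Sketch of the deadlock-avoidance pattern described at source lines
 * 654-657: walk llinfo_nd6 under rnh_lock, but release both rt_lock
 * and rnh_lock before any call that might reacquire rnh_lock, then
 * retake the lock and restart the walk since the list may have changed.
 */
static void
nd6_walk_sketch(void)
{
	struct llinfo_nd6 *ln;

	lck_mtx_lock(rnh_lock);
again:
	for (ln = llinfo_nd6.ln_next; ln != &llinfo_nd6; ln = ln->ln_next) {
		struct rtentry *rt = ln->ln_rt;  /* ln_rt protected by rnh_lock */

		RT_LOCK(rt);
		if (needs_upcall(rt)) {
			RT_ADDREF_LOCKED(rt);    /* keep rt alive across the drop */
			RT_UNLOCK(rt);
			lck_mtx_unlock(rnh_lock);
			do_upcall(rt);           /* safe: no ND locks held here */
			RT_REMREF(rt);
			lck_mtx_lock(rnh_lock);
			goto again;              /* list may have been edited */
		}
		RT_UNLOCK(rt);
	}
	lck_mtx_unlock(rnh_lock);
}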
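The NOTOWNED assertions at source lines 771, 1316 and 1613, together with the comments at 1329, 3000 and 3159, reflect the converse rule: a routine that itself acquires rnh_lock (directly or via nd6_lookup()) must be entered with the lock not held, and it asserts as much on entry. A minimal sketch of that entry-point shape follows; nd6_resolve_sketch() and its body are illustrative only, not the real routine.

#include <sys/socket.h>
#include <kern/locks.h>
#include <net/route.h>

extern lck_mtx_t *rnh_lock;

/*
 * Illustrative entry point: because the body takes rnh_lock itself
 * (as nd6_lookup() does, per the comments at source lines 3000/3159),
 * it first asserts that the caller is not already holding it.
 */
static struct rtentry *
nd6_resolve_sketch(struct sockaddr *dst)
{
	struct rtentry *rt;

	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_NOTOWNED);

	lck_mtx_lock(rnh_lock);
	/* ... radix-tree lookup of the neighbor-cache route goes here ... */
	rt = NULL;                         /* placeholder result for the sketch */
	lck_mtx_unlock(rnh_lock);

	return (rt);
}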