Lines matching refs:lock

270 /* nests inside the rq lock: */
302 raw_spinlock_t lock;
354 raw_spinlock_t lock;
616 raw_spinlock_t lock ____cacheline_aligned;
713 /* Nests inside the rq lock: */
897 /* These atomics are updated outside of a lock */
981 * Locking rule: those places that want to lock multiple runqueues
982 * (such as the load balancing or the thread migration code), lock
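
The rule quoted above (981-982) is the classic ordered-acquisition discipline for taking more than one runqueue lock. A minimal sketch of what it implies, assuming an address-based global order and the rq->lock naming used in the annotations below; the helper name is illustrative, not a kernel function:

/* Illustrative only: acquire two rq locks in one global order so
 * concurrent lockers of the same pair cannot deadlock. */
static void lock_two_rqs_sketch(struct rq *rq1, struct rq *rq2)
{
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);	/* only one lock exists */
	} else if (rq1 < rq2) {			/* address order as the global order */
		raw_spin_lock(&rq1->lock);
		raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
	} else {
		raw_spin_lock(&rq2->lock);
		raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
	}
}
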
986 /* runqueue lock: */
1031 * it on another CPU. Always updated under the runqueue lock:
1149 /* Must be inspected within a rcu lock section */
1495 * made to update_rq_clock() since the last time rq::lock was pinned.
1653 __acquires(rq->lock);
1657 __acquires(rq->lock);
1660 __releases(rq->lock)
1668 __releases(rq->lock)
1677 _T->rq = task_rq_lock(_T->lock, &_T->rf),
1678 task_rq_unlock(_T->rq, _T->lock, &_T->rf),
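
The two lines above (1677-1678) wire task_rq_lock()/task_rq_unlock() into the scope-based guard infrastructure. A hedged usage sketch, assuming the CLASS() helper from linux/cleanup.h; the surrounding function is hypothetical:

/* Sketch: both p->pi_lock and the task's rq lock are taken by the
 * guard constructor and dropped automatically at end of scope. */
static void example_task_rq_guard(struct task_struct *p)
{
	CLASS(task_rq_lock, rq_guard)(p);
	struct rq *rq = rq_guard.rq;

	update_rq_clock(rq);	/* safe: rq lock held, p cannot migrate */
}
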
1683 __acquires(rq->lock)
1691 __acquires(rq->lock)
1699 __acquires(rq->lock)
1707 __releases(rq->lock)
1715 __releases(rq->lock)
1723 __releases(rq->lock)
1730 rq_lock(_T->lock, &_T->rf),
1731 rq_unlock(_T->lock, &_T->rf),
1735 rq_lock_irq(_T->lock, &_T->rf),
1736 rq_unlock_irq(_T->lock, &_T->rf),
1740 rq_lock_irqsave(_T->lock, &_T->rf),
1741 rq_unlock_irqrestore(_T->lock, &_T->rf),
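
Lines 1730-1741 define the matching guards for the plain, _irq and _irqsave rq lock variants. A minimal usage sketch, assuming guard() from linux/cleanup.h; the function is hypothetical:

static void example_rq_guard(struct rq *rq)
{
	guard(rq_lock_irqsave)(rq);	/* rq lock held, IRQ state saved */

	update_rq_clock(rq);		/* lock dropped, IRQs restored at scope end */
}
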
1746 __acquires(rq->lock)
2021 * holding both task_struct::pi_lock and rq::lock.
2302 * The switched_from() call is allowed to drop rq->lock, therefore we
2304 * rq->lock. They are however serialized by p->pi_lock.
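
Lines 2302-2304 lean on the invariant that p->pi_lock serializes callers even while rq->lock is dropped. The reason is visible in how task_rq_lock() (annotated around 1653-1657 above) has to loop: pi_lock is taken first, then the rq lock, retrying if the task migrated in between. A simplified sketch; the real function also waits out in-flight migrations and uses struct rq_flags:

static struct rq *task_rq_lock_sketch(struct task_struct *p,
				      unsigned long *flags)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;	/* both locks held, p is stable */
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
	}
}
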
2648 * acquire rq lock instead of rq_lock(). So at the end of these two functions
2666 static inline class_##name##_t class_##name##_constructor(type *lock, type *lock2) \
2667 { class_##name##_t _t = { .lock = lock, .lock2 = lock2 }, *_T = &_t; \
2681 * double_rq_lock(0,3); will take core-0, core-1 lock
2682 * double_rq_lock(1,2); will take core-1, core-0 lock
2692 * __sched_core_flip() relies on SMT having cpu-id lock order.
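
Lines 2681-2692 describe the lock order under core scheduling: SMT siblings share one lock, so ordering goes by core first, then by cpu id. A sketch of the comparison those comments imply, assuming the CONFIG_SCHED_CORE rq->core backpointer; simplified from the kernel's rq_order_less():

static inline bool rq_order_less_sketch(struct rq *rq1, struct rq *rq2)
{
	if (rq1->core->cpu != rq2->core->cpu)
		return rq1->core->cpu < rq2->core->cpu;	/* order by core */
	/* __sched_core_flip() relies on SMT having cpu-id lock order */
	return rq1->cpu < rq2->cpu;			/* tie-break by cpu id */
}
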
2711 __releases(this_rq->lock)
2712 __acquires(busiest->lock)
2713 __acquires(this_rq->lock)
2726 * grant the double lock to lower CPUs over higher ids under contention,
2730 __releases(this_rq->lock)
2731 __acquires(busiest->lock)
2732 __acquires(this_rq->lock)
2755 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
2765 __releases(busiest->lock)
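
The annotations at 2711-2732 and the comment at 2755 describe the double-lock dance: this_rq is already held, busiest must be taken too, and if busiest orders first then this_rq has to be dropped and re-acquired, which the return value reports. A simplified sketch; address order stands in for the kernel's id-based ordering, and the names are illustrative:

static int double_lock_balance_sketch(struct rq *this_rq, struct rq *busiest)
{
	if (raw_spin_trylock(&busiest->lock))
		return 0;			/* no drop needed */

	if (busiest < this_rq) {		/* busiest orders first */
		raw_spin_unlock(&this_rq->lock);
		raw_spin_lock(&busiest->lock);
		raw_spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
		return 1;			/* caller must revalidate this_rq */
	}

	raw_spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
	return 0;
}
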
2806 double_raw_lock(_T->lock, _T->lock2),
2807 double_raw_unlock(_T->lock, _T->lock2))
2816 __releases(rq1->lock)
2817 __releases(rq2->lock)
2822 __release(rq2->lock);
2833 * double_rq_lock - safely lock two runqueues
2839 __acquires(rq1->lock)
2840 __acquires(rq2->lock)
2845 __acquire(rq2->lock); /* Fake it out ;) */
2856 __releases(rq1->lock)
2857 __releases(rq2->lock)
2861 __release(rq2->lock);
2867 double_rq_lock(_T->lock, _T->lock2),
2868 double_rq_unlock(_T->lock, _T->lock2))
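
Lines 2839-2868 show double_rq_lock() faking __acquire(rq2->lock) when rq1 == rq2 (only one lock really exists, but sparse is told both are held so double_rq_unlock() stays unconditional), then wrap the pair in a two-lock guard. A usage sketch of that guard with a hypothetical caller; note the real double_rq_lock() expects IRQs disabled:

static void move_between_rqs_example(struct rq *src, struct rq *dst)
{
	guard(double_rq_lock)(src, dst);	/* either argument order works */

	/* both rq locks held (one, if src == dst); dropped at scope end */
}
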
3262 * the MM_CID_UNSET state without holding the rq lock, but the rq lock needs to
3323 * which owns a cid without holding a rq lock.
3353 * All allocations (even those using the cid_lock) are lock-free. If
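
Lines 3262-3353 sketch the mm_cid scheme: concurrency ids are claimed with atomic bitmap operations, so even the cid_lock fallback path allocates without blocking progress. A hedged illustration of such a lock-free bitmap claim; the helper name is hypothetical, and the kernel's actual mm_cid_get() path carries considerably more fallback logic:

static int claim_cid_sketch(unsigned long *cidmask, unsigned int max_cids)
{
	unsigned int cid;

	for (;;) {
		cid = find_first_zero_bit(cidmask, max_cids);
		if (cid >= max_cids)
			return -1;		/* all cids in use */
		if (!test_and_set_bit(cid, cidmask))
			return cid;		/* claimed atomically */
		/* lost the race for this bit; rescan and retry */
	}
}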