Lines matching refs: list_lock (the number on each entry is the line in the source file where the reference occurs)

54  *   2. node->list_lock (Spinlock)
102 * list_lock
104 * The list_lock protects the partial and full list on each node and
110 * The list_lock is a centralized lock and thus we avoid taking it as
116 * For debug caches, all allocations are forced to go through a list_lock
142 * Interrupts are disabled as part of list_lock or local_lock operations, or
426 spinlock_t list_lock;
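The hits above sketch the lock's role: list_lock is a per-node spinlock that guards that node's partial list (and, for debug caches, the full list). Below is a minimal sketch of the surrounding layout, assuming a kmem_cache_node-like structure; the fields other than list_lock are paraphrased from the comments above, not quoted from the source:

    /* Sketch of the per-node bookkeeping guarded by list_lock;
     * not the exact kernel definition. */
    struct kmem_cache_node {
        spinlock_t list_lock;        /* protects the lists below */
        unsigned long nr_partial;    /* length of the partial list */
        struct list_head partial;    /* partially filled slabs */
    #ifdef CONFIG_SLUB_DEBUG
        struct list_head full;       /* fully allocated slabs, debug caches only */
    #endif
    };
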
1483 lockdep_assert_held(&n->list_lock);
1492 lockdep_assert_held(&n->list_lock);
2565 lockdep_assert_held(&n->list_lock);
2572 lockdep_assert_held(&n->list_lock);
2589 lockdep_assert_held(&n->list_lock);
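The lockdep_assert_held() hits mark list-manipulating helpers that leave locking to the caller. A hypothetical helper in that style (add_partial_locked is an illustrative name, not from the source):

    static void add_partial_locked(struct kmem_cache_node *n,
                                   struct slab *slab)
    {
        /* Caller must already hold n->list_lock; lockdep verifies
         * this in debug builds and compiles away otherwise. */
        lockdep_assert_held(&n->list_lock);
        list_add(&slab->slab_list, &n->partial);
        n->nr_partial++;
    }
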
2634 spin_lock_irqsave(&n->list_lock, flags);
2642 spin_unlock_irqrestore(&n->list_lock, flags);
2675 spin_lock_irqsave(&n->list_lock, flags);
2709 spin_unlock_irqrestore(&n->list_lock, flags);
2948 spin_lock_irqsave(&n->list_lock, flags);
2950 spin_unlock_irqrestore(&n->list_lock, flags);
2971 spin_unlock_irqrestore(&n->list_lock, flags);
2974 spin_lock_irqsave(&n->list_lock, flags);
2987 spin_unlock_irqrestore(&n->list_lock, flags);
3311 spin_lock_irqsave(&n->list_lock, flags);
3314 spin_unlock_irqrestore(&n->list_lock, flags);
3328 spin_lock_irqsave(&n->list_lock, flags);
3353 spin_unlock_irqrestore(&n->list_lock, flags);
4183 spin_lock_irqsave(&n->list_lock, flags);
4216 * Update the counters while still holding n->list_lock to
4222 spin_unlock_irqrestore(&n->list_lock, flags);
4260 spin_unlock_irqrestore(&n->list_lock, flags);
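The "Update the counters while still holding n->list_lock" hit names an ordering rule: bookkeeping derived from the lists is updated before the lock is dropped, so no other CPU can observe a list and its counters out of sync. A hedged sketch of that shape, inside a context where n, slab, and flags are in scope:

    spin_lock_irqsave(&n->list_lock, flags);
    list_del(&slab->slab_list);
    /* Update the counters while still holding n->list_lock, so the
     * list and its length never disagree for concurrent readers. */
    n->nr_partial--;
    spin_unlock_irqrestore(&n->list_lock, flags);
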
4275 * Speculatively acquire the list_lock.
4277 * drop the list_lock without any processing.
4279 * Otherwise the list_lock will synchronize with
4282 spin_lock_irqsave(&n->list_lock, flags);
4318 spin_unlock_irqrestore(&n->list_lock, flags);
4333 spin_unlock_irqrestore(&n->list_lock, flags);
4345 spin_unlock_irqrestore(&n->list_lock, flags);
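The "Speculatively acquire the list_lock" comments describe a free-path pattern: take the lock before attempting a cmpxchg on the slab, because success may require moving the slab between lists; on failure the lock is dropped without any processing and the operation retries. A minimal sketch under those assumptions (the helpers named below are hypothetical):

    static void slow_free_sketch(struct kmem_cache_node *n, struct slab *slab)
    {
        unsigned long flags;

        for (;;) {
            /*
             * Speculatively acquire the list_lock: if the cmpxchg
             * succeeds, the slab may have to move between the
             * node's lists, which must happen under the lock.
             */
            spin_lock_irqsave(&n->list_lock, flags);
            if (free_cmpxchg(slab))            /* hypothetical */
                break;
            /* Raced with another CPU: drop the list_lock without
             * any processing and retry. */
            spin_unlock_irqrestore(&n->list_lock, flags);
        }

        /* Still holding the lock: list_lock synchronizes with other
         * processors updating this node's list of slabs. */
        move_between_lists(n, slab);           /* hypothetical */
        spin_unlock_irqrestore(&n->list_lock, flags);
    }
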
4828 * take the list_lock.
4847 * activity on the partial lists which requires taking the list_lock. This is
4947 spin_lock_init(&n->list_lock);
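The spin_lock_init() hit comes from per-node setup; the lock must be valid before the first list operation on the node. A sketch of such an initializer, assuming the node layout sketched earlier (the function name is illustrative):

    static void init_node_sketch(struct kmem_cache_node *n)
    {
        n->nr_partial = 0;
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
    #ifdef CONFIG_SLUB_DEBUG
        INIT_LIST_HEAD(&n->full);
    #endif
    }
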
5330 * This is called from __kmem_cache_shutdown(). We must take list_lock
5339 spin_lock_irq(&n->list_lock);
5349 spin_unlock_irq(&n->list_lock);
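The shutdown hits use spin_lock_irq()/spin_unlock_irq() rather than the irqsave variants: __kmem_cache_shutdown() runs in process context with interrupts enabled, so there are no saved flags to restore. A hedged sketch of the teardown walk, with freeing deferred until after the lock is released (discard_slab() is assumed here, not quoted):

    static void free_partial_sketch(struct kmem_cache *s,
                                    struct kmem_cache_node *n)
    {
        struct slab *slab, *tmp;
        LIST_HEAD(discard);

        spin_lock_irq(&n->list_lock);
        list_for_each_entry_safe(slab, tmp, &n->partial, slab_list) {
            if (!slab->inuse) {
                /* Empty slab: unlink under the lock, free later. */
                list_move(&slab->slab_list, &discard);
                n->nr_partial--;
            }
        }
        spin_unlock_irq(&n->list_lock);

        /* The actual freeing happens outside the list_lock. */
        list_for_each_entry_safe(slab, tmp, &discard, slab_list)
            discard_slab(s, slab);
    }
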
5563 spin_lock_irqsave(&n->list_lock, flags);
5569 * list_lock. slab->inuse here is the upper limit.
5596 spin_unlock_irqrestore(&n->list_lock, flags);
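The "slab->inuse here is the upper limit" hit records a subtlety: holding list_lock freezes the lists themselves, but concurrent frees into a slab already on a list can still lower its in-use count, so a value of slab->inuse read under the lock is only an upper bound. A sketch of a scan that uses it conservatively (the function name is illustrative):

    static unsigned long count_free_sketch(struct kmem_cache_node *n)
    {
        struct slab *slab;
        unsigned long flags;
        unsigned long free = 0;

        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(slab, &n->partial, slab_list) {
            /*
             * Concurrent frees may occur while we hold the
             * list_lock; slab->inuse is the upper limit, so the
             * total computed here is a lower bound on free objects.
             */
            free += slab->objects - slab->inuse;
        }
        spin_unlock_irqrestore(&n->list_lock, flags);

        return free;
    }
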
5922 spin_lock_irqsave(&n->list_lock, flags);
5948 spin_unlock_irqrestore(&n->list_lock, flags);
7171 spin_lock_irqsave(&n->list_lock, flags);
7176 spin_unlock_irqrestore(&n->list_lock, flags);