/linux-master/drivers/gpu/drm/amd/amdgpu/ |
H A D | amdgpu_vm.c | 360 * amdgpu_vm_lock_pd - lock PD in drm_exec 671 mutex_lock(&id_mgr->lock); 675 mutex_unlock(&id_mgr->lock); 720 mutex_lock(&id_mgr->lock); 725 mutex_unlock(&id_mgr->lock); 729 mutex_lock(&id_mgr->lock); 733 mutex_unlock(&id_mgr->lock); 1462 /* The caller is already holding the reservation lock */ 2568 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); 2569 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); [all...] |
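The amdgpu_vm.c hits mix mutex_lock() on the ID-manager lock with spin_lock_irqsave() on the TLB-flush fence lock (hits 2568/2569). A minimal sketch of the irq-saving variant; the struct and function names here are illustrative, not amdgpu symbols:

        #include <linux/spinlock.h>
        #include <linux/types.h>

        struct tlb_fence {
                spinlock_t lock;
                u64 seq;                /* protected by @lock */
        };

        static u64 fence_read_seq(struct tlb_fence *f)
        {
                unsigned long flags;
                u64 seq;

                /* irqsave form: safe whether or not the caller has irqs enabled */
                spin_lock_irqsave(&f->lock, flags);
                seq = f->seq;
                spin_unlock_irqrestore(&f->lock, flags);

                return seq;
        }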
H A D | amdgpu_ttm.h | 51 spinlock_t lock; member in struct:amdgpu_gtt_mgr
|
H A D | amdgpu_cs.c | 699 spin_lock(&adev->mm_stats.lock); 762 spin_unlock(&adev->mm_stats.lock); 772 spin_lock(&adev->mm_stats.lock); 775 spin_unlock(&adev->mm_stats.lock); 1288 /* No memory allocation is allowed while holding the notifier lock. 1289 * The lock is held until amdgpu_cs_submit is finished and fence is
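The amdgpu_cs.c comment at 1288 states the rule directly: no memory allocation while the notifier lock is held. The usual shape is to allocate with GFP_KERNEL first and only then take the lock; a sketch with hypothetical names (stats_record, struct sample):

        #include <linux/slab.h>
        #include <linux/spinlock.h>
        #include <linux/list.h>
        #include <linux/types.h>

        struct sample {
                struct list_head node;
                u64 value;
        };

        static int stats_record(spinlock_t *lock, struct list_head *list, u64 value)
        {
                struct sample *s = kmalloc(sizeof(*s), GFP_KERNEL); /* may sleep */

                if (!s)
                        return -ENOMEM;
                s->value = value;

                spin_lock(lock);        /* no allocation allowed past this point */
                list_add_tail(&s->node, list);
                spin_unlock(lock);

                return 0;
        }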
|
/linux-master/drivers/char/ |
H A D | random.c | 76 * crng_init is protected by base_crng->lock, and only increases 157 spin_lock_irqsave(&random_ready_notifier.lock, flags); 162 spin_unlock_irqrestore(&random_ready_notifier.lock, flags); 209 spinlock_t lock; member in struct:__anon3 211 .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock) 217 local_lock_t lock; member in struct:crng 222 .lock = INIT_LOCAL_LOCK(crngs.lock), 268 spin_lock_irqsave(&base_crng.lock, flags); 618 spinlock_t lock; member in struct:__anon5 [all...] |
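random.c shows both compile-time lock initialization (__SPIN_LOCK_UNLOCKED for the global base_crng, hit 211) and a per-CPU local_lock_t (INIT_LOCAL_LOCK for crngs, hit 222). The two patterns side by side; the struct contents are placeholders:

        #include <linux/spinlock.h>
        #include <linux/local_lock.h>
        #include <linux/percpu.h>
        #include <linux/types.h>

        /* Global state with a statically initialized spinlock. */
        static struct {
                spinlock_t lock;
                unsigned long generation;
        } base = {
                .lock = __SPIN_LOCK_UNLOCKED(base.lock),
        };

        /* Per-CPU state: local_lock_t protects only this CPU's instance. */
        struct pcpu_state {
                local_lock_t lock;
                u8 key[32];
        };
        static DEFINE_PER_CPU(struct pcpu_state, pcpu_states) = {
                .lock = INIT_LOCAL_LOCK(pcpu_states.lock),
        };

Access then pairs local_lock(&pcpu_states.lock) / local_unlock(&pcpu_states.lock) around use of this CPU's instance, just as random.c does with crngs.lock.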
/linux-master/fs/btrfs/ |
H A D | extent_io.c | 306 * Find and lock a contiguous range of bytes in the file marked as delalloc, no 360 * start comes from the offset of locked_page. We have to lock 368 * make sure to limit the number of pages we try to lock down 373 /* step two, lock all the pages after the page that has start */ 393 /* step three, lock the state bits for the whole range */ 586 * - clear the lock bit in the extent tree 888 * lock to prevent race. 1290 spin_lock_irqsave(&subpage->lock, flags); 1293 spin_unlock_irqrestore(&subpage->lock, flags); 1428 * records are inserted to lock range [all...] |
H A D | extent-tree.c | 195 spin_lock(&delayed_refs->lock); 200 spin_unlock(&delayed_refs->lock); 213 spin_lock(&head->lock); 220 spin_unlock(&head->lock); 223 spin_unlock(&delayed_refs->lock); 1844 spin_lock(&delayed_refs->lock); 1847 spin_unlock(&delayed_refs->lock); 1877 spin_unlock(&head->lock); 1896 spin_lock(&delayed_refs->lock); 1898 spin_unlock(&delayed_refs->lock); [all...] |
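The extent-tree.c hits (195-223) show a fixed two-level order: the tree-wide delayed_refs->lock is taken first, then the per-head head->lock, with the head pinned so the outer lock can be dropped before the inner one is taken. A shape-only sketch with illustrative types:

        #include <linux/spinlock.h>
        #include <linux/refcount.h>
        #include <linux/list.h>

        struct ref_head {
                struct list_head node;
                spinlock_t lock;
                refcount_t refs;
        };

        struct ref_tree {
                spinlock_t lock;
                struct list_head heads;
        };

        /* Lock order: tree->lock first, then head->lock, never the reverse;
         * the refcount keeps the head alive across the gap between them. */
        static struct ref_head *select_head(struct ref_tree *tree)
        {
                struct ref_head *head;

                spin_lock(&tree->lock);
                head = list_first_entry_or_null(&tree->heads, struct ref_head, node);
                if (head)
                        refcount_inc(&head->refs);      /* pin before dropping */
                spin_unlock(&tree->lock);

                if (head) {
                        spin_lock(&head->lock);
                        /* ... consume the head's pending refs here ... */
                        spin_unlock(&head->lock);
                }
                return head;
        }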
/linux-master/kernel/dma/ |
H A D | swiotlb.c | 90 .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock), 108 * This is a single area with a single lock. 112 * @lock: The lock to protect the above data structures in the map and 118 spinlock_t lock; member in struct:io_tlb_area 283 spin_lock_init(&mem->areas[i].lock); 309 spin_lock(&mem->lock); 312 spin_unlock(&mem->lock); 1060 spin_lock_irqsave(&area->lock, flags); [all...]
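swiotlb splits its pool into areas, each with its own spinlock (struct io_tlb_area above), so allocations on different CPUs rarely contend; hit 283 is the per-area init loop. A sketch with illustrative names:

        #include <linux/spinlock.h>
        #include <linux/slab.h>

        struct pool_area {
                spinlock_t lock;        /* protects @used for this area only */
                unsigned int used;
        };

        static struct pool_area *alloc_areas(unsigned int nareas)
        {
                struct pool_area *areas;
                unsigned int i;

                areas = kcalloc(nareas, sizeof(*areas), GFP_KERNEL);
                if (!areas)
                        return NULL;
                for (i = 0; i < nareas; i++)
                        spin_lock_init(&areas[i].lock);
                return areas;
        }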
/linux-master/fs/kernfs/ |
H A D | file.c | 52 struct mutex *lock; local 54 lock = kernfs_open_file_mutex_ptr(kn); 56 mutex_lock(lock); 58 return lock;
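The four-line file.c hit is a complete idiom: look up the right mutex, lock it, and return the pointer so the caller can unlock the same one. A generic sketch (struct obj and obj_mutex_ptr are stand-ins for the kernfs helpers):

        #include <linux/mutex.h>

        struct obj {
                struct mutex mutex;
        };

        static struct mutex *obj_mutex_ptr(struct obj *o)
        {
                /* kernfs hashes the node to one of a pool of mutexes here */
                return &o->mutex;
        }

        /* Caller pairs this with mutex_unlock() on the returned pointer. */
        static struct mutex *obj_lock(struct obj *o)
        {
                struct mutex *lock = obj_mutex_ptr(o);

                mutex_lock(lock);
                return lock;
        }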
|
/linux-master/fs/bcachefs/ |
H A D | sysfs.c | 249 mutex_lock(&c->btree_cache.lock); 253 mutex_unlock(&c->btree_cache.lock); 374 six_lock_wakeup_all(&b->lock);
|
H A D | ec.c | 1612 lockdep_assert_held(&h->lock); 1618 mutex_init(&s->lock); 1646 mutex_init(&h->lock); 1647 BUG_ON(!mutex_trylock(&h->lock)); 1689 mutex_unlock(&h->lock); 1720 ret = bch2_trans_mutex_lock(trans, &h->lock); 1730 mutex_unlock(&h->lock); 2075 mutex_lock(&h->lock); 2095 mutex_unlock(&h->lock);
|
H A D | ec.h | 158 struct mutex lock; member in struct:ec_stripe_new 187 struct mutex lock; member in struct:ec_stripe_head
|
H A D | buckets.h | 51 * while (xchg(&b->lock, 1)) cpu_relax(); 55 * ulong for this - we just need to make sure the lock bit always ends up in the 74 clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock); 75 wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR); 80 wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
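buckets.h documents the trick in full: one bit of an existing word serves as the lock, spun on with xchg()/cpu_relax() in the comment's fast sketch and slept on via the bit-waitqueue API in the real code (hits 74-80). A self-contained version of the sleeping form; bit 0 is an assumption, the hits don't show BUCKET_LOCK_BITNR's value:

        #include <linux/wait_bit.h>
        #include <linux/bitops.h>
        #include <linux/sched.h>

        #define LOCK_BITNR      0       /* assumed; not shown in the hits */

        static inline void word_lock(unsigned long *word)
        {
                /* Sleeps if the bit is already set; sets it otherwise. */
                wait_on_bit_lock(word, LOCK_BITNR, TASK_UNINTERRUPTIBLE);
        }

        static inline void word_unlock(unsigned long *word)
        {
                /* Release the bit, then wake anyone sleeping in word_lock(). */
                clear_bit_unlock(LOCK_BITNR, word);
                wake_up_bit(word, LOCK_BITNR);
        }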
|
H A D | btree_write_buffer.c | 246 spin_lock(&j->lock); 248 spin_unlock(&j->lock); 267 mutex_lock(&wb->inc.lock); 269 mutex_unlock(&wb->inc.lock); 441 mutex_lock(&wb->flushing.lock); 443 mutex_unlock(&wb->flushing.lock); 475 if (mutex_trylock(&wb->flushing.lock)) { 477 mutex_unlock(&wb->flushing.lock); 501 mutex_lock(&wb->flushing.lock); 505 mutex_unlock(&wb->flushing.lock); [all...] |
H A D | btree_trans_commit.c | 230 six_unlock_read(&b->c.lock); 618 * Check if the insert will fit in the leaf node with the write lock
|
H A D | btree_iter.h | 45 return path->l[level].lock_seq == six_lock_seq(&b->c.lock); 255 static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock) argument 257 return mutex_trylock(lock) 259 : __bch2_trans_mutex_lock(trans, lock);
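bch2_trans_mutex_lock at 255 is a trylock fast path; only on contention does it fall back to the slow path, which must cooperate with the transaction's btree node locks. A plausible shape for that slow path, hedged: the body below is an assumption about what __bch2_trans_mutex_lock does, not a copy of it (bch2_trans_unlock/bch2_trans_relock are real bcachefs helpers):

        /* Sketch only: drop the transaction's locks so sleeping on the
         * mutex cannot deadlock, take it, then relock and report any
         * transaction restart. */
        int __bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
        {
                int ret;

                bch2_trans_unlock(trans);       /* safe to sleep now */
                mutex_lock(lock);
                ret = bch2_trans_relock(trans); /* may signal a restart */
                if (ret)
                        mutex_unlock(lock);
                return ret;
        }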
|
H A D | btree_cache.c | 50 if (b->c.lock.readers) 184 mutex_lock(&bc->lock); 188 mutex_unlock(&bc->lock); 211 lockdep_assert_held(&bc->lock); 231 /* XXX: waiting on IO with btree cache lock held */ 236 if (!six_trylock_intent(&b->c.lock)) 239 if (!six_trylock_write(&b->c.lock)) 242 /* recheck under lock */ 247 six_unlock_write(&b->c.lock); 248 six_unlock_intent(&b->c.lock); [all...] |
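The shrinker path in btree_cache.c (hits 236-248) takes both levels of the six lock with trylocks and then rechecks: state read before locking must be revalidated once the locks are held. A sketch assuming bcachefs-internal types (struct btree, six locks from fs/bcachefs/six.h); btree_node_reclaimable() is a hypothetical predicate:

        static int try_reclaim_node(struct btree *b)
        {
                if (!six_trylock_intent(&b->c.lock))
                        return -ENOMEM;
                if (!six_trylock_write(&b->c.lock)) {
                        six_unlock_intent(&b->c.lock);
                        return -ENOMEM;
                }
                if (!btree_node_reclaimable(b)) {       /* recheck under lock */
                        six_unlock_write(&b->c.lock);
                        six_unlock_intent(&b->c.lock);
                        return -ENOMEM;
                }
                /* ... safe to evict: no readers, no writers ... */
                six_unlock_write(&b->c.lock);
                six_unlock_intent(&b->c.lock);
                return 0;
        }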
H A D | bcachefs.h | 652 struct mutex lock; member in struct:btree_transaction_stats 974 * can read without a lock.
|
/linux-master/include/linux/ |
H A D | io_uring_types.h | 70 spinlock_t lock; member in struct:io_hash_bucket 417 spinlock_t napi_lock; /* napi_list lock */ 441 /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
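struct io_hash_bucket pairs each hash chain with its own spinlock, so tasks hashing to different buckets never contend. A minimal sketch with illustrative names:

        #include <linux/spinlock.h>
        #include <linux/list.h>
        #include <linux/types.h>

        struct hash_bucket {
                spinlock_t        lock;         /* protects @list only */
                struct hlist_head list;
        };

        static void bucket_add(struct hash_bucket *table, unsigned int nbuckets,
                               struct hlist_node *node, u32 hash)
        {
                struct hash_bucket *hb = &table[hash % nbuckets];

                spin_lock(&hb->lock);
                hlist_add_head(node, &hb->list);
                spin_unlock(&hb->lock);
        }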
|
/linux-master/fs/ceph/ |
H A D | mds_client.c | 2204 * of lock mds request will re-add auth caps. */ 2328 down_read(&osdc->lock); 2330 up_read(&osdc->lock); 3994 /* Defer closing the sessions after s_mutex lock being released */ 5027 * the mdsc->mutex's unlock/lock gap below in rare 5226 * lock unlock the session, to wait ongoing session activities
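The mds_client.c hit at 2328 is the reader side of an rwsem: many readers may hold osdc->lock concurrently, excluding only writers. A sketch with placeholder state:

        #include <linux/rwsem.h>

        struct client_state {
                struct rw_semaphore lock;
                unsigned long epoch;    /* written only under down_write() */
        };

        static unsigned long client_read_epoch(struct client_state *c)
        {
                unsigned long epoch;

                down_read(&c->lock);    /* shared with other readers */
                epoch = c->epoch;
                up_read(&c->lock);

                return epoch;
        }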
|
/linux-master/drivers/md/ |
H A D | raid1.c | 1702 lockdep_assert_held(&mddev->lock); 1810 * Called under mddev lock, so rcu protection not needed.
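raid1.c's lockdep_assert_held at 1702 turns a "caller must hold mddev->lock" comment into a runtime-checked contract. The general pattern, with hypothetical names:

        #include <linux/spinlock.h>
        #include <linux/lockdep.h>

        struct dev_state {
                spinlock_t lock;
                int degraded;
        };

        /* Contract: caller holds @s->lock; lockdep fires if it does not. */
        static void dev_mark_degraded(struct dev_state *s)
        {
                lockdep_assert_held(&s->lock);
                s->degraded = 1;
        }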
|
/linux-master/drivers/iommu/intel/ |
H A D | iommu.c | 456 spin_lock_irqsave(&domain->lock, flags); 468 spin_unlock_irqrestore(&domain->lock, flags); 943 /* we don't need lock here; nobody else touches the iova range */ 1013 /* We don't need lock here; nobody else touches the iova range */ 1104 /* we don't need lock here; nobody else touches the iova range */ 1280 spin_lock_irqsave(&domain->lock, flags); 1284 spin_unlock_irqrestore(&domain->lock, flags); 1288 spin_unlock_irqrestore(&domain->lock, flags); 1300 spin_lock_irqsave(&domain->lock, flags); 1316 spin_unlock_irqrestore(&domain->lock, flags); [all...] |
/linux-master/drivers/iommu/amd/ |
H A D | iommu.c | 196 spin_lock_init(&dev_data->lock); 1247 raw_spin_lock_irqsave(&iommu->lock, flags); 1249 raw_spin_unlock_irqrestore(&iommu->lock, flags); 1276 raw_spin_lock_irqsave(&iommu->lock, flags); 1285 raw_spin_unlock_irqrestore(&iommu->lock, flags); 1622 spin_lock_irqsave(&domain->lock, flags); 1624 spin_unlock_irqrestore(&domain->lock, flags); 2086 spin_lock_irqsave(&domain->lock, flags); 2090 spin_lock(&dev_data->lock); 2103 spin_unlock(&dev_data->lock); [all...] |
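The amd/iommu.c hits at 2086-2103 nest a plain spin_lock() inside spin_lock_irqsave(): the outer lock already disabled interrupts, so the inner acquisition needn't save them again. The same reasoning explains the intel/iommu.c "we don't need lock here" comments, where single ownership removes the need for any lock. A sketch of the nesting with illustrative types:

        #include <linux/spinlock.h>
        #include <linux/list.h>
        #include <linux/types.h>

        struct dom {
                spinlock_t lock;
                struct list_head devices;
        };

        struct dev_entry {
                spinlock_t lock;
                struct list_head node;
                bool attached;
        };

        static void dom_attach(struct dom *d, struct dev_entry *e)
        {
                unsigned long flags;

                /* Outer lock disables irqs, so the inner lock stays plain. */
                spin_lock_irqsave(&d->lock, flags);
                spin_lock(&e->lock);

                e->attached = true;
                list_add(&e->node, &d->devices);

                spin_unlock(&e->lock);
                spin_unlock_irqrestore(&d->lock, flags);
        }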
H A D | init.c | 1775 raw_spin_lock_init(&iommu->lock);
|
/linux-master/drivers/ata/ |
H A D | libata-scsi.c | 105 spin_lock_irq(ap->lock); 126 spin_unlock_irq(ap->lock); 154 spin_lock_irqsave(ap->lock, flags); 187 spin_unlock_irqrestore(ap->lock, flags); 312 spin_lock_irqsave(ap->lock, flags); 321 spin_unlock_irqrestore(ap->lock, flags); 583 spin_lock_irqsave(ap->lock, flags); 585 spin_unlock_irqrestore(ap->lock, flags); 595 spin_lock_irqsave(ap->lock, flags); 605 spin_unlock_irqrestore(ap->lock, flags); [all...] |
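libata-scsi.c alternates between spin_lock_irq() (hit 105: process context where irqs are known enabled) and spin_lock_irqsave() (hit 154: callable from any context). The difference, sketched against a host-lock-style pointer; the function names are illustrative:

        #include <linux/spinlock.h>

        /* Process context, irqs known to be enabled: cheaper irq form. */
        static void port_quiesce(spinlock_t *ap_lock)
        {
                spin_lock_irq(ap_lock);
                /* ... walk and abort queued commands ... */
                spin_unlock_irq(ap_lock);
        }

        /* Callable from any context: save and restore the irq state. */
        static void port_poke(spinlock_t *ap_lock)
        {
                unsigned long flags;

                spin_lock_irqsave(ap_lock, flags);
                /* ... flip a flag, kick EH ... */
                spin_unlock_irqrestore(ap_lock, flags);
        }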
H A D | libata-core.c | 182 * Host lock or EH context. 242 * Host lock or EH context. 1523 spin_lock_irqsave(ap->lock, flags); 1527 spin_unlock_irqrestore(ap->lock, flags); 1578 spin_unlock_irqrestore(ap->lock, flags); 1598 spin_lock_irqsave(ap->lock, flags); 1614 spin_unlock_irqrestore(ap->lock, flags); 1636 spin_lock_irqsave(ap->lock, flags); 1647 spin_unlock_irqrestore(ap->lock, flags); 1703 * lead to controller lock up [all...] |