Searched refs:lock (Results 201 - 225 of 6515) sorted by last modified time


/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_vm.c
360 * amdgpu_vm_lock_pd - lock PD in drm_exec
671 mutex_lock(&id_mgr->lock);
675 mutex_unlock(&id_mgr->lock);
720 mutex_lock(&id_mgr->lock);
725 mutex_unlock(&id_mgr->lock);
729 mutex_lock(&id_mgr->lock);
733 mutex_unlock(&id_mgr->lock);
1462 /* The caller is already holding the reservation lock */
2568 spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2569 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
[all...]
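
The amdgpu_vm.c hits above are the plain mutex_lock()/mutex_unlock() bracket around a shared ID manager. A minimal sketch of that bracket, with illustrative (non-amdgpu) names:

    #include <linux/mutex.h>
    #include <linux/list.h>

    struct id_mgr {
            struct mutex lock;              /* protects ids */
            struct list_head ids;
    };

    static void id_mgr_reset(struct id_mgr *mgr)
    {
            mutex_lock(&mgr->lock);         /* may sleep: process context only */
            INIT_LIST_HEAD(&mgr->ids);      /* mutate shared state under the lock */
            mutex_unlock(&mgr->lock);
    }
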
amdgpu_ttm.h
51 spinlock_t lock; member in struct:amdgpu_gtt_mgr
amdgpu_cs.c
699 spin_lock(&adev->mm_stats.lock);
762 spin_unlock(&adev->mm_stats.lock);
772 spin_lock(&adev->mm_stats.lock);
775 spin_unlock(&adev->mm_stats.lock);
1288 /* No memory allocation is allowed while holding the notifier lock.
1289 * The lock is held until amdgpu_cs_submit is finished and fence is
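
The comment at 1288 states a general kernel rule: don't allocate memory while holding a lock that the reclaim path might also need, or reclaim can recurse into the lock and deadlock. The usual shape of the fix is to allocate first and only then take the lock; a hedged sketch with illustrative names:

    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct entry {
            struct list_head node;
    };

    static int add_entry(struct list_head *list, spinlock_t *lock)
    {
            struct entry *e = kmalloc(sizeof(*e), GFP_KERNEL); /* allocate before locking */

            if (!e)
                    return -ENOMEM;

            spin_lock(lock);                /* critical section does no allocation */
            list_add(&e->node, list);
            spin_unlock(lock);
            return 0;
    }
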
/linux-master/drivers/char/
random.c
76 * crng_init is protected by base_crng->lock, and only increases
157 spin_lock_irqsave(&random_ready_notifier.lock, flags);
162 spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
209 spinlock_t lock; member in struct:__anon3
211 .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
217 local_lock_t lock; member in struct:crng
222 .lock = INIT_LOCAL_LOCK(crngs.lock),
268 spin_lock_irqsave(&base_crng.lock, flags);
618 spinlock_t lock; member in struct:__anon5
[all...]
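
random.c initializes its locks at build time instead of calling spin_lock_init() at runtime: __SPIN_LOCK_UNLOCKED() for a spinlock inside a static object, INIT_LOCAL_LOCK() for a per-CPU local lock. Both idioms, with invented struct names:

    #include <linux/spinlock.h>
    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    static struct {
            spinlock_t lock;
            unsigned long generation;
    } base = {
            .lock = __SPIN_LOCK_UNLOCKED(base.lock),        /* statically unlocked */
    };

    struct pcpu_state {
            local_lock_t lock;      /* protects this CPU's state only */
            unsigned long count;
    };

    static DEFINE_PER_CPU(struct pcpu_state, pcpu_state) = {
            .lock = INIT_LOCAL_LOCK(pcpu_state.lock),
    };
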
/linux-master/fs/btrfs/
extent_io.c
306 * Find and lock a contiguous range of bytes in the file marked as delalloc, no
360 * start comes from the offset of locked_page. We have to lock
368 * make sure to limit the number of pages we try to lock down
373 /* step two, lock all the pages after the page that has start */
393 /* step three, lock the state bits for the whole range */
586 * - clear the lock bit in the extent tree
888 * lock to prevent race.
1290 spin_lock_irqsave(&subpage->lock, flags);
1293 spin_unlock_irqrestore(&subpage->lock, flags);
1428 * records are inserted to lock range
[all...]
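
The extent_io.c comments sketch a staged acquisition: find the delalloc range, lock its pages, then lock the extent-state bits, backing out in reverse order if a later step fails. The control flow in outline, with entirely hypothetical helpers standing in for the btrfs ones:

    /* find_range(), lock_pages(), lock_state(), unlock_pages() are hypothetical */
    struct range;
    bool find_range(struct range *r);
    bool lock_pages(struct range *r);
    bool lock_state(struct range *r);
    void unlock_pages(struct range *r);

    static bool lock_delalloc(struct range *r)
    {
            if (!find_range(r))             /* step one: find the range */
                    return false;
            if (!lock_pages(r))             /* step two: lock the pages */
                    return false;
            if (!lock_state(r)) {           /* step three: lock the state bits */
                    unlock_pages(r);        /* back out in reverse order */
                    return false;
            }
            return true;
    }
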
extent-tree.c
195 spin_lock(&delayed_refs->lock);
200 spin_unlock(&delayed_refs->lock);
213 spin_lock(&head->lock);
220 spin_unlock(&head->lock);
223 spin_unlock(&delayed_refs->lock);
1844 spin_lock(&delayed_refs->lock);
1847 spin_unlock(&delayed_refs->lock);
1877 spin_unlock(&head->lock);
1896 spin_lock(&delayed_refs->lock);
1898 spin_unlock(&delayed_refs->lock);
[all...]
/linux-master/kernel/dma/
swiotlb.c
90 .lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
108 * This is a single area with a single lock.
112 * @lock: The lock to protect the above data structures in the map and
118 spinlock_t lock; member in struct:io_tlb_area
283 spin_lock_init(&mem->areas[i].lock);
309 spin_lock(&mem->lock);
312 spin_unlock(&mem->lock);
1060 spin_lock_irqsave(&area->lock, flags);
[all...]
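
The swiotlb kernel-doc above describes a pool split into areas, each with its own spinlock, so concurrent mappings on different CPUs contend less than they would on one global lock. A minimal sketch of that sharding (not the real swiotlb structures):

    #include <linux/spinlock.h>

    struct area {
            spinlock_t lock;        /* protects only this area's counters */
            unsigned long used;
    };

    struct pool {
            unsigned int nareas;
            struct area *areas;     /* one lock per area, not one per pool */
    };

    static void pool_reserve(struct pool *p, unsigned int cpu)
    {
            struct area *a = &p->areas[cpu % p->nareas];    /* shard by CPU */
            unsigned long flags;

            spin_lock_irqsave(&a->lock, flags);
            a->used++;
            spin_unlock_irqrestore(&a->lock, flags);
    }
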
/linux-master/fs/kernfs/
file.c
52 struct mutex *lock; local
54 lock = kernfs_open_file_mutex_ptr(kn);
56 mutex_lock(lock);
58 return lock;
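
This kernfs helper locks a mutex chosen from a shared table and returns the pointer, so the caller can later unlock the very same one. The pattern in a sketch (the hashed table is illustrative):

    #include <linux/mutex.h>
    #include <linux/hash.h>

    #define LOCK_BITS 6     /* 2^6 == 64 mutexes */
    static struct mutex lock_table[1 << LOCK_BITS]; /* assume mutex_init() on each at boot */

    /* Lock the mutex covering @key; the caller unlocks the returned mutex. */
    static struct mutex *lock_for_key(unsigned long key)
    {
            struct mutex *lock = &lock_table[hash_long(key, LOCK_BITS)];

            mutex_lock(lock);
            return lock;
    }
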
/linux-master/fs/bcachefs/
sysfs.c
249 mutex_lock(&c->btree_cache.lock);
253 mutex_unlock(&c->btree_cache.lock);
374 six_lock_wakeup_all(&b->lock);
ec.c
1612 lockdep_assert_held(&h->lock);
1618 mutex_init(&s->lock);
1646 mutex_init(&h->lock);
1647 BUG_ON(!mutex_trylock(&h->lock));
1689 mutex_unlock(&h->lock);
1720 ret = bch2_trans_mutex_lock(trans, &h->lock);
1730 mutex_unlock(&h->lock);
2075 mutex_lock(&h->lock);
2095 mutex_unlock(&h->lock);
ec.h
158 struct mutex lock; member in struct:ec_stripe_new
187 struct mutex lock; member in struct:ec_stripe_head
buckets.h
51 * while (xchg(&b->lock, 1)) cpu_relax();
55 * ulong for this - we just need to make sure the lock bit always ends up in the
74 clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
75 wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
80 wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
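
buckets.h documents an open-coded bit lock: one bit of a word is the lock, taken with wait_on_bit_lock() and dropped with clear_bit_unlock() followed by wake_up_bit(). Both halves in a sketch:

    #include <linux/bitops.h>
    #include <linux/wait_bit.h>
    #include <linux/sched.h>

    #define MY_LOCK_BITNR 0

    static void my_lock(unsigned long *word)
    {
            /* sleeps until the bit is clear, then sets it atomically;
             * returns 0 when called with TASK_UNINTERRUPTIBLE */
            wait_on_bit_lock(word, MY_LOCK_BITNR, TASK_UNINTERRUPTIBLE);
    }

    static void my_unlock(unsigned long *word)
    {
            clear_bit_unlock(MY_LOCK_BITNR, word);  /* release semantics */
            wake_up_bit(word, MY_LOCK_BITNR);       /* wake any sleeper */
    }
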
btree_write_buffer.c
246 spin_lock(&j->lock);
248 spin_unlock(&j->lock);
267 mutex_lock(&wb->inc.lock);
269 mutex_unlock(&wb->inc.lock);
441 mutex_lock(&wb->flushing.lock);
443 mutex_unlock(&wb->flushing.lock);
475 if (mutex_trylock(&wb->flushing.lock)) {
477 mutex_unlock(&wb->flushing.lock);
501 mutex_lock(&wb->flushing.lock);
505 mutex_unlock(&wb->flushing.lock);
[all...]
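
Line 475 above shows the opportunistic variant: take the flushing lock only if it is free, do the work, and never block; if someone else holds it, they are already doing that work. Sketch:

    #include <linux/mutex.h>

    static void maybe_flush(struct mutex *flush_lock)
    {
            if (mutex_trylock(flush_lock)) {        /* acquired without blocking */
                    /* ... do the flush ... */
                    mutex_unlock(flush_lock);
            }
            /* else: the current holder will flush; nothing to do */
    }
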
btree_trans_commit.c
230 six_unlock_read(&b->c.lock);
618 * Check if the insert will fit in the leaf node with the write lock
btree_iter.h
45 return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
255 static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock) argument
257 return mutex_trylock(lock)
259 : __bch2_trans_mutex_lock(trans, lock);
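
bch2_trans_mutex_lock() above is a classic fast-path/slow-path split: an inline mutex_trylock() covers the uncontended case, and only contention falls through to the out-of-line helper that knows how to sleep or restart. The general shape, with hypothetical names:

    #include <linux/mutex.h>

    struct ctx;
    int slow_lock(struct ctx *c, struct mutex *lock);   /* out of line; may sleep */

    static inline int fast_lock(struct ctx *c, struct mutex *lock)
    {
            return mutex_trylock(lock)
                    ? 0                     /* uncontended fast path */
                    : slow_lock(c, lock);   /* contended slow path */
    }
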
btree_cache.c
50 if (b->c.lock.readers)
184 mutex_lock(&bc->lock);
188 mutex_unlock(&bc->lock);
211 lockdep_assert_held(&bc->lock);
231 /* XXX: waiting on IO with btree cache lock held */
236 if (!six_trylock_intent(&b->c.lock))
239 if (!six_trylock_write(&b->c.lock))
242 /* recheck under lock */
247 six_unlock_write(&b->c.lock);
248 six_unlock_intent(&b->c.lock);
[all...]
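
"Recheck under lock" at 242 is the standard discipline when a condition is first tested without the lock for speed: after acquiring, test again, because it may have changed in the window. Sketch:

    #include <linux/spinlock.h>

    struct obj {
            spinlock_t lock;
            bool in_use;
    };

    static bool try_retire(struct obj *o)
    {
            if (o->in_use)                  /* cheap unlocked pre-check */
                    return false;

            spin_lock(&o->lock);
            if (o->in_use) {                /* recheck: may have changed */
                    spin_unlock(&o->lock);
                    return false;
            }
            /* ... safe to retire the object ... */
            spin_unlock(&o->lock);
            return true;
    }
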
bcachefs.h
652 struct mutex lock; member in struct:btree_transaction_stats
974 * can read without a lock.
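
The bcachefs.h comment "can read without a lock" points at the common split where a field is written under a lock but read locklessly; READ_ONCE()/WRITE_ONCE() is the usual way to make that intent explicit (a general idiom, not necessarily what bcachefs does here):

    #include <linux/spinlock.h>
    #include <linux/compiler.h>

    static DEFINE_SPINLOCK(stats_lock);
    static unsigned long stat;      /* written under stats_lock, read locklessly */

    static void stat_update(unsigned long v)
    {
            spin_lock(&stats_lock);
            WRITE_ONCE(stat, v);    /* pairs with the lockless READ_ONCE() */
            spin_unlock(&stats_lock);
    }

    static unsigned long stat_peek(void)
    {
            return READ_ONCE(stat); /* tolerates racing with stat_update() */
    }
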
/linux-master/include/linux/
io_uring_types.h
70 spinlock_t lock; member in struct:io_hash_bucket
417 spinlock_t napi_lock; /* napi_list lock */
441 /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */
/linux-master/fs/ceph/
mds_client.c
2204 * of lock mds request will re-add auth caps. */
2328 down_read(&osdc->lock);
2330 up_read(&osdc->lock);
3994 /* Defer closing the sessions after s_mutex lock being released */
5027 * the mdsc->mutex's unlock/lock gap below in rare
5226 * lock unlock the session, to wait ongoing session activities
/linux-master/drivers/md/
raid1.c
1702 lockdep_assert_held(&mddev->lock);
1810 * Called under mddev lock, so rcu protection not needed.
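
raid1.c turns its "called under mddev lock" rule into a machine-checked one with lockdep_assert_held(), which splats on debug kernels if a caller forgot the lock. The idiom:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(cfg_lock);

    /* Caller must hold cfg_lock; lockdep enforces it when CONFIG_LOCKDEP=y. */
    static void cfg_update(void)
    {
            lockdep_assert_held(&cfg_lock);
            /* ... mutate state protected by cfg_lock ... */
    }
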
/linux-master/drivers/iommu/intel/
iommu.c
456 spin_lock_irqsave(&domain->lock, flags);
468 spin_unlock_irqrestore(&domain->lock, flags);
943 /* we don't need lock here; nobody else touches the iova range */
1013 /* We don't need lock here; nobody else touches the iova range */
1104 /* we don't need lock here; nobody else touches the iova range */
1280 spin_lock_irqsave(&domain->lock, flags);
1284 spin_unlock_irqrestore(&domain->lock, flags);
1288 spin_unlock_irqrestore(&domain->lock, flags);
1300 spin_lock_irqsave(&domain->lock, flags);
1316 spin_unlock_irqrestore(&domain->lock, flags);
[all...]
/linux-master/drivers/iommu/amd/
iommu.c
196 spin_lock_init(&dev_data->lock);
1247 raw_spin_lock_irqsave(&iommu->lock, flags);
1249 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1276 raw_spin_lock_irqsave(&iommu->lock, flags);
1285 raw_spin_unlock_irqrestore(&iommu->lock, flags);
1622 spin_lock_irqsave(&domain->lock, flags);
1624 spin_unlock_irqrestore(&domain->lock, flags);
2086 spin_lock_irqsave(&domain->lock, flags);
2090 spin_lock(&dev_data->lock);
2103 spin_unlock(&dev_data->lock);
[all...]
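
Lines 2086-2103 take dev_data->lock strictly inside domain->lock. Keeping one acquisition order everywhere is what rules out ABBA deadlocks between the two; the nesting, with stand-in types:

    #include <linux/spinlock.h>

    struct domain   { spinlock_t lock; };
    struct dev_data { spinlock_t lock; };

    static void attach(struct domain *dom, struct dev_data *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&dom->lock, flags);   /* outer lock, always first */
            spin_lock(&dev->lock);                  /* inner lock, always second */
            /* ... link dev into dom ... */
            spin_unlock(&dev->lock);
            spin_unlock_irqrestore(&dom->lock, flags);
    }
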
init.c
1775 raw_spin_lock_init(&iommu->lock);
/linux-master/drivers/ata/
libata-scsi.c
105 spin_lock_irq(ap->lock);
126 spin_unlock_irq(ap->lock);
154 spin_lock_irqsave(ap->lock, flags);
187 spin_unlock_irqrestore(ap->lock, flags);
312 spin_lock_irqsave(ap->lock, flags);
321 spin_unlock_irqrestore(ap->lock, flags);
583 spin_lock_irqsave(ap->lock, flags);
585 spin_unlock_irqrestore(ap->lock, flags);
595 spin_lock_irqsave(ap->lock, flags);
605 spin_unlock_irqrestore(ap->lock, flags);
[all...]
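
libata-scsi.c mixes spin_lock_irq() (line 105, where interrupts are known to be enabled) with spin_lock_irqsave() (line 154, where the caller's IRQ state is unknown and must be restored exactly). The difference in a sketch:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ap_lock);        /* stand-in for ap->lock */

    static void from_process_context(void)
    {
            spin_lock_irq(&ap_lock);        /* IRQs known on: nothing to save */
            /* ... */
            spin_unlock_irq(&ap_lock);      /* unconditionally re-enables IRQs */
    }

    static void from_any_context(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&ap_lock, flags);       /* save current IRQ state */
            /* ... */
            spin_unlock_irqrestore(&ap_lock, flags);  /* restore it exactly */
    }
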
libata-core.c
182 * Host lock or EH context.
242 * Host lock or EH context.
1523 spin_lock_irqsave(ap->lock, flags);
1527 spin_unlock_irqrestore(ap->lock, flags);
1578 spin_unlock_irqrestore(ap->lock, flags);
1598 spin_lock_irqsave(ap->lock, flags);
1614 spin_unlock_irqrestore(ap->lock, flags);
1636 spin_lock_irqsave(ap->lock, flags);
1647 spin_unlock_irqrestore(ap->lock, flags);
1703 * lead to controller lock up
[all...]

Completed in 398 milliseconds
