Searched refs:mmu_lock (Results 1 - 25 of 30) sorted by relevance


/linux-master/virt/kvm/
kvm_mm.h
8 * for the mmu_lock. These macros, for use in common code
14 #define KVM_MMU_LOCK_INIT(kvm) rwlock_init(&(kvm)->mmu_lock)
15 #define KVM_MMU_LOCK(kvm) write_lock(&(kvm)->mmu_lock)
16 #define KVM_MMU_UNLOCK(kvm) write_unlock(&(kvm)->mmu_lock)
18 #define KVM_MMU_LOCK_INIT(kvm) spin_lock_init(&(kvm)->mmu_lock)
19 #define KVM_MMU_LOCK(kvm) spin_lock(&(kvm)->mmu_lock)
20 #define KVM_MMU_UNLOCK(kvm) spin_unlock(&(kvm)->mmu_lock)
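The two macro variants above show how common KVM code stays agnostic about whether an architecture implements mmu_lock as an rwlock or a spinlock. A minimal sketch of how a common-code path might use the wrappers (the helper name and body are assumptions, not taken from these results):

#include <linux/kvm_host.h>
#include "kvm_mm.h"

/* Hypothetical common-code helper: takes mmu_lock exclusively on either flavour. */
static void example_zap_all(struct kvm *kvm)
{
	KVM_MMU_LOCK(kvm);	/* write_lock() or spin_lock(), per the macros above */
	/* ... walk and zap page tables while the lock is held ... */
	KVM_MMU_UNLOCK(kvm);
}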
/linux-master/arch/x86/kvm/mmu/
page_track.c
93 lockdep_assert_held_write(&kvm->mmu_lock);
116 lockdep_assert_held_write(&kvm->mmu_lock);
236 write_lock(&kvm->mmu_lock);
238 write_unlock(&kvm->mmu_lock);
254 write_lock(&kvm->mmu_lock);
256 write_unlock(&kvm->mmu_lock);
332 write_lock(&kvm->mmu_lock);
334 write_unlock(&kvm->mmu_lock);
362 write_lock(&kvm->mmu_lock);
364 write_unlock(&kvm->mmu_lock);
[all...]
tdp_mmu.c
26 lockdep_assert_held_read(&kvm->mmu_lock);
28 lockdep_assert_held_write(&kvm->mmu_lock);
112 * role.invalid are protected by mmu_lock.
114 lockdep_assert_held(&kvm->mmu_lock);
154 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
164 ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
168 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
169 * the implication being that any flow that holds mmu_lock for read is
171 * Holding mmu_lock for write obviates the need for RCU protection as the list
240 read_lock(&kvm->mmu_lock);
[all...]
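The paired asserts at tdp_mmu.c:26 and :28, together with the comment about holding mmu_lock for write versus read, suggest a small helper that checks the lock in whichever mode the caller claims to hold it (on the rwlock flavour used by x86). A hedged reconstruction; the function name and bool parameter are assumptions:

#include <linux/kvm_host.h>
#include <linux/lockdep.h>

/* Hypothetical: assert mmu_lock is held; shared means held for read, else for write. */
static void assert_mmu_lock_held(struct kvm *kvm, bool shared)
{
	if (shared)
		lockdep_assert_held_read(&kvm->mmu_lock);
	else
		lockdep_assert_held_write(&kvm->mmu_lock);
}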
mmu.c
2119 if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2126 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
2576 lockdep_assert_held_write(&kvm->mmu_lock);
2743 write_lock(&kvm->mmu_lock);
2754 write_unlock(&kvm->mmu_lock);
2764 write_lock(&kvm->mmu_lock);
2770 write_unlock(&kvm->mmu_lock);
2837 * run with mmu_lock held for read, not write, and the unsync
2840 * no meaningful penalty if mmu_lock is held for write.
2850 * possible as clearing sp->unsync _must_ hold mmu_lock
[all...]
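The hit at mmu.c:2119/2126 is the usual yield point in a long page-table walk: if the scheduler wants the CPU or another path is waiting on mmu_lock, the write lock is dropped and re-taken. A sketch of that pattern on the rwlock flavour (function name illustrative; any batched zaps or TLB flushes would need to be committed before yielding):

#include <linux/kvm_host.h>
#include <linux/sched.h>

/* Hypothetical yield point for a long walk under the mmu_lock write lock. */
static void example_yield_if_contended(struct kvm *kvm)
{
	if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
		/* ... commit batched work / TLB flushes before dropping the lock ... */
		cond_resched_rwlock_write(&kvm->mmu_lock);
	}
}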
paging_tmpl.h
839 write_lock(&vcpu->kvm->mmu_lock);
850 write_unlock(&vcpu->kvm->mmu_lock);
/linux-master/arch/riscv/kvm/
mmu.c
297 * If the range is too large, release the kvm->mmu_lock
301 cond_resched_lock(&kvm->mmu_lock);
340 spin_lock(&kvm->mmu_lock);
342 spin_unlock(&kvm->mmu_lock);
372 spin_lock(&kvm->mmu_lock);
374 spin_unlock(&kvm->mmu_lock);
388 spin_lock(&kvm->mmu_lock);
390 spin_unlock(&kvm->mmu_lock);
428 spin_lock(&kvm->mmu_lock);
430 spin_unlock(&kvm->mmu_lock);
[all...]
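riscv's mmu.c:297/301 shows the same idea on the spinlock flavour: during a large-range walk, periodically release kvm->mmu_lock via cond_resched_lock() so other users can get in. A minimal sketch; the loop body and names are illustrative:

#include <linux/kvm_host.h>
#include <linux/sched.h>

/* Hypothetical: caller holds kvm->mmu_lock (spinlock flavour). */
static void example_unmap_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
	gfn_t gfn;

	for (gfn = start; gfn < end; gfn++) {
		/* ... unmap one guest page ... */
		/* Briefly drop the lock if someone needs the CPU or the lock. */
		cond_resched_lock(&kvm->mmu_lock);
	}
}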
/linux-master/arch/powerpc/kvm/
book3s_mmu_hpte.c
63 spin_lock(&vcpu3s->mmu_lock);
92 spin_unlock(&vcpu3s->mmu_lock);
110 spin_lock(&vcpu3s->mmu_lock);
114 spin_unlock(&vcpu3s->mmu_lock);
127 spin_unlock(&vcpu3s->mmu_lock);
369 spin_lock_init(&vcpu3s->mmu_lock);
book3s_hv_nested.c
758 * so we don't need to hold kvm->mmu_lock.
775 spin_lock(&kvm->mmu_lock);
781 spin_unlock(&kvm->mmu_lock);
800 spin_lock(&kvm->mmu_lock);
810 spin_unlock(&kvm->mmu_lock);
827 spin_lock(&kvm->mmu_lock);
829 spin_unlock(&kvm->mmu_lock);
844 spin_lock(&kvm->mmu_lock);
848 spin_unlock(&kvm->mmu_lock);
862 spin_lock(&kvm->mmu_lock);
[all...]
book3s_64_mmu_radix.c
422 /* Called with kvm->mmu_lock held */
647 spin_lock(&kvm->mmu_lock);
783 spin_unlock(&kvm->mmu_lock);
871 spin_lock(&kvm->mmu_lock);
876 spin_unlock(&kvm->mmu_lock);
1011 spin_lock(&kvm->mmu_lock);
1015 spin_unlock(&kvm->mmu_lock);
1031 /* Called with kvm->mmu_lock held */
1050 /* Called with kvm->mmu_lock held */
1078 /* Called with kvm->mmu_lock held */
[all...]
book3s_hv_rm_mmu.c
248 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
263 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
277 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
938 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
950 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
966 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
981 arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
book3s_64_mmu_host.c
153 spin_lock(&kvm->mmu_lock);
205 spin_unlock(&kvm->mmu_lock);
book3s_64_mmu_hv.c
632 spin_lock(&kvm->mmu_lock);
637 spin_unlock(&kvm->mmu_lock);
766 spin_lock(&kvm->mmu_lock);
773 spin_unlock(&kvm->mmu_lock);
1406 spin_lock(&kvm->mmu_lock);
1413 spin_unlock(&kvm->mmu_lock);
e500_mmu_host.c
462 spin_lock(&kvm->mmu_lock);
473 * We are holding kvm->mmu_lock so a notifier invalidate
502 spin_unlock(&kvm->mmu_lock);
/linux-master/arch/arm64/kvm/
mmu.c
76 cond_resched_rwlock_write(&kvm->mmu_lock);
105 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
122 lockdep_assert_held_write(&kvm->mmu_lock);
134 write_unlock(&kvm->mmu_lock);
140 write_lock(&kvm->mmu_lock);
315 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
325 lockdep_assert_held_write(&kvm->mmu_lock);
359 write_lock(&kvm->mmu_lock);
365 write_unlock(&kvm->mmu_lock);
1000 write_lock(&kvm->mmu_lock);
[all...]
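arm64 mirrors both patterns above (cond_resched_rwlock_write at :76, lockdep asserts at :122/:325), and the comment at :315 documents that stage-2 teardown helpers expect mmu_lock held for write except when the whole stage-2 pgd is being freed. A thin sketch of that contract; the wrapper name and signature are assumptions:

#include <linux/kvm_host.h>

/* Hypothetical stage-2 unmap wrapper expressing the locking contract. */
static void example_stage2_unmap(struct kvm *kvm, u64 start, u64 size)
{
	/* Per the comment at arm64 mmu.c:315, callers hold mmu_lock for write,
	 * except the teardown path that frees the whole stage-2 pgd. */
	lockdep_assert_held_write(&kvm->mmu_lock);
	/* ... walk and unmap the IPA range ... */
}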
/linux-master/arch/x86/kvm/
debugfs.c
112 write_lock(&kvm->mmu_lock);
132 write_unlock(&kvm->mmu_lock);
/linux-master/arch/loongarch/kvm/
mmu.c
287 * @lock: Whether to hold mmu_lock or not
303 spin_lock(&kvm->mmu_lock);
306 spin_unlock(&kvm->mmu_lock);
316 * free pte table page after mmu_lock
334 * The caller must hold the @kvm->mmu_lock spinlock.
359 * acquire @kvm->mmu_lock.
460 spin_lock(&kvm->mmu_lock);
464 spin_unlock(&kvm->mmu_lock);
576 spin_lock(&kvm->mmu_lock);
618 spin_unlock(&kvm->mmu_lock);
[all...]
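The loongarch kernel-doc at mmu.c:287 ("@lock: Whether to hold mmu_lock or not") describes a helper that can be called with the lock already held or not. A hedged sketch of what such a flag usually implies; the function name and body are assumptions:

#include <linux/kvm_host.h>

/* Hypothetical: the bool mirrors the "@lock" parameter documented above. */
static void example_flush_range(struct kvm *kvm, gfn_t start, gfn_t end, bool lock)
{
	if (lock)
		spin_lock(&kvm->mmu_lock);
	/* ... walk the GPA range and free/flush page-table pages ... */
	if (lock)
		spin_unlock(&kvm->mmu_lock);
}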
/linux-master/arch/mips/kvm/
mmu.c
265 * The caller must hold the @kvm->mmu_lock spinlock.
390 * The caller must hold the @kvm->mmu_lock spinlock.
412 * acquire @kvm->mmu_lock.
521 spin_lock(&kvm->mmu_lock);
556 spin_unlock(&kvm->mmu_lock);
622 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
638 spin_lock(&kvm->mmu_lock);
646 spin_unlock(&kvm->mmu_lock);
675 spin_unlock(&kvm->mmu_lock);
mips.c
198 spin_lock(&kvm->mmu_lock);
203 spin_unlock(&kvm->mmu_lock);
233 spin_lock(&kvm->mmu_lock);
239 spin_unlock(&kvm->mmu_lock);
/linux-master/drivers/accel/habanalabs/common/
command_buffer.c
41 mutex_lock(&hdev->mmu_lock);
53 mutex_unlock(&hdev->mmu_lock);
62 mutex_unlock(&hdev->mmu_lock);
72 mutex_lock(&hdev->mmu_lock);
75 mutex_unlock(&hdev->mmu_lock);
memory.c
1185 mutex_lock(&hdev->mmu_lock);
1191 mutex_unlock(&hdev->mmu_lock);
1197 mutex_unlock(&hdev->mmu_lock);
1352 mutex_lock(&hdev->mmu_lock);
1365 mutex_unlock(&hdev->mmu_lock);
2784 mutex_lock(&hdev->mmu_lock);
2790 mutex_unlock(&hdev->mmu_lock);
/linux-master/drivers/accel/habanalabs/common/mmu/
mmu.c
50 mutex_init(&hdev->mmu_lock);
94 mutex_destroy(&hdev->mmu_lock);
572 mutex_lock(&hdev->mmu_lock);
574 mutex_unlock(&hdev->mmu_lock);
677 mutex_lock(&hdev->mmu_lock);
681 mutex_unlock(&hdev->mmu_lock);
/linux-master/arch/powerpc/include/asm/
kvm_book3s_64.h
654 VM_WARN(!spin_is_locked(&kvm->mmu_lock),
655 "%s called with kvm mmu_lock not held \n", __func__);
666 VM_WARN(!spin_is_locked(&kvm->mmu_lock),
667 "%s called with kvm mmu_lock not held \n", __func__);
kvm_book3s.h
135 spinlock_t mmu_lock; member in struct:kvmppc_vcpu_book3s
/linux-master/include/linux/
kvm_host.h
739 rwlock_t mmu_lock; member in struct:kvm
741 spinlock_t mmu_lock;
2028 * than under kvm->mmu_lock, for scalability, so can't rely on
2029 * kvm->mmu_lock to keep things ordered.
2041 lockdep_assert_held(&kvm->mmu_lock);
2050 * Dropping mmu_lock after bumping mmu_invalidate_in_progress
2069 * call to the locked version after acquiring mmu_lock, i.e. this is safe to
2070 * use only as a pre-check to avoid contending mmu_lock. This version *will*
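The kvm_host.h comments around :2028-:2070 describe the mmu-notifier retry protocol: a fault handler samples the invalidate sequence count, does the slow work outside mmu_lock, then re-checks under the lock and retries if an invalidation ran in between; the unlocked check is only a cheap pre-filter. A hedged sketch of that fault-path shape (function and variable names are illustrative, rwlock flavour assumed):

#include <linux/kvm_host.h>

/* Hypothetical fault-path fragment showing the sample/recheck pattern. */
static int example_map_fault(struct kvm *kvm, gfn_t gfn)
{
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;

	/* Read the sequence count before the (possibly sleeping) translation. */
	smp_rmb();
	/* ... resolve gfn to a host pfn outside mmu_lock ... */

	write_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		write_unlock(&kvm->mmu_lock);
		return -EAGAIN;	/* an invalidation raced us; redo the fault */
	}
	/* ... install the mapping under mmu_lock ... */
	write_unlock(&kvm->mmu_lock);
	return 0;
}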
/linux-master/drivers/accel/habanalabs/gaudi2/
gaudi2.c
10750 mutex_lock(&hdev->mmu_lock);
10766 mutex_unlock(&hdev->mmu_lock);
10795 mutex_lock(&hdev->mmu_lock);
10805 mutex_unlock(&hdev->mmu_lock);
10857 mutex_lock(&hdev->mmu_lock);
10868 mutex_unlock(&hdev->mmu_lock);
10875 mutex_unlock(&hdev->mmu_lock);
10893 mutex_lock(&hdev->mmu_lock);
10897 mutex_unlock(&hdev->mmu_lock);
