/linux-master/virt/kvm/

kvm_mm.h
      8:  * for the mmu_lock. These macros, for use in common code
     14: #define KVM_MMU_LOCK_INIT(kvm)   rwlock_init(&(kvm)->mmu_lock)
     15: #define KVM_MMU_LOCK(kvm)        write_lock(&(kvm)->mmu_lock)
     16: #define KVM_MMU_UNLOCK(kvm)      write_unlock(&(kvm)->mmu_lock)
     18: #define KVM_MMU_LOCK_INIT(kvm)   spin_lock_init(&(kvm)->mmu_lock)
     19: #define KVM_MMU_LOCK(kvm)        spin_lock(&(kvm)->mmu_lock)
     20: #define KVM_MMU_UNLOCK(kvm)      spin_unlock(&(kvm)->mmu_lock)

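The same three macro names are defined twice above (lines 14-16 against lines 18-20), which only makes sense as the two branches of a preprocessor conditional: each architecture chooses whether mmu_lock is an rwlock or a spinlock, and common code goes through the macros so it never has to know which. A minimal sketch of that structure, assuming the guard symbol is KVM_HAVE_MMU_RWLOCK as in the upstream header:

#ifdef KVM_HAVE_MMU_RWLOCK
/* Architectures that want read-side parallelism (x86 and arm64 below). */
#define KVM_MMU_LOCK_INIT(kvm)   rwlock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)        write_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)      write_unlock(&(kvm)->mmu_lock)
#else
/* Everyone else keeps the plain spinlock (powerpc, riscv, loongarch below). */
#define KVM_MMU_LOCK_INIT(kvm)   spin_lock_init(&(kvm)->mmu_lock)
#define KVM_MMU_LOCK(kvm)        spin_lock(&(kvm)->mmu_lock)
#define KVM_MMU_UNLOCK(kvm)      spin_unlock(&(kvm)->mmu_lock)
#endif

Note that the common-code KVM_MMU_LOCK() maps to write_lock() in the rwlock case: only architecture-specific code that knows it is safe (e.g. the x86 TDP MMU) takes the lock for read.
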
kvm_main.c
    386:  * mmu_lock. The interaction between the various operations on memslot
    752: lockdep_assert_held_write(&kvm->mmu_lock);
    755:  * spte can be established without taking the mmu_lock and
    756:  * count is also read inside the mmu_lock critical section.
    768: lockdep_assert_held_write(&kvm->mmu_lock);
    827:  * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
    831:  * Because this runs without holding mmu_lock, the pfn caches must use
    840:  * dropping mmu_lock, as x86's reclaim path is slooooow.
    850: lockdep_assert_held_write(&kvm->mmu_lock);
    [all...]

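The lockdep_assert_held_write() hits at lines 752, 768 and 850 show the common-code convention: a helper that mutates state guarded by mmu_lock asserts the lock on entry instead of relying on a comment alone, so a mis-locked caller fires a lockdep splat immediately rather than corrupting state intermittently. A hedged sketch of the pattern; the helper name is hypothetical, only the locking idiom matches kvm_main.c:

#include <linux/kvm_host.h>
#include <linux/lockdep.h>

static void example_update_mmu_state(struct kvm *kvm)
{
        /* Catch callers that forgot to take mmu_lock for write. */
        lockdep_assert_held_write(&kvm->mmu_lock);

        /* ... mutate data that mmu_lock protects ... */
}

With CONFIG_LOCKDEP disabled the assertion compiles away, so the check costs nothing in production builds.
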
/linux-master/arch/x86/kvm/vmx/

vmx.c
   6809: read_lock(&vcpu->kvm->mmu_lock);
   6812: read_unlock(&vcpu->kvm->mmu_lock);
   6817: read_unlock(&vcpu->kvm->mmu_lock);

/linux-master/arch/x86/kvm/mmu/

tdp_mmu.c
     26: lockdep_assert_held_read(&kvm->mmu_lock);
     28: lockdep_assert_held_write(&kvm->mmu_lock);
    112:  * role.invalid are protected by mmu_lock.
    114: lockdep_assert_held(&kvm->mmu_lock);
    154: ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
    164: ({ lockdep_assert_held(&(_kvm)->mmu_lock); }), _root; \
    168:  * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
    169:  * the implication being that any flow that holds mmu_lock for read is
    171:  * Holding mmu_lock for write obviates the need for RCU protection as the list
    240: read_lock(&kvm->mmu_lock);
    [all...]

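The comments at lines 168-171 describe two legal disciplines for walking the TDP MMU root list: hold mmu_lock for write and walk the list plainly, or hold it for read and additionally rely on RCU, because other readers may be unlinking invalidated roots concurrently. A condensed sketch of the two disciplines, written as if it sat inside arch/x86/kvm/mmu/tdp_mmu.c; the tdp_mmu_roots list head and the struct kvm_mmu_page link member match the upstream x86 names, but the walker functions are hypothetical, not the upstream for_each_tdp_mmu_root() macros:

#include <linux/list.h>
#include <linux/rcupdate.h>

static void example_walk_roots_as_writer(struct kvm *kvm)
{
        struct kvm_mmu_page *root;

        lockdep_assert_held_write(&kvm->mmu_lock);
        list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
                /* Exclusive access: the list cannot change under us. */
        }
}

static void example_walk_roots_as_reader(struct kvm *kvm)
{
        struct kvm_mmu_page *root;

        lockdep_assert_held_read(&kvm->mmu_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
                /* Other readers may zap roots; RCU keeps ours alive. */
        }
        rcu_read_unlock();
}

The upstream iterators at lines 154/164 additionally handle root refcounts and invalid roots; the sketch omits that.
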
mmu.c
   2119: if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
   2126: cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
   2576: lockdep_assert_held_write(&kvm->mmu_lock);
   2743: write_lock(&kvm->mmu_lock);
   2754: write_unlock(&kvm->mmu_lock);
   2764: write_lock(&kvm->mmu_lock);
   2770: write_unlock(&kvm->mmu_lock);
   2837:  * run with mmu_lock held for read, not write, and the unsync
   2840:  * no meaningful penalty if mmu_lock is held for write.
   2850:  * possible as clearing sp->unsync _must_ hold mmu_lock
   [all...]

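Lines 2119 and 2126 are two halves of one idiom: a long walk under the write lock polls for a pending reschedule or a contending locker and yields at a safe point, bounding the latency it can inflict on vCPUs that fault and need mmu_lock. A self-contained sketch of that shape; process_one_entry() is a hypothetical stand-in for the real per-iteration work:

#include <linux/kvm_host.h>
#include <linux/sched.h>

static bool process_one_entry(struct kvm *kvm);  /* hypothetical work item */

static void example_long_walk(struct kvm *kvm)
{
        write_lock(&kvm->mmu_lock);
        while (process_one_entry(kvm)) {
                /*
                 * Drop and retake the lock if someone needs the CPU or the
                 * lock; anything cached across this point must be
                 * revalidated afterwards.
                 */
                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
                        cond_resched_rwlock_write(&kvm->mmu_lock);
        }
        write_unlock(&kvm->mmu_lock);
}
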
page_track.c
     93: lockdep_assert_held_write(&kvm->mmu_lock);
    116: lockdep_assert_held_write(&kvm->mmu_lock);
    236: write_lock(&kvm->mmu_lock);
    238: write_unlock(&kvm->mmu_lock);
    254: write_lock(&kvm->mmu_lock);
    256: write_unlock(&kvm->mmu_lock);
    332: write_lock(&kvm->mmu_lock);
    334: write_unlock(&kvm->mmu_lock);
    362: write_lock(&kvm->mmu_lock);
    364: write_unlock(&kvm->mmu_lock);
    [all...]

paging_tmpl.h
    839: write_lock(&vcpu->kvm->mmu_lock);
    850: write_unlock(&vcpu->kvm->mmu_lock);

/linux-master/arch/arm64/kvm/

mmu.c
     76: cond_resched_rwlock_write(&kvm->mmu_lock);
    105: if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
    122: lockdep_assert_held_write(&kvm->mmu_lock);
    134: write_unlock(&kvm->mmu_lock);
    140: write_lock(&kvm->mmu_lock);
    315:  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
    325: lockdep_assert_held_write(&kvm->mmu_lock);
    359: write_lock(&kvm->mmu_lock);
    365: write_unlock(&kvm->mmu_lock);
   1000: write_lock(&kvm->mmu_lock);
    [all...]

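The unlock-then-lock ordering at lines 134/140 (write_unlock before write_lock) is the inverse of a normal critical section and marks the drop-and-reacquire idiom: mmu_lock may not be held across a sleeping operation such as a page-table allocation, so the path drops it, sleeps, retakes it, and revalidates. A sketch; the memory-cache call is assumed from KVM's common API, the wrapper is illustrative:

#include <linux/kvm_host.h>

static int example_topup_outside_lock(struct kvm *kvm,
                                      struct kvm_mmu_memory_cache *cache)
{
        int ret;

        write_unlock(&kvm->mmu_lock);
        ret = kvm_mmu_topup_memory_cache(cache, 1);     /* may sleep */
        write_lock(&kvm->mmu_lock);

        /*
         * The world may have changed while the lock was dropped; the
         * caller must recheck (e.g. via the mmu_invalidate sequence
         * count) before consuming anything computed earlier.
         */
        return ret;
}
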
/linux-master/include/linux/

kvm_host.h
    739: rwlock_t mmu_lock;           (member in struct kvm)
    741: spinlock_t mmu_lock;
   2028:  * than under kvm->mmu_lock, for scalability, so can't rely on
   2029:  * kvm->mmu_lock to keep things ordered.
   2041: lockdep_assert_held(&kvm->mmu_lock);
   2050:  * Dropping mmu_lock after bumping mmu_invalidate_in_progress
   2069:  * call to the locked version after acquiring mmu_lock, i.e. this is safe to
   2070:  * use only as a pre-check to avoid contending mmu_lock. This version *will*

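The comment fragments at lines 2028-2070 describe the mmu-notifier invalidation protocol: the invalidation counters are updated outside mmu_lock for scalability, a fault handler samples a sequence count before resolving the host pfn (which can sleep, so no mmu_lock), and then, under mmu_lock, retries if an invalidation raced with it; an unlocked variant exists purely as a pre-check to avoid contending the lock. A condensed sketch of a consumer, assuming an rwlock configuration such as x86; mmu_invalidate_retry() and kvm->mmu_invalidate_seq are the upstream names, the fault function itself is hypothetical:

#include <linux/kvm_host.h>

static int example_handle_fault(struct kvm *kvm, gfn_t gfn)
{
        unsigned long mmu_seq;

        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();      /* sample the count before resolving the pfn */

        /* ... resolve the host pfn here, without mmu_lock; may sleep ... */

        write_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                write_unlock(&kvm->mmu_lock);
                return -EAGAIN; /* an invalidation ran; redo the fault */
        }
        /* ... safe to install the translation ... */
        write_unlock(&kvm->mmu_lock);
        return 0;
}
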
/linux-master/arch/x86/kvm/

debugfs.c
    112: write_lock(&kvm->mmu_lock);
    132: write_unlock(&kvm->mmu_lock);

/linux-master/arch/powerpc/kvm/

book3s_hv.c
   5402: spin_lock(&kvm->mmu_lock);
   5404: spin_unlock(&kvm->mmu_lock);
   5430: spin_lock(&kvm->mmu_lock);
   5432: spin_unlock(&kvm->mmu_lock);

book3s_hv_nested.c
    758:  * so we don't need to hold kvm->mmu_lock.
    775: spin_lock(&kvm->mmu_lock);
    781: spin_unlock(&kvm->mmu_lock);
    800: spin_lock(&kvm->mmu_lock);
    810: spin_unlock(&kvm->mmu_lock);
    827: spin_lock(&kvm->mmu_lock);
    829: spin_unlock(&kvm->mmu_lock);
    844: spin_lock(&kvm->mmu_lock);
    848: spin_unlock(&kvm->mmu_lock);
    862: spin_lock(&kvm->mmu_lock);
    [all...]

book3s_64_mmu_radix.c
    422: /* Called with kvm->mmu_lock held */
    647: spin_lock(&kvm->mmu_lock);
    783: spin_unlock(&kvm->mmu_lock);
    871: spin_lock(&kvm->mmu_lock);
    876: spin_unlock(&kvm->mmu_lock);
   1011: spin_lock(&kvm->mmu_lock);
   1015: spin_unlock(&kvm->mmu_lock);
   1031: /* Called with kvm->mmu_lock held */
   1050: /* Called with kvm->mmu_lock held */
   1078: /* Called with kvm->mmu_lock held */
    [all...]

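The radix code records its locking contract as comments ("Called with kvm->mmu_lock held", lines 422, 1031, 1050, 1078), where the common KVM code seen earlier prefers lockdep assertions. The two styles can be combined so the comment is also checked at runtime; a hypothetical example in the Book3S HV configuration, where mmu_lock is a spinlock_t:

#include <linux/kvm_host.h>
#include <linux/lockdep.h>

/* Called with kvm->mmu_lock held */
static void example_radix_walk(struct kvm *kvm)
{
        /* Enforce the comment above on CONFIG_LOCKDEP kernels. */
        lockdep_assert_held(&kvm->mmu_lock);

        /* ... walk or modify the partition-scoped radix tree ... */
}
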
book3s_hv_rm_mmu.c
    248: arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
    263: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
    277: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
    938: arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
    950: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
    966: arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
    981: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

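book3s_hv_rm_mmu.c is unusual in this listing: it never calls spin_lock() on mmu_lock. These HPTE handlers run in real mode, where the ordinary spinlock wrapper (lockdep, debug checks, tracing hooks) is unsafe, so they reach through spinlock_t's layers (.rlock is the embedded raw_spinlock, .raw_lock the arch lock) and take the bare arch_spin_lock(). A sketch of the idiom with an illustrative helper name:

#include <linux/kvm_host.h>
#include <linux/spinlock.h>

static void example_rm_critical_section(struct kvm *kvm)
{
        /* Real-mode safe: no lockdep, no tracing, just the hardware lock. */
        arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);

        /* ... manipulate HPTEs with translation off ... */

        arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
}

This only type-checks where mmu_lock is a spinlock_t, which is one practical reason powerpc stays on the spinlock side of the kvm_mm.h switch shown at the top.
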
book3s_64_mmu_hv.c
    632: spin_lock(&kvm->mmu_lock);
    637: spin_unlock(&kvm->mmu_lock);
    766: spin_lock(&kvm->mmu_lock);
    773: spin_unlock(&kvm->mmu_lock);
   1406: spin_lock(&kvm->mmu_lock);
   1413: spin_unlock(&kvm->mmu_lock);

/linux-master/drivers/accel/habanalabs/gaudi2/

gaudi2.c
  10750: mutex_lock(&hdev->mmu_lock);
  10766: mutex_unlock(&hdev->mmu_lock);
  10795: mutex_lock(&hdev->mmu_lock);
  10805: mutex_unlock(&hdev->mmu_lock);
  10857: mutex_lock(&hdev->mmu_lock);
  10868: mutex_unlock(&hdev->mmu_lock);
  10875: mutex_unlock(&hdev->mmu_lock);
  10893: mutex_lock(&hdev->mmu_lock);
  10897: mutex_unlock(&hdev->mmu_lock);

/linux-master/drivers/accel/habanalabs/gaudi/

gaudi.c
   8342: mutex_lock(&hdev->mmu_lock);
   8354: mutex_unlock(&hdev->mmu_lock);
   8362: mutex_unlock(&hdev->mmu_lock);
   8382: mutex_lock(&hdev->mmu_lock);
   8388: mutex_unlock(&hdev->mmu_lock);

/linux-master/drivers/accel/habanalabs/common/mmu/

mmu.c
     50: mutex_init(&hdev->mmu_lock);
     94: mutex_destroy(&hdev->mmu_lock);
    572: mutex_lock(&hdev->mmu_lock);
    574: mutex_unlock(&hdev->mmu_lock);
    677: mutex_lock(&hdev->mmu_lock);
    681: mutex_unlock(&hdev->mmu_lock);

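The habanalabs hits differ from all the KVM ones: this mmu_lock is a mutex, created once per device and destroyed at teardown (lines 50/94), since mapping on this accelerator can sleep while driving the invalidation hardware. A condensed sketch of a map call under the lock; struct hl_device, hdev->kernel_ctx and hl_mmu_map_page() are the upstream names as best understood, the wrapper and its error handling are illustrative:

#include <linux/mutex.h>

static int example_device_map(struct hl_device *hdev, u64 virt_addr,
                              u64 phys_addr, u32 page_size)
{
        int rc;

        mutex_lock(&hdev->mmu_lock);
        /* Update the device page tables and flush the written PTEs. */
        rc = hl_mmu_map_page(hdev->kernel_ctx, virt_addr, phys_addr,
                             page_size, true);
        mutex_unlock(&hdev->mmu_lock);

        return rc;
}
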
/linux-master/drivers/accel/habanalabs/common/

habanalabs.h
   3228:  * @mmu_lock: protects the MMU page tables and invalidation h/w. Although the
   3412: struct mutex mmu_lock;       (member in struct hl_device)

memory.c
   1185: mutex_lock(&hdev->mmu_lock);
   1191: mutex_unlock(&hdev->mmu_lock);
   1197: mutex_unlock(&hdev->mmu_lock);
   1352: mutex_lock(&hdev->mmu_lock);
   1365: mutex_unlock(&hdev->mmu_lock);
   2784: mutex_lock(&hdev->mmu_lock);
   2790: mutex_unlock(&hdev->mmu_lock);

command_buffer.c
     41: mutex_lock(&hdev->mmu_lock);
     53: mutex_unlock(&hdev->mmu_lock);
     62: mutex_unlock(&hdev->mmu_lock);
     72: mutex_lock(&hdev->mmu_lock);
     75: mutex_unlock(&hdev->mmu_lock);

/linux-master/arch/loongarch/kvm/

mmu.c
    287:  * @lock: Whether to hold mmu_lock or not
    303: spin_lock(&kvm->mmu_lock);
    306: spin_unlock(&kvm->mmu_lock);
    316:  * free pte table page after mmu_lock
    334:  * The caller must hold the @kvm->mmu_lock spinlock.
    359:  * acquire @kvm->mmu_lock.
    460: spin_lock(&kvm->mmu_lock);
    464: spin_unlock(&kvm->mmu_lock);
    576: spin_lock(&kvm->mmu_lock);
    618: spin_unlock(&kvm->mmu_lock);
    [all...]

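Line 287 documents a "@lock" parameter: some callers of this walker already hold mmu_lock, others do not, so the function takes the lock conditionally. A hedged sketch of that shape; everything except mmu_lock is illustrative:

#include <linux/kvm_host.h>

static void example_walk(struct kvm *kvm, gfn_t start, gfn_t end, bool lock)
{
        if (lock)
                spin_lock(&kvm->mmu_lock);
        else
                lockdep_assert_held(&kvm->mmu_lock);    /* trust, but verify */

        /* ... walk the [start, end) range ... */

        if (lock)
                spin_unlock(&kvm->mmu_lock);
}
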
/linux-master/arch/riscv/kvm/

mmu.c
    297:  * If the range is too large, release the kvm->mmu_lock
    301: cond_resched_lock(&kvm->mmu_lock);
    340: spin_lock(&kvm->mmu_lock);
    342: spin_unlock(&kvm->mmu_lock);
    372: spin_lock(&kvm->mmu_lock);
    374: spin_unlock(&kvm->mmu_lock);
    388: spin_lock(&kvm->mmu_lock);
    390: spin_unlock(&kvm->mmu_lock);
    428: spin_lock(&kvm->mmu_lock);
    430: spin_unlock(&kvm->mmu_lock);
    [all...]

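Lines 297-301 are the spinlock counterpart of the rwlock lock-break in the x86 and arm64 hits: when tearing down a large range, the loop periodically releases mmu_lock via cond_resched_lock() so vCPUs and other lockers are not starved. A sketch; the per-page work is a stand-in:

#include <linux/kvm_host.h>
#include <linux/sched.h>

static void example_unmap_range(struct kvm *kvm, gfn_t gfn, gfn_t end)
{
        spin_lock(&kvm->mmu_lock);
        for (; gfn < end; gfn++) {
                /* ... unmap one page ... */

                /* If the range is large, give others a turn at the lock. */
                if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                        cond_resched_lock(&kvm->mmu_lock);
        }
        spin_unlock(&kvm->mmu_lock);
}
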
/linux-master/arch/powerpc/include/asm/

kvm_book3s_64.h
    654: VM_WARN(!spin_is_locked(&kvm->mmu_lock),
    655:         "%s called with kvm mmu_lock not held \n", __func__);
    666: VM_WARN(!spin_is_locked(&kvm->mmu_lock),
    667:         "%s called with kvm mmu_lock not held \n", __func__);

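These Book3S helpers check the caller's locking with VM_WARN(!spin_is_locked(...)) rather than lockdep. The trade-off: spin_is_locked() works on kernels built without CONFIG_LOCKDEP, but it only proves that somebody holds the lock, not that the current context does, whereas lockdep_assert_held() checks the current task. Reconstructed as a standalone helper, format string kept verbatim:

#include <linux/kvm_host.h>
#include <linux/mmdebug.h>

static inline void example_assert_mmu_lock(struct kvm *kvm)
{
        VM_WARN(!spin_is_locked(&kvm->mmu_lock),
                "%s called with kvm mmu_lock not held \n", __func__);
}
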
kvm_book3s.h
    135: spinlock_t mmu_lock;         (member in struct kvmppc_vcpu_book3s)