Lines Matching refs:mmu_lock

2119 		if (need_resched() || rwlock_needbreak(&vcpu->kvm->mmu_lock)) {
2126 cond_resched_rwlock_write(&vcpu->kvm->mmu_lock);
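
The hits at 2119/2126 are the standard yield-under-contention idiom: long-running work done with mmu_lock held for write periodically checks need_resched() and rwlock_needbreak(), and calls cond_resched_rwlock_write(), which drops the lock, reschedules if necessary, and re-acquires it. A minimal sketch of that idiom, not the actual mmu.c code (zap_one_gfn() is a hypothetical helper):

#include <linux/kvm_host.h>
#include <linux/sched.h>

static void zap_range_yielding(struct kvm *kvm, gfn_t start, gfn_t end)
{
	gfn_t gfn;

	lockdep_assert_held_write(&kvm->mmu_lock);

	for (gfn = start; gfn < end; gfn++) {
		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			/*
			 * Drops mmu_lock, yields the CPU if needed, then
			 * re-acquires the lock.  Anything read from
			 * lock-protected structures before this point must
			 * be revalidated afterwards.
			 */
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}

		zap_one_gfn(kvm, gfn);		/* hypothetical helper */
	}
}
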
2576 lockdep_assert_held_write(&kvm->mmu_lock);
2743 write_lock(&kvm->mmu_lock);
2754 write_unlock(&kvm->mmu_lock);
2764 write_lock(&kvm->mmu_lock);
2770 write_unlock(&kvm->mmu_lock);
2837 * run with mmu_lock held for read, not write, and the unsync
2840 * no meaningful penalty if mmu_lock is held for write.
2850 * possible as clearing sp->unsync _must_ hold mmu_lock
2852 * while this CPU holds mmu_lock for read (or write).
3066 * consuming it. In this case, mmu_lock doesn't need to be held during the
3069 * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation
3075 * not required to hold mmu_lock (though it's highly likely the caller will
3076 * want to hold mmu_lock anyways, e.g. to modify SPTEs).
3207 * mmu_invalidate_retry() was successful and mmu_lock is held, so
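
The comment hits at 3066-3076 and 3207 spell out the rule for consuming a host pfn: either hold mmu_lock and confirm no MMU-notifier invalidation has run or is in progress since the pfn was resolved, or otherwise guarantee the page can't be freed. A hedged sketch of the first option, assuming the caller snapshotted kvm->mmu_invalidate_seq before looking up the pfn (install_spte() is a hypothetical helper, not the mmu.c fault path):

#include <linux/kvm_host.h>

static int map_pfn_checked(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn,
			   unsigned long mmu_seq)
{
	struct kvm *kvm = vcpu->kvm;
	int r = -EAGAIN;

	write_lock(&kvm->mmu_lock);

	/*
	 * The pfn is stale if an invalidation started or completed after
	 * mmu_seq was snapshotted; bail out and let the fault be retried.
	 */
	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	r = install_spte(vcpu, gfn, pfn);	/* hypothetical helper */

out_unlock:
	write_unlock(&kvm->mmu_lock);
	return r;
}
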
3365 * by setting the Writable bit, which can be done out of mmu_lock.
3508 * be made fully writable outside of mmu_lock, e.g. only SPTEs
3579 lockdep_assert_held_read(&kvm->mmu_lock);
3582 lockdep_assert_held_write(&kvm->mmu_lock);
3618 read_lock(&kvm->mmu_lock);
3620 write_lock(&kvm->mmu_lock);
3647 read_unlock(&kvm->mmu_lock);
3651 write_unlock(&kvm->mmu_lock);
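
The lockdep asserts at 3579/3582 and the paired read/write lock sites at 3618/3620 and 3647/3651 belong to a path that can run under either lock mode. A sketch of that dual-mode shape, assuming (as is typical for the TDP MMU vs. the shadow MMU) that the caller knows which mode suffices; use_read_lock and do_root_work() are stand-ins, not the real check or helper:

#include <linux/kvm_host.h>

static int load_root(struct kvm *kvm, bool use_read_lock)
{
	int r;

	if (use_read_lock)
		read_lock(&kvm->mmu_lock);
	else
		write_lock(&kvm->mmu_lock);

	/*
	 * The callee asserts the mode it needs, mirroring the
	 * lockdep_assert_held_read()/_write() pair at 3579/3582.
	 */
	r = do_root_work(kvm);			/* hypothetical helper */

	if (use_read_lock)
		read_unlock(&kvm->mmu_lock);
	else
		write_unlock(&kvm->mmu_lock);

	return r;
}
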
3712 write_lock(&vcpu->kvm->mmu_lock);
3744 write_unlock(&vcpu->kvm->mmu_lock);
3827 * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
3844 write_lock(&vcpu->kvm->mmu_lock);
3922 write_unlock(&vcpu->kvm->mmu_lock);
4055 write_lock(&vcpu->kvm->mmu_lock);
4057 write_unlock(&vcpu->kvm->mmu_lock);
4061 write_lock(&vcpu->kvm->mmu_lock);
4072 write_unlock(&vcpu->kvm->mmu_lock);
4423 * the pfn from the primary MMU, and before acquiring mmu_lock.
4425 * For mmu_lock, if there is an in-progress invalidation and the kernel
4426 * allows preemption, the invalidation task may drop mmu_lock and yield
4427 * in response to mmu_lock being contended, which is *very* counter-
4438 * will never yield mmu_lock in response to contention, as this vCPU is
4439 * *guaranteed* to need to retry, i.e. waiting until mmu_lock is held
4458 * avoid contending mmu_lock. Most invalidations will be detected by
4461 * mmu_lock is acquired.
4497 * now that mmu_lock is held, as the "unsafe" checks performed without
4498 * holding mmu_lock can get false negatives.
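
The long comment at 4423-4461, plus the follow-up at 4497/4498, describes a two-stage check: a lockless ("unsafe") look at any in-progress invalidation before taking mmu_lock, so a vCPU that is guaranteed to retry doesn't contend the lock and goad the invalidation task into yielding, followed by the authoritative mmu_invalidate_retry() check once the lock is held, because the lockless version can return false negatives. A sketch of the pre-check half only; gfn_in_active_invalidation() is a hypothetical stand-in for the real lockless test:

#include <linux/kvm_host.h>

static bool should_retry_before_locking(struct kvm *kvm, gfn_t gfn,
					unsigned long mmu_seq)
{
	/*
	 * Unlocked reads: good enough to skip a doomed lock acquisition,
	 * never good enough to skip the recheck under mmu_lock.
	 */
	if (mmu_seq != kvm->mmu_invalidate_seq)
		return true;

	return gfn_in_active_invalidation(kvm, gfn);	/* hypothetical */
}
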
4528 write_lock(&vcpu->kvm->mmu_lock);
4540 write_unlock(&vcpu->kvm->mmu_lock);
4608 read_lock(&vcpu->kvm->mmu_lock);
4616 read_unlock(&vcpu->kvm->mmu_lock);
5811 write_lock(&vcpu->kvm->mmu_lock);
5840 write_unlock(&vcpu->kvm->mmu_lock);
5933 write_lock(&vcpu->kvm->mmu_lock);
5949 write_unlock(&vcpu->kvm->mmu_lock);
6065 lockdep_assert_held_write(&kvm->mmu_lock);
6072 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
6078 cond_resched_rwlock_write(&kvm->mmu_lock);
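
The cluster at 6065-6078 repeats the yield idiom from 2119/2126, but inside a memslot walk that may have a TLB flush pending. The usual shape, sketched below on the assumption that the walk accumulates a "flush" flag, is to perform any pending remote flush before the lock can be dropped, so other CPUs can't keep using zapped translations while the walk is paused (walk_one_chunk() is a hypothetical helper):

#include <linux/kvm_host.h>
#include <linux/sched.h>

static void walk_slot_yielding(struct kvm *kvm,
			       const struct kvm_memory_slot *slot)
{
	gfn_t gfn, end = slot->base_gfn + slot->npages;
	bool flush = false;

	lockdep_assert_held_write(&kvm->mmu_lock);

	for (gfn = slot->base_gfn; gfn < end; gfn++) {
		flush |= walk_one_chunk(kvm, slot, gfn);  /* hypothetical */

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			/* Flush before mmu_lock can be dropped. */
			if (flush) {
				kvm_flush_remote_tlbs(kvm);
				flush = false;
			}
			cond_resched_rwlock_write(&kvm->mmu_lock);
		}
	}

	if (flush)
		kvm_flush_remote_tlbs(kvm);
}
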
6227 cond_resched_rwlock_write(&kvm->mmu_lock)) {
6265 write_lock(&kvm->mmu_lock);
6279 * invalidating TDP MMU roots must be done while holding mmu_lock for
6281 * e.g. before kvm_zap_obsolete_pages() could drop mmu_lock and yield.
6291 * Note: we need to do this under the protection of mmu_lock,
6298 write_unlock(&kvm->mmu_lock);
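
The hits at 6265-6298 are the "fast zap" on memslot changes: everything that must not race with a vCPU reusing an old root, including invalidating TDP MMU roots, is done while mmu_lock is held for write and before any code that might drop the lock and yield (the comment at 6279-6281 names kvm_zap_obsolete_pages() as the part that can yield). A sketch of that ordering; both helpers are hypothetical:

#include <linux/kvm_host.h>

static void fast_zap_all(struct kvm *kvm)
{
	write_lock(&kvm->mmu_lock);

	/*
	 * Mark every existing root obsolete/invalid under the write lock,
	 * before anything below gets a chance to drop mmu_lock.  Once
	 * marked, vCPUs reload rather than reuse the old roots.
	 */
	mark_all_roots_obsolete(kvm);		/* hypothetical */

	/* May drop mmu_lock and yield; the old roots are already dead. */
	zap_obsolete_roots(kvm);		/* hypothetical */

	write_unlock(&kvm->mmu_lock);
}
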
6393 write_lock(&kvm->mmu_lock);
6409 write_unlock(&kvm->mmu_lock);
6424 write_lock(&kvm->mmu_lock);
6427 write_unlock(&kvm->mmu_lock);
6431 read_lock(&kvm->mmu_lock);
6433 read_unlock(&kvm->mmu_lock);
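
The back-to-back pairs at 6424/6427 and 6431/6433 (repeated at 6697-6704 and 6770-6798) show the memslot-wide operations handling the two MMU flavours in sequence: the shadow-MMU rmap work runs with mmu_lock held for write, then the TDP MMU part runs with it held only for read, which is safe on the assumption (true for these paths) that TDP MMU SPTEs are updated atomically. A sketch of that split; the two helpers are hypothetical stand-ins:

#include <linux/kvm_host.h>

static void wrprot_memslot(struct kvm *kvm,
			   const struct kvm_memory_slot *slot,
			   bool have_tdp_mmu)
{
	write_lock(&kvm->mmu_lock);
	shadow_mmu_wrprot_slot(kvm, slot);	/* hypothetical */
	write_unlock(&kvm->mmu_lock);

	if (have_tdp_mmu) {
		read_lock(&kvm->mmu_lock);
		tdp_mmu_wrprot_slot(kvm, slot);	/* hypothetical */
		read_unlock(&kvm->mmu_lock);
	}
}
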
6444 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock))
6469 * capacity so that KVM doesn't have to drop mmu_lock to topup if KVM
6583 write_unlock(&kvm->mmu_lock);
6591 write_lock(&kvm->mmu_lock);
6666 /* Must be called with the mmu_lock held in write-mode. */
6697 write_lock(&kvm->mmu_lock);
6699 write_unlock(&kvm->mmu_lock);
6702 read_lock(&kvm->mmu_lock);
6704 read_unlock(&kvm->mmu_lock);
6770 write_lock(&kvm->mmu_lock);
6772 write_unlock(&kvm->mmu_lock);
6776 read_lock(&kvm->mmu_lock);
6778 read_unlock(&kvm->mmu_lock);
6786 write_lock(&kvm->mmu_lock);
6792 write_unlock(&kvm->mmu_lock);
6796 read_lock(&kvm->mmu_lock);
6798 read_unlock(&kvm->mmu_lock);
6817 write_lock(&kvm->mmu_lock);
6824 if (cond_resched_rwlock_write(&kvm->mmu_lock))
6833 write_unlock(&kvm->mmu_lock);
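
At 6824 the return value of cond_resched_rwlock_write() is checked: a non-zero return means mmu_lock was actually dropped and re-acquired, so any state derived while it was held is no longer trustworthy and the next pass must start from scratch. A sketch of that idiom (zap_one_candidate() is a hypothetical helper that picks and zaps a single page per call):

#include <linux/kvm_host.h>
#include <linux/sched.h>

static void recover_pages(struct kvm *kvm, unsigned long to_zap)
{
	write_lock(&kvm->mmu_lock);

	while (to_zap--) {
		if (!zap_one_candidate(kvm))	/* hypothetical */
			break;

		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
			/*
			 * Non-zero return: the lock was dropped, so nothing
			 * cached from lock-protected structures may be
			 * reused; re-look everything up on the next pass.
			 */
			if (cond_resched_rwlock_write(&kvm->mmu_lock))
				continue;
		}
	}

	write_unlock(&kvm->mmu_lock);
}
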
6894 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
6904 write_lock(&kvm->mmu_lock);
6915 write_unlock(&kvm->mmu_lock);
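
The comment at 6894 notes that n_used_mmu_pages is read without kvm->mmu_lock; the write lock at 6904-6915 is only taken once there is work to do. A hedged sketch of that check-then-lock idiom in general form, not the actual mmu.c function (free_one_page() is hypothetical, and the unlocked read is purely a heuristic):

#include <linux/kvm_host.h>

static void maybe_free_pages(struct kvm *kvm, unsigned long limit)
{
	/*
	 * Racy, unlocked read: only decides whether taking mmu_lock is
	 * worthwhile.  The authoritative count is re-read under the lock.
	 */
	if (READ_ONCE(kvm->arch.n_used_mmu_pages) <= limit)
		return;

	write_lock(&kvm->mmu_lock);

	while (kvm->arch.n_used_mmu_pages > limit) {
		if (!free_one_page(kvm))	/* hypothetical */
			break;
	}

	write_unlock(&kvm->mmu_lock);
}
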
7171 write_lock(&kvm->mmu_lock);
7236 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
7240 cond_resched_rwlock_write(&kvm->mmu_lock);
7250 write_unlock(&kvm->mmu_lock);
7375 lockdep_assert_held_write(&kvm->mmu_lock);