Lines Matching defs:access

293 			   unsigned int access)
295 u64 spte = make_mmio_spte(vcpu, gfn, access);
628 * lost when the SPTE is marked for access tracking.
729 * For leaf SPTEs, fetch the *guest* access permissions being shadowed. Note
730 * that the SPTE itself may have more constrained access permissions than
741 * KVM is not shadowing any guest page tables, so the "guest access
745 * is shadowing a guest huge page with small pages, the guest access
746 * permissions being shadowed are the access permissions of the huge
749 * In both cases, sp->role.access contains the correct access bits.
751 return sp->role.access;
755 gfn_t gfn, unsigned int access)
758 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
762 WARN_ONCE(access != kvm_mmu_page_get_access(sp, index),
763 "access mismatch under %s page %llx (expected %u, got %u)\n",
765 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
774 unsigned int access)
778 kvm_mmu_page_set_translation(sp, index, gfn, access);
1644 u64 *spte, gfn_t gfn, unsigned int access)
1651 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1666 u64 *spte, gfn_t gfn, unsigned int access)
1670 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1928 * - access: updated based on the new guest PTE
1933 .access = 0x7,
2308 unsigned int access)
2315 role.access = access;
2355 bool direct, unsigned int access)
2362 role = kvm_mmu_child_role(sptep, direct, access);
2479 * sp's access: allow writable in the read-only sp,
2481 * a new sp with the correct access.
2484 if (child->role.access == direct_access)
2983 unsigned int access = sp->role.access;
2988 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2997 mmu_set_spte(vcpu, slot, start, access, gfn,
3297 * into the spte otherwise read access on readonly gfn also can
3298 * cause an mmio page fault and be treated as an mmio access.
3313 unsigned int access)
3318 access & shadow_mmio_access_mask);
3357 * caused by access tracking (if enabled). If A/D bits are enabled
3359 * bits for L2 and employ access tracking, but the fast page fault
3361 * 2. The shadow page table entry is present, the access is a write,
3414 /* Fault was on Read access */
3481 * Check whether the memory access that caused the fault would
3487 * Need not check the access of upper level table entries since
3501 * enabled, the SPTE can't be an access-tracked SPTE.
3509 * that were write-protected for dirty-logging or access
3514 * shadow-present, i.e. except for access tracking restoration
3528 * normal spte to fix the access.
3826 * On SVM, reading PDPTRs might access guest memory, which might fault
4089 gpa_t vaddr, u64 access,
4094 return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
4201 unsigned int access = get_mmio_spte_access(spte);
4209 trace_handle_mmio_page_fault(addr, gfn, access);
4210 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4366 * If the APIC access page exists but is disabled, go directly
4367 * to emulation without caching the MMIO access or creating a
4414 unsigned int access)
4454 return kvm_handle_noslot_fault(vcpu, fault, access);
4792 * The last MMIO access's GVA and GPA are cached in the VCPU. When
4813 unsigned int access)
4821 mark_mmio_spte(vcpu, sptep, gfn, access);
5082 #define BYTE_MASK(access) \
5083 ((1 & (access) ? 2 : 0) | \
5084 (2 & (access) ? 4 : 0) | \
5085 (3 & (access) ? 8 : 0) | \
5086 (4 & (access) ? 16 : 0) | \
5087 (5 & (access) ? 32 : 0) | \
5088 (6 & (access) ? 64 : 0) | \
5089 (7 & (access) ? 128 : 0))
5147 * - The access is not a fetch
5148 * - The access is supervisor mode
5149 * - If implicit supervisor access or X86_EFLAGS_AC is clear
5153 * PFERR_RSVD_MASK bit will be set in PFEC if the access is
5165 * PKU is an additional mechanism by which the paging controls access to
5178 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
5213 * Only need to check the access which is not an
5218 * write access is controlled by PKRU if it is a
5219 * user access or CR0.WP = 1.
5223 /* PKRU.AD stops both read and write access. */
5225 /* PKRU.WD stops write access. */
5262 role.base.access = ACC_ALL;
5331 role.access = ACC_ALL;
5462 role.base.access = ACC_ALL;
5851 * checks when emulating instructions that triggers implicit access.
6495 unsigned int access;
6499 access = kvm_mmu_page_get_access(huge_sp, spte_index(huge_sptep));
6507 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6538 * page is aliased by multiple sptes with the same access
6557 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);