Lines Matching refs:role

218  * The MMU itself (with a valid role) is the single source of truth for the
219 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
565 int level = sptep_to_sp(sptep)->role.level;
719 if (sp->role.passthrough)
722 if (!sp->role.direct)
725 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
749 * In both cases, sp->role.access contains the correct access bits.
751 return sp->role.access;
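The fragment at 725 derives the guest frame number covered by a given SPTE index purely from the page's role.level. A minimal user-space sketch of that arithmetic, assuming KVM's SPTE_LEVEL_BITS of 9 (512 entries per table); the function and variable names are local to the sketch:

#include <stdint.h>
#include <stdio.h>

#define SPTE_LEVEL_BITS 9	/* 512 SPTEs per page table */

/* role.level 1 = 4KiB leaf table, 2 = 2MiB entries, 3 = 1GiB entries. */
static uint64_t sp_index_to_gfn(uint64_t sp_base_gfn, unsigned int level,
				unsigned int index)
{
	return sp_base_gfn + ((uint64_t)index << ((level - 1) * SPTE_LEVEL_BITS));
}

int main(void)
{
	/* Level-2 page based at gfn 0x40000: index 5 starts at 0x40000 + (5 << 9) = 0x40a00. */
	printf("0x%llx\n", (unsigned long long)sp_index_to_gfn(0x40000, 2, 5));
	return 0;
}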
764 sp->role.passthrough ? "passthrough" : "direct",
769 sp->role.passthrough ? "passthrough" : "direct",
835 slots = kvm_memslots_for_spte_role(kvm, sp->role);
839 if (sp->role.level > PG_LEVEL_4K)
883 slots = kvm_memslots_for_spte_role(kvm, sp->role);
885 if (sp->role.level > PG_LEVEL_4K)
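The kvm_memslots_for_spte_role() calls above (and again at 1111 and 7223) pass the whole role, but as far as I can tell only role.smm is consumed there: it picks the address space whose memslots the page's gfn lives in, roughly:

	slots = __kvm_memslots(kvm, sp->role.smm);	/* 0 = normal, 1 = SMM address space */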
1109 * information in sp->role.
1111 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1114 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1208 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1652 kvm_update_page_stats(kvm, sp->role.level, 1);
1654 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1661 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1744 if (!sp->role.direct)
1899 if (sp->role.direct)
1902 if (sp->role.passthrough)
1926 * - level: not part of the overall MMU role and will never match as the MMU's
1929 * - quadrant: not part of the overall MMU role (similar to level)
1940 * sync a shadow page for a different MMU context, e.g. if the role
1944 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1945 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
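The comments at 1926-1929 explain that level and quadrant are not part of the "overall" MMU role, so the sync check at 1944-1945 masks them out of the word comparison. A compact sketch of that idiom, using an invented, much smaller bitfield layout than the real union kvm_mmu_page_role:

#include <stdbool.h>
#include <stdint.h>

union page_role {
	uint32_t word;
	struct {
		uint32_t level:4;
		uint32_t quadrant:2;
		uint32_t direct:1;
		uint32_t smm:1;
		/* the real role carries many more paging-mode bits */
	};
};

static bool role_matches_for_sync(union page_role sp_role, union page_role root_role)
{
	/* Fields set to all-ones here never cause a mismatch. */
	union page_role ign = { .level = 0xf, .quadrant = 0x3 };

	return !((sp_role.word ^ root_role.word) & ~ign.word);
}

The sync_role_ign mask used at 1945 is built the same way, with the ignored fields set to all-ones.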
2013 if (sp->role.invalid)
2040 int level = sp->role.level;
2064 level = sp->role.level;
2149 * unsync, thus @vcpu can be NULL if @role.direct is true.
2155 union kvm_mmu_page_role role)
2168 if (sp->role.word != role.word) {
2178 if (role.level > PG_LEVEL_4K && sp->unsync)
2185 if (sp->role.direct)
2240 union kvm_mmu_page_role role)
2246 if (!role.direct)
2263 sp->role = role;
2271 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2276 union kvm_mmu_page_role role)
2284 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2287 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2296 union kvm_mmu_page_role role)
2304 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
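Lines 2168 and 2284-2287 show the role's central job: shadow pages are deduplicated on the (gfn, role.word) pair, and a new page is allocated only when no existing page has an identical role. A toy model of that find-or-create lookup (a flat array stands in for KVM's gfn-keyed hash list; the names are invented):

#include <stddef.h>
#include <stdint.h>

struct toy_shadow_page {
	uint64_t gfn;
	uint32_t role_word;
};

static struct toy_shadow_page *find_shadow_page(struct toy_shadow_page *pages,
						size_t nr, uint64_t gfn,
						uint32_t role_word)
{
	for (size_t i = 0; i < nr; i++) {
		if (pages[i].gfn == gfn && pages[i].role_word == role_word)
			return &pages[i];
	}
	/* No page shadows this gfn with this exact role: the caller allocates one. */
	return NULL;
}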
2311 union kvm_mmu_page_role role;
2313 role = parent_sp->role;
2314 role.level--;
2315 role.access = access;
2316 role.direct = direct;
2317 role.passthrough = 0;
2324 * requires extra bookkeeping in the role.
2328 * 1GiB of the address space. @role.quadrant encodes which quarter of
2333 * @role.quadrant encodes which half of the region they map.
2345 if (role.has_4_byte_gpte) {
2346 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2347 role.quadrant = spte_index(sptep) & 1;
2350 return role;
2357 union kvm_mmu_page_role role;
2362 role = kvm_mmu_child_role(sptep, direct, access);
2363 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
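Read together, the kvm_mmu_child_role() fragments at 2311-2347 amount to roughly the following: the child inherits the parent page's role, drops one paging level, takes the access bits being shadowed, clears passthrough, and, when the guest uses 4-byte (non-PAE) PTEs, records in role.quadrant which half of the 1024-entry guest table this 512-entry shadow page maps. A paraphrase over a reduced, invented bitfield, not the kernel's verbatim code:

#include <stdint.h>

union page_role {
	uint32_t word;
	struct {
		uint32_t level:4;
		uint32_t access:3;
		uint32_t direct:1;
		uint32_t passthrough:1;
		uint32_t has_4_byte_gpte:1;
		uint32_t quadrant:2;
	};
};

static union page_role child_role(union page_role parent, unsigned int parent_spte_index,
				  unsigned int access, int direct)
{
	union page_role role = parent;

	role.level--;
	role.access = access;
	role.direct = direct;
	role.passthrough = 0;

	/*
	 * With 4-byte gPTEs, two 512-entry shadow pages are needed per
	 * 1024-entry guest page table; the low bit of the parent SPTE
	 * index says which half this child maps (cf. 2345-2347).
	 */
	if (role.has_4_byte_gpte)
		role.quadrant = parent_spte_index & 1;

	return role;
}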
2484 if (child->role.access == direct_access)
2501 if (is_last_spte(pte, sp->role.level)) {
2513 child->role.guest_mode && !child->parent_ptes.val)
2553 if (parent->role.level == PG_LEVEL_4K)
2586 if (!sp->role.invalid && sp_has_gptes(sp))
2600 if (sp->role.invalid)
2623 sp->role.invalid = 1;
2663 WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2814 * i.e. this guards the role.level == 4K assertion below!
2858 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2911 int level = sp->role.level;
2983 unsigned int access = sp->role.access;
3011 WARN_ON_ONCE(!sp->role.direct);
3044 if (sp->role.level > PG_LEVEL_4K)
3477 if (!is_last_spte(spte, sp->role.level))
3530 if (sp->role.level > PG_LEVEL_4K &&
3583 if (!--sp->root_count && sp->role.invalid)
3675 if (!sp || sp->role.guest_mode)
3686 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3689 role.level = level;
3690 role.quadrant = quadrant;
3692 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3693 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3695 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
4670 union kvm_mmu_page_role role)
4677 if (!role.direct && pgd != root->pgd)
4684 return role.word == sp->role.word;
4688 * Find out if a previously cached root matching the new pgd/role is available,
4723 * Find out if a previously cached root matching the new pgd/role is available.
5260 union kvm_cpu_role role = {0};
5262 role.base.access = ACC_ALL;
5263 role.base.smm = is_smm(vcpu);
5264 role.base.guest_mode = is_guest_mode(vcpu);
5265 role.ext.valid = 1;
5268 role.base.direct = 1;
5269 return role;
5272 role.base.efer_nx = ____is_efer_nx(regs);
5273 role.base.cr0_wp = ____is_cr0_wp(regs);
5274 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5275 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5276 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5279 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5282 role.base.level = PT32E_ROOT_LEVEL;
5284 role.base.level = PT32_ROOT_LEVEL;
5286 role.ext.cr4_smep = ____is_cr4_smep(regs);
5287 role.ext.cr4_smap = ____is_cr4_smap(regs);
5288 role.ext.cr4_pse = ____is_cr4_pse(regs);
5291 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5292 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5293 role.ext.efer_lma = ____is_efer_lma(regs);
5294 return role;
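The level and gPTE-size assignments at 5276-5284 follow the standard x86 paging-mode decision: EFER.LMA selects 4- or 5-level long mode via CR4.LA57, otherwise CR4.PAE selects 3-level PAE, otherwise legacy 2-level paging with 4-byte gPTEs. A stand-alone sketch of just that part; the constants mirror KVM's values but, like the struct, are local to the sketch:

#include <stdbool.h>

enum {
	PT32_ROOT_LEVEL  = 2,	/* legacy 32-bit, 4-byte gPTEs */
	PT32E_ROOT_LEVEL = 3,	/* PAE */
	PT64_ROOT_4LEVEL = 4,
	PT64_ROOT_5LEVEL = 5,
};

struct guest_paging {
	bool efer_lma, cr4_pae, cr4_la57;
};

static int root_level(struct guest_paging g)
{
	if (g.efer_lma)
		return g.cr4_la57 ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
	return g.cr4_pae ? PT32E_ROOT_LEVEL : PT32_ROOT_LEVEL;
}

static bool has_4_byte_gpte(struct guest_paging g)
{
	/* Only legacy non-PAE paging uses 4-byte guest PTEs (cf. 5276). */
	return !g.cr4_pae;
}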
5329 union kvm_mmu_page_role role = {0};
5331 role.access = ACC_ALL;
5332 role.cr0_wp = true;
5333 role.efer_nx = true;
5334 role.smm = cpu_role.base.smm;
5335 role.guest_mode = cpu_role.base.guest_mode;
5336 role.ad_disabled = !kvm_ad_enabled();
5337 role.level = kvm_mmu_get_tdp_level(vcpu);
5338 role.direct = true;
5339 role.has_4_byte_gpte = false;
5341 return role;
5407 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5450 union kvm_cpu_role role = {0};
5454 * support the "entry to SMM" control either. role.base.smm is always 0.
5457 role.base.level = level;
5458 role.base.has_4_byte_gpte = false;
5459 role.base.direct = false;
5460 role.base.ad_disabled = !accessed_dirty;
5461 role.base.guest_mode = true;
5462 role.base.access = ACC_ALL;
5464 role.ext.word = 0;
5465 role.ext.execonly = execonly;
5466 role.ext.valid = 1;
5468 return role;
5731 if (sp->role.level == PG_LEVEL_4K)
5748 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5770 level = sp->role.level;
5772 if (sp->role.has_4_byte_gpte) {
5786 if (quadrant != sp->role.quadrant)
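A worked example for the checks at 5748 and 5786, assuming 4KiB pages: with role.has_4_byte_gpte set, a guest page-table page holds 1024 4-byte entries while each shadow page holds only 512 8-byte SPTEs, so two shadow pages (role.quadrant 0 and 1) cover it. A guest write at byte offset 0x800 modifies gPTE index 0x800 / 4 = 512; scaled to 8-byte SPTEs that is offset 0x1000, i.e. quadrant 0x1000 >> 12 = 1, so only the shadow page with role.quadrant == 1 has an SPTE affected by the write.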
5832 if (gentry && sp->role.level != PG_LEVEL_4K)
6217 if (WARN_ON_ONCE(sp->role.invalid))
6494 union kvm_mmu_page_role role;
6507 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6514 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6551 flush |= !is_last_spte(*sptep, sp->role.level);
6555 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
6557 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6574 level = huge_sp->role.level;
6616 if (WARN_ON_ONCE(!sp->role.guest_mode))
6628 if (sp->role.invalid)
6737 if (sp->role.direct &&
6738 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6820 if (WARN_ON_ONCE(sp->role.invalid))
7197 WARN_ON_ONCE(!sp->role.direct);
7223 slots = kvm_memslots_for_spte_role(kvm, sp->role);