Lines Matching defs:role

218 * The MMU itself (with a valid role) is the single source of truth for the
219 * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
289 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
565 int level = sptep_to_sp(sptep)->role.level;
719 if (sp->role.passthrough)
722 if (!sp->role.direct)
725 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
749 * In both cases, sp->role.access contains the correct access bits.
751 return sp->role.access;
764 sp->role.passthrough ? "passthrough" : "direct",
769 sp->role.passthrough ? "passthrough" : "direct",
844 slots = kvm_memslots_for_spte_role(kvm, sp->role);
848 if (sp->role.level > PG_LEVEL_4K)
892 slots = kvm_memslots_for_spte_role(kvm, sp->role);
894 if (sp->role.level > PG_LEVEL_4K)
1118 * information in sp->role.
1120 slots = kvm_memslots_for_spte_role(kvm, sp->role);
1123 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1217 WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
1608 kvm_update_page_stats(kvm, sp->role.level, 1);
1610 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1617 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1700 if (!sp->role.direct)
1855 if (sp->role.direct)
1858 if (sp->role.passthrough)
1882 * - level: not part of the overall MMU role and will never match as the MMU's
1885 * - quadrant: not part of the overall MMU role (similar to level)
1896 * sync a shadow page for a different MMU context, e.g. if the role
1900 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1901 (sp->role.word ^ root_role.word) & ~sync_role_ign.word))
1970 if (sp->role.invalid)
1997 int level = sp->role.level;
2021 level = sp->role.level;
2106 * unsync, thus @vcpu can be NULL if @role.direct is true.
2112 union kvm_mmu_page_role role)
2125 if (sp->role.word != role.word) {
2135 if (role.level > PG_LEVEL_4K && sp->unsync)
2142 if (sp->role.direct)
2197 union kvm_mmu_page_role role)
2203 if (!role.direct)
2220 sp->role = role;
2228 /* Note, @vcpu may be NULL if @role.direct is true; see kvm_mmu_find_shadow_page. */
2233 union kvm_mmu_page_role role)
2241 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2244 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2253 union kvm_mmu_page_role role)
2261 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2268 union kvm_mmu_page_role role;
2270 role = parent_sp->role;
2271 role.level--;
2272 role.access = access;
2273 role.direct = direct;
2274 role.passthrough = 0;
2281 * requires extra bookkeeping in the role.
2285 * 1GiB of the address space. @role.quadrant encodes which quarter of
2290 * @role.quadrant encodes which half of the region they map.
2302 if (role.has_4_byte_gpte) {
2303 WARN_ON_ONCE(role.level != PG_LEVEL_4K);
2304 role.quadrant = spte_index(sptep) & 1;
2307 return role;
2314 union kvm_mmu_page_role role;
2319 role = kvm_mmu_child_role(sptep, direct, access);
2320 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2441 if (child->role.access == direct_access)
2458 if (is_last_spte(pte, sp->role.level)) {
2470 child->role.guest_mode && !child->parent_ptes.val)
2510 if (parent->role.level == PG_LEVEL_4K)
2543 if (!sp->role.invalid && sp_has_gptes(sp))
2557 if (sp->role.invalid)
2580 sp->role.invalid = 1;
2620 WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
2771 * i.e. this guards the role.level == 4K assertion below!
2815 WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
2868 int level = sp->role.level;
2940 unsigned int access = sp->role.access;
2968 WARN_ON_ONCE(!sp->role.direct);
3001 if (sp->role.level > PG_LEVEL_4K)
3444 if (!is_last_spte(spte, sp->role.level))
3497 if (sp->role.level > PG_LEVEL_4K &&
3550 if (!--sp->root_count && sp->role.invalid)
3642 if (!sp || sp->role.guest_mode)
3653 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3656 role.level = level;
3657 role.quadrant = quadrant;
3659 WARN_ON_ONCE(quadrant && !role.has_4_byte_gpte);
3660 WARN_ON_ONCE(role.direct && role.has_4_byte_gpte);
3662 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
4664 union kvm_mmu_page_role role)
4671 if (!role.direct && pgd != root->pgd)
4678 return role.word == sp->role.word;
4682 * Find out if a previously cached root matching the new pgd/role is available,
4717 * Find out if a previously cached root matching the new pgd/role is available.
5254 union kvm_cpu_role role = {0};
5256 role.base.access = ACC_ALL;
5257 role.base.smm = is_smm(vcpu);
5258 role.base.guest_mode = is_guest_mode(vcpu);
5259 role.ext.valid = 1;
5262 role.base.direct = 1;
5263 return role;
5266 role.base.efer_nx = ____is_efer_nx(regs);
5267 role.base.cr0_wp = ____is_cr0_wp(regs);
5268 role.base.smep_andnot_wp = ____is_cr4_smep(regs) && !____is_cr0_wp(regs);
5269 role.base.smap_andnot_wp = ____is_cr4_smap(regs) && !____is_cr0_wp(regs);
5270 role.base.has_4_byte_gpte = !____is_cr4_pae(regs);
5273 role.base.level = ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL
5276 role.base.level = PT32E_ROOT_LEVEL;
5278 role.base.level = PT32_ROOT_LEVEL;
5280 role.ext.cr4_smep = ____is_cr4_smep(regs);
5281 role.ext.cr4_smap = ____is_cr4_smap(regs);
5282 role.ext.cr4_pse = ____is_cr4_pse(regs);
5285 role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
5286 role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
5287 role.ext.efer_lma = ____is_efer_lma(regs);
5288 return role;
5328 union kvm_mmu_page_role role = {0};
5330 role.access = ACC_ALL;
5331 role.cr0_wp = true;
5332 role.efer_nx = true;
5333 role.smm = cpu_role.base.smm;
5334 role.guest_mode = cpu_role.base.guest_mode;
5335 role.ad_disabled = !kvm_ad_enabled();
5336 role.level = kvm_mmu_get_tdp_level(vcpu);
5337 role.direct = true;
5338 role.has_4_byte_gpte = false;
5340 return role;
5406 * KVM forces EFER.NX=1 when TDP is disabled, reflect it in the MMU role.
5449 union kvm_cpu_role role = {0};
5453 * support the "entry to SMM" control either. role.base.smm is always 0.
5456 role.base.level = level;
5457 role.base.has_4_byte_gpte = false;
5458 role.base.direct = false;
5459 role.base.ad_disabled = !accessed_dirty;
5460 role.base.guest_mode = true;
5461 role.base.access = ACC_ALL;
5463 role.ext.word = 0;
5464 role.ext.execonly = execonly;
5465 role.ext.valid = 1;
5467 return role;
5730 if (sp->role.level == PG_LEVEL_4K)
5747 pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
5769 level = sp->role.level;
5771 if (sp->role.has_4_byte_gpte) {
5785 if (quadrant != sp->role.quadrant)
5836 if (gentry && sp->role.level != PG_LEVEL_4K)
6229 if (WARN_ON_ONCE(sp->role.invalid))
6507 union kvm_mmu_page_role role;
6520 role = kvm_mmu_child_role(huge_sptep, /*direct=*/true, access);
6527 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6564 flush |= !is_last_spte(*sptep, sp->role.level);
6568 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
6570 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6587 level = huge_sp->role.level;
6629 if (WARN_ON_ONCE(!sp->role.guest_mode))
6641 if (sp->role.invalid)
6750 if (sp->role.direct &&
6751 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
6833 if (WARN_ON_ONCE(sp->role.invalid))
7210 WARN_ON_ONCE(!sp->role.direct);
7236 slots = kvm_memslots_for_spte_role(kvm, sp->role);
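
All of the matches above (apparently from arch/x86/kvm/mmu/mmu.c) revolve around union kvm_mmu_page_role, the packed description of a shadow page's configuration that the comment at lines 218-219 calls the single source of truth, and around union kvm_cpu_role, which the role computation at lines 5254-5288 builds from the guest's register state. For orientation, here is a rough sketch of the two unions, reconstructed mostly from the fields referenced in the matches; the authoritative definitions live in the kernel headers, and the bit widths, ordering, and padding below are assumptions rather than copies of the real layout.

#include <linux/types.h>

/*
 * Illustrative sketch only: field names are taken from the matches above,
 * but bit widths, ordering, and padding are guesses, not the kernel layout.
 */
union kvm_mmu_page_role {
	u32 word;				/* compared wholesale, e.g. sp->role.word != role.word (line 2125) */
	struct {
		unsigned level:4;		/* paging level this shadow page sits at */
		unsigned has_4_byte_gpte:1;	/* guest uses 32-bit non-PAE (4-byte) PTEs */
		unsigned quadrant:2;		/* which quarter/half of the guest table is shadowed (4-byte GPTEs) */
		unsigned direct:1;		/* no guest page table backs this page */
		unsigned access:3;		/* access bits funneled through this page (line 749) */
		unsigned invalid:1;		/* page has been zapped/obsoleted */
		unsigned efer_nx:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned ad_disabled:1;
		unsigned guest_mode:1;		/* page belongs to a nested (L2) MMU context */
		unsigned passthrough:1;		/* shadow root not backed by a guest table */
		unsigned smm:1;			/* SMM address space, selects the memslot set */
	};
};

/* The "CPU role" pairs the base page role with extended guest-CPU state. */
union kvm_cpu_role {
	u64 as_u64;				/* assumed name for the whole-word view; not shown in the matches */
	struct {
		union kvm_mmu_page_role base;	/* role.base.* assignments, lines 5256-5278 */
		union {
			u32 word;		/* cleared via role.ext.word = 0, line 5463 */
			struct {
				unsigned valid:1;
				unsigned execonly:1;
				unsigned cr4_smep:1;
				unsigned cr4_smap:1;
				unsigned cr4_pse:1;
				unsigned cr4_pke:1;
				unsigned cr4_la57:1;
				unsigned efer_lma:1;
			};
		} ext;				/* role.ext.* assignments, lines 5280-5287 */
	};
};

Two patterns in the matches follow from this layout: helpers such as kvm_memslots_for_spte_role() (lines 844, 892, 1120, 7236) take the role rather than a vCPU because, per the comment at lines 218-219, the role alone is authoritative once built (role.smm is what distinguishes the SMM and normal memslot sets), and role.word comparisons (lines 1901, 2125, 4678) let KVM match or reject an existing shadow page with a single integer compare instead of checking each field.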