/linux-master/arch/x86/kvm/mmu/

spte.c
      16  #include "spte.h"
      74  u64 spte = generation_mmio_spte_mask(gen);  [local]
      80  spte |= shadow_mmio_value | access;
      81  spte |= gpa | shadow_nonpresent_or_rsvd_mask;
      82  spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
      85  return spte;
     114  bool spte_has_volatile_bits(u64 spte)  [argument]
     117  * Always atomically update spte if it can be updated
     122  if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
     144  u64 spte = SPTE_MMU_PRESENT_MASK;  [local]
     251  make_spte_executable(u64 spte)  [argument]
     312  u64 spte = SPTE_MMU_PRESENT_MASK;  [local]
     341  mark_spte_for_access_track(u64 spte)  [argument]
    [all...]
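Note: the make_mmio_spte() fragment above (lines 74-85) builds an MMIO SPTE purely by OR-ing fields into one u64: the MMIO generation mask, the shadow_mmio_value signature plus access bits, and the guest physical address. A minimal userspace model of that composition follows; the mask values are placeholders (KVM derives the real, configuration-dependent masks at runtime), and the L1TF address shuffling via shadow_nonpresent_or_rsvd_mask on lines 81-82 is omitted for brevity.

#include <stdint.h>
#include <stdio.h>

/* Placeholder masks; KVM computes the real ones at runtime. */
#define MMIO_VALUE_MASK  (3ULL << 1)  /* hypothetical "is MMIO" signature   */
#define ACCESS_MASK      0x7ULL       /* hypothetical R/W/X permission bits */

static uint64_t model_make_mmio_spte(uint64_t gpa, uint64_t access,
                                     uint64_t gen_mask)
{
	uint64_t spte = gen_mask;     /* generation bits first, as on line 74 */

	spte |= MMIO_VALUE_MASK | (access & ACCESS_MASK);  /* line 80 */
	spte |= gpa;                  /* stash the guest address, line 81 */
	return spte;
}

int main(void)
{
	printf("mmio spte: %#llx\n",
	       (unsigned long long)model_make_mmio_spte(0x1000, 0x3, 0x8));
	return 0;
}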
spte.h
     103  * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
     104  * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
     111  * checking for MMIO spte cache hits.
     202  static inline bool is_removed_spte(u64 spte)  [argument]
     204  return spte == REMOVED_SPTE;
     230  static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte)  [argument]
     232  return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);
     252  static inline bool is_mmio_spte(u64 spte)  [argument]
     254  return (spte & shadow_mmio_mask) == shadow_mmio_value &&
     279  static inline bool spte_ad_enabled(u64 spte)  [argument]
     285  spte_ad_need_write_protect(u64 spte)  [argument]
     296  spte_shadow_accessed_mask(u64 spte)  [argument]
     302  spte_shadow_dirty_mask(u64 spte)  [argument]
     308  is_access_track_spte(u64 spte)  [argument]
     323  is_executable_pte(u64 spte)  [argument]
     333  is_accessed_spte(u64 spte)  [argument]
     341  is_dirty_spte(u64 spte)  [argument]
     368  is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level)  [argument]
     447  check_spte_writable_invariants(u64 spte)  [argument]
     455  KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte);  [local]
     458  is_mmu_writable_spte(u64 spte)  [argument]
     463  get_mmio_spte_generation(u64 spte)  [argument]
     486  restore_acc_track_spte(u64 spte)  [argument]
    [all...]
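The comment at spte.h lines 103-104 pins down the layout: MMIO generation bits 0-7 live in SPTE bits 3-10 and bits 8-18 in SPTE bits 52-62, i.e. a 19-bit generation split across two free ranges of the PTE. A self-contained sketch of that packing, with a round-trip check (macro names are mine; only the bit positions come from the comment):

#include <assert.h>
#include <stdint.h>

#define GEN_LOW_SHIFT   3    /* generation bits 0-7  -> spte bits 3-10  */
#define GEN_LOW_BITS    8
#define GEN_HIGH_SHIFT  52   /* generation bits 8-18 -> spte bits 52-62 */
#define GEN_HIGH_BITS   11
#define GEN_LOW_MASK    ((1ULL << GEN_LOW_BITS) - 1)
#define GEN_HIGH_MASK   ((1ULL << GEN_HIGH_BITS) - 1)

static uint64_t gen_to_spte_bits(uint64_t gen)
{
	return ((gen & GEN_LOW_MASK) << GEN_LOW_SHIFT) |
	       (((gen >> GEN_LOW_BITS) & GEN_HIGH_MASK) << GEN_HIGH_SHIFT);
}

static uint64_t spte_bits_to_gen(uint64_t spte)
{
	return ((spte >> GEN_LOW_SHIFT) & GEN_LOW_MASK) |
	       (((spte >> GEN_HIGH_SHIFT) & GEN_HIGH_MASK) << GEN_LOW_BITS);
}

int main(void)
{
	/* Round-trip every value of the 19-bit generation. */
	for (uint64_t gen = 0; gen < (1ULL << 19); gen++)
		assert(spte_bits_to_gen(gen_to_spte_bits(gen)) == gen);
	return 0;
}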
mmutrace.h
     212  TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
     213  TP_ARGS(sptep, gfn, spte),
     225  __entry->access = spte & ACC_ALL;
     226  __entry->gen = get_mmio_spte_generation(spte);
     312  TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
     313  TP_ARGS(spte, kvm_gen, spte_gen),
     318  __field(u64, spte)
     324  __entry->spte = spte;
     327  TP_printk("spte
    [all...]
tdp_iter.c
       6  #include "spte.h"
      62  tdp_ptep_t spte_to_child_pt(u64 spte, int level)  [argument]
      68  if (!is_shadow_present_pte(spte) || is_last_spte(spte, level))
      71  return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT);
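spte_to_child_pt() above descends one level of the paging tree: if the SPTE is present and not a leaf, the child table's physical frame number is pulled out of the SPTE and converted back into a usable pointer with __va(). A toy userspace model of that step, with an array of frames standing in for physical memory and invented bit positions:

#include <stdint.h>
#include <stdio.h>

#define PTES_PER_TABLE 512
#define PTE_PRESENT    (1ULL << 0)
#define PTE_LEAF       (1ULL << 7)   /* large/last-level mapping */

typedef uint64_t pte_t;
static pte_t frames[8][PTES_PER_TABLE];   /* fake physical frames */

static pte_t *pte_to_child_table(pte_t pte)
{
	if (!(pte & PTE_PRESENT) || (pte & PTE_LEAF))
		return NULL;               /* nothing to descend into       */
	return frames[(pte >> 12) & 0x7];  /* "__va(pfn << PAGE_SHIFT)"     */
}

int main(void)
{
	pte_t nonleaf = PTE_PRESENT | (3ULL << 12);  /* points at frame 3 */

	printf("child table: %p (frame 3 at %p)\n",
	       (void *)pte_to_child_table(nonleaf), (void *)frames[3]);
	return 0;
}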
mmu.c
      30  #include "spte.h"
     174  #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
     177  ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
     178  __shadow_walk_next(&(_walker), spte))
     184  static void mmu_spte_set(u64 *sptep, u64 spte);
     295  u64 spte = make_mmio_spte(vcpu, gfn, access);  [local]
     297  trace_mark_mmio_spte(sptep, gfn, spte);
     298  mmu_spte_set(sptep, spte);
     301  static gfn_t get_mmio_spte_gfn(u64 spte)  [argument]
     303  u64 gpa = spte
     311  get_mmio_spte_access(u64 spte)  [argument]
     316  check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)  [argument]
     337  __set_spte(u64 *sptep, u64 spte)  [argument]
     342  __update_clear_spte_fast(u64 *sptep, u64 spte)  [argument]
     347  __update_clear_spte_slow(u64 *sptep, u64 spte)  [argument]
     362  u64 spte;  [member in union:split_spte]
     365  count_spte_clear(u64 *sptep, u64 spte)  [argument]
     377  __set_spte(u64 *sptep, u64 spte)  [argument]
     396  __update_clear_spte_fast(u64 *sptep, u64 spte)  [argument]
     415  __update_clear_spte_slow(u64 *sptep, u64 spte)  [argument]
     452  union split_spte spte, *orig = (union split_spte *)sptep;  [local]
     617  u64 spte = mmu_spte_get_lockless(sptep);  [local]
     933  pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, struct kvm_rmap_head *rmap_head)  [argument]
    1006  pte_list_remove(struct kvm *kvm, u64 *spte, struct kvm_rmap_head *rmap_head)  [argument]
    1095  rmap_remove(struct kvm *kvm, u64 *spte)  [argument]
    1231  u64 spte = *sptep;  [local]
    1259  u64 spte = *sptep;  [local]
    1641  __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)  [argument]
    1665  rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)  [argument]
    1787  mark_unsync(u64 *spte)  [argument]
    2140  clear_sp_write_flooding_count(u64 *spte)  [argument]
    2412  __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte)  [argument]
    2433  u64 spte;  [local]
    2493  mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list)  [argument]
    2916  u64 spte;  [local]
    3008  u64 *spte, *start = NULL;  [local]
    3216  disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)  [argument]
    3406  is_access_allowed(struct kvm_page_fault *fault, u64 spte)  [argument]
    3427  fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)  [argument]
    3448  u64 spte;  [local]
    4122  u64 spte;  [local]
    4189  u64 spte;  [local]
    4243  u64 spte;  [local]
    5766  u64 *spte;  [local]
    5800  u64 entry, gentry, *spte;  [local]
    6526  u64 *sptep, spte;  [local]
    6570  u64 spte;  [local]
    [all...]
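The __set_spte()/__update_clear_spte_*() pairs and the split_spte union above (lines 337-452) exist because a 32-bit host cannot store a 64-bit SPTE in one instruction: KVM writes the two halves in a careful order around a write barrier so a lockless reader never observes a present SPTE paired with a stale high half. A userspace sketch of that ordering using C11 atomics; this models the idea, it is not KVM's code.

#include <stdatomic.h>
#include <stdint.h>

union split_spte {
	struct {
		_Atomic uint32_t spte_low;   /* holds the present bit */
		_Atomic uint32_t spte_high;
	};
	uint64_t spte;
};

/* Install a previously non-present SPTE: high half first. */
static void set_spte_split(union split_spte *sptep, uint64_t spte)
{
	atomic_store_explicit(&sptep->spte_high, (uint32_t)(spte >> 32),
			      memory_order_relaxed);
	/* Like smp_wmb(): high half visible before the present bit. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&sptep->spte_low, (uint32_t)spte,
			      memory_order_relaxed);
}

/* Tear down: clear the present bit first, then the high half. */
static void clear_spte_split(union split_spte *sptep)
{
	atomic_store_explicit(&sptep->spte_low, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&sptep->spte_high, 0, memory_order_relaxed);
}

int main(void)
{
	union split_spte e = { .spte = 0 };

	set_spte_split(&e, 0xabcd000000000001ULL);  /* bit 0 = "present" */
	clear_spte_split(&e);
	return 0;
}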
paging_tmpl.h
     152  struct kvm_mmu_page *sp, u64 *spte,
     169  drop_spte(vcpu->kvm, spte);
     534  u64 *spte, pt_element_t gpte)
     541  if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
     556  mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);
     589  u64 *spte;  [local]
     608  spte = sp->spt + i;
     610  for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
     611  if (spte == sptep)
     614  if (is_shadow_present_pte(*spte))
     151  prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte)  [argument]
     533  prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte)  [argument]
     907  u64 *sptep, spte;  [local]
    [all...]
tdp_mmu.h
       8  #include "spte.h"
      69  u64 *spte);
mmu_internal.h
     121  * Used out of the mmu-lock to avoid reading spte values while an
     261  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
     346  void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
tdp_mmu.c
       9  #include "spte.h"
     539  * refreshed to the current value of the spte.
     548  * known value of the spte.
    1045  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
    1059  u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());  [local]
    1063  ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
    1067  tdp_mmu_iter_set_spte(kvm, iter, spte);
    1441  * Replace the huge spte with a pointer to the populated lower level
    1817  * Returns the last level spte pointer
    1827  kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, u64 *spte)  [argument]
    [all...]
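tdp_mmu_set_spte_atomic() at line 1063 is the TDP MMU's race-safe update path: it installs the new SPTE only if the slot still holds the value the iterator last read, and otherwise the caller retries with the value "refreshed to the current value of the spte" (the comment at line 539). A compare-exchange sketch of that contract; the names and the -EBUSY convention are illustrative, not KVM's exact interface.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

struct toy_iter {
	_Atomic uint64_t *sptep;  /* slot being updated     */
	uint64_t old_spte;        /* value seen by the walk */
};

static int toy_set_spte_atomic(struct toy_iter *iter, uint64_t new_spte)
{
	uint64_t expected = iter->old_spte;

	if (!atomic_compare_exchange_strong(iter->sptep, &expected,
					    new_spte)) {
		/* Lost the race: remember what is there now and retry. */
		iter->old_spte = expected;
		return -EBUSY;
	}
	iter->old_spte = new_spte;
	return 0;
}

int main(void)
{
	_Atomic uint64_t slot = 5;
	struct toy_iter it = { .sptep = &slot, .old_spte = 5 };

	return toy_set_spte_atomic(&it, 7);  /* succeeds, slot becomes 7 */
}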
/linux-master/arch/x86/kvm/
Makefile
      11  mmu/spte.o
/linux-master/arch/s390/mm/
pgtable.c
     682  pte_t spte, tpte;  [local]
     688  spte = *sptep;
     689  if (!(pte_val(spte) & _PAGE_INVALID) &&
     690  !((pte_val(spte) & _PAGE_PROTECT) &&
     694  tpte = __pte((pte_val(spte) & PAGE_MASK) |
/linux-master/mm/
hugetlb.c
    7400  pte_t *spte = NULL;  [local]
    7410  spte = hugetlb_walk(svma, saddr,
    7412  if (spte) {
    7413  get_page(virt_to_page(spte));
    7419  if (!spte)
    7425  (pmd_t *)((unsigned long)spte & PAGE_MASK));
    7428  put_page(virt_to_page(spte));
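This hugetlb.c fragment is the huge-PMD-sharing path: a process walks sibling VMAs of the same file (hugetlb_walk() at line 7410) and, if one already has a page table covering the range, reuses that table's page, pinning it with get_page() and dropping the pin with put_page() if it loses the race to install it. A toy refcounting model of the pattern; the structures are invented for illustration.

#include <stdio.h>

struct pt_page {
	int refcount;
	/* ... PMD entries would live here ... */
};

struct mapping {
	struct pt_page *pmd_table;  /* NULL until populated */
};

/* Reuse a sibling's PMD page if one exists, bumping its refcount. */
static struct pt_page *share_pmd(struct mapping *me, struct mapping *sibling)
{
	if (!me->pmd_table && sibling->pmd_table) {
		sibling->pmd_table->refcount++;   /* get_page() */
		me->pmd_table = sibling->pmd_table;
	}
	return me->pmd_table;
}

int main(void)
{
	struct pt_page table = { .refcount = 1 };
	struct mapping a = { .pmd_table = &table }, b = { 0 };

	share_pmd(&b, &a);
	printf("shared table refcount: %d\n", table.refcount);  /* 2 */
	return 0;
}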
/linux-master/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
     448  bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;  [local]
     451  if (spte != next)
     455  if (!spte) {