/linux-master/arch/x86/include/asm/
pgtable_64_types.h
    22: typedef struct { pmdval_t pmd; } pmd_t;
paravirt.h
    439: static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
    441:     PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
    450: static inline pmdval_t pmd_val(pmd_t pmd)
    452:     return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
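These paravirt wrappers route pmd reads and writes through per-hypervisor mmu ops; on bare metal they patch down to the native helpers. A minimal, self-contained sketch of what those native x86-64 counterparts do (the typedefs mirror pgtable_64_types.h line 22; the kernel's real native_set_pmd() uses WRITE_ONCE(), so treat this as an illustration, not the exact source):

/* Sketch of the bare-metal fallbacks behind mmu.set_pmd / mmu.pmd_val. */
typedef unsigned long long pmdval_t;
typedef struct { pmdval_t pmd; } pmd_t;        /* wraps the raw 64-bit entry */

#define __pmd(x) ((pmd_t) { (x) })

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
        return pmd.pmd;                        /* unwrap the raw entry value */
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        /* the kernel uses WRITE_ONCE() here; a volatile store models that */
        *(volatile pmdval_t *)&pmdp->pmd = pmd.pmd;
}

A caller builds the entry with __pmd() and hands it to native_set_pmd(); the paravirt layer only adds the indirection shown at line 441.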
kexec.h
    150: pmd_t *pmd;    /* member of struct kimage_arch */
/linux-master/mm/
vmalloc.c
    93: static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
    102:     pte = pte_alloc_kernel_track(pmd, addr, mask);
    126: static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
    145:     if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
    148:     return pmd_set_huge(pmd, phys_addr, prot);
    155:     pmd_t *pmd;
    158:     pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
    159:     if (!pmd)
    164:     if (vmap_try_huge_pmd(pmd, addr,
    341: vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask)
    357:     pmd_t *pmd;
    478: vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask)
    513:     pmd_t *pmd;
    738:     pmd_t *pmd;
    (additional matches omitted)
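vmap_try_huge_pmd() above decides whether a kernel range can be mapped with a single PMD-sized leaf instead of a page of ptes. A condensed sketch of that decision, assuming the usual kernel helpers (pmd_free_pte_page(), pmd_set_huge()); the real function also consults arch_vmap_pmd_supported():

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Sketch of the vmap huge-pmd fast path; illustrative, not the exact source. */
static int vmap_try_huge_pmd_sketch(pmd_t *pmd, unsigned long addr,
                                    unsigned long end, phys_addr_t phys_addr,
                                    pgprot_t prot, unsigned int max_page_shift)
{
        if (max_page_shift < PMD_SHIFT)         /* caller forbids huge mappings */
                return 0;
        if (end - addr != PMD_SIZE)             /* must cover exactly one pmd */
                return 0;
        if (!IS_ALIGNED(addr, PMD_SIZE) || !IS_ALIGNED(phys_addr, PMD_SIZE))
                return 0;
        /* A stale pte page may sit under this entry; free it or give up. */
        if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys_addr, prot);      /* install the leaf */
}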
mapping_dirty_helpers.c
    118:  * wp_clean_pmd_entry - The pagewalk pmd callback.
    121:  * WARN() if encountering a dirty huge pmd.
    126: static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end,
    129:     pmd_t pmdval = pmdp_get_lockless(pmd);
    131:     /* Do not split a huge pmd, present or migrated */
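wp_clean_pmd_entry() is a pmd-level callback of the generic pagewalk in <linux/pagewalk.h>. A minimal sketch of how such a callback is registered and how it refuses to descend into huge entries; the callback body is illustrative, only the pmdp_get_lockless() read mirrors the snippet above:

#include <linux/pagewalk.h>

/* Sketch: a pmd callback that leaves huge entries alone instead of splitting. */
static int skip_huge_pmd_entry(pmd_t *pmd, unsigned long addr,
                               unsigned long end, struct mm_walk *walk)
{
        pmd_t pmdval = pmdp_get_lockless(pmd);  /* unlocked snapshot */

        /* Do not split a huge pmd, present or migrated: skip the range. */
        if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
                walk->action = ACTION_CONTINUE;

        return 0;
}

static const struct mm_walk_ops skip_huge_walk_ops = {
        .pmd_entry = skip_huge_pmd_entry,
        .walk_lock = PGWALK_RDLOCK,             /* walker takes mmap lock shared */
};

walk_page_range(mm, start, end, &skip_huge_walk_ops, NULL) would then drive the callback for every populated pmd in [start, end).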
page_idle.c
    69:     if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
    72:     /* unexpected pmd-mapped page? */
rmap.c
    52:  * mapping->i_mmap_rwsem (also used for hugetlb pmd sharing)
    806:     pmd_t *pmd = NULL;
    820:     pmd = pmd_offset(pud, address);
    822:     return pmd;
    882:         pvmw.pmd))
    885:     /* unexpected pmd-mapped folio? */
    1007:  * We have to assume the worse case ie pmd for invalidation. Note that
    1033:     pmd_t *pmd = pvmw->pmd;
    1036:     if (!pmd_dirty(*pmd)
    (additional matches omitted)
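The rmap.c hits at lines 806-822 are the standard four-level descent to the pmd covering an address (the mm_find_pmd() pattern). A hedged sketch of that lookup; it only returns a pointer, the caller still has to validate the entry and take the appropriate lock:

#include <linux/mm.h>

/* Sketch of the rmap-style pmd lookup; illustrative only. */
static pmd_t *find_pmd_sketch(struct mm_struct *mm, unsigned long address)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))
                return NULL;

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))
                return NULL;

        return pmd_offset(pud, address);        /* may still be pmd_none() */
}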
memory-failure.c
    395:     pmd_t *pmd;
    411:     pmd = pmd_offset(pud, address);
    412:     if (!pmd_present(*pmd))
    414:     if (pmd_devmap(*pmd))
    416:     pte = pte_offset_map(pmd, address);
    775:     pmd_t pmd = *pmdp;
    779:     if (!pmd_present(pmd))
    781:     pfn = pmd_pfn(pmd);
    1198:  * To narrow down kill region to one page, we need to break up pmd.
vmscan.c
    3281: static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr)
    3283:     unsigned long pfn = pmd_pfn(pmd);
    3287:     if (!pmd_present(pmd) || is_huge_zero_pmd(pmd))
    3290:     if (WARN_ON_ONCE(pmd_devmap(pmd)))
    3330: static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
    3345:     pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl);
    3405:     pmd_t *pmd;
    3428:     pmd = pmd_offset(pud, *first);
    3430:     ptl = pmd_lockptr(args->mm, pmd);
    3484:     pmd_t *pmd;
    (additional matches omitted)
/linux-master/arch/sparc/mm/
init_64.c
    1640:     pmd_t *pmd;
    1671:     pmd = pmd_offset(pud, addr);
    1672:     if (pmd_none(*pmd))
    1675:     if (pmd_leaf(*pmd))
    1676:         return pfn_valid(pmd_pfn(*pmd));
    1678:     pte = pte_offset_kernel(pmd, addr);
    1724: kernel_map_hugepmd(unsigned long vstart, unsigned long vend, pmd_t *pmd)
    1736:     pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
    1753:     pmd_val(*pmd) = pte_val;
    1791:     pmd_t *pmd;
    2616:     pmd_t *pmd;
    2964: update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd)
    (additional matches omitted)
srmmu.c
    106: static inline int srmmu_pmd_none(pmd_t pmd)
    107:     { return !(pmd_val(pmd) & 0xFFFFFFF); }
    273:     pmd_t *pmd;
    308:     pmd = pmd_offset(__nocache_fix(pud), vaddr);
    309:     pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
    697:     early_pgtable_allocfail("pmd");
    731:     early_pgtable_allocfail("pmd");
    782:     int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
    822:     early_pgtable_allocfail("pmd");
    903:     pmd_t *pmd;
    (additional matches omitted)
/linux-master/mm/damon/
paddr.c
    29:     damon_pmdp_mkold(pvmw.pmd, vma, addr);
    97:     *accessed = pmd_young(pmdp_get(pvmw.pmd)) ||
/linux-master/arch/s390/mm/
gmap.c
    560:     pmd_t *pmd;
    603:     pmd = pmd_offset(pud, vmaddr);
    604:     VM_BUG_ON(pmd_none(*pmd));
    606:     if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
    612:     ptl = pmd_lock(mm, pmd);
    618:     if (pmd_leaf(*pmd)) {
    619:         *table = (pmd_val(*pmd) &
    623:         *table = pmd_val(*pmd) &
    627:         !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
    922:  * and return the pmd pointer
    2521: thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk)
    2563: __zap_zero_pages(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk)
    2638: __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk)
    2649:     pmd_t *pmd = (pmd_t *)pte;
    (additional matches omitted)
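The gmap code takes the pmd split lock (line 612) before deciding whether the entry is a large segment. A reduced sketch of that lock/re-check/act pattern, with the s390-specific table linkage left out:

#include <linux/mm.h>

/* Sketch: inspect a possibly-huge pmd under its split lock. */
static void with_pmd_locked_sketch(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);    /* serialises against split/clear */

        if (pmd_leaf(*pmd)) {
                /* large segment: the pmd itself is the mapping */
        } else {
                /* regular segment: the pmd points at a page table */
        }

        spin_unlock(ptl);
}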
hugetlbpage.c
    30:  * Convert encoding pte bits pmd / pud bits
    85:  * Convert encoding pmd / pud bits pte bits
    236: int pmd_huge(pmd_t pmd)
    238:     return pmd_leaf(pmd);
/linux-master/include/linux/
hugetlb.h
    191: static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
    193:     return pte_offset_kernel(pmd, address);
    195: static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
    198:     return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
    218:  * (1) For private mappings: pmd unsharing is not possible, so holding the
    223:  * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
    224:  *     pgtable page can go away from under us! It can be done by a pmd
    228:  * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
    236:  * Option (2.1) is the safest, which guarantees pte stability from pmd
    402: pmd_huge(pmd_t pmd)
    (additional matches omitted)
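The comment block quoted above ends up recommending option (2.1): hold the hugetlb vma lock so a shared pmd cannot be unshared (and its page table freed) while a pte pointer is in use. A hedged sketch of that usage; huge_pte_offset() and the vma lock helpers are real kernel interfaces, the wrapper itself is hypothetical:

#include <linux/hugetlb.h>

/* Sketch of option (2.1): look up a hugetlb pte with pmd unsharing blocked. */
static pte_t *lookup_huge_pte_sketch(struct vm_area_struct *vma,
                                     unsigned long address, unsigned long sz)
{
        pte_t *ptep;

        hugetlb_vma_lock_read(vma);             /* blocks pmd unshare */
        ptep = huge_pte_offset(vma->vm_mm, address, sz);
        hugetlb_vma_unlock_read(vma);

        return ptep;    /* only guaranteed stable while the lock was held */
}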
/linux-master/arch/x86/kernel/
head32.c
    98: #define SET_PL2(val) { .pmd = (val), }
ldt.c
    256:     had_kernel = (k_pmd->pmd != 0);
    257:     had_user = (u_pmd->pmd != 0);
/linux-master/arch/riscv/mm/
kasan_init.c
    29: static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
    34:     if (pmd_none(pmdp_get(pmd))) {
    36:         set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
    39:     ptep = pte_offset_kernel(pmd, vaddr);
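kasan_populate_pte() builds the shadow tables lazily: when the pmd is empty it allocates a pte page from memblock and installs it with set_pmd()/pfn_pmd(), then fills the ptes. A stripped-down sketch of just the population step (PAGE_TABLE is the riscv pgprot for table entries; error handling and the pte fill loop are omitted):

#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <linux/pfn.h>

/* Sketch of the early-boot "hook up a pte page" step from kasan_init.c. */
static void __init populate_pmd_sketch(pmd_t *pmd)
{
        void *p;

        if (!pmd_none(pmdp_get(pmd)))
                return;                         /* already has a pte page */

        p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);       /* one page table */
        set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}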
/linux-master/arch/parisc/kernel/
cache.c
    391:     pmd_t *pmd;
    398:     pmd = pmd_offset(pud, addr);
    399:     if (!pmd_none(*pmd))
    400:         ptep = pte_offset_map(pmd, addr);
entry.S
    386: .macro L2_ptep pmd,pte,index,va,fault
    392:     dep %r0,31,PAGE_SHIFT,\pmd    /* clear offset */
    396:     ldw,s \index(\pmd),\pmd
    397:     bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
    398:     dep %r0,31,PxD_FLAG_SHIFT,\pmd    /* clear flags */
    399:     SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
    401:     dep %r0,31,PAGE_SHIFT,\pmd    /* clear offset */
    402:     shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
    (additional matches omitted)
/linux-master/arch/arm64/mm/
fault.c
    166:     pmd_t *pmdp, pmd;
    185:     pmd = READ_ONCE(*pmdp);
    186:     pr_cont(", pmd=%016llx", pmd_val(pmd));
    187:     if (pmd_none(pmd) || pmd_bad(pmd))
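The arm64 fault path snapshots the entry once with READ_ONCE() and prints the raw value before deciding whether it is safe to descend further. A small sketch of that read-and-classify step:

#include <linux/mm.h>
#include <linux/printk.h>

/* Sketch: dump one pmd and report whether a pte table can be walked below it. */
static bool dump_pmd_sketch(pmd_t *pmdp)
{
        pmd_t pmd = READ_ONCE(*pmdp);           /* single atomic snapshot */

        pr_cont(", pmd=%016llx", pmd_val(pmd));

        return !(pmd_none(pmd) || pmd_bad(pmd));
}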
/linux-master/arch/loongarch/kvm/
mmu.c
    181:  * The last level is small pte page or huge pmd page
    704:     pmd_t pmd;
    742:     pmd = READ_ONCE(*pmd_offset(&pud, hva));
    743:     if (pmd_none(pmd) || !pmd_present(pmd))
    746:     if (kvm_pte_huge(pmd_val(pmd)))
    909:  * previous pmd entry is invalid_pte_table
/linux-master/arch/arc/mm/
tlb.c
    530: update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd)
    533:     pte_t pte = __pte(pmd_val(*pmd));
/linux-master/arch/x86/include/asm/xen/
page.h
    334: #define pmd_val_ma(v) ((v).pmd)
/linux-master/arch/powerpc/include/asm/nohash/
pgtable.h
    354: static inline int pmd_huge(pmd_t pmd)