/linux-master/arch/s390/include/asm/

tlb.h
    94   * pmd_free_tlb frees a pmd table and clears the CRSTE for the
    96   * If the mm uses a two level page table the single pmd is freed
    98   * to avoid the double free of the pmd in this case.
   100  static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,  [argument]
   105          pagetable_pmd_dtor(virt_to_ptdesc(pmd));
   110          tlb_remove_ptdesc(tlb, pmd);

page.h
    86  typedef struct { unsigned long pmd; } pmd_t;  [member in struct:__anon9]
   100  static inline unsigned long pmd_val(pmd_t pmd)  [argument]
   102          return pmd.pmd;

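Several of the page.h hits in this listing show the same convention: pmd_t is a one-member struct wrapping the raw entry value, and pmd_val() unwraps it. A minimal userspace sketch of that typed-wrapper pattern follows; all names in it are invented for illustration, not the kernel's.

/*
 * Illustrative sketch of the struct-wrapper pattern used for pmd_t above:
 * wrapping the raw value in a single-member struct makes the compiler
 * reject accidental mixing of entry types, at zero runtime cost.
 * Every name here is hypothetical.
 */
#include <stdio.h>

typedef struct { unsigned long pmd; } entry_pmd_t;   /* like pmd_t      */
typedef struct { unsigned long pte; } entry_pte_t;   /* a distinct type */

static inline unsigned long entry_pmd_val(entry_pmd_t e) { return e.pmd; }
static inline entry_pmd_t __entry_pmd(unsigned long v)   { return (entry_pmd_t){ v }; }

int main(void)
{
        entry_pmd_t pmd = __entry_pmd(0x1000);

        /* entry_pte_t pte = pmd;  <-- would not compile: distinct struct types */
        printf("pmd value: %#lx\n", entry_pmd_val(pmd));
        return 0;
}
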
/linux-master/arch/x86/kernel/

machine_kexec_64.c
   116          free_page((unsigned long)image->arch.pmd);
   117          image->arch.pmd = NULL;
   129          pmd_t *pmd;  [local]
   152          pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
   153          if (!pmd)
   155          image->arch.pmd = pmd;
   156          set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
   158          pmd = pmd_offset(pud, vaddr);
   159          if (!pmd_present(*pmd)) {
   [all ...]

/linux-master/arch/x86/mm/

fault.c
   182          pmd_t *pmd, *pmd_k;  [local]
   205          pmd = pmd_offset(pud, address);
   208          if (pmd_present(*pmd) != pmd_present(*pmd_k))
   209                  set_pmd(pmd, *pmd_k);
   214          BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
   301          pmd_t *pmd;  [local]
   314          pmd = pmd_offset(pud, address);
   315          pr_pde("*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));
   324          if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present ...
   357          pmd_t *pmd;  [local]
  1014          pmd_t *pmd;  [local]
   [all ...]

/linux-master/arch/x86/mm/pat/

set_memory.c
   667          pmd_t *pmd;  [local]
   690          pmd = pmd_offset(pud, address);
   691          if (pmd_none(*pmd))
   695          if (pmd_leaf(*pmd) || !pmd_present(*pmd))
   696                  return (pte_t *)pmd;
   700          return pte_offset_kernel(pmd, address);
   707   * Note: We return pud and pmd either when the entry is marked large
   802   * Set the new pmd in all the pgds we know about:
   816          pmd_t *pmd;  [local]
  1177  try_to_free_pmd_page(pmd_t *pmd)  [argument]
  1189  unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)  [argument]
  1207  __unmap_pmd_range(pud_t *pud, pmd_t *pmd, unsigned long start, unsigned long end)  [argument]
  1217          pmd_t *pmd = pmd_offset(pud, start);  [local]
  1302  alloc_pte_page(pmd_t *pmd)  [argument]
  1314          pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);  [local]
  1322  populate_pte(struct cpa_data *cpa, unsigned long start, unsigned long end, unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)  [argument]
  1346          pmd_t *pmd;  [local]
   [all ...]

/linux-master/arch/arc/mm/

fault.c
    35          pmd_t *pmd, *pmd_k;  [local]
    59          pmd = pmd_offset(pud, address);
    63          if (!pmd_present(*pmd))
    64                  set_pmd(pmd, *pmd_k);

/linux-master/arch/powerpc/include/asm/nohash/32/

pte-8xx.h
   176  static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)  [argument]
   180          else if (hugepd_ok(*((hugepd_t *)pmd)))
   195          pmd_t *pmd = pmd_off(mm, addr);  [local]
   197          num = number_of_cells_per_pte(pmd, new, huge);

/linux-master/arch/um/include/asm/

page.h
    38  typedef struct { unsigned long pmd; } pmd_t;  [member in struct:__anon145]
    50  #define pmd_val(x)      ((x).pmd)
    61  typedef struct { unsigned long pmd; } pmd_t;  [member in struct:__anon149]
    62  #define pmd_val(x)      ((x).pmd)

pgtable-3level.h
    49          printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
    58  #define pud_populate(mm, pud, pmd) \
    59          set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))

pgtable.h
    98  #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
    99  #define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
   278   * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
   280   * this macro returns the index of the entry in the pmd page which would
   283  #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

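The um pgtable.h hits above show the usual way a pmd entry is decoded: shift out the page frame number, or mask off the low flag bits to recover the address of the pte table it points to. Below is a small self-contained sketch of that mask/shift arithmetic; the constants and names are illustrative only, not the kernel's.

/*
 * Userspace sketch of pmd_pfn()/pmd_page_vaddr()-style decoding: a table
 * entry packs a page-aligned physical address plus low flag bits, so the
 * pfn is recovered by a shift and the table address by masking the flags.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12UL
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

static unsigned long demo_pmd_pfn(unsigned long entry)
{
        return entry >> DEMO_PAGE_SHIFT;            /* frame number        */
}

static unsigned long demo_pmd_table_addr(unsigned long entry)
{
        return entry & DEMO_PAGE_MASK;              /* strip the flag bits */
}

int main(void)
{
        unsigned long entry = 0x41aff067UL;         /* address | flags */

        printf("pfn   = %#lx\n", demo_pmd_pfn(entry));
        printf("table = %#lx\n", demo_pmd_table_addr(entry));
        return 0;
}
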
/linux-master/arch/powerpc/include/asm/

pgtable.h
    83  static inline const void *pmd_page_vaddr(pmd_t pmd)  [argument]
    85          return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
   182  static inline pgtable_t pmd_pgtable(pmd_t pmd)  [argument]
   184          return (pgtable_t)pmd_page_vaddr(pmd);

/linux-master/arch/arm64/mm/

pageattr.c
   218          pmd_t *pmdp, pmd;  [local]
   238          pmd = READ_ONCE(*pmdp);
   239          if (pmd_none(pmd))
   241          if (pmd_sect(pmd))

/linux-master/arch/riscv/mm/

hugetlbpage.c
    40          pmd_t *pmd;  [local]
    64          pmd = pmd_alloc(mm, pud, addr);
    65          if (!pmd)
    70          pte = pte_alloc_huge(mm, pmd, addr & napot_cont_mask(order));
    93          pmd_t *pmd;  [local]
   111          pmd = pmd_offset(pud, addr);
   113          /* must be pmd huge, non-present or none */
   114          return (pte_t *)pmd;
   116          if (!pmd_present(pmdp_get(pmd)))
   121          pte = pte_offset_huge(pmd, addr ...
   407  pmd_huge(pmd_t pmd)  [argument]
   [all ...]

/linux-master/include/asm-generic/

page.h
    41          unsigned long pmd[16];  [member in struct:__anon241]
    52  #define pmd_val(x)      ((&x)->pmd[0])

/linux-master/arch/arc/include/asm/

page.h
    58          unsigned long pmd;  [member in struct:__anon3]
    61  #define pmd_val(x)      ((x).pmd)

/linux-master/arch/alpha/include/asm/

page.h
    31  typedef struct { unsigned long pmd; } pmd_t;  [member in struct:__anon2]
    36  #define pmd_val(x)      ((x).pmd)

/linux-master/arch/arm/include/asm/

pgtable.h
    62  #define pmd_ERROR(pmd)          __pmd_error(__FILE__, __LINE__, pmd)
    74   * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
   159  #define pmd_none(pmd)           (!pmd_val(pmd))
   161  static inline pte_t *pmd_page_vaddr(pmd_t pmd)  [argument]
   163          return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
   166  #define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) ...
   [all ...]

/linux-master/arch/csky/mm/

init.c
   159          pmd_t *pmd;  [local]
   173                  pmd = (pmd_t *)pud;
   174                  for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
   175                          if (pmd_none(*pmd)) {
   182                                  set_pmd(pmd, __pmd(__pa(pte)));
   183                                  BUG_ON(pte != pte_offset_kernel(pmd, 0));

/linux-master/arch/powerpc/include/asm/book3s/64/

hash-64k.h
   216   * The linux hugepage PMD now include the pmd entries followed by the address
   256   * for THP we also track the subpage details at the pmd level. We don't do
   260  static inline int hash__pmd_trans_huge(pmd_t pmd)  [argument]
   262          return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
   266  static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)  [argument]
   268          return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
   284  static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)  [argument]
   286          return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));

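The hash-64k.h hits show the mask-and-compare idiom for software bits in an entry: set flags by OR-ing them in, and test for "huge but not devmap" by masking a group of bits and comparing against the exact combination expected. A standalone sketch of that idiom, with invented bit names and positions, is below.

/*
 * Sketch of the mask-and-compare test used by hash__pmd_trans_huge()
 * above: mask out the bits of interest, then compare against the exact
 * combination that identifies a transparent huge entry.
 * Bit names and positions here are invented for illustration.
 */
#include <stdio.h>

#define DEMO_PTE      (1UL << 0)   /* entry is a leaf        */
#define DEMO_THP_HUGE (1UL << 1)   /* transparent huge page  */
#define DEMO_DEVMAP   (1UL << 2)   /* device memory, not THP */

static int demo_trans_huge(unsigned long entry)
{
        /* huge leaf, but explicitly not a devmap mapping */
        return !!((entry & (DEMO_PTE | DEMO_THP_HUGE | DEMO_DEVMAP)) ==
                  (DEMO_PTE | DEMO_THP_HUGE));
}

static unsigned long demo_mkhuge(unsigned long entry)
{
        return entry | DEMO_PTE | DEMO_THP_HUGE;
}

int main(void)
{
        unsigned long e = demo_mkhuge(0x200000UL);

        printf("trans_huge          = %d\n", demo_trans_huge(e));
        printf("trans_huge | devmap = %d\n", demo_trans_huge(e | DEMO_DEVMAP));
        return 0;
}
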
/linux-master/arch/powerpc/mm/book3s64/

subpage_prot.c
    59          pmd_t *pmd;  [local]
    70          pmd = pmd_offset(pud, addr);
    71          if (pmd_none(*pmd))
    73          pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
   138  static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,  [argument]
   142          split_huge_pmd(vma, pmd, addr);

/linux-master/arch/parisc/mm/

init.c
    42  pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
   346          pmd_t *pmd;  [local]
   379                  pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
   381                  if (!pmd)
   382                          panic("pmd allocation failed.\n");
   383                  pud_populate(NULL, pud, pmd);
   387          pmd = pmd_offset(pud, vaddr);
   388          for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
   389                  if (pmd_none(*pmd)) {
   393                          pmd_populate_kernel(NULL, pmd, pg_table ...
   677          pmd_t *pmd;  [local]
   [all ...]

/linux-master/mm/

pagewalk.c
    41  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  [argument]
    51           * Indeed, on x86_64 the pmd entries set up by init_espfix_ap()
    56                  pte = pte_offset_kernel(pmd, addr);
    58                  pte = pte_offset_map(pmd, addr);
    65                  pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
   118          pmd_t *pmd;  [local]
   124          pmd = pmd_offset(pud, addr);
   128          if (pmd_none(*pmd)) {
   143          err = ops->pmd_entry(pmd, addr, next, walk);
   154          if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present ...
   [all ...]

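The pagewalk.c hits outline the standard descent: pmd_offset() indexes into the table for an address, empty entries are skipped, and populated entries are handed to a per-entry callback or descended into. A compact userspace model of that two-level walk over simulated tables is sketched below; the table geometry, types, and names are all invented stand-ins, not kernel code.

/*
 * Userspace model of the walk_pmd_range()/walk_pte_range() descent: index
 * the upper table by address, skip empty slots, and invoke a callback for
 * each populated leaf entry.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PTE_SHIFT  12                  /* 4 KiB leaf        */
#define DEMO_PTRS       4                   /* entries per table */
#define DEMO_PMD_SHIFT  (DEMO_PTE_SHIFT + 2)

typedef unsigned long demo_pte_t;
typedef struct { demo_pte_t *table; } demo_pmd_t;    /* NULL == none */

typedef int (*demo_pte_fn)(unsigned long addr, demo_pte_t pte);

static int demo_walk(demo_pmd_t *pmd_table, unsigned long addr,
                     unsigned long end, demo_pte_fn fn)
{
        for (; addr < end; addr += 1UL << DEMO_PTE_SHIFT) {
                demo_pmd_t *pmd = &pmd_table[(addr >> DEMO_PMD_SHIFT) % DEMO_PTRS];

                if (!pmd->table)                    /* pmd_none(): skip hole */
                        continue;

                demo_pte_t pte = pmd->table[(addr >> DEMO_PTE_SHIFT) % DEMO_PTRS];
                int err = fn(addr, pte);            /* like ops->pte_entry() */
                if (err)
                        return err;
        }
        return 0;
}

static int demo_print(unsigned long addr, demo_pte_t pte)
{
        printf("addr %#lx -> pte %#lx\n", addr, pte);
        return 0;
}

int main(void)
{
        demo_pmd_t pmds[DEMO_PTRS] = { 0 };
        demo_pte_t ptes[DEMO_PTRS] = { 0x1067, 0x2067, 0x3067, 0x4067 };

        pmds[1].table = ptes;                       /* only one pmd populated */
        return demo_walk(pmds, 0,
                         DEMO_PTRS * DEMO_PTRS * (1UL << DEMO_PTE_SHIFT),
                         demo_print);
}
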
mprotect.c
    84                          struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
    96          pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
   303           * thp is handled differently when split by erasing the pmd so far.
   325   * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable
   329  #define change_pmd_prepare(vma, pmd, cp_flags) \
   333                  if (pte_alloc(vma->vm_mm, pmd)) \
   342   * while {pmd|pud|p4d}_alloc() returns the valid pointer on success.
   359          pmd_t *pmd;  [local]
   367          pmd = pmd_offset(pud, addr);
   374          ret = change_pmd_prepare(vma, pmd, cp_flags ...
    83  change_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags)  [argument]
   [all ...]

memory.c
   187  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  [argument]
   190          pgtable_t token = pmd_pgtable(*pmd);
   191          pmd_clear(pmd);
   200          pmd_t *pmd;  [local]
   205          pmd = pmd_offset(pud, addr);
   208                  if (pmd_none_or_clear_bad(pmd))
   210                  free_pte_range(tlb, pmd, addr);
   211          } while (pmd++, addr = next, addr != end);
   224          pmd = pmd_offset(pud, start);
   226          pmd_free_tlb(tlb, pmd, start ...
   412  pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)  [argument]
   438  __pte_alloc(struct mm_struct *mm, pmd_t *pmd)  [argument]
   450  __pte_alloc_kernel(pmd_t *pmd)  [argument]
   495          pmd_t *pmd = pmd_offset(pud, addr);  [local]
   656  vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd)  [argument]
   696  vm_normal_folio_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd)  [argument]
  1568  zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details)  [argument]
  1693          pmd_t *pmd;  [local]
  1951          pmd_t *pmd;  [local]
  1971          pmd_t *pmd = walk_to_pmd(mm, addr);  [local]
  2050          pmd_t *pmd = NULL;  [local]
  2489  remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot)  [argument]
  2519          pmd_t *pmd;  [local]
  2716  apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask)  [argument]
  2764          pmd_t *pmd;  [local]
  5896          pmd_t *pmd;  [local]
   [all ...]

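The __pte_alloc()/pmd_install() hits in memory.c correspond to the common allocate-then-recheck pattern for filling an empty pmd: allocate a new pte table outside the lock, then recheck the slot under the lock and either publish the table or free it because a racing thread got there first. A threaded userspace sketch of that pattern follows, using pthreads and invented names; it is an illustration of the idea, not the kernel's implementation.

/*
 * Sketch of the allocate/recheck/install-or-discard pattern: the new
 * lower-level table is built without the lock and only published if the
 * slot is still empty when rechecked under the lock.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PTRS 8

struct demo_pmd_slot {
        unsigned long *pte_table;               /* NULL == pmd_none() */
};

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_pte_alloc(struct demo_pmd_slot *slot)
{
        unsigned long *new = calloc(DEMO_PTRS, sizeof(*new));
        if (!new)
                return -1;                      /* like -ENOMEM */

        pthread_mutex_lock(&demo_lock);
        if (!slot->pte_table) {                 /* still empty: install */
                slot->pte_table = new;
                new = NULL;
        }
        pthread_mutex_unlock(&demo_lock);

        free(new);                              /* lost the race: discard */
        return 0;
}

static void *demo_thread(void *arg)
{
        demo_pte_alloc(arg);
        return NULL;
}

int main(void)
{
        struct demo_pmd_slot slot = { NULL };
        pthread_t a, b;

        pthread_create(&a, NULL, demo_thread, &slot);
        pthread_create(&b, NULL, demo_thread, &slot);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        printf("pte table installed at %p\n", (void *)slot.pte_table);
        free(slot.pte_table);
        return 0;
}
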
/linux-master/arch/parisc/include/asm/

pgtable.h
    82          printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
   215  /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
   291  static inline void pmd_clear(pmd_t *pmd) {  [argument]
   292          set_pmd(pmd, __pmd(0));
   372  static inline unsigned long pmd_page_vaddr(pmd_t pmd)  [argument]
   374          return ((unsigned long) __va(pmd_address(pmd)));
   377  #define pmd_pfn(pmd)            (pmd_address(pmd) >> PAGE_SHIFT)
   378  #define __pmd_page(pmd)         ((unsigned long) __va(pmd_address(pmd)))
   [all ...]