Searched refs:pmd (Results 226 - 250 of 300) sorted by relevance


/linux-master/arch/powerpc/mm/book3s64/
hash_pgtable.c 226 pmd_t pmd; local
232 pmd = *pmdp;
237 * to hugepage, we first clear the pmd, then invalidate all
239 * page fault will see a none pmd and take the slow path that
252 * covered by pmd. This make sure we take a
253 * fault and will find the pmd as none, which will
259 flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
260 return pmd;
264 * We want to put the pgtable in pmd and use pgtable for tracking
280 * before we set the hugepage PTE at pmd leve
[all...]
/linux-master/arch/m68k/kernel/
sys_m68k.c 472 pmd_t *pmd; local
487 pmd = pmd_offset(pud, (unsigned long)mem);
488 if (!pmd_present(*pmd))
490 pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
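Many of the hits on this page share one shape: descend pgd → p4d → pud → pmd, validating each level, then map and lock the pte. A minimal sketch of that walk, assuming a kernel context with the address space stabilized (e.g. mmap_lock held); it is a composite of the patterns quoted here, not code from any one file:

    #include <linux/mm.h>

    /* Walk mm's page tables down to the pte covering addr, taking the
     * pte lock on success; returns NULL if any level is missing. */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr,
                              spinlock_t **ptlp)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, addr);
            if (p4d_none(*p4d) || p4d_bad(*p4d))
                    return NULL;
            pud = pud_offset(p4d, addr);
            if (pud_none(*pud) || pud_bad(*pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);            /* sys_m68k.c:487 */
            if (!pmd_present(*pmd))                 /* sys_m68k.c:488 */
                    return NULL;
            /* Maps the pte page and takes its lock (sys_m68k.c:490);
             * may return NULL if the table vanished under us. */
            return pte_offset_map_lock(mm, pmd, addr, ptlp);
    }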
/linux-master/arch/riscv/include/asm/
pgtable-64.h 66 unsigned long pmd; member in struct:__anon27
69 #define pmd_val(x) ((x).pmd)
262 static inline unsigned long _pmd_pfn(pmd_t pmd) argument
264 return __page_val_to_pfn(pmd_val(pmd));
270 pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
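The pgtable-64.h hit shows why pmd turns up as a struct member: each table level gets its own one-word wrapper type, so raw values from different levels cannot be mixed silently. The general pattern, simplified from the kernel's typed page-table convention (not the exact RISC-V header):

    /* A pmd entry is one machine word, but wrapped for type safety. */
    typedef struct { unsigned long pmd; } pmd_t;

    #define pmd_val(x)      ((x).pmd)               /* unwrap to raw bits */
    #define __pmd(x)        ((pmd_t) { (x) })       /* wrap raw bits back */

With this, passing a pmd_t where a pud_t is expected becomes a compile error rather than a latent corruption.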
/linux-master/arch/powerpc/mm/book3s32/
mmu.c 303 pmd_t *pmd; local
307 pmd = pmd_off(mm, ea);
308 if (!pmd_none(*pmd))
309 add_hash_page(mm->context.id, ea, pmd_val(*pmd));
/linux-master/arch/m68k/mm/
kmap.c 94 printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
375 unsigned long pmd = pmd_val(*pmd_dir); local
377 if ((pmd & _DESCTYPE_MASK) == _PAGE_PRESENT) {
378 *pmd_dir = __pmd((pmd & _CACHEMASK040) | cmode);
387 printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
motorola.c 251 unsigned long pmd, last; local
264 pmd = pgd_page_vaddr(kernel_pg_dir[i]);
265 if (pmd > last)
266 last = pmd;
/linux-master/arch/um/kernel/
trap.c 29 pmd_t *pmd; local
101 pmd = pmd_off(mm, address);
102 pte = pte_offset_kernel(pmd, address);
/linux-master/arch/csky/mm/
fault.c 119 pmd_t *pmd, *pmd_k; local
154 pmd = pmd_offset(pud, addr);
160 set_pmd(pmd, *pmd_k);
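This csky hit (and the mips fault.c hit below) is the vmalloc-fault pattern: kernel mappings are created only in init_mm's master tables, so a kernel-space fault in another context copies the missing pmd over instead of handling a real fault. A hedged sketch of that shape; the function name is invented, and real architectures fold different levels:

    #include <linux/mm.h>

    /* On a kernel-address fault, mirror the master (init_mm) pmd into
     * the faulting context's tables; -EFAULT means a genuine fault. */
    static int sync_kernel_pmd(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pgd_t *pgd_k = pgd_offset_k(addr);      /* same slot in init_mm */
            p4d_t *p4d, *p4d_k;
            pud_t *pud, *pud_k;
            pmd_t *pmd, *pmd_k;

            if (!pgd_present(*pgd_k))
                    return -EFAULT;
            p4d = p4d_offset(pgd, addr);
            p4d_k = p4d_offset(pgd_k, addr);
            if (!p4d_present(*p4d_k))
                    return -EFAULT;
            pud = pud_offset(p4d, addr);
            pud_k = pud_offset(p4d_k, addr);
            if (!pud_present(*pud_k))
                    return -EFAULT;
            pmd = pmd_offset(pud, addr);
            pmd_k = pmd_offset(pud_k, addr);
            if (!pmd_present(*pmd_k))
                    return -EFAULT;
            set_pmd(pmd, *pmd_k);                   /* as in fault.c:160 */
            return 0;
    }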
/linux-master/mm/
mincore.c 100 static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, argument
109 ptl = pmd_trans_huge_lock(pmd, vma);
116 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
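mincore_pte_range() and mlock_pte_range() (next entry) show the THP-aware variant of the walk: first try pmd_trans_huge_lock(), which returns the held pmd lock if the entry is a huge page, and only fall back to the per-pte loop otherwise. A sketch of that shape with the per-entry work elided:

    #include <linux/huge_mm.h>
    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    /* Shape of a pagewalk callback that copes with a huge pmd. */
    static int example_pte_range(pmd_t *pmd, unsigned long addr,
                                 unsigned long end, struct mm_walk *walk)
    {
            struct vm_area_struct *vma = walk->vma;
            spinlock_t *ptl;
            pte_t *pte;

            ptl = pmd_trans_huge_lock(pmd, vma);
            if (ptl) {
                    /* *pmd maps the whole range as one huge entry:
                     * handle PMD_SIZE worth of memory at once. */
                    spin_unlock(ptl);
                    return 0;
            }

            pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
            if (!pte)
                    return 0;               /* raced: treat as empty */
            for (; addr < end; pte++, addr += PAGE_SIZE) {
                    /* inspect *pte here, one page at a time */
            }
            pte_unmap_unlock(pte - 1, ptl);
            return 0;
    }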
mremap.c 59 pmd_t *pmd; local
65 pmd = pmd_offset(pud, addr);
66 if (pmd_none(*pmd))
69 return pmd;
90 pmd_t *pmd; local
96 pmd = pmd_alloc(mm, pud, addr);
97 if (!pmd)
100 VM_BUG_ON(pmd_trans_huge(*pmd));
102 return pmd;
241 pmd_t pmd; local
[all...]
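The two mremap.c hits contrast the lookups a move needs: the source side only reads (a missing pmd means there is nothing to move), while the destination side calls pmd_alloc() to build the level on demand. A hedged sketch; the function names are invented, and the pud is assumed already looked up:

    #include <linux/mm.h>

    static pmd_t *get_src_pmd(pud_t *pud, unsigned long addr)
    {
            pmd_t *pmd = pmd_offset(pud, addr);     /* never allocates */

            if (pmd_none(*pmd))
                    return NULL;                    /* nothing to move */
            return pmd;
    }

    static pmd_t *get_dst_pmd(struct mm_struct *mm, pud_t *pud,
                              unsigned long addr)
    {
            pmd_t *pmd = pmd_alloc(mm, pud, addr);  /* allocates if absent */

            if (!pmd)
                    return NULL;                    /* out of memory */
            VM_BUG_ON(pmd_trans_huge(*pmd));        /* fresh slot: not huge */
            return pmd;
    }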
mlock.c 363 static int mlock_pte_range(pmd_t *pmd, unsigned long addr, argument
375 ptl = pmd_trans_huge_lock(pmd, vma);
377 if (!pmd_present(*pmd))
379 if (is_huge_zero_pmd(*pmd))
381 folio = page_folio(pmd_page(*pmd));
389 start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
/linux-master/arch/sh/mm/
cache-sh4.c 221 pmd_t *pmd; local
234 pmd = pmd_off(vma->vm_mm, address);
235 pte = pte_offset_kernel(pmd, address);
/linux-master/arch/mips/mm/
fault.c 289 pmd_t *pmd, *pmd_k; local
309 pmd = pmd_offset(pud, address);
313 set_pmd(pmd, *pmd_k);
/linux-master/arch/x86/boot/compressed/
ident_map_64.c 198 pmd_t pmd; local
224 pmd = __pmd((unsigned long)pte | info->kernpg_flag);
225 set_pmd(pmdp, pmd);
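This boot-time identity mapper shows what a non-leaf pmd entry is at bottom: the address of the next-level pte table OR'd with the page-table flag bits; at this stage memory is identity mapped, so the table's virtual address doubles as its physical one. Restated as a tiny helper (the wrapper is invented; info->kernpg_flag supplies the flags in the original):

    /* Link a freshly built pte table into the pmd slot pmdp. */
    static void link_pte_table(pmd_t *pmdp, pte_t *pte, unsigned long flags)
    {
            pmd_t pmd = __pmd((unsigned long)pte | flags);

            set_pmd(pmdp, pmd);     /* publish the new table */
    }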
/linux-master/include/linux/
userfaultfd_k.h 156 * Never enable huge pmd sharing on some uffd registered vmas:
163 * with huge pmd sharing this would *also* setup the second UFFD-registered
204 userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, pmd_t pmd) argument
207 return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
310 userfaultfd_huge_pmd_wp(struct vm_area_struct *vma, pmd_t pmd) argument
pfn_t.h 113 pmd_t pmd_mkdevmap(pmd_t pmd);
/linux-master/mm/kasan/
shadow.c 191 pmd_t *pmd; local
204 pmd = pmd_offset(pud, addr);
205 if (pmd_none(*pmd))
207 if (pmd_leaf(*pmd))
209 pte = pte_offset_kernel(pmd, addr);
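The kasan walker adds a guard the simpler walks omit: pmd_leaf() detects a huge mapping at the pmd level, where no pte table exists below and pte_offset_kernel() must not be used. A sketch of that guard (helper name invented):

    /* Return the pte under pmd, or NULL if there is no pte level. */
    static pte_t *pte_if_table(pmd_t *pmd, unsigned long addr)
    {
            if (pmd_none(*pmd))
                    return NULL;            /* nothing mapped here */
            if (pmd_leaf(*pmd))
                    return NULL;            /* huge leaf: no table below */
            return pte_offset_kernel(pmd, addr);
    }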
/linux-master/arch/x86/mm/
ioremap.c 862 pmd_t *pmd = pmd_offset(pud, addr); local
864 return pmd;
879 pmd_t *pmd; local
889 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
891 pmd_populate_kernel(&init_mm, pmd, bm_pte);
901 if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
903 printk(KERN_WARNING "pmd %p != %p\n",
904 pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
mem_encrypt_amd.c 160 pmdval_t pmd_flags, pmd; local
166 pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
167 __early_make_pgtable((unsigned long)vaddr, pmd);
kmmio.c 133 static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old) argument
136 pmdval_t v = pmd_val(*pmd);
139 new_pmd = pmd_mkinvalid(*pmd);
144 set_pmd(pmd, new_pmd);
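clear_pmd_presence() is the trap-on-access trick kmmio is built on: write an invalidated copy of the entry so every access through it faults into the handler, and restore the saved raw value to re-arm the mapping. A simplified restatement (the original also records whether the entry was present to begin with):

    #include <linux/mm.h>

    /* clear=true: save *pmd into *old and make it fault on access.
     * clear=false: restore the previously saved value. */
    static void toggle_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
    {
            if (clear) {
                    *old = pmd_val(*pmd);
                    set_pmd(pmd, pmd_mkinvalid(*pmd));
            } else {
                    set_pmd(pmd, __pmd(*old));
            }
    }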
/linux-master/fs/
dax.c 709 * pte or pmd without holding the respective lock, so we are
1230 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1231 if (!pmd_none(*(vmf->pmd))) {
1237 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1242 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1640 * @pmd: distinguish whether it is a pmd fault
1642 dax_fault_iter(struct vm_fault *vmf, const struct iomap_iter *iter, pfn_t *pfnp, struct xa_state *xas, void **entry, bool pmd) argument
1648 size_t size = pmd ? PMD_SIZE : PAGE_SIZE;
1651 unsigned long entry_flags = pmd
[all...]
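The dax.c lines around 1230 are the canonical huge-fault install sequence: take the pmd lock, re-check that the slot is still empty (another thread can win the race), pre-deposit a pte table so a later split of the huge entry cannot fail on allocation, then publish with set_pmd_at(). A sketch; the wrapper function and its parameters are invented to make the fragment self-contained:

    #include <linux/huge_mm.h>
    #include <linux/mm.h>

    static vm_fault_t install_huge_pmd(struct vm_fault *vmf,
                                       unsigned long pmd_addr,
                                       pmd_t pmd_entry, pgtable_t pgtable)
    {
            struct vm_area_struct *vma = vmf->vma;
            spinlock_t *ptl = pmd_lock(vma->vm_mm, vmf->pmd);

            if (!pmd_none(*vmf->pmd)) {
                    spin_unlock(ptl);               /* lost the race */
                    return VM_FAULT_FALLBACK;
            }
            /* Park a pte table now; splitting later must not fail. */
            pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
            set_pmd_at(vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
            spin_unlock(ptl);
            return 0;
    }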
/linux-master/arch/loongarch/mm/
kasan_init.c 30 #define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
31 (__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))
/linux-master/arch/x86/kernel/
tboot.c 119 pmd_t *pmd; local
129 pmd = pmd_alloc(&tboot_mm, pud, vaddr);
130 if (!pmd)
132 pte = pte_alloc_map(&tboot_mm, pmd, vaddr);
/linux-master/arch/arm/mach-sa1100/
assabet.c 606 pmd_t *pmd; local
608 pmd = pmd_off_k(virt);
609 *pmd = __pmd(phys | prot);
610 flush_pmd_entry(pmd);
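The assabet.c hit writes a leaf pmd directly: on ARM one pmd entry can map an entire section, and flush_pmd_entry() pushes the store out so the hardware table walker sees it. Wrapped as a helper for clarity (the function name is invented):

    /* Map the section containing virt straight to phys with prot bits. */
    static void map_section(unsigned long virt, unsigned long phys,
                            pmdval_t prot)
    {
            pmd_t *pmd = pmd_off_k(virt);   /* kernel pmd slot for virt */

            *pmd = __pmd(phys | prot);      /* single leaf entry */
            flush_pmd_entry(pmd);           /* make the walker see it */
    }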
/linux-master/arch/parisc/kernel/
vmlinux.lds.S 17 #define BSS_FIRST_SECTIONS *(.data..vm0.pmd) \

Completed in 279 milliseconds
