Searched refs:pmd (Results 126 - 150 of 300) sorted by relevance


/linux-master/arch/arm/mm/
dump.c
216 }, { /* pmd */
310 static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start, argument
313 pte_t *pte = pte_offset_kernel(pmd, 0);
323 static const char *get_domain_name(pmd_t *pmd) argument
326 switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) {
344 pmd_t *pmd = pmd_offset(pud, 0); local
349 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
351 domain = get_domain_name(pmd);
352 if (pmd_none(*pmd) || pmd_leaf(*pmd) || !pmd_presen
[all...]
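
Note on the dump.c matches: they come from the dumper's per-level walk. A hedged sketch of that loop, assuming kernel context (pmd_offset(), pmd_none(), pmd_leaf(), PTRS_PER_PMD and PMD_SIZE from <linux/pgtable.h>); record_entry() and descend_pte() are hypothetical stand-ins for the dumper's note_page()/walk_pte():

/* Visit every pmd entry under a pud; descend only into present
 * table entries, not leaf (section) mappings or empty slots. */
static void walk_pmd_sketch(pud_t *pud, unsigned long start)
{
        pmd_t *pmd = pmd_offset(pud, 0);
        unsigned long addr;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
                addr = start + i * PMD_SIZE;
                if (pmd_none(*pmd) || pmd_leaf(*pmd))
                        record_entry(addr, *pmd);  /* hypothetical: log and skip */
                else
                        descend_pte(pmd, addr);    /* hypothetical: walk the ptes */
        }
}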
mmu.c
52 * The pmd table for the upper-most set of pages.
75 pmdval_t pmd; member in struct:cachepolicy
83 .pmd = PMD_SECT_UNCACHED,
88 .pmd = PMD_SECT_BUFFERED,
93 .pmd = PMD_SECT_WT,
98 .pmd = PMD_SECT_WB,
103 .pmd = PMD_SECT_WBWA,
113 * via the "pmd" value. This is used to ensure that on ARMv6 and later,
118 void __init init_default_cache_policy(unsigned long pmd) argument
122 initial_pmd_value = pmd;
377 pmd_t *pmd; local
748 arm_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot, void *(*alloc)(unsigned long sz)) argument
760 early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) argument
766 alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type, void *(*alloc)(unsigned long sz), bool ng) argument
780 __map_init_section(pmd_t *pmd, unsigned long addr, unsigned long end, phys_addr_t phys, const struct mem_type *type, bool ng) argument
812 pmd_t *pmd = pmd_offset(pud, addr); local
918 pmd_t *pmd = pmd_offset(pud, addr); local
1093 pmd_t *pmd; local
[all...]
ioremap.c
154 pmd_t pmd = *pmdp; local
156 if (!pmd_none(pmd)) {
170 if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
171 pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
192 pmd_t *pmd = pmd_off_k(addr); local
201 pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
203 pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
205 flush_pmd_entry(pmd);
208 pmd += 2;
219 pmd_t *pmd local
[all...]
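
Note on the ioremap.c matches: 32-bit ARM's classic MMU makes Linux treat first-level entries in pairs, so one Linux pmd spans two 1 MiB hardware sections, which is why lines 201-208 write both halves before flushing. A hedged sketch of that paired update, assuming kernel context; the helpers (__pmd(), __pfn_to_phys(), flush_pmd_entry()) are the ones visible in the hits, the function name is hypothetical:

static void set_section_pair_sketch(pmd_t *pmd, unsigned long pfn,
                                    pmdval_t prot_sect)
{
        /* fill both 1 MiB halves of the 2 MiB pmd ... */
        pmd[0] = __pmd(__pfn_to_phys(pfn) | prot_sect);
        pfn += SZ_1M >> PAGE_SHIFT;
        pmd[1] = __pmd(__pfn_to_phys(pfn) | prot_sect);
        /* ... then make the pair visible to the hardware walker */
        flush_pmd_entry(pmd);
}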
init.c
369 pmd_t *pmd; local
371 pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
374 pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
377 pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
379 pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
381 flush_pmd_entry(pmd);
/linux-master/arch/sparc/mm/
leon_mm.c
40 unsigned int pgd, pmd, ped; local
93 pmd = LEON_BYPASS_LOAD_PA(ptr);
94 if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
96 printk(KERN_INFO "swprobe: pmd is entry level 2\n");
98 pte = pmd;
99 paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
102 if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
104 printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
109 printk(KERN_INFO "swprobe: --- pmd (%x) ---\n", pmd);
[all...]
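
Note on the leon_mm.c matches: the SPARC SRMMU tags every table entry with a two-bit entry type, which is what swprobe() is switching on above. A self-contained illustration of that decode (the SRMMU_ET_* names and values follow the SRMMU encoding the kernel uses):

#include <stdio.h>

#define SRMMU_ET_MASK    0x3
#define SRMMU_ET_INVALID 0x0   /* unmapped */
#define SRMMU_ET_PTD     0x1   /* pointer to the next-level table */
#define SRMMU_ET_PTE     0x2   /* leaf entry: translation complete */

static const char *srmmu_decode(unsigned int entry)
{
        switch (entry & SRMMU_ET_MASK) {
        case SRMMU_ET_PTE: return "PTE (leaf)";
        case SRMMU_ET_PTD: return "PTD (descend)";
        default:           return "invalid";
        }
}

int main(void)
{
        printf("%s\n", srmmu_decode(0x40000002));  /* tagged as a PTE */
        printf("%s\n", srmmu_decode(0x00400001));  /* tagged as a PTD */
        return 0;
}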
/linux-master/arch/arm/include/asm/
pgtable-3level-types.h
25 typedef struct { pmdval_t pmd; } pmd_t; member in struct:__anon47
30 #define pmd_val(x) ((x).pmd)
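
Note on the pmd_t typedef above: wrapping the raw value in a one-member struct is the kernel's strong-typing idiom; pmd_t becomes incompatible with plain integers, so mixups fail at compile time. A self-contained user-space illustration (pmdval_t, pmd_val() and __pmd() mirror the kernel names):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pmdval_t;
typedef struct { pmdval_t pmd; } pmd_t;

#define pmd_val(x) ((x).pmd)            /* unwrap the raw value */
#define __pmd(x)   ((pmd_t) { (x) })    /* wrap a raw value */

int main(void)
{
        pmd_t e = __pmd(0x1000 | 0x3);
        printf("raw entry: %#llx\n", (unsigned long long)pmd_val(e));
        /* e = 0x2000;   <- would not compile: pmd_t is not an integer */
        return 0;
}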
/linux-master/arch/powerpc/mm/kasan/
book3s_32.c
51 pmd_t *pmd = pmd_off_k(k_cur); local
54 __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
/linux-master/arch/um/kernel/
mem.c
83 static void __init one_page_table_init(pmd_t *pmd) argument
85 if (pmd_none(*pmd)) {
92 set_pmd(pmd, __pmd(_KERNPG_TABLE +
94 BUG_ON(pte != pte_offset_kernel(pmd, 0));
117 pmd_t *pmd; local
131 pmd = pmd_offset(pud, vaddr);
132 for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
133 one_page_table_init(pmd);
tlb.c
217 static inline int update_pte_range(pmd_t *pmd, unsigned long addr, argument
224 pte = pte_offset_kernel(pmd, addr);
255 pmd_t *pmd; local
259 pmd = pmd_offset(pud, addr);
262 if (!pmd_present(*pmd)) {
263 if (hvc->force || pmd_newpage(*pmd)) {
265 pmd_mkuptodate(*pmd);
268 else ret = update_pte_range(pmd, addr, next, hvc);
269 } while (pmd++, addr = next, ((addr < end) && !ret));
357 pmd_t *pmd; local
460 pmd_t *pmd; local
[all...]
/linux-master/arch/powerpc/include/asm/
pgtable-types.h
35 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon29
39 return x.pmd;
pgtable-be-types.h
23 typedef struct { __be64 pmd; } pmd_t; member in struct:__anon20
28 return be64_to_cpu(x.pmd);
33 return x.pmd;
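
Note on the __be64 variant above: book3s-64 hash page tables are kept big-endian in memory, so pmd_val() must byte-swap on little-endian hosts. A user-space illustration, with glibc's htobe64()/be64toh() standing in for the kernel's cpu_to_be64()/be64_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pmd; } pmd_t;   /* stored big-endian */

static uint64_t pmd_val(pmd_t x) { return be64toh(x.pmd); }
static pmd_t __pmd(uint64_t v) { return (pmd_t){ htobe64(v) }; }

int main(void)
{
        pmd_t e = __pmd(0x8000000000000003ULL);
        printf("pmd_val = %#llx\n", (unsigned long long)pmd_val(e));
        return 0;
}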
/linux-master/arch/arm/lib/
uaccess_with_memcpy.c
28 pmd_t *pmd; local
45 pmd = pmd_offset(pud, addr);
46 if (unlikely(pmd_none(*pmd)))
50 * A pmd can be bad if it refers to a HugeTLB or THP page.
52 * Both THP and HugeTLB pages have the same pmd layout
59 if (unlikely(pmd_thp_or_huge(*pmd))) {
62 if (unlikely(!pmd_thp_or_huge(*pmd)
63 || pmd_hugewillfault(*pmd))) {
73 if (unlikely(pmd_bad(*pmd)))
76 pte = pte_offset_map_lock(current->mm, pmd, add
[all...]
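
Note on the uaccess_with_memcpy.c matches: the comment in the hit explains why a "bad" pmd is not necessarily corrupt, since THP and HugeTLB leaves share the section layout. A hedged sketch of the resulting check, using the ARM helpers visible in the hits (pmd_thp_or_huge(), pmd_hugewillfault()); the function name is hypothetical:

/* May the fault-free copy path use this user pmd? */
static int user_pmd_is_safe_sketch(pmd_t *pmd)
{
        if (pmd_none(*pmd))
                return 0;                        /* nothing mapped */
        if (pmd_thp_or_huge(*pmd))               /* huge leaf entry */
                return !pmd_hugewillfault(*pmd); /* ok only if it won't fault */
        if (pmd_bad(*pmd))
                return 0;                        /* genuinely corrupt */
        return 1;                                /* ordinary pte table */
}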
/linux-master/mm/
page_vma_mapped.c
22 pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd,
30 * in case *pvmw->pmd changes underneath us; so we need to
35 pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd,
153 * must be set. pmd, pte and ptl must be NULL.
155 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte point
159 * If @pvmw->pmd is set but @pvmw->pte is not, you have found PMD-mapped page
164 * regardless of which page table level the page is mapped at. @pvmw->pmd is
184 /* The only possible pmd mapping has been handled on last iteration */
185 if (pvmw->pmd && !pvmw->pte)
230 pvmw->pmd
[all...]
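
Note on the page_vma_mapped.c matches: the comments in the hits spell out the walker's contract. A hedged sketch of the usual caller loop (mm/rmap.c style), assuming the DEFINE_FOLIO_VMA_WALK initializer from <linux/rmap.h> and the struct fields shown above:

DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);

while (page_vma_mapped_walk(&pvmw)) {
        if (pvmw.pte) {
                /* one pte mapping of the folio; pvmw.ptl is held */
        } else {
                /* pvmw.pmd set but pvmw.pte NULL: PMD-mapped THP */
        }
}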
hmm.c
53 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
177 pmd_t pmd)
179 if (pmd_protnone(pmd))
181 return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
189 pmd_t pmd)
198 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
204 pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
212 unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
332 pmd_t pmd; local
335 pmd
176 pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) argument
187 hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, unsigned long end, unsigned long hmm_pfns[], pmd_t pmd) argument
[all...]
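
Note on the hmm.c matches: for a huge pmd, the pfn reported for an address is the pmd's base pfn plus the page index inside the mapping, which is what the expression at line 204 computes. A self-contained arithmetic illustration, assuming 4 KiB pages and a 2 MiB pmd:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_MASK   (~((1UL << PMD_SHIFT) - 1))

int main(void)
{
        unsigned long base_pfn = 0x80000;   /* pfn of the huge page */
        unsigned long addr = 0x40003000;    /* address inside the mapping */
        unsigned long pfn = base_pfn + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        printf("pfn = %#lx\n", pfn);        /* base + 3 */
        return 0;
}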
/linux-master/arch/arm64/include/asm/
pgtable-types.h
28 typedef struct { pmdval_t pmd; } pmd_t; member in struct:__anon12
29 #define pmd_val(x) ((x).pmd)
/linux-master/arch/powerpc/include/asm/book3s/64/
radix.h
238 static inline int radix__pmd_bad(pmd_t pmd) argument
240 return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
265 static inline int radix__pmd_trans_huge(pmd_t pmd) argument
267 return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
270 static inline pmd_t radix__pmd_mkhuge(pmd_t pmd) argument
272 return __pmd(pmd_val(pmd) | _PAGE_PTE);
318 static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd) argument
320 return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
/linux-master/fs/proc/
task_mmu.c
572 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, argument
581 if (pmd_present(*pmd)) {
582 page = vm_normal_page_pmd(vma, addr, *pmd);
583 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
584 swp_entry_t entry = pmd_to_swp_entry(*pmd);
602 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
606 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, argument
612 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, argument
619 ptl = pmd_trans_huge_lock(pmd, vm
1131 pmd_t old, pmd = *pmdp; local
1157 clear_refs_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
1471 pmd_t pmd = *pmdp; local
1840 pagemap_thp_category(struct pagemap_scan_private *p, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
1886 pmd_t old, pmd = *pmdp; local
2098 pagemap_scan_thp_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) argument
2149 pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk) argument
2598 can_gather_numa_stats_pmd(pmd_t pmd, struct vm_area_struct *vma, unsigned long addr) argument
2623 gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
[all...]
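
Note on the task_mmu.c matches: smaps_pte_range() follows the standard split, where pmd_trans_huge_lock() returns the held lock when the pmd is a stable huge entry and NULL otherwise. A hedged sketch of that shape, assuming kernel context; handle_huge_pmd() and handle_pte_range() are hypothetical:

static int pmd_range_sketch(pmd_t *pmd, unsigned long addr,
                            unsigned long end, struct mm_walk *walk)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

        if (ptl) {
                /* *pmd is a stable huge entry while ptl is held */
                handle_huge_pmd(pmd, addr, walk->vma);
                spin_unlock(ptl);
                return 0;
        }
        /* not huge: fall back to walking the individual ptes */
        return handle_pte_range(pmd, addr, end, walk);
}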
/linux-master/arch/powerpc/kvm/
book3s_64_mmu_radix.c
408 pmd_t *pmd; local
410 pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
411 /* pud_populate() will only reference _pa(pmd). */
412 kmemleak_ignore(pmd);
414 return pmd;
497 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, argument
501 pmd_t *p = pmd;
523 kvmppc_pmd_free(pmd);
538 pmd_t *pmd; local
540 pmd
574 kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, unsigned long gpa, u64 lpid) argument
593 pmd_t *pmd = pmd_offset(pud, 0); local
623 pmd_t *pmd, *new_pmd = NULL; local
1314 pmd_t pmd, *pmdp; local
[all...]
/linux-master/arch/mips/kvm/
mmu.c
109 pmd_t *pmd; local
128 pmd = pmd_offset(pud, addr);
129 if (pmd_none(*pmd)) {
136 pmd_populate_kernel(NULL, pmd, new_pte);
138 return pte_offset_kernel(pmd, addr);
150 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
171 static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa, argument
182 if (!pmd_present(pmd[i]))
185 pte = pte_offset_kernel(pmd + i, 0);
190 pmd_clear(pmd
202 pmd_t *pmd; local
[all...]
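
Note on the mips kvm/mmu.c matches: pte tables are allocated on demand and installed with pmd_populate_kernel() before the pte slot is returned, as in the hit at lines 128-138. A hedged sketch of that pattern; alloc_pte_page() is a hypothetical stand-in for the cache allocation in the hit:

static pte_t *pte_for_addr_sketch(pmd_t *pmd, unsigned long addr)
{
        if (pmd_none(*pmd)) {
                pte_t *new_pte = alloc_pte_page();   /* hypothetical */

                if (!new_pte)
                        return NULL;
                pmd_populate_kernel(NULL, pmd, new_pte);
        }
        return pte_offset_kernel(pmd, addr);
}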
/linux-master/arch/alpha/mm/
init.c
150 pmd_t *pmd; local
191 pmd = pmd_offset(pud, VMALLOC_START);
192 pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
220 if (pmd != pmd_offset(pud, vaddr)) {
222 pmd = pmd_offset(pud, vaddr);
223 pmd_set(pmd, (pte_t *)kernel_end);
226 set_pte(pte_offset_kernel(pmd, vaddr),
/linux-master/arch/mips/include/asm/
pgtable-32.h
122 * Empty pgd/pmd entries point to the invalid_pte_table.
124 static inline int pmd_none(pmd_t pmd) argument
126 return pmd_val(pmd) == (unsigned long) invalid_pte_table;
129 static inline int pmd_bad(pmd_t pmd) argument
132 /* pmd_huge(pmd) but inline */
133 if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
137 if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
143 static inline int pmd_present(pmd_t pmd) argument
145 return pmd_val(pmd) != (unsigned long) invalid_pte_table;
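
Note on the mips pgtable-32.h matches: an empty pgd/pmd slot points at a shared invalid_pte_table rather than holding zero, so a lookup can always dereference one level down without a NULL check (the TLB refill path relies on this). A self-contained illustration of the sentinel-table idea:

#include <stdio.h>

#define ENTRIES 4

/* Shared sentinel: all-zero "invalid" ptes that empty slots point at. */
static unsigned long invalid_pte_table[ENTRIES];

static unsigned long *pmd_table[ENTRIES] = {
        invalid_pte_table, invalid_pte_table,
        invalid_pte_table, invalid_pte_table,
};

static int pmd_none_demo(unsigned long *pmd)
{
        return pmd == invalid_pte_table;   /* empty == points at sentinel */
}

int main(void)
{
        /* dereferencing an "empty" slot is safe: it yields an invalid pte */
        printf("slot 2 empty: %d, pte: %lu\n",
               pmd_none_demo(pmd_table[2]), pmd_table[2][0]);
        return 0;
}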
/linux-master/arch/x86/include/asm/
pgtable_64.h
38 pr_err("%s:%d: bad pmd %p(%016lx)\n", \
81 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) argument
83 WRITE_ONCE(*pmdp, pmd);
86 static inline void native_pmd_clear(pmd_t *pmd) argument
88 native_set_pmd(pmd, native_make_pmd(0));
107 return native_make_pmd(xchg(&xp->pmd, 0));
240 #define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
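
Note on the pgtable_64.h matches: the get-and-clear at line 107 uses xchg() so the old entry comes back and the slot goes to zero in one atomic step; a concurrent walk sees either the old pmd or none. A user-space model with C11 atomics standing in for the kernel's xchg():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t slot = 0x1000 | 0x3;   /* a live entry */

int main(void)
{
        /* swap in zero and read the old value in one atomic operation */
        uint64_t old = atomic_exchange(&slot, 0);

        printf("old %#llx, now %#llx\n",
               (unsigned long long)old,
               (unsigned long long)atomic_load(&slot));
        return 0;
}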
/linux-master/include/asm-generic/
pgalloc.h
146 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) argument
148 struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
150 BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
/linux-master/mm/damon/
ops-common.c
54 void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr) argument
57 struct folio *folio = damon_get_folio(pmd_pfn(pmdp_get(pmd)));
62 if (pmdp_clear_young_notify(vma, addr, pmd))
/linux-master/arch/powerpc/mm/
hugetlbpage.c
347 static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, argument
351 pgtable_t token = pmd_pgtable(*pmd);
356 pmd_clear(pmd);
365 pmd_t *pmd; local
373 pmd = pmd_offset(pud, addr);
375 if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
376 if (pmd_none_or_clear_bad(pmd))
385 hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);
395 more = addr + (1UL << hugepd_shift(*(hugepd_t *)pmd));
399 free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIF
[all...]

Completed in 205 milliseconds
