Searched refs:pmd (Results 101 - 125 of 300) sorted by relevance


/linux-master/mm/
huge_memory.c
762 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) argument
765 pmd = pmd_mkwrite(pmd, vma);
766 return pmd;
901 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
902 if (unlikely(!pmd_none(*vmf->pmd))) {
925 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
926 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
927 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
983 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
982 set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) argument
1057 insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) argument
1231 touch_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, bool write) argument
1244 follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) argument
1291 pmd_t pmd; local
1600 can_change_pmd_writable(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd) argument
1631 can_follow_write_pmd(pmd_t pmd, struct page *page, struct vm_area_struct *vma, unsigned int flags) argument
1668 follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) argument
1717 pmd_t pmd; local
1802 madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next) argument
1872 zap_deposited_table(struct mm_struct *mm, pmd_t *pmd) argument
1881 zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) argument
1962 move_soft_dirty_pmd(pmd_t pmd) argument
1977 pmd_t pmd; local
2029 change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags) argument
2307 __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) argument
2391 __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd) argument
2432 __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze) argument
2649 __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio) argument
2687 pmd_t *pmd = mm_find_pmd(vma->vm_mm, address); local
[all...]
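The huge_memory.c hits above (pmd_lock, the pmd_none() recheck, pgtable_trans_huge_deposit, set_pmd_at, update_mmu_cache_pmd) follow a lock/recheck/publish pattern. Below is a minimal userspace sketch of that pattern only; the pmd_t typedef, the mutex standing in for the pmd lock, and the install_huge() helper are simplified stand-ins, not kernel code.

/* Toy userspace sketch of the install pattern visible in the hits above:
 * take the lock, bail if the slot is no longer empty, only then publish
 * the huge entry.  All names here are illustrative, not the kernel's. */
#include <pthread.h>
#include <stdio.h>

typedef unsigned long pmd_t;

static pmd_t pmd_slot;                          /* 0 plays the role of pmd_none() */
static pthread_mutex_t pmd_lock = PTHREAD_MUTEX_INITIALIZER;

static int install_huge(pmd_t entry)
{
        int ret = 0;

        pthread_mutex_lock(&pmd_lock);          /* vmf->ptl = pmd_lock(...) analogue */
        if (pmd_slot != 0) {                    /* if (unlikely(!pmd_none(*vmf->pmd))) */
                ret = -1;                       /* lost the race; back out */
        } else {
                /* here the kernel would first deposit the preallocated pte
                 * table (pgtable_trans_huge_deposit) before set_pmd_at() */
                pmd_slot = entry;               /* set_pmd_at() analogue */
        }
        pthread_mutex_unlock(&pmd_lock);
        return ret;
}

int main(void)
{
        printf("first install:  %d\n", install_huge(0x200000));
        printf("second install: %d\n", install_huge(0x400000));  /* slot already taken */
        return 0;
}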
page_table_check.c
163 void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd) argument
168 if (pmd_user_accessible_page(pmd)) {
169 page_table_check_clear(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT);
200 void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd) argument
206 if (pmd_user_accessible_page(pmd)) {
207 page_table_check_set(pmd_pfn(pmd), PMD_SIZE >> PAGE_SHIFT,
208 pmd_write(pmd));
228 pmd_t pmd)
233 if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
226 __page_table_check_pte_clear_range(struct mm_struct *mm, unsigned long addr, pmd_t pmd) argument
[all...]
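The page_table_check hits account for one pmd-level mapping as PMD_SIZE >> PAGE_SHIFT base pages. A quick arithmetic check of that count, assuming x86-64-style 4 KiB pages and 2 MiB PMD mappings (other architectures use different constants):

/* Toy arithmetic only -- the shifts below are assumed x86-64 values. */
#include <stdio.h>

#define PAGE_SHIFT 12                      /* 4 KiB base pages (assumed) */
#define PMD_SHIFT  21                      /* 2 MiB PMD mappings (assumed) */
#define PMD_SIZE   (1UL << PMD_SHIFT)

int main(void)
{
        /* The page count passed as the second argument to
         * page_table_check_clear()/page_table_check_set() in the hits. */
        printf("PMD_SIZE >> PAGE_SHIFT = %lu pages\n", PMD_SIZE >> PAGE_SHIFT);
        return 0;
}

With these values it prints 512 pages per pmd mapping.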
sparse-vmemmap.c
144 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, argument
148 pte_t *pte = pte_offset_kernel(pmd, addr);
189 pmd_t *pmd = pmd_offset(pud, addr); local
190 if (pmd_none(*pmd)) {
194 pmd_populate_kernel(&init_mm, pmd, p);
196 return pmd;
252 pmd_t *pmd; local
264 pmd = vmemmap_pmd_populate(pud, addr, node);
265 if (!pmd)
267 pte = vmemmap_pte_populate(pmd, add
298 vmemmap_set_pmd(pmd_t *pmd, void *p, int node, unsigned long addr, unsigned long next) argument
303 vmemmap_check_pmd(pmd_t *pmd, int node, unsigned long addr, unsigned long next) argument
317 pmd_t *pmd; local
[all...]
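vmemmap_pmd_populate()/vmemmap_pte_populate() in the hits above allocate the next-level table only when the slot is still empty and reuse it afterwards. A small userspace sketch of that populate-if-none pattern follows; the types, the calloc() allocation, and the pmd_populate_if_none() helper are simplified stand-ins for the kernel's.

/* Userspace sketch: allocate a lower-level table the first time a pmd
 * slot is found empty, then keep reusing it. */
#include <stdio.h>
#include <stdlib.h>

#define PTRS_PER_PTE 512

typedef unsigned long pte_t;
typedef struct { pte_t *table; } pmd_t;          /* NULL plays the role of pmd_none() */

static pte_t *pmd_populate_if_none(pmd_t *pmd)
{
        if (!pmd->table) {                       /* if (pmd_none(*pmd)) */
                pmd->table = calloc(PTRS_PER_PTE, sizeof(pte_t));
                if (!pmd->table)
                        return NULL;             /* the kernel code also returns NULL on failure */
        }
        return pmd->table;                       /* rough pte_offset_kernel() analogue */
}

int main(void)
{
        pmd_t pmd = { NULL };

        pte_t *first  = pmd_populate_if_none(&pmd);
        pte_t *second = pmd_populate_if_none(&pmd);   /* same table is reused */

        printf("same table reused: %d\n", first == second);
        free(pmd.table);
        return 0;
}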
/linux-master/arch/sparc/include/asm/
pgtable_64.h
100 pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
321 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) argument
323 pte_t pte = __pte(pmd_val(pmd));
430 static inline bool is_hugetlb_pmd(pmd_t pmd) argument
432 return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
441 static inline pmd_t pmd_mkhuge(pmd_t pmd) argument
443 pte_t pte = __pte(pmd_val(pmd));
684 static inline bool pmd_leaf(pmd_t pmd) argument
686 pte_t pte = __pte(pmd_val(pmd));
691 static inline unsigned long pmd_pfn(pmd_t pmd) argument
699 pmd_write(pmd_t pmd) argument
710 pmd_dirty(pmd_t pmd) argument
718 pmd_young(pmd_t pmd) argument
725 pmd_trans_huge(pmd_t pmd) argument
732 pmd_mkold(pmd_t pmd) argument
741 pmd_wrprotect(pmd_t pmd) argument
750 pmd_mkdirty(pmd_t pmd) argument
759 pmd_mkclean(pmd_t pmd) argument
768 pmd_mkyoung(pmd_t pmd) argument
777 pmd_mkwrite_novma(pmd_t pmd) argument
794 pmd_present(pmd_t pmd) argument
821 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) argument
837 pmd_page_vaddr(pmd_t pmd) argument
918 pmd_t pmd = *pmdp; local
[all...]
/linux-master/arch/x86/power/
hibernate_64.c
30 pmd_t *pmd; local
64 pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
65 if (!pmd)
68 set_pmd(pmd + pmd_index(restore_jump_address),
71 __pud(__pa(pmd) | pgprot_val(pgtable_prot)));
hibernate.c
155 pmd_t *pmd; local
177 pmd = pmd_offset(pud, relocated_restore_code);
178 if (pmd_leaf(*pmd)) {
179 set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
182 pte = pte_offset_kernel(pmd, relocated_restore_code);
/linux-master/arch/arm/include/asm/
pgalloc.h
26 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) argument
28 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
38 #define pmd_free(mm, pmd) do { } while (0)
41 #define pud_populate(mm,pmd,pte) do { } while (0)
43 #define pud_populate(mm,pmd,pte) BUG()
119 * Populate the pmdp entry with a pointer to the pte. This pmd is part
128 * The pmd must be loaded with the physical address of the PTE table
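The pud_populate() hit above stores __pa(pmd) | PMD_TYPE_TABLE, and the quoted comments note that the pmd must likewise be loaded with the physical address of the PTE table. Because the tables are page aligned, the low bits of the address are free to carry type bits, which the sketch below illustrates; the 4 KiB alignment, the sample address, and the PMD_TYPE_TABLE value used here are assumptions, not the real ARM constants.

/* Userspace sketch, not kernel code: a page-aligned physical address can
 * carry type bits in its low bits and still be recovered with a mask. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK       (~(uint64_t)0xfff)   /* 4 KiB-aligned tables assumed */
#define PMD_TYPE_TABLE  ((uint64_t)0x1)      /* illustrative type bit, not the real ARM value */

int main(void)
{
        uint64_t table_pa = 0x40201000;                 /* hypothetical, page aligned */
        uint64_t entry    = table_pa | PMD_TYPE_TABLE;  /* what set_pud() would store */

        printf("entry      = %#llx\n", (unsigned long long)entry);
        printf("table addr = %#llx\n", (unsigned long long)(entry & PAGE_MASK));
        printf("type bits  = %#llx\n", (unsigned long long)(entry & ~PAGE_MASK));
        return 0;
}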
/linux-master/arch/powerpc/include/asm/book3s/64/
hash-4k.h
134 static inline int hash__pmd_trans_huge(pmd_t pmd) argument
139 static inline pmd_t hash__pmd_mkhuge(pmd_t pmd) argument
142 return pmd;
158 static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd) argument
161 return pmd;
/linux-master/arch/mips/mm/
pgtable-64.c
95 pmd_t pmd; local
97 pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
99 return pmd;
103 pmd_t *pmdp, pmd_t pmd)
105 *pmdp = pmd;
102 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) argument
/linux-master/arch/loongarch/mm/
pgtable.c
121 pmd_t pmd; local
123 pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
125 return pmd;
129 pmd_t *pmdp, pmd_t pmd)
131 *pmdp = pmd;
128 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) argument
/linux-master/arch/x86/kernel/
machine_kexec_32.c
80 pgd_t *pgd, pmd_t *pmd, pte_t *pte,
89 set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));
93 pmd = pmd_offset(pud, vaddr);
94 if (!(pmd_val(*pmd) & _PAGE_PRESENT))
95 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
96 pte = pte_offset_kernel(pmd, vaddr);
103 pmd_t *pmd = NULL; local
107 pmd = image->arch.pmd0;
110 image->arch.pgd, pmd, image->arch.pte0,
113 pmd
79 machine_kexec_page_table_set_one( pgd_t *pgd, pmd_t *pmd, pte_t *pte, unsigned long vaddr, unsigned long paddr) argument
[all...]
espfix_64.c
130 pmd_t pmd, *pmd_p; local
175 pmd = *pmd_p;
176 if (!pmd_present(pmd)) {
180 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
183 set_pmd(&pmd_p[n], pmd);
186 pte_p = pte_offset_kernel(&pmd, addr);
/linux-master/arch/mips/include/asm/
pgtable-64.h
30 * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
34 * invalid_pmd_table, each pmd entry is initialized to point to
72 * We used to implement 41 bits by having an order 1 pmd level but that seemed
158 printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
229 typedef struct { unsigned long pmd; } pmd_t; member in struct:__anon16
230 #define pmd_val(x) ((x).pmd)
238 * Empty pgd/pmd entries point to the invalid_pte_table.
240 static inline int pmd_none(pmd_t pmd) argument
242 return pmd_val(pmd)
245 pmd_bad(pmd_t pmd) argument
259 pmd_present(pmd_t pmd) argument
[all...]
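The comments quoted above describe the MIPS layout: a pgd page of pointers to pmd tables, pmd tables of pointers to pte tables, and empty entries that point at shared invalid_pmd_table/invalid_pte_table sentinels rather than NULL. Here is a toy userspace model of just that idea; the tiny table size, the struct layout, and the pmd_none() helper are illustrative, not the real MIPS definitions.

/* Toy model: empty entries point at shared "invalid" tables, so a walk
 * can always dereference one more level, and emptiness is detected by
 * comparing against the sentinel. */
#include <stdio.h>

#define PTRS_PER_TABLE 8   /* tiny tables for the example; real ones fill a page */

typedef struct { unsigned long pte; } pte_t;
typedef struct { pte_t *table; }      pmd_t;   /* pmd entry: pointer to a pte table */
typedef struct { pmd_t *table; }      pgd_t;   /* pgd entry: pointer to a pmd table */

static pte_t invalid_pte_table[PTRS_PER_TABLE];     /* all zero: no mapping */
static pmd_t invalid_pmd_table[PTRS_PER_TABLE];

static int pmd_none(pmd_t pmd) { return pmd.table == invalid_pte_table; }

int main(void)
{
        pmd_t pmd_dir[PTRS_PER_TABLE];
        pgd_t pgd_dir[PTRS_PER_TABLE];

        /* "each pmd entry is initialized to point to invalid_pte_table" */
        for (int i = 0; i < PTRS_PER_TABLE; i++) {
                invalid_pmd_table[i].table = invalid_pte_table;
                pmd_dir[i].table = invalid_pte_table;
                pgd_dir[i].table = invalid_pmd_table;
        }

        printf("pmd_none(pmd_dir[0]) = %d\n", pmd_none(pmd_dir[0]));
        return 0;
}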
/linux-master/arch/hexagon/include/asm/
pgtable.h
184 * @pmd_entry: pmd entry
188 static inline int pmd_none(pmd_t pmd) argument
190 return pmd_val(pmd) == _NULL_PMD;
199 static inline int pmd_present(pmd_t pmd) argument
201 return pmd_val(pmd) != (unsigned long)_NULL_PMD;
209 static inline int pmd_bad(pmd_t pmd) argument
217 #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
222 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIF
349 pmd_page_vaddr(pmd_t pmd) argument
[all...]
/linux-master/arch/nios2/mm/
ioremap.c
50 static inline int remap_area_pmd(pmd_t *pmd, unsigned long address, argument
64 pte_t *pte = pte_alloc_kernel(pmd, address);
71 pmd++;
91 pmd_t *pmd; local
100 pmd = pmd_alloc(&init_mm, pud, address);
101 if (!pmd)
103 if (remap_area_pmd(pmd, address, end - address,
/linux-master/arch/s390/boot/
vmem.c
162 static bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr, argument
167 pmd_populate(&init_mm, pmd, kasan_early_shadow_pte);
203 static inline bool kasan_pmd_populate_zero_shadow(pmd_t *pmd, unsigned long addr, argument
217 * Mimic virt_to_kpte() in lack of init_mm symbol. Skip pmd NULL check though.
289 static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long end, argument
295 pte = pte_offset_kernel(pmd, addr);
316 pmd_t *pmd, entry; local
319 pmd = pmd_offset(pud, addr);
320 for (; addr < end; addr = next, pmd++) {
322 if (pmd_none(*pmd)) {
350 pmd_t *pmd; local
[all...]
/linux-master/arch/sh/mm/
fault.c
58 pmd_t *pmd; local
95 pmd = pmd_offset(pud, addr);
97 pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
98 (u64)pmd_val(*pmd));
100 if (pmd_none(*pmd))
103 if (pmd_bad(*pmd)) {
109 if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
112 pte = pte_offset_kernel(pmd, addr);
126 pmd_t *pmd, *pmd_ local
[all...]
/linux-master/arch/parisc/kernel/
pci-dma.c
104 static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr, argument
115 pte_t * pte = pte_alloc_kernel(pmd, vaddr);
122 pmd++;
137 pmd_t *pmd; local
141 pmd = pmd_alloc(NULL, pud, vaddr);
143 if (!pmd)
145 if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
153 static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr, argument
160 if (pmd_none(*pmd))
162 if (pmd_bad(*pmd)) {
192 pmd_t * pmd; local
[all...]
/linux-master/arch/powerpc/mm/book3s64/
pgtable.c
102 * set a new huge pmd. We should not be called for updating
103 * an existing pmd entry. That should go via pmd_hugepage_update.
106 pmd_t *pmdp, pmd_t pmd)
116 WARN_ON(!(pmd_leaf(pmd)));
118 trace_hugepage_set_pmd(addr, pmd_val(pmd));
119 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
152 * pmd_t we want to prevent transit from pmd pointing to page table
153 * to pmd pointing to huge page (and back) while interrupts are disabled.
154 * We clear pmd to possibly replace it with page table pointer in
166 * hugepte to regular pmd entr
105 set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd) argument
181 pmd_t pmd; local
215 pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) argument
253 pmd_modify(pmd_t pmd, pgprot_t newprot) argument
428 pmd_t *pmd; local
437 pmd_fragment_free(unsigned long *pmd) argument
[all...]
/linux-master/arch/m68k/mm/
init.c
112 pmd_t *pmd = &pmd_dir[j]; local
115 if (!pmd_present(*pmd))
118 pte_dir = (pte_t *)pmd_page_vaddr(*pmd);
/linux-master/arch/m68k/include/asm/
mcf_pgalloc.h
31 #define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
69 * In our implementation, each pgd entry contains 1 pmd that is never allocated
72 #define pmd_free(mm, pmd) BUG()
/linux-master/arch/alpha/include/asm/
pgtable.h
217 pmd_page_vaddr(pmd_t pmd) argument
219 return ((pmd_val(pmd) & _PFN_MASK) >> (32-PAGE_SHIFT)) + PAGE_OFFSET;
222 #define pmd_pfn(pmd) (pmd_val(pmd) >> 32)
223 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> 32))
238 extern inline int pmd_none(pmd_t pmd) { return !pmd_val(pmd); } argument
239 extern inline int pmd_bad(pmd_t pmd) { return (pmd_val(pmd) argument
240 pmd_present(pmd_t pmd) argument
[all...]
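The Alpha hits decode the pmd value with pmd_pfn() as a plain shift by 32 and pmd_page_vaddr() as ((pmd_val & _PFN_MASK) >> (32 - PAGE_SHIFT)) + PAGE_OFFSET, i.e. the pfn lives in the upper 32 bits of the entry. A quick userspace check of that decoding follows; the _PFN_MASK, PAGE_SHIFT, and PAGE_OFFSET values below are stand-ins chosen for illustration rather than taken from the Alpha headers.

/* Userspace check of the decode formulas quoted above; constants are
 * illustrative assumptions, not the real Alpha definitions. */
#include <stdio.h>

#define PAGE_SHIFT  13                          /* Alpha-style 8 KiB pages (assumed) */
#define _PFN_MASK   0xFFFFFFFF00000000ULL       /* assumed: pfn kept in bits 63..32 */
#define PAGE_OFFSET 0xfffffc0000000000ULL       /* illustrative direct-map base */

int main(void)
{
        unsigned long long pmd_val = (0x1234ULL << 32) | 0x1;   /* pfn 0x1234 plus low flag bits */

        unsigned long long pfn   = pmd_val >> 32;
        unsigned long long vaddr = ((pmd_val & _PFN_MASK) >> (32 - PAGE_SHIFT)) + PAGE_OFFSET;

        printf("pmd_pfn        = %#llx\n", pfn);
        printf("pmd_page_vaddr = %#llx\n", vaddr);
        return 0;
}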
/linux-master/arch/microblaze/include/asm/
pgtable.h
226 #define pmd_none(pmd) (!pmd_val(pmd))
227 #define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
228 #define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
374 /* Convert pmd entry to page */
375 /* our pmd entry is an effective address of pte table*/
376 /* returns effective address of the pmd entry*/
377 static inline unsigned long pmd_page_vaddr(pmd_t pmd) argument
[all...]
/linux-master/arch/x86/include/asm/
pgtable-3level_types.h
26 pmdval_t pmd; member in union:__anon39
/linux-master/arch/x86/mm/
mem_encrypt_identity.c
118 pmd_t *pmd; local
138 pmd = ppd->pgtable_area;
139 memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
140 ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
141 set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
153 pmd_t *pmd; local
159 pmd = pmd_offset(pud, ppd->vaddr);
160 if (pmd_leaf(*pmd))
163 set_pmd(pmd, __pm
169 pmd_t *pmd; local
[all...]

