/linux-master/arch/s390/mm/
hugetlbpage.c
    30:  * Convert encoding    pte bits    pmd / pud bits
    85:  * Convert encoding    pmd / pud bits    pte bits
   236: int pmd_huge(pmd_t pmd)
   238:     return pmd_leaf(pmd);
gmap.c
   560:     pmd_t *pmd;
   603:     pmd = pmd_offset(pud, vmaddr);
   604:     VM_BUG_ON(pmd_none(*pmd));
   606:     if (pmd_leaf(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
   612:     ptl = pmd_lock(mm, pmd);
   618:     if (pmd_leaf(*pmd)) {
   619:         *table = (pmd_val(*pmd) &
   623:         *table = pmd_val(*pmd) &
   627:         !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
   922:  * and return the pmd pointer
  2521: thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk)
  2563: __zap_zero_pages(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *walk)
  2638: __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk)
  2649:     pmd_t *pmd = (pmd_t *)pte;
  [all...]
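The gmap.c matches show the canonical walk-and-lock sequence: descend with pmd_offset(), then take the per-pmd spinlock before trusting pmd_leaf(). A minimal sketch of that pattern (hypothetical helper, not from gmap.c; it checks every level rather than assuming a populated upper table):

static bool addr_maps_pmd_leaf(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        spinlock_t *ptl;
        bool leaf;

        if (pgd_none_or_clear_bad(pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none_or_clear_bad(p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
                return false;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        ptl = pmd_lock(mm, pmd);        /* stabilize against split/zap */
        leaf = pmd_leaf(*pmd);
        spin_unlock(ptl);
        return leaf;
}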
/linux-master/arch/x86/include/asm/ |
pgtable_types.h
   284: /* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
   288:  * Extracts the flags from a (pte|pmd|pud|pgd)val_t
   396:     return (pmd_t) { .pmd = val };
   399: static inline pmdval_t native_pmd_val(pmd_t pmd)
   401:     return pmd.pmd;
   411: static inline pmdval_t native_pmd_val(pmd_t pmd)
   413:     return native_pgd_val(pmd.pud.p4d.pgd);
   451: static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
   453:     if (native_pmd_val(pmd) ...
   459: pmd_flags_mask(pmd_t pmd)
   464: pmd_flags(pmd_t pmd)
   [all...]
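The helpers listed here split a raw x86 pmd value into its PFN bits and flag bits. A simplified sketch of that relationship (hypothetical helpers; it ignores the PSE/PAT leaf-entry quirk the real pmd_pfn_mask()/pmd_flags_mask() handle):

static pmdval_t pmd_pfn_bits(pmd_t pmd)
{
        return native_pmd_val(pmd) & PTE_PFN_MASK;      /* physical frame */
}

static pmdval_t pmd_flag_bits(pmd_t pmd)
{
        return native_pmd_val(pmd) & ~PTE_PFN_MASK;     /* everything else */
}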
/linux-master/arch/riscv/mm/ |
init.c
   212:  * any allocation to happen between _end and the next pmd aligned page.
  1197:  * FIX_BTMAP_BEGIN should lie in the same pmd. Verify that and warn
  1401: void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
  1404:     pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
  1444:     pmd_t *pmd;
  1462:     lvl = "pmd";
  1463:     pmd = pmd_alloc(&init_mm, pud, addr);
  1464:     if (!pmd)
/linux-master/arch/riscv/include/asm/ |
pgtable.h
   210: static inline int pmd_present(pmd_t pmd)
   218:     return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
   221: static inline int pmd_present(pmd_t pmd)
   223:     return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
   227: static inline int pmd_none(pmd_t pmd)
   229:     return (pmd_val(pmd) == 0);
   232: static inline int pmd_bad(pmd_t pmd)
   234:     return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
   238: static inline bool pmd_leaf(pmd_t pmd)
   243: set_pmd(pmd_t *pmdp, pmd_t pmd)
   267: pmd_page(pmd_t pmd)
   272: pmd_page_vaddr(pmd_t pmd)
   277: pmd_pte(pmd_t pmd)
   451: pmd_protnone(pmd_t pmd)
   632: pmd_mkhuge(pmd_t pmd)
   637: pmd_mkinvalid(pmd_t pmd)
   644: pmd_pfn(pmd_t pmd)
   656: pmd_modify(pmd_t pmd, pgprot_t newprot)
   662: pmd_write(pmd_t pmd)
   674: pmd_dirty(pmd_t pmd)
   680: pmd_young(pmd_t pmd)
   685: pmd_user(pmd_t pmd)
   690: pmd_mkold(pmd_t pmd)
   695: pmd_mkyoung(pmd_t pmd)
   700: pmd_mkwrite_novma(pmd_t pmd)
   705: pmd_wrprotect(pmd_t pmd)
   710: pmd_mkclean(pmd_t pmd)
   715: pmd_mkdirty(pmd_t pmd)
   720: set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd)
   740: pmd_user_accessible_page(pmd_t pmd)
   752: pmd_trans_huge(pmd_t pmd)
   776:     pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
   791: pmdp_establish(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t pmd)
   [all...]
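Most of the riscv pmd_mk*() helpers above are pure functions: they take a pmd_t by value and return a modified copy, typically by round-tripping through the pte helpers via pmd_pte()/pte_pmd(). A hedged sketch of how they compose (hypothetical helper; nothing reaches memory until set_pmd_at()):

static void pmd_mark_young_dirty(struct mm_struct *mm, unsigned long addr,
                                 pmd_t *pmdp)
{
        pmd_t pmd = pmdp_get(pmdp);

        pmd = pmd_mkdirty(pmd_mkyoung(pmd));    /* modify the copy */
        set_pmd_at(mm, addr, pmdp, pmd);        /* publish the result */
}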
/linux-master/mm/ |
hugetlb.c
   416:  * participate in pmd sharing. This is only a possible
  5396:  * returned ptep could go away if part of a shared pmd and
  5830:  * for pmd sharing. And, i_mmap_rwsem is required to set up
  5831:  * pmd sharing. This is important as page tables for this
  7114:  * vma specific semaphore used for pmd sharing and fault/truncation
  7347:  * Determine if start,end range within vma could be mapped by shared pmd.
  7349:  * shared pmd mappings.
  7374:  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  7376:  * !shared pmd case because we can allocate the pmd late
  7533:     pmd_t *pmd;
  [all...]
memory-failure.c
   395:     pmd_t *pmd;
   411:     pmd = pmd_offset(pud, address);
   412:     if (!pmd_present(*pmd))
   414:     if (pmd_devmap(*pmd))
   416:     pte = pte_offset_map(pmd, address);
   775:     pmd_t pmd = *pmdp;
   779:     if (!pmd_present(pmd))
   781:     pfn = pmd_pfn(pmd);
  1198:  * To narrow down kill region to one page, we need to break up pmd.
madvise.c
   171: static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
   186:     ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
   324: static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
   347:     if (pmd_trans_huge(*pmd)) {
   352:     ptl = pmd_trans_huge_lock(pmd, vma);
   356:     orig_pmd = *pmd;
   390:     pmdp_invalidate(vma, addr, pmd);
   393:     set_pmd_at(mm, addr, pmd, orig_pmd);
   394:     tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
   421:     start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ...
   619: madvise_free_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk)
   [all...]
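madvise_cold_or_pageout_pte_range() and madvise_free_pte_range() both open with the same two-tier locking dance: try pmd_trans_huge_lock() for the huge case, otherwise map the pte table under its own lock. A stripped-down, hypothetical walker body showing just that skeleton (assumes THP is configured in):

static int walk_pmd_sketch(pmd_t *pmd, unsigned long addr,
                           unsigned long end, struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        pte_t *start_pte, *pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                /* *pmd is a stable huge entry while ptl is held */
                spin_unlock(ptl);
                return 0;
        }

        start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        if (!start_pte)
                return 0;       /* lost a race with collapse/zap */
        for (; addr < end; pte++, addr += PAGE_SIZE) {
                /* examine ptep_get(pte) here */
        }
        pte_unmap_unlock(start_pte, ptl);
        return 0;
}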
internal.h
   209: void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
   746:  * under page table lock for the pte/pmd being added or removed.
   790: extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
  1106:     unsigned long addr, pmd_t *pmd,
huge_memory.c
   762: pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
   765:     pmd = pmd_mkwrite(pmd, vma);
   766:     return pmd;
   901:     vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
   902:     if (unlikely(!pmd_none(*vmf->pmd))) {
   925:     pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
   926:     set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
   927:     update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
   982: set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page)
  1057: insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable)
  1231: touch_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, bool write)
  1244: follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
  1291:     pmd_t pmd;
  1600: can_change_pmd_writable(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd)
  1631: can_follow_write_pmd(pmd_t pmd, struct page *page, struct vm_area_struct *vma, unsigned int flags)
  1668: follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags)
  1717:     pmd_t pmd;
  1802: madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long next)
  1872: zap_deposited_table(struct mm_struct *mm, pmd_t *pmd)
  1881: zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr)
  1962: move_soft_dirty_pmd(pmd_t pmd)
  1977:     pmd_t pmd;
  2029: change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags)
  2307: __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
  2391: __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
  2432: __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, unsigned long haddr, bool freeze)
  2649: __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio)
  2687:     pmd_t *pmd = mm_find_pmd(vma->vm_mm, address);
  [all...]
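Matches 901-927 above are one instance of the fixed recipe for installing a huge pmd: lock, re-check pmd_none(), deposit a pte table for a future split, then publish. Condensed into a hedged sketch (hypothetical helper; the real paths also adjust counters and rmap):

static void install_huge_pmd_sketch(struct vm_fault *vmf, pmd_t entry,
                                    pgtable_t pgtable, unsigned long haddr)
{
        struct vm_area_struct *vma = vmf->vma;

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
        if (likely(pmd_none(*vmf->pmd))) {
                /* deposit now so a later __split_huge_pmd() cannot fail */
                pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
                update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
        }
        spin_unlock(vmf->ptl);
        /* on the !pmd_none race the caller must free the unused pgtable */
}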
gup.c
   579: follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap)
   594:     ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
   696:     pmd_t *pmd, pmdval;
   701:     pmd = pmd_offset(pudp, address);
   702:     pmdval = pmdp_get_lockless(pmd);
   708:     ptl = pmd_lock(mm, pmd);
   709:     page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
   716:     return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
   721:     ptl = pmd_lock(mm, pmd);
   722:     if (unlikely(!pmd_present(*pmd))) {
   866:     pmd_t *pmd;
  2579: gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
  2685: gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
  3013:     pmd_t pmd = pmdp_get_lockless(pmdp);
  [all...]
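The fast-GUP matches (2579, 3013) read the pmd without any lock, so they must snapshot it once with pmdp_get_lockless() and only ever test the snapshot; re-reading *pmdp could observe a concurrent THP split halfway through. A small sketch of that discipline (hypothetical predicate):

static bool pmd_points_to_pte_table(pmd_t *pmdp)
{
        pmd_t pmd = pmdp_get_lockless(pmdp);    /* single coherent read */

        if (pmd_none(pmd) || !pmd_present(pmd))
                return false;
        return !pmd_leaf(pmd);
}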
vmalloc.c
    93: static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
   102:     pte = pte_alloc_kernel_track(pmd, addr, mask);
   126: static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
   145:     if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
   148:     return pmd_set_huge(pmd, phys_addr, prot);
   155:     pmd_t *pmd;
   158:     pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
   159:     if (!pmd)
   164:     if (vmap_try_huge_pmd(pmd, addr, ...
   341: vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgtbl_mod_mask *mask)
   357:     pmd_t *pmd;
   478: vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr, pgtbl_mod_mask *mask)
   513:     pmd_t *pmd;
   738:     pmd_t *pmd;
   [all...]
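Before pmd_set_huge() is attempted, vmap_try_huge_pmd() vets the range: both the virtual span and the physical address must cover and be aligned to PMD_SIZE. The eligibility test, distilled into a hedged sketch (the real function also checks arch support and frees a stale pte table):

static bool can_map_pmd_huge(unsigned long addr, unsigned long end,
                             phys_addr_t phys_addr)
{
        if (end - addr < PMD_SIZE)
                return false;           /* range too small */
        if (!IS_ALIGNED(addr, PMD_SIZE))
                return false;           /* virtual side misaligned */
        if (!IS_ALIGNED(phys_addr, PMD_SIZE))
                return false;           /* physical side misaligned */
        return true;
}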
memory.c
   187: static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
   190:     pgtable_t token = pmd_pgtable(*pmd);
   191:     pmd_clear(pmd);
   200:     pmd_t *pmd;
   205:     pmd = pmd_offset(pud, addr);
   208:     if (pmd_none_or_clear_bad(pmd))
   210:     free_pte_range(tlb, pmd, addr);
   211:     } while (pmd++, addr = next, addr != end);
   224:     pmd = pmd_offset(pud, start);
   226:     pmd_free_tlb(tlb, pmd, start...
   412: pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
   438: __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
   450: __pte_alloc_kernel(pmd_t *pmd)
   495:     pmd_t *pmd = pmd_offset(pud, addr);
   656: vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd)
   696: vm_normal_folio_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd)
  1568: zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details)
  1693:     pmd_t *pmd;
  1951:     pmd_t *pmd;
  1971:     pmd_t *pmd = walk_to_pmd(mm, addr);
  2050:     pmd_t *pmd = NULL;
  2489: remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot)
  2519:     pmd_t *pmd;
  2716: apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data, bool create, pgtbl_mod_mask *mask)
  2764:     pmd_t *pmd;
  5896:     pmd_t *pmd;
  [all...]
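__pte_alloc() and pmd_install() (412, 438) split pte-table allocation in two: allocate outside any lock, then install under the pmd lock while re-checking pmd_none(), so a racing allocator wins cleanly and the loser frees its table. The caller-side idiom, sketched as a hypothetical wrapper around the real helpers:

static pte_t *map_pte_alloc_sketch(struct mm_struct *mm, pmd_t *pmd,
                                   unsigned long addr, spinlock_t **ptlp)
{
        /* allocate and install a pte table only if none is there yet */
        if (pmd_none(*pmd) && __pte_alloc(mm, pmd))
                return NULL;                    /* allocation failed */
        return pte_offset_map_lock(mm, pmd, addr, ptlp);
}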
filemap.c
  3210:     ptep = pte_offset_map(vmf->pmd, vmf->address);
  3414:     if (pmd_trans_huge(*vmf->pmd)) {
  3420:     if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
  3430:     if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
  3431:     pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
  3587:     vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
/linux-master/include/linux/ |
mm.h
   530:     pmd_t *pmd;    /* Pointer to pmd entry matching ...
   555:      * is not NULL, otherwise pmd.
  2373:     unsigned long addr, pmd_t pmd);
  2375:     pmd_t pmd);
  2806: int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
  2807: int __pte_alloc_kernel(pmd_t *pmd);
  2913: static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
  2915:     return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
  2938: static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
  2968: pte_offset_map(pmd_t *pmd, unsigned long addr)
  2975: pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, spinlock_t **ptlp)
  3007: pmd_pgtable_page(pmd_t *pmd)
  3013: pmd_ptdesc(pmd_t *pmd)
  3018: pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
  3043: pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
  3055: pmd_lock(struct mm_struct *mm, pmd_t *pmd)
  [all...]
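pte_offset_map_lock() (2975) pairs a mapping of the pte table with the lock returned through *ptlp; every successful call must be matched by pte_unmap_unlock(). Minimal usage sketch (hypothetical helper, assuming the caller already holds a stable pmd pointer):

static pte_t read_pte_once(struct mm_struct *mm, pmd_t *pmd,
                           unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *ptep, pte = __pte(0);

        ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (ptep) {             /* NULL if the pte table vanished */
                pte = ptep_get(ptep);
                pte_unmap_unlock(ptep, ptl);
        }
        return pte;
}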
swapops.h
   335: extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
   359: static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
   537: extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);
   539: static inline swp_entry_t pmd_to_swp_entry(pmd_t pmd)
   543:     if (pmd_swp_soft_dirty(pmd))
   544:         pmd = pmd_swp_clear_soft_dirty(pmd);
   545:     if (pmd_swp_uffd_wp(pmd))
   546:         pmd = pmd_swp_clear_uffd_wp(pmd);
   [all...]
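pmd_to_swp_entry() (539-546) strips the soft-dirty and uffd-wp software bits before handing back a clean swp_entry_t, which callers then classify with the usual predicates. Sketch of a typical consumer (hypothetical helper; assumes CONFIG_ARCH_ENABLE_THP_MIGRATION and that the caller already established !pmd_present()):

static unsigned long migration_pfn_of_pmd(pmd_t pmd)
{
        swp_entry_t entry = pmd_to_swp_entry(pmd);

        /* only meaningful for an actual migration entry */
        return is_migration_entry(entry) ? swp_offset_pfn(entry) : 0;
}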
/linux-master/arch/arc/mm/ |
tlb.c
   530: update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd)
   533:     pte_t pte = __pte(pmd_val(*pmd));
/linux-master/arch/x86/kvm/mmu/ |
mmu.c
  3091:     pmd_t pmd;
  3134:     pmd = READ_ONCE(*pmd_offset(&pud, hva));
  3135:     if (pmd_none(pmd) || !pmd_present(pmd))
  3138:     if (pmd_leaf(pmd))
  3208:      * the pmd can't be split from under us.
/linux-master/arch/arm64/mm/ |
pageattr.c
   218:     pmd_t *pmdp, pmd;
   238:     pmd = READ_ONCE(*pmdp);
   239:     if (pmd_none(pmd))
   241:     if (pmd_sect(pmd))
hugetlbpage.c
    82: int pmd_huge(pmd_t pmd)
    84:     return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
   316:     pmd_t *pmdp, pmd;
   339:     pmd = READ_ONCE(*pmdp);
   341:         pmd_none(pmd))
   343:     if (pmd_huge(pmd) || !pmd_present(pmd))
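The arm64 pmd_huge() test above encodes the descriptor format directly: a non-zero pmd is a block (huge) mapping when PMD_TABLE_BIT is clear, and a pointer to a pte table when it is set. Restated as a hedged one-liner (hypothetical name):

static bool pmd_is_block_mapping(pmd_t pmd)
{
        /* bit set => next-level table; bit clear => block mapping */
        return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}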
/linux-master/arch/loongarch/mm/ |
pgtable.c
   121:     pmd_t pmd;
   123:     pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
   125:     return pmd;
   128: set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd)
   131:     *pmdp = pmd;
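mk_pmd() here builds the entry by hand: the page frame number shifted into the PFN field, OR'ed with the protection bits. A generic restatement (sketch with a hypothetical name; assumes an architecture that defines PFN_PTE_SHIFT the way loongarch does):

static pmd_t mk_pmd_sketch(struct page *page, pgprot_t prot)
{
        /* pfn in the high field, protection bits in the low bits */
        return __pmd((page_to_pfn(page) << PFN_PTE_SHIFT) |
                     pgprot_val(prot));
}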
/linux-master/sound/soc/codecs/ |
cs42l43.c
  1467: static int cs42l43_dapm_wait_completion(struct completion *pmu, struct completion *pmd,
  1477:     reinit_completion(pmd);
  1483:     time_left = wait_for_completion_timeout(pmd, msecs_to_jiffies(timeout_ms));
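Here "pmd" is unrelated to page tables: it is the codec's power-down (PMD) completion, the counterpart of the power-up (PMU) one. The pattern is reinit, arm the event, then a bounded wait. A hedged sketch with hypothetical names (the real function also handles the PMU direction and logs failures):

static int wait_powerdown_sketch(struct completion *pmd,
                                 unsigned int timeout_ms)
{
        unsigned long time_left;

        reinit_completion(pmd);         /* discard any stale complete() */
        /* ... kick off the power-down event here ... */
        time_left = wait_for_completion_timeout(pmd,
                                                msecs_to_jiffies(timeout_ms));
        return time_left ? 0 : -ETIMEDOUT;
}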
/linux-master/drivers/net/ethernet/intel/ice/ |
ice_common.c
  3846:     options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
  3847:                                options[i].pmd);
  3851:     options[i].pmd, options[i].max_lane_speed);
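Another non-page-table "pmd": the number of PMD (physical medium dependent) lanes, packed into a register field and unpacked with FIELD_GET(). Illustration with a hypothetical 4-bit mask (ICE_AQC_PORT_OPT_PMD_COUNT_M itself lives in the ice headers):

#include <linux/bitfield.h>

#define EXAMPLE_PMD_COUNT_M     GENMASK(3, 0)

static u8 example_pmd_count(u8 raw)
{
        return FIELD_GET(EXAMPLE_PMD_COUNT_M, raw);
}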
/linux-master/arch/x86/mm/ |
mem_encrypt_amd.c
   160:     pmdval_t pmd_flags, pmd;
   166:     pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
   167:     __early_make_pgtable((unsigned long)vaddr, pmd);
ident_map.c
    12:     pmd_t *pmd = pmd_page + pmd_index(addr);
    14:     if (pmd_present(*pmd))
    17:     set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
    28:     pmd_t *pmd;
    47:     pmd = pmd_offset(pud, 0);
    48:     ident_pmd_init(info, pmd, addr, next);
    51:     pmd = (pmd_t *)info->alloc_pgt_page(info->context);
    52:     if (!pmd)
    54:     ident_pmd_init(info, pmd, addr, next);
    55:     set_pud(pud, __pud(__pa(pmd) | inf...
    [all...]
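ident_pmd_init() (12-17) fills every pmd slot in a range with a large-page identity mapping, skipping slots that are already present. The loop, reconstructed as a hedged sketch from the matches above (hypothetical name for the same shape of function):

static void ident_pmd_fill(struct x86_mapping_info *info, pmd_t *pmd_page,
                           unsigned long addr, unsigned long end)
{
        addr &= PMD_MASK;               /* round down to a pmd boundary */
        for (; addr < end; addr += PMD_SIZE) {
                pmd_t *pmd = pmd_page + pmd_index(addr);

                if (pmd_present(*pmd))
                        continue;       /* keep existing mappings */
                set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
        }
}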