Lines Matching defs:pmd (mm/memory.c)

187 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
190 pgtable_t token = pmd_pgtable(*pmd);
191 pmd_clear(pmd);
200 pmd_t *pmd;
205 pmd = pmd_offset(pud, addr);
208 if (pmd_none_or_clear_bad(pmd))
210 free_pte_range(tlb, pmd, addr);
211 } while (pmd++, addr = next, addr != end);
224 pmd = pmd_offset(pud, start);
226 pmd_free_tlb(tlb, pmd, start);
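
Lines 187-226 are the free_pte_range()/free_pmd_range() pair. A minimal sketch of the pmd-level walk they show, assuming kernel context inside mm/memory.c; the wrapper name is illustrative, and the floor/ceiling checks, pud_clear() and page-table accounting done by the real free_pmd_range() are omitted:

    static void sketch_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                      unsigned long addr, unsigned long end)
    {
        unsigned long next, start = addr;
        pmd_t *pmd = pmd_offset(pud, addr);     /* line 205: first pmd covering addr */

        do {
            next = pmd_addr_end(addr, end);     /* clamp to this pmd entry's span */
            if (pmd_none_or_clear_bad(pmd))     /* line 208: skip empty or corrupt entries */
                continue;
            free_pte_range(tlb, pmd, addr);     /* line 210: clear the pmd, free its pte page */
        } while (pmd++, addr = next, addr != end);

        /* lines 224-226: then hand the pmd page itself to the TLB batch */
        pmd = pmd_offset(pud, start);
        pmd_free_tlb(tlb, pmd, start);
    }
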
412 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte)
414 spinlock_t *ptl = pmd_lock(mm, pmd);
416 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
432 pmd_populate(mm, pmd, *pte);
438 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
444 pmd_install(mm, pmd, &new);
450 int __pte_alloc_kernel(pmd_t *pmd)
457 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
459 pmd_populate_kernel(&init_mm, pmd, new);
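
Lines 412-459 show the pte-table population pattern shared by pmd_install(), __pte_alloc() and __pte_alloc_kernel(): allocate the table outside the lock, then re-check pmd_none() under pmd_lock(), because another thread may have populated the pmd in the meantime. A minimal sketch of the __pte_alloc()-style path, assuming kernel context; the helper name is illustrative and the real pmd_install() carries a longer ordering comment around the barrier:

    static int sketch_pte_alloc(struct mm_struct *mm, pmd_t *pmd)
    {
        spinlock_t *ptl;
        pgtable_t new = pte_alloc_one(mm);      /* allocate the pte page outside any lock */

        if (!new)
            return -ENOMEM;

        ptl = pmd_lock(mm, pmd);                /* line 414 */
        if (likely(pmd_none(*pmd))) {           /* line 416: has another thread populated it? */
            mm_inc_nr_ptes(mm);
            smp_wmb();                          /* make pte page initialisation visible first */
            pmd_populate(mm, pmd, new);         /* line 432 */
            new = NULL;
        }
        spin_unlock(ptl);
        if (new)                                /* lost the race: drop the unused table */
            pte_free(mm, new);
        return 0;
    }

The kernel variant at line 450 has the same shape, with pmd_populate_kernel() on &init_mm and pte_free_kernel() for the table that lost the race.
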
495 pmd_t *pmd = pmd_offset(pud, addr);
524 pr_alert("BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
526 (long long)pte_val(pte), (long long)pmd_val(*pmd));
657 pmd_t pmd)
659 unsigned long pfn = pmd_pfn(pmd);
681 if (pmd_devmap(pmd))
683 if (is_huge_zero_pmd(pmd))
697 unsigned long addr, pmd_t pmd)
699 struct page *page = vm_normal_page_pmd(vma, addr, pmd);
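
Lines 657-699 belong to vm_normal_page_pmd(), which maps a huge pmd back to its struct page while filtering mappings that have no "normal" page behind them. A reduced sketch assuming kernel context; the VM_PFNMAP/VM_MIXEDMAP special cases handled by the real function are omitted and the name is illustrative:

    static struct page *sketch_normal_page_pmd(struct vm_area_struct *vma,
                                               unsigned long addr, pmd_t pmd)
    {
        unsigned long pfn = pmd_pfn(pmd);       /* line 659 */

        if (pmd_devmap(pmd))                    /* line 681: device pages are not "normal" */
            return NULL;
        if (is_huge_zero_pmd(pmd))              /* line 683: the shared huge zero page */
            return NULL;
        if (unlikely(pfn > highest_memmap_pfn)) /* no struct page behind this pfn */
            return NULL;
        return pfn_to_page(pfn);
    }
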
1569 struct vm_area_struct *vma, pmd_t *pmd,
1584 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1693 pmd_t *pmd;
1696 pmd = pmd_offset(pud, addr);
1699 if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1701 __split_huge_pmd(vma, pmd, addr, false, NULL);
1702 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1709 next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1710 spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1712 * Take and drop THP pmd lock so that we cannot return
1713 * prematurely, while zap_huge_pmd() has cleared *pmd,
1718 if (pmd_none(*pmd)) {
1722 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1724 pmd--;
1725 } while (pmd++, cond_resched(), addr != end);
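
Lines 1569-1725 come from zap_pte_range() and its caller zap_pmd_range(). The pmd loop either splits a partially covered huge pmd, zaps a fully covered one, or descends to the pte level; in v6.5-style kernels zap_pte_range() can bail out early when the pte table vanished under it, which is what the pmd-- retry at line 1724 handles. A condensed sketch, assuming kernel context; the details->single_folio branch with the take-and-drop of the pmd lock (the comment at lines 1712-1713) and the MMU-notifier handling are omitted:

    static unsigned long sketch_zap_pmd_range(struct mmu_gather *tlb,
                                              struct vm_area_struct *vma, pud_t *pud,
                                              unsigned long addr, unsigned long end,
                                              struct zap_details *details)
    {
        pmd_t *pmd = pmd_offset(pud, addr);     /* line 1696 */
        unsigned long next;

        do {
            next = pmd_addr_end(addr, end);
            if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                if (next - addr != HPAGE_PMD_SIZE)
                    __split_huge_pmd(vma, pmd, addr, false, NULL);  /* line 1701: partial range, split first */
                else if (zap_huge_pmd(tlb, vma, pmd, addr)) {       /* line 1702: whole huge pmd zapped */
                    addr = next;
                    continue;
                }
                /* otherwise fall through to the pte level */
            }
            if (pmd_none(*pmd)) {               /* line 1718: cleared meanwhile, nothing below */
                addr = next;
                continue;
            }
            addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
            if (addr != next)
                pmd--;                          /* line 1724: pte level bailed out, retry this pmd */
        } while (pmd++, cond_resched(), addr != end);

        return addr;
    }
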
1916 * could have been expanded for hugetlb pmd sharing.
1951 pmd_t *pmd;
1960 pmd = pmd_alloc(mm, pud, addr);
1961 if (!pmd)
1964 VM_BUG_ON(pmd_trans_huge(*pmd));
1965 return pmd;
1971 pmd_t *pmd = walk_to_pmd(mm, addr);
1973 if (!pmd)
1975 return pte_alloc_map_lock(mm, pmd, addr, ptl);
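
Lines 1951-1975 are walk_to_pmd() and __get_locked_pte(): allocate page-table levels down to the pmd, then map and lock the pte. A minimal sketch that folds the two together, assuming kernel context; the name is illustrative and errors are reduced to NULL returns:

    static pte_t *sketch_get_locked_pte(struct mm_struct *mm, unsigned long addr,
                                        spinlock_t **ptlp)
    {
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);
        pud_t *pud;
        pmd_t *pmd;

        if (!p4d)
            return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
            return NULL;
        pmd = pmd_alloc(mm, pud, addr);         /* line 1960 */
        if (!pmd)
            return NULL;

        VM_BUG_ON(pmd_trans_huge(*pmd));        /* line 1964: callers must not race with THP here */
        return pte_alloc_map_lock(mm, pmd, addr, ptlp);     /* line 1975 */
    }
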
2050 pmd_t *pmd = NULL;
2060 pmd = walk_to_pmd(mm, addr);
2061 if (!pmd)
2069 if (pte_alloc(mm, pmd))
2076 start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
2106 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
2489 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2497 mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2519 pmd_t *pmd;
2524 pmd = pmd_alloc(mm, pud, addr);
2525 if (!pmd)
2527 VM_BUG_ON(pmd_trans_huge(*pmd));
2530 err = remap_pte_range(mm, pmd, addr, next,
2534 } while (pmd++, addr = next, addr != end);
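
Lines 2489-2534 show remap_pte_range() and the pmd-level loop of remap_pmd_range(), as used by remap_pfn_range(). A sketch of that pmd loop, assuming kernel context inside mm/memory.c (remap_pte_range() is the file-local helper defined at line 2489; the wrapper name here is illustrative):

    static int sketch_remap_pmd_range(struct mm_struct *mm, pud_t *pud,
                                      unsigned long addr, unsigned long end,
                                      unsigned long pfn, pgprot_t prot)
    {
        unsigned long next;
        pmd_t *pmd;
        int err;

        pfn -= addr >> PAGE_SHIFT;              /* so pfn + (addr >> PAGE_SHIFT) is the target pfn */
        pmd = pmd_alloc(mm, pud, addr);         /* line 2524 */
        if (!pmd)
            return -ENOMEM;
        VM_BUG_ON(pmd_trans_huge(*pmd));        /* line 2527 */
        do {
            next = pmd_addr_end(addr, end);
            err = remap_pte_range(mm, pmd, addr, next,
                                  pfn + (addr >> PAGE_SHIFT), prot);    /* line 2530 */
            if (err)
                return err;
        } while (pmd++, addr = next, addr != end);
        return 0;
    }
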
2716 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2727 pte_alloc_kernel_track(pmd, addr, mask) :
2728 pte_alloc_map_lock(mm, pmd, addr, &ptl);
2733 pte_offset_kernel(pmd, addr) :
2734 pte_offset_map_lock(mm, pmd, addr, &ptl);
2764 pmd_t *pmd;
2771 pmd = pmd_alloc_track(mm, pud, addr, mask);
2772 if (!pmd)
2775 pmd = pmd_offset(pud, addr);
2779 if (pmd_none(*pmd) && !create)
2781 if (WARN_ON_ONCE(pmd_leaf(*pmd)))
2783 if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
2786 pmd_clear_bad(pmd);
2788 err = apply_to_pte_range(mm, pmd, addr, next,
2792 } while (pmd++, addr = next, addr != end);
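
Lines 2716-2792 are apply_to_pte_range() and the pmd loop of apply_to_pmd_range(), shared by apply_to_page_range() (create == true) and apply_to_existing_page_range() (create == false). A sketch of the pmd-level decisions, assuming kernel context; the name is illustrative:

    static int sketch_apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
                                         unsigned long addr, unsigned long end,
                                         pte_fn_t fn, void *data, bool create,
                                         pgtbl_mod_mask *mask)
    {
        unsigned long next;
        pmd_t *pmd;
        int err = 0;

        if (create) {
            pmd = pmd_alloc_track(mm, pud, addr, mask);     /* line 2771 */
            if (!pmd)
                return -ENOMEM;
        } else {
            pmd = pmd_offset(pud, addr);                    /* line 2775 */
        }
        do {
            next = pmd_addr_end(addr, end);
            if (pmd_none(*pmd) && !create)
                continue;                       /* line 2779: nothing mapped, nothing to visit */
            if (WARN_ON_ONCE(pmd_leaf(*pmd)))
                return -EINVAL;                 /* line 2781: huge mapping, cannot descend */
            if (!pmd_none(*pmd) && WARN_ON_ONCE(pmd_bad(*pmd))) {
                if (!create)
                    continue;
                pmd_clear_bad(pmd);             /* line 2786 */
            }
            err = apply_to_pte_range(mm, pmd, addr, next, fn, data, create, mask);
            if (err)
                break;
        } while (pmd++, addr = next, addr != end);

        return err;
    }
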
2996 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3024 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
3299 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3412 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3817 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3852 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3945 migration_entry_wait(vma->vm_mm, vmf->pmd,
3962 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4055 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4126 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4331 pte = pte_offset_map(vmf->pmd, vmf->address & PMD_MASK);
4397 if (pte_alloc(vma->vm_mm, vmf->pmd))
4405 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4449 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
4520 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4560 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4603 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4604 if (unlikely(!pmd_none(*vmf->pmd)))
4617 * deposit and withdraw with pmd lock held
4622 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4624 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
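
Lines 4560-4624 are from deposit_prealloc_pte() and do_set_pmd(): take the pmd lock, bail out if the pmd is no longer empty, optionally deposit the preallocated pte table so a later split cannot fail on allocation, then set the huge entry and update the MMU cache. A reduced sketch, assuming kernel context and CONFIG_TRANSPARENT_HUGEPAGE; the name is illustrative, and the icache flush, rmap and counter updates of the real code, plus the dirty/writable bits for write faults, are omitted:

    static vm_fault_t sketch_install_huge_pmd(struct vm_fault *vmf, struct page *page)
    {
        struct vm_area_struct *vma = vmf->vma;
        unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
        vm_fault_t ret = VM_FAULT_FALLBACK;
        pmd_t entry;

        vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);      /* line 4603 */
        if (unlikely(!pmd_none(*vmf->pmd)))             /* line 4604: raced with another installer */
            goto unlock;

        entry = pmd_mkhuge(mk_pmd(page, vma->vm_page_prot));

        if (arch_needs_pgtable_deposit() && vmf->prealloc_pte) {
            /* line 4560: deposit and withdraw with pmd lock held */
            pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
            mm_inc_nr_ptes(vma->vm_mm);
            vmf->prealloc_pte = NULL;
        }

        set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); /* line 4622 */
        update_mmu_cache_pmd(vma, haddr, vmf->pmd);     /* line 4624 */
        ret = VM_FAULT_NOPAGE;
    unlock:
        spin_unlock(vmf->ptl);
        return ret;
    }
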
4730 if (pmd_none(*vmf->pmd)) {
4738 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4739 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4743 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
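
Lines 4730-4743 are the finish_fault() step that makes sure a pte table exists before mapping and locking the pte: install the preallocated table with pmd_install(), or allocate one with pte_alloc(), then take pte_offset_map_lock(). A minimal sketch, assuming kernel context; the helper name is illustrative and the do_set_pmd() attempt for compound pages is omitted:

    static vm_fault_t sketch_map_locked_pte(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;

        if (pmd_none(*vmf->pmd)) {                      /* line 4730: no pte table yet */
            if (vmf->prealloc_pte)                      /* use the table preallocated earlier */
                pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);  /* line 4738 */
            else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))         /* line 4739 */
                return VM_FAULT_OOM;
        }

        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                       vmf->address, &vmf->ptl);        /* line 4743 */
        if (!vmf->pte)          /* the pmd changed (e.g. became huge): let the fault be retried */
            return VM_FAULT_NOPAGE;
        return 0;
    }
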
4840 if (pmd_none(*vmf->pmd)) {
4999 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
5135 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
5202 /* COW or write-notify handled on pte level: split pmd. */
5203 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
5265 if (unlikely(pmd_none(*vmf->pmd))) {
5276 * A regular pmd is established and it can't morph into a huge
5277 * pmd by anon khugepaged, since that takes mmap_lock in write
5279 * it into a huge pmd: just retry later if so.
5281 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
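
Lines 5265-5281 are the entry of handle_pte_fault(): with an empty pmd the pte table is left unallocated for later, otherwise the pte is mapped without taking the lock yet; pte_offset_map_nolock() failing means the pmd mutated underneath (e.g. collapsed to a huge pmd), and the fault is simply retried. A condensed sketch, assuming a v6.5-style kernel; the name is illustrative and the fault-flag bookkeeping is omitted:

    static void sketch_pte_fault_entry(struct vm_fault *vmf)
    {
        if (unlikely(pmd_none(*vmf->pmd))) {    /* line 5265: no pte table yet */
            vmf->pte = NULL;                    /* leave allocation to the later fault handlers */
            return;
        }

        /*
         * Lines 5276-5279: a regular pmd cannot morph into a huge pmd via anon
         * khugepaged here (that takes mmap_lock in write mode), but a file or
         * shmem collapse still can: the caller just retries the fault if so.
         */
        vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
                                         vmf->address, &vmf->ptl);      /* line 5281 */
        if (unlikely(!vmf->pte))
            return;
        vmf->orig_pte = ptep_get_lockless(vmf->pte);    /* snapshot for later comparison */
    }
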
5397 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5398 if (!vmf.pmd)
5405 if (pmd_none(*vmf.pmd) &&
5411 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
5417 pmd_migration_entry_wait(mm, vmf.pmd);
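
Lines 5397-5417 come from __handle_mm_fault(): allocate the pmd, optionally try a huge fault while it is still empty, then take a lockless snapshot of the entry and wait out THP migration before dropping to the pte level. A condensed sketch, assuming kernel context and visibility of the file-local handle_pte_fault(); the helper name is illustrative and the huge-pmd creation, NUMA and write-protect branches are omitted:

    static vm_fault_t sketch_pmd_fault_step(struct mm_struct *mm, struct vm_fault *vmf)
    {
        vmf->pmd = pmd_alloc(mm, vmf->pud, vmf->address);   /* line 5397 */
        if (!vmf->pmd)
            return VM_FAULT_OOM;

        /* line 5405: a still-empty pmd in a THP-eligible VMA may first try a huge fault */

        vmf->orig_pmd = pmdp_get_lockless(vmf->pmd);        /* line 5411: lockless snapshot */
        if (unlikely(is_swap_pmd(vmf->orig_pmd))) {
            if (is_pmd_migration_entry(vmf->orig_pmd))
                pmd_migration_entry_wait(mm, vmf->pmd);     /* line 5417: a THP is being migrated */
            return 0;                                       /* the fault is retried afterwards */
        }

        return handle_pte_fault(vmf);                       /* continue at the pte level */
    }
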
5891 pmd_t *pmd;
5906 pmd = pmd_offset(pud, address);
5907 VM_BUG_ON(pmd_trans_huge(*pmd));
5909 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
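
Lines 5891-5909 are from the follow_pte()-style lookup: walk the existing page tables without allocating, assert the pmd is not a transparent huge pmd, and return the pte mapped with its lock held. A minimal sketch, assuming kernel context; the name is illustrative and the upper levels are reduced to simple none/bad checks:

    static int sketch_follow_pte(struct mm_struct *mm, unsigned long address,
                                 pte_t **ptepp, spinlock_t **ptlp)
    {
        pgd_t *pgd = pgd_offset(mm, address);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep;

        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
            return -EINVAL;
        p4d = p4d_offset(pgd, address);
        if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
            return -EINVAL;
        pud = pud_offset(p4d, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
            return -EINVAL;

        pmd = pmd_offset(pud, address);         /* line 5906 */
        VM_BUG_ON(pmd_trans_huge(*pmd));        /* line 5907: callers exclude huge pmds */

        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);     /* line 5909 */
        if (!ptep)
            return -EINVAL;
        if (!pte_present(ptep_get(ptep))) {
            pte_unmap_unlock(ptep, *ptlp);
            return -EINVAL;
        }
        *ptepp = ptep;                          /* caller must pte_unmap_unlock() when done */
        return 0;
    }
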