Lines matching references to "end"

127  * that high_memory defines the upper bound on direct map memory, then end
199 unsigned long addr, unsigned long end,
209 next = pmd_addr_end(addr, end);
213 } while (pmd++, addr = next, addr != end);
223 if (end - 1 > ceiling - 1)
233 unsigned long addr, unsigned long end,
243 next = pud_addr_end(addr, end);
247 } while (pud++, addr = next, addr != end);
257 if (end - 1 > ceiling - 1)
267 unsigned long addr, unsigned long end,
277 next = p4d_addr_end(addr, end);
281 } while (p4d++, addr = next, addr != end);
291 if (end - 1 > ceiling - 1)
303 unsigned long addr, unsigned long end,
320 * the address space, but end 0 and ceiling 0 refer to the top
321 * Comparisons need to use "end - 1" and "ceiling - 1" (though
322 * that end 0 case should be mythical).
326 * subsequent tests. But what about where end is brought down
327 * by PMD_SIZE below? no, end can't go down to 0 there.
332 * bother to round floor or end up - the tests don't need that.
346 if (end - 1 > ceiling - 1)
347 end -= PMD_SIZE;
348 if (addr > end - 1)
357 next = pgd_addr_end(addr, end);
361 } while (pgd++, addr = next, addr != end);
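
The free_*_range lines above (199-361) all rely on the "end - 1 > ceiling - 1" comparison that the comments at 320-327 explain: an end or ceiling of 0 means the very top of the address space, so subtracting 1 deliberately wraps it around to the largest unsigned value. A minimal user-space sketch of that idiom, with range_reaches_ceiling() as a hypothetical helper name and toy addresses; it only demonstrates the wrap-around arithmetic, not the kernel's page-table freeing:

#include <assert.h>
#include <stdio.h>

/*
 * Toy model of the free_*_range() convention: end == 0 and
 * ceiling == 0 mean "top of the address space" (one past the
 * highest address), so unsigned wrap-around makes them compare
 * as the largest possible values once 1 is subtracted.
 */
static int range_reaches_ceiling(unsigned long end, unsigned long ceiling)
{
        /* Same comparison the listed lines use. */
        return end - 1 > ceiling - 1;
}

int main(void)
{
        /* Ordinary case: end below ceiling, no clamping needed. */
        assert(!range_reaches_ceiling(0x40000000UL, 0x80000000UL));

        /*
         * end == 0 ("top"): wraps to the largest unsigned long, so it
         * exceeds any finite ceiling, as intended.  A plain
         * "end > ceiling" test would treat end == 0 as below everything.
         */
        assert(range_reaches_ceiling(0UL, 0x80000000UL));

        /* ceiling == 0 ("top"): nothing can exceed it. */
        assert(!range_reaches_ceiling(0x40000000UL, 0UL));

        printf("end/ceiling wrap-around comparisons behave as described\n");
        return 0;
}
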
1068 unsigned long end)
1152 max_nr = (end - addr) / PAGE_SIZE;
1174 addr != end);
1202 if (addr != end)
1213 unsigned long end)
1225 next = pmd_addr_end(addr, end);
1243 } while (dst_pmd++, src_pmd++, addr = next, addr != end);
1250 unsigned long end)
1262 next = pud_addr_end(addr, end);
1280 } while (dst_pud++, src_pud++, addr = next, addr != end);
1287 unsigned long end)
1298 next = p4d_addr_end(addr, end);
1304 } while (dst_p4d++, src_p4d++, addr = next, addr != end);
1346 unsigned long end = src_vma->vm_end;
1379 0, src_mm, addr, end);
1396 next = pgd_addr_end(addr, end);
1405 } while (dst_pgd++, src_pgd++, addr = next, addr != end);
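
The copy_*_range lines (1068-1405), like the free_*_range ones, share one walk shape: compute next = pXX_addr_end(addr, end), handle one chunk, then loop with "addr = next, addr != end". A simplified single-level sketch of that idiom; BLOCK_SIZE, BLOCK_MASK and block_addr_end() are stand-ins for PMD_SIZE, PMD_MASK and pmd_addr_end(), and the clamp mirrors the "boundary - 1 < end - 1" comparison so an end of 0 (top of the address space) still works:

#include <stdio.h>

/*
 * Toy stand-in for pmd_addr_end()/pud_addr_end(): clamp the next
 * table boundary to the overall end of the range.
 */
#define BLOCK_SIZE      0x200000UL
#define BLOCK_MASK      (~(BLOCK_SIZE - 1))

static unsigned long block_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + BLOCK_SIZE) & BLOCK_MASK;

        return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
        unsigned long addr = 0x1ff000UL;        /* starts just below a boundary */
        unsigned long end  = 0x601000UL;
        unsigned long next;

        /*
         * Same shape as the do { ... } while (..., addr = next, addr != end)
         * loops in the listed lines: each iteration covers at most one
         * block-aligned chunk, and the last chunk is clamped to 'end'.
         */
        do {
                next = block_addr_end(addr, end);
                printf("chunk: %#lx - %#lx\n", addr, next);
        } while (addr = next, addr != end);

        return 0;
}
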
1577 unsigned long addr, unsigned long end,
1611 max_nr = (end - addr) / PAGE_SIZE;
1641 max_nr = (end - addr) / PAGE_SIZE;
1672 } while (pte += nr, addr += PAGE_SIZE * nr, addr != end);
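
Lines 1611-1672 show the batched variant of the PTE loop: the remaining span is converted to a page count with max_nr = (end - addr) / PAGE_SIZE, a batch of nr <= max_nr entries is processed, and addr (and the pte pointer) advance by whole batches. A toy sketch of just that accounting; process_batch() is a hypothetical helper, not a kernel function:

#include <stdio.h>

#define PAGE_SIZE       4096UL

/*
 * Hypothetical helper: pretend it can handle up to 3 contiguous
 * entries at a time (the real code batches folio-sized runs).
 */
static unsigned int process_batch(unsigned long addr, unsigned int max_nr)
{
        unsigned int nr = max_nr < 3 ? max_nr : 3;

        printf("processing %u pte(s) at %#lx\n", nr, addr);
        return nr;
}

int main(void)
{
        unsigned long addr = 0x1000UL, end = 0x9000UL;
        unsigned int max_nr, nr;

        /*
         * Same shape as the listed zap_pte_range() lines: never let a
         * batch run past 'end', and advance addr by whole batches
         * (the real loop also advances the pte pointer by nr).
         */
        do {
                max_nr = (end - addr) / PAGE_SIZE;
                nr = process_batch(addr, max_nr);
        } while (addr += PAGE_SIZE * nr, addr != end);

        return 0;
}
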
1698 unsigned long addr, unsigned long end,
1706 next = pmd_addr_end(addr, end);
1733 } while (pmd++, cond_resched(), addr != end);
1740 unsigned long addr, unsigned long end,
1748 next = pud_addr_end(addr, end);
1762 } while (pud++, addr = next, addr != end);
1769 unsigned long addr, unsigned long end,
1777 next = p4d_addr_end(addr, end);
1781 } while (p4d++, addr = next, addr != end);
1788 unsigned long addr, unsigned long end,
1794 BUG_ON(addr >= end);
1798 next = pgd_addr_end(addr, end);
1802 } while (pgd++, addr = next, addr != end);
1813 unsigned long end;
1817 end = min(vma->vm_end, end_addr);
1818 if (end <= vma->vm_start)
1822 uprobe_munmap(vma, start, end);
1827 if (start != end) {
1843 __unmap_hugepage_range(tlb, vma, start, end,
1847 unmap_page_range(tlb, vma, start, end, details);
1857 * @end_addr: virtual address at which to end unmapping
1863 * Only addresses between `start' and `end' will be unmapped.
1889 unsigned long end = end_addr;
1890 hugetlb_zap_begin(vma, &start, &end);
1891 unmap_single_vma(tlb, vma, start, end, &details,
1911 const unsigned long end = address + size;
1917 address, end);
1918 hugetlb_zap_begin(vma, &range.start, &range.end);
1923 * unmap 'address-end' not 'range.start-range.end' as range
1926 unmap_single_vma(&tlb, vma, address, end, details, false);
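
Lines 1813-1926 clamp the caller's range to each VMA before unmapping: end = min(vma->vm_end, end_addr), and VMAs whose intersection with the request is empty are skipped. A small sketch of that clamping, assuming a minimal struct with only vm_start/vm_end; it models the range intersection only, not the kernel's unmap path:

#include <stdio.h>

/* Minimal stand-in for vm_area_struct: just the range. */
struct vma {
        unsigned long vm_start;
        unsigned long vm_end;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static unsigned long max_ul(unsigned long a, unsigned long b)
{
        return a > b ? a : b;
}

/*
 * Same clamping the listed unmap_single_vma() lines perform: the
 * caller's [start_addr, end_addr) is intersected with each VMA and
 * empty intersections are skipped.
 */
static void unmap_one(const struct vma *vma,
                      unsigned long start_addr, unsigned long end_addr)
{
        unsigned long start = max_ul(vma->vm_start, start_addr);
        unsigned long end = min_ul(vma->vm_end, end_addr);

        if (end <= vma->vm_start)
                return;
        if (start != end)
                printf("unmap %#lx - %#lx\n", start, end);
}

int main(void)
{
        struct vma vmas[] = {
                { 0x1000, 0x4000 },
                { 0x8000, 0xa000 },
        };

        /* Request covers part of the first VMA and misses the second. */
        for (unsigned int i = 0; i < 2; i++)
                unmap_one(&vmas[i], 0x2000, 0x6000);

        return 0;
}
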
2208 /* Fail if the user requested offset is beyond the end of the object */
2498 unsigned long addr, unsigned long end,
2517 } while (pte++, addr += PAGE_SIZE, addr != end);
2524 unsigned long addr, unsigned long end,
2537 next = pmd_addr_end(addr, end);
2542 } while (pmd++, addr = next, addr != end);
2547 unsigned long addr, unsigned long end,
2559 next = pud_addr_end(addr, end);
2564 } while (pud++, addr = next, addr != end);
2569 unsigned long addr, unsigned long end,
2581 next = p4d_addr_end(addr, end);
2586 } while (p4d++, addr = next, addr != end);
2599 unsigned long end = addr + PAGE_ALIGN(size);
2625 if (addr != vma->vm_start || end != vma->vm_end)
2632 BUG_ON(addr >= end);
2635 flush_cache_range(vma, addr, end);
2637 next = pgd_addr_end(addr, end);
2642 } while (pgd++, addr = next, addr != end);
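
Lines 2599-2642 derive end from a byte size with PAGE_ALIGN(), sanity-check it with BUG_ON(addr >= end), and test whether the remap covers the whole VMA (addr != vma->vm_start || end != vma->vm_end). A sketch reproducing only that arithmetic with toy values; PAGE_ALIGN() here is the usual round-up-to-page-boundary macro, and what the real code does with a partial-coverage remap is not modelled:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
/* Round a byte count up to a whole number of pages. */
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long vm_start = 0x10000UL, vm_end = 0x14000UL;
        unsigned long addr = vm_start;
        unsigned long size = 0x2800UL;          /* not page aligned */
        unsigned long end = addr + PAGE_ALIGN(size);

        assert(addr < end);                     /* the BUG_ON() in the listing */

        /*
         * Mirrors "if (addr != vma->vm_start || end != vma->vm_end)":
         * distinguishes a remap spanning the entire VMA from one that
         * covers only part of it.
         */
        if (addr != vm_start || end != vm_end)
                printf("partial remap: %#lx - %#lx inside %#lx - %#lx\n",
                       addr, end, vm_start, vm_end);
        else
                printf("remap covers the whole VMA\n");

        return 0;
}
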
2725 unsigned long addr, unsigned long end,
2756 } while (addr += PAGE_SIZE, addr != end);
2768 unsigned long addr, unsigned long end,
2786 next = pmd_addr_end(addr, end);
2800 } while (pmd++, addr = next, addr != end);
2806 unsigned long addr, unsigned long end,
2822 next = pud_addr_end(addr, end);
2836 } while (pud++, addr = next, addr != end);
2842 unsigned long addr, unsigned long end,
2858 next = p4d_addr_end(addr, end);
2872 } while (p4d++, addr = next, addr != end);
2883 unsigned long end = addr + size;
2887 if (WARN_ON(addr >= end))
2892 next = pgd_addr_end(addr, end);
2906 } while (pgd++, addr = next, addr != end);
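
Lines 2725-2906 are the apply_to_page_range() family: the same multi-level descent, but each PTE slot is handed to a caller-supplied function. A flattened sketch of that callback pattern; apply_fn, apply_to_range() and count_page() are illustrative stand-ins, not the kernel's pte_fn_t interface:

#include <stdio.h>

#define PAGE_SIZE       4096UL

/*
 * Simplified version of the apply_to_page_range() idea: walk every
 * page in [addr, end) and hand each one to a caller-supplied
 * function together with an opaque data pointer.
 */
typedef int (*apply_fn)(unsigned long addr, void *data);

static int apply_to_range(unsigned long addr, unsigned long end,
                          apply_fn fn, void *data)
{
        int err;

        do {
                err = fn(addr, data);
                if (err)
                        return err;
        } while (addr += PAGE_SIZE, addr != end);

        return 0;
}

/* Example callback: just count how many pages were visited. */
static int count_page(unsigned long addr, void *data)
{
        (void)addr;
        (*(unsigned long *)data)++;
        return 0;
}

int main(void)
{
        unsigned long pages = 0;

        apply_to_range(0x1000UL, 0x5000UL, count_page, &pages);
        printf("callback ran for %lu pages\n", pages);  /* 4 */
        return 0;
}
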
3752 * @nr: Number of pages to be unmapped. 0 to unmap to end of file.
3792 * end of the file.
4873 /* The PTE offset of the end address, clamped to the VMA and PTE. */
5110 unsigned long end = min(vmf->address + (folio_nr_pages(folio) - nr) * PAGE_SIZE, vma->vm_end);
5115 for (addr = start; addr != end; start_ptep++, addr += PAGE_SIZE) {
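
Lines 5110-5115 clamp a per-folio PTE scan so it stops either at the folio's last subpage or at vma->vm_end, whichever comes first. A toy sketch of just that clamp (the min() from the listed line written out explicitly); the folio page counts are made-up values and the start_ptep bookkeeping is omitted:

#include <stdio.h>

#define PAGE_SIZE       4096UL

int main(void)
{
        /*
         * Toy numbers: a 16-page folio whose tail would extend past
         * the end of the VMA, with the scan starting at subpage 'nr'.
         */
        unsigned long vm_end = 0x20000UL;
        unsigned long address = 0x1c000UL;
        unsigned long folio_pages = 16, nr = 4;
        unsigned long addr, start = address;

        /*
         * Same clamp as the listed line: stop at the last subpage of
         * the folio or at vm_end, whichever comes first.
         */
        unsigned long end = address + (folio_pages - nr) * PAGE_SIZE;
        if (end > vm_end)
                end = vm_end;

        for (addr = start; addr != end; addr += PAGE_SIZE)
                printf("would inspect pte for %#lx\n", addr);

        return 0;
}
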
6261 /* Process subpages at the end of huge page */