Lines matching refs: pfn

28 #include <linux/pfn.h>
299 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)
316 if (!IS_ALIGNED(pfn | nr_pages, min_align))
322 * Return page for the valid pfn only if the page is online. All pfn
326 struct page *pfn_to_online_page(unsigned long pfn)
328 unsigned long nr = pfn_to_section_nr(pfn);
343 if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
346 if (!pfn_section_valid(ms, pfn))
350 return pfn_to_page(pfn);
358 pgmap = get_dev_pagemap(pfn, NULL);
361 /* The presence of a pgmap indicates ZONE_DEVICE offline pfn */
365 return pfn_to_page(pfn);
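
The matches above cover pfn_to_online_page(), which returns a page only for a pfn that is valid, lies in an online (sub)section, and is not a ZONE_DEVICE pfn; everything else yields NULL. These hits appear to come from the kernel's mm/memory_hotplug.c. Below is a minimal sketch of a caller walking a range with it; the helper name is hypothetical and not part of the kernel:

#include <linux/memory_hotplug.h>
#include <linux/mm.h>

/* Hypothetical helper: count how many pfns in [start, end) are online. */
static unsigned long count_online_pfns(unsigned long start, unsigned long end)
{
        unsigned long pfn, nr_online = 0;

        for (pfn = start; pfn < end; pfn++) {
                /* NULL for holes, offline sections and ZONE_DEVICE pfns. */
                if (pfn_to_online_page(pfn))
                        nr_online++;
        }

        return nr_online;
}
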
369 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
372 const unsigned long end_pfn = pfn + nr_pages;
380 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false));
386 if (altmap->base_pfn != pfn
394 if (check_pfn_span(pfn, nr_pages)) {
395 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
399 for (; pfn < end_pfn; pfn += cur_nr_pages) {
401 cur_nr_pages = min(end_pfn - pfn,
402 SECTION_ALIGN_UP(pfn + 1) - pfn);
403 err = sparse_add_section(nid, pfn, cur_nr_pages, altmap,
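
__add_pages() (and __remove_pages() further down) step through [pfn, end_pfn) in chunks that never cross a memory section: SECTION_ALIGN_UP(pfn + 1) - pfn is the distance to the next section boundary (pfn + 1 so that an already-aligned pfn yields a full section rather than zero), and the min() against end_pfn - pfn trims the last chunk. A standalone worked example of that stepping; the local macro copies and the 32768-pages-per-section figure (4 KiB pages, 128 MiB sections, as on x86-64) are assumptions for illustration only:

/* Prints the per-section chunks the hotplug loops would process. */
#include <stdio.h>

#define PAGES_PER_SECTION       32768UL
#define SECTION_ALIGN_UP(pfn)   (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
        unsigned long pfn = 1000;               /* unaligned start inside section 0 */
        unsigned long end_pfn = pfn + 70000;    /* range spans three sections */
        unsigned long cur_nr_pages;

        for (; pfn < end_pfn; pfn += cur_nr_pages) {
                unsigned long to_boundary = SECTION_ALIGN_UP(pfn + 1) - pfn;

                cur_nr_pages = end_pfn - pfn < to_boundary ? end_pfn - pfn : to_boundary;
                printf("chunk: pfn=%lu nr=%lu\n", pfn, cur_nr_pages);
        }

        return 0;
}
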
413 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */
434 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */
439 unsigned long pfn;
441 /* pfn is the end pfn of a memory section. */
442 pfn = end_pfn - 1;
443 for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
444 if (unlikely(!pfn_to_online_page(pfn)))
447 if (unlikely(pfn_to_nid(pfn) != nid))
450 if (zone != page_zone(pfn_to_page(pfn)))
453 return pfn;
462 unsigned long pfn;
472 pfn = find_smallest_section_pfn(nid, zone, end_pfn,
474 if (pfn) {
475 zone->spanned_pages = zone_end_pfn(zone) - pfn;
476 zone->zone_start_pfn = pfn;
488 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
490 if (pfn)
491 zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
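
shrink_zone_span() adjusts the two ends differently: find_smallest_section_pfn() returns the first pfn at or past the removed range that is still online, so the new span is simply zone_end_pfn - pfn, while find_biggest_section_pfn() returns the last still-online pfn below the removed range, hence the + 1. A hypothetical worked case for the tail shrink:

        zone spans [0x10000, 0x20000) and the tail range [0x1f800, 0x20000) goes away;
        find_biggest_section_pfn() returns 0x1f7ff (the last online pfn), so
        spanned_pages = 0x1f7ff - 0x10000 + 1 = 0xf800
        and the zone now ends exactly where the removed range begins.
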
533 unsigned long pfn, cur_nr_pages;
536 for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
541 min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
542 page_init_poison(pfn_to_page(pfn),
564 * @pfn: starting pageframe (must be aligned to start of a section)
573 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
576 const unsigned long end_pfn = pfn + nr_pages;
579 if (check_pfn_span(pfn, nr_pages)) {
580 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1);
584 for (; pfn < end_pfn; pfn += cur_nr_pages) {
587 cur_nr_pages = min(end_pfn - pfn,
588 SECTION_ALIGN_UP(pfn + 1) - pfn);
589 sparse_remove_section(pfn, cur_nr_pages, altmap);
647 unsigned long pfn;
658 for (pfn = start_pfn; pfn < end_pfn;) {
668 if (pfn)
669 order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
673 (*online_page_callback)(pfn_to_page(pfn), order);
674 pfn += (1UL << order);
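
online_pages_range() hands memory back to the buddy allocator in the largest naturally aligned chunks it can: for a nonzero pfn, 1 << __ffs(pfn) is the biggest power-of-two block that can start at that pfn, clamped to MAX_PAGE_ORDER (the if (pfn) guard above exists because __ffs(0) is undefined; pfn 0 is MAX_PAGE_ORDER aligned anyway). A standalone sketch of that order choice; __ffs() and MAX_PAGE_ORDER are replaced by local stand-ins, and the value 10 is only the usual default:

#include <stdio.h>

#define MAX_PAGE_ORDER  10      /* typical default; configurable in the kernel */

/* Largest buddy order whose block can start at pfn, as in online_pages_range(). */
static int largest_order_at(unsigned long pfn)
{
        int order;

        if (!pfn)
                return MAX_PAGE_ORDER;          /* __ffs(0) is undefined */
        order = __builtin_ctzl(pfn);            /* equivalent to __ffs() for pfn != 0 */
        return order < MAX_PAGE_ORDER ? order : MAX_PAGE_ORDER;
}

int main(void)
{
        unsigned long pfn = 0x12340;            /* aligned to 2^6 pages */
        int order = largest_order_at(pfn);

        printf("pfn %#lx -> order %d (%d pages)\n", pfn, order, 1 << order);
        return 0;
}
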
729 static void section_taint_zone_device(unsigned long pfn)
731 struct mem_section *ms = __pfn_to_section(pfn);
736 static inline void section_taint_zone_device(unsigned long pfn)
742 * Associate the pfn range with the given zone, initializing the memmaps
780 * expects the zone spans the pfn range. All the pages in the range
893 * Returns a default kernel memory zone for the given pfn range.
894 * If no kernel zone covers this pfn range it will automatically go
965 unsigned long pfn,
991 pfn = ALIGN_DOWN(pfn, group->d.unit_pages);
992 end_pfn = pfn + group->d.unit_pages;
993 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
994 page = pfn_to_online_page(pfn);
1021 return default_kernel_zone_for_pfn(nid, pfn, nr_pages);
1089 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
1092 unsigned long end_pfn = pfn + nr_pages;
1095 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1106 page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages);
1108 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE);
1111 SetPageVmemmapSelfHosted(pfn_to_page(pfn + i));
1119 online_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1124 void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
1126 unsigned long end_pfn = pfn + nr_pages;
1134 offline_mem_sections(pfn, ALIGN_DOWN(end_pfn, PAGES_PER_SECTION));
1140 remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages);
1141 kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
1147 int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
1163 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) ||
1164 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
1168 /* associate pfn range with the zone */
1169 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
1171 arg.start_pfn = pfn;
1198 online_pages_range(pfn, nr_pages);
1199 adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);
1206 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE);
1229 (unsigned long long) pfn << PAGE_SHIFT,
1230 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
1232 remove_pfn_range_from_zone(zone, pfn, nr_pages);
1376 * start pfn should be pageblock_nr_pages aligned for correctly
1718 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
1731 unsigned long pfn;
1733 for (pfn = start; pfn < end; pfn++) {
1737 if (!pfn_valid(pfn))
1739 page = pfn_to_page(pfn);
1766 skip = compound_nr(head) - (pfn - page_to_pfn(head));
1767 pfn += skip - 1;
1771 *movable_pfn = pfn;
1777 unsigned long pfn;
1783 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1787 if (!pfn_valid(pfn))
1789 page = pfn_to_page(pfn);
1794 pfn = page_to_pfn(head) + compound_nr(head) - 1;
1798 pfn = page_to_pfn(head) + thp_nr_pages(page) - 1;
1833 pr_warn("failed to isolate pfn %lx\n", pfn);
1866 pr_warn("migrating pfn %lx failed ret:%d\n",
1947 unsigned long pfn, system_ram_pages = 0;
2022 pfn = start_pfn;
2037 ret = scan_movable_pages(pfn, end_pfn, &pfn);
2043 do_migrate_range(pfn, end_pfn);
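
The last few matches are from offline_pages(): starting at start_pfn, scan_movable_pages() reports (through its output parameter, as the *movable_pfn assignment above shows) the next pfn that still holds a movable page, and do_migrate_range() migrates from there, repeating until nothing movable remains. A standalone model of that retry shape; both helpers are static to memory_hotplug.c, so the stubs below only mimic their calling convention, and the real loop's signal handling, hugetlb dissolution and isolation rechecks are left out:

#include <errno.h>
#include <stdio.h>

static unsigned long next_movable = 0x1000;     /* pretend one movable page remains */

/* Stand-in: report the next movable pfn in [start, end), or -ENOENT if none. */
static int scan_movable_pages(unsigned long start, unsigned long end,
                              unsigned long *movable_pfn)
{
        if (next_movable >= start && next_movable < end) {
                *movable_pfn = next_movable;
                return 0;
        }
        return -ENOENT;
}

/* Stand-in: pretend migration empties the range of movable pages. */
static void do_migrate_range(unsigned long start, unsigned long end)
{
        printf("migrating from pfn %#lx up to %#lx\n", start, end);
        next_movable = ~0UL;
}

int main(void)
{
        unsigned long pfn = 0, end_pfn = 0x8000;
        int ret;

        do {
                ret = scan_movable_pages(pfn, end_pfn, &pfn);
                if (!ret)
                        do_migrate_range(pfn, end_pfn);
        } while (!ret);         /* -ENOENT: no movable pages left */

        return 0;
}
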