Lines matching refs: pfn (numbers are source line numbers)

456 	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
565 void __meminit __init_single_page(struct page *page, unsigned long pfn,
569 set_page_links(page, zone, nid, pfn);
579 set_page_address(page, __va(pfn << PAGE_SHIFT));
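
A note on lines 565-579: __init_single_page() seeds one struct page, with set_page_links() packing the zone and node IDs into bitfields of page->flags and set_page_address() recording the linear-map address derived from pfn << PAGE_SHIFT. A minimal userspace sketch of the flag-packing idea (the field widths and shifts below are illustrative, not the kernel's real layout):

#include <stdio.h>

/* Illustrative field widths; the kernel derives these from its config. */
#define ZONE_BITS  3
#define NID_BITS   10
#define ZONE_SHIFT 0
#define NID_SHIFT  ZONE_BITS

struct page { unsigned long flags; };

/* Model of set_page_links(): stash zone and node in page->flags. */
static void set_links(struct page *page, unsigned int zone, unsigned int nid)
{
	page->flags &= ~(((1UL << ZONE_BITS) - 1) << ZONE_SHIFT);
	page->flags &= ~(((1UL << NID_BITS) - 1) << NID_SHIFT);
	page->flags |= (unsigned long)zone << ZONE_SHIFT;
	page->flags |= (unsigned long)nid << NID_SHIFT;
}

static unsigned int page_zone(const struct page *p)
{
	return (p->flags >> ZONE_SHIFT) & ((1UL << ZONE_BITS) - 1);
}

static unsigned int page_nid(const struct page *p)
{
	return (p->flags >> NID_SHIFT) & ((1UL << NID_BITS) - 1);
}

int main(void)
{
	struct page pg = { 0 };

	set_links(&pg, 2, 1);
	printf("zone=%u nid=%u\n", page_zone(&pg), page_nid(&pg));
	return 0;
}
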
600 static int __meminit __early_pfn_to_nid(unsigned long pfn,
606 if (state->last_start <= pfn && pfn < state->last_end)
609 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
619 int __meminit early_pfn_to_nid(unsigned long pfn)
625 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
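
Lines 600-625: __early_pfn_to_nid() memoizes the last memblock range it resolved (last_start/last_end plus the nid), so lookups for consecutive pfns skip the range search that memblock_search_pfn_nid() performs. A userspace model of that cache in front of a linear region search; the region table and types are invented for illustration:

#include <stdio.h>

struct region { unsigned long start, end; int nid; };	/* [start, end) in pfns */

static const struct region regions[] = {
	{ 0x000, 0x400, 0 },
	{ 0x400, 0x800, 1 },
};

struct nid_cache { unsigned long last_start, last_end; int last_nid; };

static int pfn_to_nid(unsigned long pfn, struct nid_cache *c)
{
	/* Fast path: pfn falls inside the last range we resolved. */
	if (c->last_start <= pfn && pfn < c->last_end)
		return c->last_nid;

	for (size_t i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		if (regions[i].start <= pfn && pfn < regions[i].end) {
			c->last_start = regions[i].start;
			c->last_end = regions[i].end;
			c->last_nid = regions[i].nid;
			return c->last_nid;
		}
	}
	return -1;	/* no mapping */
}

int main(void)
{
	struct nid_cache cache = { 0, 0, -1 };

	printf("nid(0x100)=%d\n", pfn_to_nid(0x100, &cache));	/* slow path */
	printf("nid(0x101)=%d\n", pfn_to_nid(0x101, &cache));	/* cache hit */
	printf("nid(0x500)=%d\n", pfn_to_nid(0x500, &cache));
	return 0;
}
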
659 /* Returns true if the struct page for the pfn is initialised */
660 static inline bool __meminit early_page_initialised(unsigned long pfn, int nid)
662 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
673 defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
700 (pfn & (PAGES_PER_SECTION - 1)) == 0) {
701 NODE_DATA(nid)->first_deferred_pfn = pfn;
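
Lines 660-701: with CONFIG_DEFERRED_STRUCT_PAGE_INIT, defer_init() stops boot-time memmap initialisation once an early budget is spent and the pfn reaches a section boundary, recording the cut-off in first_deferred_pfn so deferred init can resume there later; early_page_initialised() then treats everything at or past that pfn as untouched. A simplified model (the kernel applies additional conditions, e.g. node geometry and page_ext, omitted here; the budget is invented):

#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_SECTION 0x8000UL	/* illustrative */

static unsigned long first_deferred_pfn = ~0UL;
static unsigned long nr_initialised;
static unsigned long init_budget = 0x10000;	/* pretend early budget */

/* Model of defer_init(): once the budget is spent, stop at the next
 * section boundary and remember where deferred init must resume. */
static bool defer_init(unsigned long pfn, unsigned long end_pfn)
{
	(void)end_pfn;	/* the kernel also checks the node's end here */
	nr_initialised++;
	if (nr_initialised > init_budget &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		first_deferred_pfn = pfn;
		return true;
	}
	return false;
}

int main(void)
{
	for (unsigned long pfn = 0; pfn < 0x40000; pfn++)
		if (defer_init(pfn, 0x40000))
			break;
	printf("deferred from pfn 0x%lx\n", first_deferred_pfn);
	return 0;
}
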
707 static void __meminit init_reserved_page(unsigned long pfn, int nid)
712 if (early_page_initialised(pfn, nid))
720 if (zone_spans_pfn(zone, pfn))
723 __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
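
Lines 707-723: init_reserved_page() has to find which of the node's zones spans the pfn before it can hand the zone index to __init_single_page(). A sketch of that containment scan over an invented zone table:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES 3

struct zone { unsigned long start_pfn, spanned_pages; };

static struct zone zones[MAX_NR_ZONES] = {
	{ 0x0000, 0x1000 },	/* think ZONE_DMA */
	{ 0x1000, 0xF000 },	/* think ZONE_NORMAL */
	{ 0x10000, 0x0 },	/* an empty zone spans nothing */
};

/* Model of zone_spans_pfn(): [start_pfn, start_pfn + spanned_pages) */
static bool zone_spans_pfn(const struct zone *z, unsigned long pfn)
{
	return pfn >= z->start_pfn && pfn < z->start_pfn + z->spanned_pages;
}

static int zone_index_of(unsigned long pfn)
{
	for (int zid = 0; zid < MAX_NR_ZONES; zid++)
		if (zone_spans_pfn(&zones[zid], pfn))
			return zid;
	return -1;
}

int main(void)
{
	printf("pfn 0x2000 -> zone %d\n", zone_index_of(0x2000));	/* 1 */
	printf("pfn 0x10000 -> zone %d\n", zone_index_of(0x10000));	/* -1 */
	return 0;
}
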
728 static inline bool early_page_initialised(unsigned long pfn, int nid)
733 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
738 static inline void init_reserved_page(unsigned long pfn, int nid)
776 overlap_memmap_init(unsigned long zone, unsigned long *pfn)
781 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
783 if (*pfn < memblock_region_memory_end_pfn(r))
787 if (*pfn >= memblock_region_memory_base_pfn(r) &&
789 *pfn = memblock_region_memory_end_pfn(r);
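
Lines 776-789: overlap_memmap_init() lets the range-init loop jump over memblock regions it must not initialise (in the kernel, mirrored regions when laying out ZONE_MOVABLE under mirrored_kernelcore); on a hit it advances *pfn to the region's end and reports the skip. A userspace model that rescans a made-up region table instead of caching the region iterator the way the kernel does:

#include <stdbool.h>
#include <stdio.h>

struct mem_region { unsigned long base_pfn, end_pfn; bool mirrored; };

static const struct mem_region regions[] = {
	{ 0x0000, 0x2000, false },
	{ 0x2000, 0x3000, true  },	/* mirrored: skip for this zone */
	{ 0x3000, 0x8000, false },
};
#define NR_REGIONS (sizeof(regions) / sizeof(regions[0]))

/* Model of overlap_memmap_init(): if *pfn lands in a region the caller
 * must not initialise, jump *pfn to the region's end and report a skip. */
static bool skip_overlap(unsigned long *pfn)
{
	for (size_t i = 0; i < NR_REGIONS; i++) {
		if (*pfn >= regions[i].base_pfn && *pfn < regions[i].end_pfn) {
			if (regions[i].mirrored) {
				*pfn = regions[i].end_pfn;
				return true;
			}
			return false;
		}
	}
	return false;
}

int main(void)
{
	unsigned long inited = 0;

	for (unsigned long pfn = 0x1FFE; pfn < 0x3002; ) {
		if (skip_overlap(&pfn))
			continue;	/* pfn already advanced */
		inited++;		/* ... initialise page at pfn ... */
		pfn++;
	}
	printf("initialised %lu pfns\n", inited);	/* 4: hole skipped */
	return 0;
}
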
823 unsigned long pfn;
826 for (pfn = spfn; pfn < epfn; pfn++) {
827 if (!pfn_valid(pageblock_start_pfn(pfn))) {
828 pfn = pageblock_end_pfn(pfn) - 1;
831 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
832 __SetPageReserved(pfn_to_page(pfn));
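
Lines 823-832: this loop walks a range of unavailable pfns, and when the first pfn of a pageblock has no memmap behind it, it leaps to the block's last pfn so the loop increment lands on the next block's start; valid pfns get initialised and marked reserved. A model of the pageblock-skip trick (block size and the validity map are invented):

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9
#define PAGEBLOCK_NR    (1UL << PAGEBLOCK_ORDER)

static unsigned long pageblock_start(unsigned long pfn)
{ return pfn & ~(PAGEBLOCK_NR - 1); }

static unsigned long pageblock_end(unsigned long pfn)
{ return pageblock_start(pfn) + PAGEBLOCK_NR; }

/* Pretend only pageblocks 2 and 3 have a memmap backing them. */
static bool pfn_valid(unsigned long pfn)
{ return pfn >= 2 * PAGEBLOCK_NR && pfn < 4 * PAGEBLOCK_NR; }

int main(void)
{
	unsigned long initialised = 0;

	for (unsigned long pfn = 0; pfn < 6 * PAGEBLOCK_NR; pfn++) {
		/* No memmap for this whole pageblock: leap over it so the
		 * loop increment lands on the next block's first pfn. */
		if (!pfn_valid(pageblock_start(pfn))) {
			pfn = pageblock_end(pfn) - 1;
			continue;
		}
		/* ... __init_single_page() + __SetPageReserved() here ... */
		initialised++;
	}
	printf("initialised %lu pfns\n", initialised);	/* two blocks' worth */
	return 0;
}
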
855 unsigned long pfn, end_pfn = start_pfn + size;
879 for (pfn = start_pfn; pfn < end_pfn; ) {
885 if (overlap_memmap_init(zone, &pfn))
887 if (defer_init(nid, pfn, zone_end_pfn)) {
893 page = pfn_to_page(pfn);
894 __init_single_page(page, pfn, zone, nid);
903 if (pageblock_aligned(pfn)) {
907 pfn++;
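
Lines 855-907: the main range-initialisation loop combines the pieces above: skip holes via overlap_memmap_init(), break out when defer_init() says the remainder is handled later, initialise each page, and tag the migratetype once per pageblock. A control-flow skeleton with stubbed-out predicates (the thresholds are arbitrary):

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR 512UL

/* Stubs standing in for overlap_memmap_init() and defer_init(). */
static bool overlap_skip(unsigned long *pfn) { (void)pfn; return false; }
static bool defer_rest(unsigned long pfn)    { return pfn >= 1536; }

int main(void)
{
	unsigned long end_pfn = 4096, done = 0, blocks = 0;

	for (unsigned long pfn = 0; pfn < end_pfn; ) {
		if (overlap_skip(&pfn))	/* pfn already advanced past hole */
			continue;
		if (defer_rest(pfn))	/* leave the tail for deferred init */
			break;
		/* ... __init_single_page(pfn_to_page(pfn), pfn, ...) ... */
		done++;
		/* First pfn of each pageblock: tag its migratetype once. */
		if ((pfn & (PAGEBLOCK_NR - 1)) == 0)
			blocks++;	/* set_pageblock_migratetype() here */
		pfn++;
	}
	printf("initialised %lu pfns, tagged %lu pageblocks\n", done, blocks);
	return 0;
}
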
973 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn,
978 __init_single_page(page, pfn, zone_idx, nid);
1007 if (pageblock_aligned(pfn)) {
1044 unsigned long pfn, end_pfn = head_pfn + nr_pages;
1048 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) {
1049 struct page *page = pfn_to_page(pfn);
1051 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1052 prep_compound_tail(head, pfn - head_pfn);
1060 if (pfn == head_pfn + 1)
1070 unsigned long pfn, end_pfn = start_pfn + nr_pages;
1091 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) {
1092 struct page *page = pfn_to_page(pfn);
1094 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap);
1099 memmap_init_compound(page, pfn, zone_idx, nid, pgmap,
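
Lines 973-1099: device-memory memmap init strides over the range one compound page at a time (pfns_per_compound), initialising the head first and then pointing every tail back at it; prep_compound_tail() does that tagging in the kernel, conventionally by storing the head pointer with bit 0 set. A userspace model of the stride-plus-tail-tagging shape (sizes and the one-field struct page are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NR_PFNS 32UL

struct page { uintptr_t compound_head; };	/* head | 1 for tail pages */

static struct page memmap[NR_PFNS];

static struct page *compound_head(struct page *p)
{
	return (p->compound_head & 1u) ?
		(struct page *)(p->compound_head - 1) : p;
}

int main(void)
{
	unsigned long per_compound = 8;	/* illustrative folio size in pfns */

	/* Stride over the range one compound at a time; within each
	 * stride the head comes first and every tail points at it. */
	for (unsigned long pfn = 0; pfn < NR_PFNS; pfn += per_compound) {
		struct page *head = &memmap[pfn];

		for (unsigned long t = pfn + 1; t < pfn + per_compound; t++)
			memmap[t].compound_head = (uintptr_t)head | 1u;
	}

	printf("pfn 13 -> head pfn %td\n",
	       compound_head(&memmap[13]) - memmap);	/* 8 */
	return 0;
}
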
1920 * This is used to test whether pfn -> nid mapping of the chosen memory
1924 * Return: the determined alignment in pfn's. 0 if there is no alignment
1943 * start pfn and tick off bits one-by-one until it becomes
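
Lines 1920-1943 are comment fragments from what reads like node_map_pfn_alignment(): to find the coarsest pfn alignment that still separates neighbouring nids, it starts with a mask fine enough to pin-point a range's start pfn and shifts bits off one by one until the mask would lump the previous node's end in with the current node's start. A model of that widening step for a single internode boundary (__builtin_ctzl is a GCC/Clang builtin standing in for the kernel's __ffs(); the boundary pfns are invented):

#include <stdio.h>

/* Model of the mask-widening step: start with a mask granular enough
 * to pin-point `start` and widen it until it can no longer separate
 * the previous node's end from this node's start. */
static unsigned long internode_mask(unsigned long last_end,
				    unsigned long start)
{
	unsigned long mask = ~((1UL << __builtin_ctzl(start)) - 1);

	while (mask && last_end <= (start & (mask << 1)))
		mask <<= 1;
	return mask;
}

int main(void)
{
	/* Node 0 ends at pfn 0x6000, node 1 starts there. */
	unsigned long accl_mask = internode_mask(0x6000, 0x6000);

	/* Alignment in pfns = lowest bit kept by the accumulated mask. */
	printf("alignment: 0x%lx pfns\n", ~accl_mask + 1);	/* 0x2000 */
	return 0;
}
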
1959 static void __init deferred_free_range(unsigned long pfn,
1968 page = pfn_to_page(pfn);
1971 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) {
1979 accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages));
1981 for (i = 0; i < nr_pages; i++, page++, pfn++) {
1982 if (pageblock_aligned(pfn))
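
Lines 1959-1982: deferred_free_range() hands a fully populated, max-order-aligned chunk to the buddy allocator in one call, and otherwise frees page by page, tagging each pageblock's migratetype as the loop enters it (the kernel also accepts the memory first on platforms with unaccepted memory, per the accept_memory() call at line 1979). A simplified model with print statements in place of the allocator calls:

#include <stdio.h>

#define MAX_ORDER		10
#define MAX_ORDER_NR_PAGES	(1UL << MAX_ORDER)
#define PAGEBLOCK_NR		512UL

static void free_pages_order(unsigned long pfn, unsigned int order)
{
	printf("free 2^%u pages at pfn %lu\n", order, pfn);
}

static void set_block_type(unsigned long pfn)
{
	printf("  pageblock at pfn %lu tagged movable\n", pfn);
}

/* Model of deferred_free_range(): a full, aligned max-order chunk goes
 * to the buddy in one call; anything else goes page by page, tagging
 * each pageblock as we cross into it. */
static void deferred_free_range(unsigned long pfn, unsigned long nr_pages)
{
	if (nr_pages == MAX_ORDER_NR_PAGES &&
	    (pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
		set_block_type(pfn);
		free_pages_order(pfn, MAX_ORDER);
		return;
	}
	for (unsigned long i = 0; i < nr_pages; i++, pfn++) {
		if ((pfn & (PAGEBLOCK_NR - 1)) == 0)
			set_block_type(pfn);
		free_pages_order(pfn, 0);
	}
}

int main(void)
{
	deferred_free_range(2048, MAX_ORDER_NR_PAGES);	/* one bulk free */
	deferred_free_range(510, 4);	/* crosses a pageblock boundary */
	return 0;
}
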
2002 * validity of the head pfn.
2004 static inline bool __init deferred_pfn_valid(unsigned long pfn)
2006 if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn))
2015 static void __init deferred_free_pages(unsigned long pfn,
2020 for (; pfn < end_pfn; pfn++) {
2021 if (!deferred_pfn_valid(pfn)) {
2022 deferred_free_range(pfn - nr_free, nr_free);
2024 } else if (IS_MAX_ORDER_ALIGNED(pfn)) {
2025 deferred_free_range(pfn - nr_free, nr_free);
2032 deferred_free_range(pfn - nr_free, nr_free);
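
Lines 2002-2032: deferred_free_pages() accumulates a run of valid pfns in nr_free and flushes it to deferred_free_range() whenever a hole or a max-order boundary interrupts the run, plus once more for the trailing run. Per the comment around line 2002, the kernel only needs to test validity at the head pfn of each max-order chunk; the model below checks every pfn for simplicity, and its hole layout is invented:

#include <stdbool.h>
#include <stdio.h>

#define CHUNK_NR 1024UL	/* illustrative max-order chunk, in pfns */

/* Pretend the second chunk has no memmap behind it. */
static bool pfn_ok(unsigned long pfn)
{
	return pfn / CHUNK_NR != 1;
}

static void flush(unsigned long start, unsigned long nr_free)
{
	if (nr_free)
		printf("free %lu pages at pfn %lu\n", nr_free, start);
}

/* Model of deferred_free_pages(): gather a run of valid pfns and hand
 * it off whenever a hole or a max-order boundary interrupts it. */
static void deferred_free_pages(unsigned long pfn, unsigned long end_pfn)
{
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!pfn_ok(pfn)) {
			flush(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if ((pfn & (CHUNK_NR - 1)) == 0) {
			flush(pfn - nr_free, nr_free);
			nr_free = 1;	/* current pfn opens a new run */
		} else {
			nr_free++;
		}
	}
	flush(pfn - nr_free, nr_free);	/* trailing run */
}

int main(void)
{
	deferred_free_pages(0, 3 * CHUNK_NR);	/* chunks 0 and 2 freed */
	return 0;
}
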
2036 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
2041 unsigned long pfn,
2049 for (; pfn < end_pfn; pfn++) {
2050 if (!deferred_pfn_valid(pfn)) {
2053 } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) {
2054 page = pfn_to_page(pfn);
2058 __init_single_page(page, pfn, zid, nid);
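
Lines 2036-2058: deferred_init_pages() avoids paying for pfn_to_page() on every iteration; because the memmap is contiguous within a max-order chunk, it re-derives the page pointer only at chunk boundaries or after a hole and otherwise just increments it. A userspace model that counts how many lookups the trick saves (chunk size and range are invented):

#include <stdio.h>

#define CHUNK_NR 8UL	/* illustrative max-order chunk, in pfns */
#define NR_PFNS  24UL

struct page { unsigned long pfn; };

static struct page memmap[NR_PFNS];
static unsigned long lookups;	/* how often we paid for pfn_to_page() */

static struct page *pfn_to_page(unsigned long pfn)
{
	lookups++;
	return &memmap[pfn];
}

int main(void)
{
	struct page *page = NULL;

	/* The memmap is contiguous inside a chunk, so after one
	 * pfn_to_page() lookup we can walk the pointer forward. */
	for (unsigned long pfn = 0; pfn < NR_PFNS; pfn++) {
		if (!page || (pfn & (CHUNK_NR - 1)) == 0)
			page = pfn_to_page(pfn);
		else
			page++;
		page->pfn = pfn;	/* stand-in for __init_single_page() */
	}
	printf("%lu lookups for %lu pfns\n", lookups, NR_PFNS);	/* 3 for 24 */
	return 0;
}
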
2567 void __init memblock_free_pages(struct page *page, unsigned long pfn,
2572 int nid = early_pfn_to_nid(pfn);
2574 if (!early_page_initialised(pfn, nid))
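
Lines 2567-2574: memblock_free_pages() refuses to free a range whose struct pages have not been initialised yet, i.e. anything at or past the node's first_deferred_pfn; the deferred path frees those pages later. A sketch of the guard (the cut-off value is invented):

#include <stdbool.h>
#include <stdio.h>

static unsigned long first_deferred_pfn = 0x10000;	/* illustrative */

/* Model of early_page_initialised(): anything at or past the node's
 * first deferred pfn has no initialised struct page yet. */
static bool early_page_initialised(unsigned long pfn)
{
	return pfn < first_deferred_pfn;
}

static void memblock_free_pages(unsigned long pfn, unsigned int order)
{
	if (!early_page_initialised(pfn))
		return;		/* deferred init will free this later */
	printf("freeing 2^%u pages at pfn 0x%lx\n", order, pfn);
}

int main(void)
{
	memblock_free_pages(0x8000, 0);		/* freed now */
	memblock_free_pages(0x20000, 0);	/* left for deferred init */
	return 0;
}
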