/linux-master/drivers/xen/

  mem-reservation.c
      37  unsigned long pfn = page_to_pfn(page);  [local]
      48  set_phys_to_machine(pfn, frames[i]);
      51  (unsigned long)__va(pfn << PAGE_SHIFT),
      65  unsigned long pfn = page_to_pfn(page);  [local]
      75  (unsigned long)__va(pfn << PAGE_SHIFT),
      79  __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
/linux-master/arch/arm64/kvm/hyp/include/nvhe/

  mem_protect.h
      66  int __pkvm_host_share_hyp(u64 pfn);
      67  int __pkvm_host_unshare_hyp(u64 pfn);
      68  int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
      69  int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
      70  int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
      71  int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
/linux-master/mm/

  memory-failure.c
      77  void num_poisoned_pages_inc(unsigned long pfn)  [argument]
      80  memblk_nr_poison_inc(pfn);
      83  void num_poisoned_pages_sub(unsigned long pfn, long i)  [argument]
      86  if (pfn != -1UL)
      87  memblk_nr_poison_sub(pfn, i);
     340  static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)  [argument]
     347  pfn, t->comm, t->pid);
     522  kill_procs(struct list_head *to_kill, int forcekill, bool fail, unsigned long pfn, int flags)  [argument]
     536  pfn, tk->tsk->comm, tk->tsk->pid);
     547  else if (kill_proc(tk, pfn, flags) < 0)
     740  unsigned long pfn;  [member in struct hwpoison_walk]
     753  unsigned long pfn = 0;  [local]
     776  unsigned long pfn;  [local]
     864  kill_accessing_process(struct task_struct *p, unsigned long pfn, int flags)  [argument]
     948  truncate_error_folio(struct folio *folio, unsigned long pfn, struct address_space *mapping)  [argument]
    1300  update_per_node_mf_stats(unsigned long pfn, enum mf_result result)  [argument]
    1337  action_result(unsigned long pfn, enum mf_action_page_type type, enum mf_result result)  [argument]
    1352  page_action(struct page_state *ps, struct page *p, unsigned long pfn)  [argument]
    1570  hwpoison_user_mappings(struct page *p, unsigned long pfn, int flags, struct page *hpage)  [argument]
    1674  identify_page_state(unsigned long pfn, struct page *p, unsigned long page_flags)  [argument]
    1711  unmap_and_kill(struct list_head *to_kill, unsigned long pfn, struct address_space *mapping, pgoff_t index, int flags)  [argument]
    1745  mf_generic_kill_procs(unsigned long long pfn, int flags, struct dev_pagemap *pgmap)  [argument]
    1999  __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared)  [argument]
    2052  try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)  [argument]
    2120  try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)  [argument]
    2132  put_ref_page(unsigned long pfn, int flags)  [argument]
    2144  memory_failure_dev_pagemap(unsigned long pfn, int flags, struct dev_pagemap *pgmap)  [argument]
    2197  memory_failure(unsigned long pfn, int flags)  [argument]
    2406  unsigned long pfn;  [member in struct memory_failure_entry]
    2435  memory_failure_queue(unsigned long pfn, int flags)  [argument]
    2528  unpoison_memory(unsigned long pfn)  [argument]
    2673  unsigned long pfn = page_to_pfn(page);  [local]
    2764  soft_offline_page(unsigned long pfn, int flags)  [argument]
    [all...]
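    Of the hits above, memory_failure() at line 2197 is the externally visible
    entry point; most of the rest feeds it. For orientation, a minimal userspace
    sketch that drives the same machinery against one of the caller's own pages
    through madvise(MADV_HWPOISON). The interface is real, but it needs
    CAP_SYS_ADMIN plus CONFIG_MEMORY_FAILURE, and it genuinely poisons the page:

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <string.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                long psz = sysconf(_SC_PAGESIZE);
                char *p;

                /* One private anonymous page; touching it makes it resident. */
                p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                if (p == MAP_FAILED)
                        return 1;
                memset(p, 0xa5, psz);

                /* Runs the kernel's hwpoison handling on this page's pfn. */
                if (madvise(p, psz, MADV_HWPOISON)) {
                        perror("madvise(MADV_HWPOISON)");
                        return 1;
                }
                puts("page poisoned; the next access raises SIGBUS");
                return 0;
        }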
  io-mapping.c
      11  * @pfn: physical address of kernel memory
      16  io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size)  [argument]
      25  return remap_pfn_range_notrack(vma, addr, pfn, size,
  memory_hotplug.c
      28  #include <linux/pfn.h>
     299  static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)  [argument]
     316  if (!IS_ALIGNED(pfn | nr_pages, min_align))
     322  * Return page for the valid pfn only if the page is online. All pfn
     326  struct page *pfn_to_online_page(unsigned long pfn)  [argument]
     328  unsigned long nr = pfn_to_section_nr(pfn);
     343  if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))
     346  if (!pfn_section_valid(ms, pfn))
     350  return pfn_to_page(pfn);
     369  __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, struct mhp_params *params)  [argument]
     439  unsigned long pfn;  [local]
     462  unsigned long pfn;  [local]
     533  unsigned long pfn, cur_nr_pages;  [local]
     573  __remove_pages(unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap)  [argument]
     647  unsigned long pfn;  [local]
     729  section_taint_zone_device(unsigned long pfn)  [argument]
     736  section_taint_zone_device(unsigned long pfn)  [argument]
     963  auto_movable_zone_for_pfn(int nid, struct memory_group *group, unsigned long pfn, unsigned long nr_pages)  [argument]
    1089  mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, struct zone *zone, bool mhp_off_inaccessible)  [argument]
    1124  mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)  [argument]
    1147  online_pages(unsigned long pfn, unsigned long nr_pages, struct zone *zone, struct memory_group *group)  [argument]
    1731  unsigned long pfn;  [local]
    1777  unsigned long pfn;  [local]
    1947  unsigned long pfn, system_ram_pages = 0;  [local]
    [all...]
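    pfn_to_online_page() at line 326 is the safe pfn-to-struct-page conversion:
    it returns NULL for holes, offline sections and ZONE_DEVICE memory instead
    of handing back a bogus page. A kernel-side sketch of the usual walking
    pattern; count_online_pages() is a made-up helper:

        #include <linux/memory_hotplug.h>
        #include <linux/mmzone.h>

        /* Made-up helper: count pages in [start_pfn, end_pfn) that the
         * buddy allocator actually manages. */
        static unsigned long count_online_pages(unsigned long start_pfn,
                                                unsigned long end_pfn)
        {
                unsigned long pfn, online = 0;

                for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                        /* NULL for holes, offline sections, ZONE_DEVICE pfns */
                        if (pfn_to_online_page(pfn))
                                online++;
                }
                return online;
        }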
  bootmem_info.c
     104  unsigned long i, pfn, end_pfn, nr_pages;  [local]
     114  pfn = pgdat->node_start_pfn;
     118  for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
     120  * Some platforms can assign the same pfn to multiple nodes - on
     121  * node0 as well as nodeN. To avoid registering a pfn against
     122  * multiple nodes we check that this pfn does not already
     125  if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
     126  register_page_bootmem_info_section(pfn);
    [all...]
  hwpoison-inject.c
       2  /* Inject a hwpoison memory failure on an arbitrary pfn */
      16  unsigned long pfn = val;  [local]
      24  if (!pfn_valid(pfn))
      27  p = pfn_to_page(pfn);
      50  pr_info("Injecting memory failure at pfn %#lx\n", pfn);
      51  err = memory_failure(pfn, MF_SW_SIMULATED);
      81  debugfs_create_file("corrupt-pfn", 0200, hwpoison_dir, NULL,
      84  debugfs_create_file("unpoison-pfn", 0200, hwpoison_dir, NULL,
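    The two debugfs files registered at lines 81 and 84 take a pfn written as
    text (hex with an 0x prefix appears to be accepted, since the attribute
    parser uses kstrtoull with base 0). A userspace sketch, assuming
    CONFIG_HWPOISON_INJECT and a mounted debugfs; inject_hwpoison() is
    hypothetical:

        #include <stdio.h>

        /* Hypothetical wrapper: inject a software-simulated memory failure
         * (memory_failure() with MF_SW_SIMULATED) at the given pfn. */
        static int inject_hwpoison(unsigned long pfn)
        {
                FILE *f = fopen("/sys/kernel/debug/hwpoison/corrupt-pfn", "w");

                if (!f)
                        return -1;
                fprintf(f, "%#lx\n", pfn);
                return fclose(f); /* flush; nonzero if the write was rejected */
        }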
/linux-master/arch/arm64/mm/

  ioremap.c
      34  unsigned long pfn = PHYS_PFN(offset);  [local]
      36  return pfn_is_map_memory(pfn);
/linux-master/arch/xtensa/mm/

  ioremap.c
      16  unsigned long pfn = __phys_to_pfn((phys_addr));  [local]
      17  WARN_ON(pfn_valid(pfn));
/linux-master/arch/powerpc/platforms/powernv/

  memtrace.c
      94  unsigned long pfn;  [local]
      97  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
      98  if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
     100  clear_page(__va(PFN_PHYS(pfn)));
     114  unsigned long pfn, start_pfn;  [local]
     138  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn ...
     218  unsigned long pfn;  [local]
    [all...]
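    The loop at lines 97-100 is the whole pattern: walk the pfn range through
    the linear map and yield once per memory section so large ranges do not
    monopolize the CPU. Reconstructed as a self-contained sketch
    (PAGES_PER_SECTION assumes SPARSEMEM):

        #include <linux/mm.h>
        #include <linux/mmzone.h>
        #include <linux/pfn.h>
        #include <linux/sched.h>

        /* Zero every page in [start_pfn, start_pfn + nr_pages),
         * rescheduling at section boundaries. */
        static void clear_pfn_range(unsigned long start_pfn,
                                    unsigned long nr_pages)
        {
                unsigned long pfn;

                for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
                        if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
                                cond_resched();
                        clear_page(__va(PFN_PHYS(pfn)));
                }
        }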
/linux-master/arch/csky/abiv1/

  cacheflush.c
      47  unsigned long pfn = pte_pfn(*ptep);  [local]
      52  if (!pfn_valid(pfn))
      55  if (is_zero_pfn(pfn))
      58  folio = page_folio(pfn_to_page(pfn));
/linux-master/tools/testing/memblock/

  internal.h
      18  void memblock_free_pages(struct page *page, unsigned long pfn,  [argument]
/linux-master/tools/testing/selftests/mm/

  hugepage-vmemmap.c
      66  static int check_page_flags(unsigned long pfn)  [argument]
      75  lseek(fd, pfn * sizeof(pageflags), SEEK_SET);
     107  unsigned long pfn;  [local]
     125  pfn = virt_to_pfn(addr);
     126  if (pfn == -1UL) {
     132  printf("Returned address is %p whose pfn is %lx\n", addr, pfn);
     134  if (check_page_flags(pfn) < 0) {
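    The selftest's virt_to_pfn() at line 125 is built on /proc/self/pagemap:
    one 64-bit entry per virtual page, with the pfn in bits 0-54 and a
    "present" flag in bit 63. A standalone sketch of that lookup, written
    independently of the selftest's helpers:

        #include <fcntl.h>
        #include <stdint.h>
        #include <unistd.h>

        /* Return the pfn backing addr, or (uint64_t)-1 on error or if the
         * page is not present. Non-zero pfns require CAP_SYS_ADMIN on
         * recent kernels. */
        static uint64_t virt_to_pfn(void *addr)
        {
                uint64_t entry, pfn = (uint64_t)-1;
                long psz = sysconf(_SC_PAGESIZE);
                int fd = open("/proc/self/pagemap", O_RDONLY);

                if (fd < 0)
                        return pfn;
                if (pread(fd, &entry, sizeof(entry),
                          (uintptr_t)addr / psz * sizeof(entry)) == sizeof(entry)
                    && (entry & (1ULL << 63)))        /* bit 63: present  */
                        pfn = entry & ((1ULL << 55) - 1); /* bits 0-54: pfn */
                close(fd);
                return pfn;
        }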
/linux-master/include/trace/events/

  kmem.h
     143  __field( unsigned long, pfn )
     148  __entry->pfn = page_to_pfn(page);
     152  TP_printk("page=%p pfn=0x%lx order=%d",
     153  pfn_to_page(__entry->pfn),
     154  __entry->pfn,
     165  __field( unsigned long, pfn )
     169  __entry->pfn = page_to_pfn(page);
     172  TP_printk("page=%p pfn=0x%lx order=0",
     173  pfn_to_page(__entry->pfn),
     174  __entry->pfn)
    [all...]
  page_pool.h
      53  __field(unsigned long, pfn)
      60  __entry->pfn = page_to_pfn(page);
      63  TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u",
      64  __entry->pool, __entry->page, __entry->pfn, __entry->release)
      78  __field(unsigned long, pfn)
      85  __entry->pfn = page_to_pfn(page);
      88  TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u",
      89  __entry->pool, __entry->page, __entry->pfn, __entry->hold)
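    Both headers use the same TRACE_EVENT idiom: TP_fast_assign() stores the
    pfn rather than the struct page pointer (the page may be long gone by the
    time the ring buffer is read), and TP_printk() rebuilds the pointer for
    display. A minimal sketch of that pattern for a hypothetical "mymod"
    subsystem, assuming the header lives at include/trace/events/mymod.h:

        #undef TRACE_SYSTEM
        #define TRACE_SYSTEM mymod

        #if !defined(_TRACE_MYMOD_H) || defined(TRACE_HEADER_MULTI_READ)
        #define _TRACE_MYMOD_H

        #include <linux/mm.h>
        #include <linux/tracepoint.h>

        TRACE_EVENT(mymod_page_touch,

                TP_PROTO(struct page *page),

                TP_ARGS(page),

                TP_STRUCT__entry(
                        __field(unsigned long, pfn)
                ),

                TP_fast_assign(
                        /* record the stable pfn, not the transient pointer */
                        __entry->pfn = page_to_pfn(page);
                ),

                TP_printk("page=%p pfn=0x%lx",
                          pfn_to_page(__entry->pfn), __entry->pfn)
        );

        #endif /* _TRACE_MYMOD_H */

        /* This part must be outside the include guard */
        #include <trace/define_trace.h>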
/linux-master/arch/loongarch/include/asm/

  page.h
      26  #include <linux/pfn.h>
      75  #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
      81  #define pfn_to_phys(pfn) __pfn_to_phys(pfn)
     110  #define pfn_to_virt(pfn) page_to_virt(pfn_to_page(pfn))
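    These macros chain, so one pfn yields a physical address, a linear-map
    virtual address, or a struct page, and each view converts back. An
    illustrative kernel-side round trip; pfn_views() is a made-up helper and
    assumes a pfn backed by linear-mapped RAM:

        #include <linux/mm.h>
        #include <linux/pfn.h>

        /* Made-up helper: the three views of one linear-mapped page. */
        static void pfn_views(unsigned long pfn)
        {
                phys_addr_t phys = PFN_PHYS(pfn);     /* pfn -> physical    */
                void *kaddr = pfn_to_kaddr(pfn);      /* pfn -> linear vaddr */
                struct page *pg = pfn_to_page(pfn);   /* pfn -> struct page */

                WARN_ON(PHYS_PFN(phys) != pfn);        /* physical -> pfn   */
                WARN_ON(page_to_pfn(pg) != pfn);       /* page -> pfn       */
                WARN_ON(PHYS_PFN(__pa(kaddr)) != pfn); /* vaddr -> pfn      */
        }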
/linux-master/tools/testing/selftests/kvm/

  access_tracking_perf_test.c
      99  uint64_t pfn;  [local]
     105  pfn = entry & PAGEMAP_PFN_MASK;
     106  __TEST_REQUIRE(pfn, "Looking up PFNs requires CAP_SYS_ADMIN");
     108  return pfn;
     111  static bool is_page_idle(int page_idle_fd, uint64_t pfn)  [argument]
     113  uint64_t bits = pread_uint64(page_idle_fd, "page_idle", pfn / 64);
     115  return !!((bits >> (pfn % 64)) & 1);
     118  static void mark_page_idle(int page_idle_fd, uint64_t pfn)  [argument]
     120  uint64_t bits = 1ULL << (pfn % 64);
     122  TEST_ASSERT(pwrite(page_idle_fd, &bits, 8, 8 * (pfn / 64)) == 8,
     123  "Set page_idle bits for PFN 0x%" PRIx64, pfn);
     150  uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva);  [local]
    [all...]
/linux-master/arch/x86/power/

  hibernate_32.c
      83  unsigned long pfn;  [local]
      91  pfn = 0;
      98  if (pfn >= max_low_pfn)
     102  if (pfn >= max_low_pfn)
     110  set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
     111  pfn += PTRS_PER_PTE;
     120  for (; pte < max_pte; pte++, pfn++) {
     121  if (pfn >= max_low_pfn)
     124  set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
/linux-master/include/linux/

  highmem-internal.h
       9  void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
      87  static inline void *kmap_local_pfn(unsigned long pfn)  [argument]
      89  return __kmap_local_pfn_prot(pfn, kmap_prot);
     113  static inline void *kmap_atomic_pfn(unsigned long pfn)  [argument]
     121  return __kmap_local_pfn_prot(pfn, kmap_prot);
     198  static inline void *kmap_local_pfn(unsigned long pfn)  [argument]
     200  return kmap_local_page(pfn_to_page(pfn));
     225  static inline void *kmap_atomic_pfn(unsigned long pfn)  [argument]
     227  return kmap_atomic(pfn_to_page(pfn));
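    kmap_local_pfn() appears twice because the header provides both builds: the
    HIGHMEM variant (line 87) creates a temporary CPU-local mapping, while the
    !HIGHMEM variant (line 198) degenerates to the linear map. Either way the
    calling convention is identical: map, use, unmap in reverse order, all in
    the same context. A short sketch; zero_page_by_pfn() is hypothetical:

        #include <linux/highmem.h>
        #include <linux/string.h>

        /* Hypothetical helper: zero a page identified only by its pfn. */
        static void zero_page_by_pfn(unsigned long pfn)
        {
                void *va = kmap_local_pfn(pfn); /* CPU-local, nestable */

                memset(va, 0, PAGE_SIZE);
                kunmap_local(va);               /* release in reverse order */
        }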
  iova.h
      20  unsigned long pfn_hi;  /* Highest allocated pfn */
      21  unsigned long pfn_lo;  /* Lowest allocated pfn */
      33  unsigned long granule;  /* pfn granularity for this domain */
      84  void free_iova(struct iova_domain *iovad, unsigned long pfn);
      89  void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
      98  struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
     110  static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)  [argument]
     126  free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)  [argument]
     153  find_iova(struct iova_domain *iovad, unsigned long pfn)  [argument]
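    The domain hands out ranges recorded as [pfn_lo, pfn_hi] in units of
    granule. A sketch of the basic alloc/find/free cycle; the granule, start
    and limit pfns here are arbitrary, and real users pin the iova kmem cache
    first:

        #include <linux/iova.h>
        #include <linux/sizes.h>

        /* Illustrative only: parameters are made up for the example. */
        static int iova_example(void)
        {
                struct iova_domain iovad;
                struct iova *iova;
                int ret = iova_cache_get();   /* pin the iova kmem cache */

                if (ret)
                        return ret;
                init_iova_domain(&iovad, SZ_4K, 1); /* 4K granule, start pfn 1 */

                /* 16 granules, size-aligned, below an arbitrary limit pfn */
                iova = alloc_iova(&iovad, 16, 0x100000, true);
                if (iova) {
                        WARN_ON(find_iova(&iovad, iova->pfn_lo) != iova);
                        free_iova(&iovad, iova->pfn_lo); /* look up, then free */
                }

                put_iova_domain(&iovad);
                iova_cache_put();
                return iova ? 0 : -ENOMEM;
        }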
/linux-master/arch/loongarch/kernel/

  vdso.c
      51  unsigned long pfn;  [local]
      57  pfn = sym_to_pfn(vdso_data);
      59  pfn = page_to_pfn(timens_page);
      72  pfn = sym_to_pfn(vdso_data);
      76  pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
      82  return vmf_insert_pfn(vma, vmf->address, pfn);
     102  unsigned long i, cpu, pfn;  [local]
     110  pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
     112  vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
/linux-master/arch/arm64/kernel/

  hibernate.c
      91  int pfn_is_nosave(unsigned long pfn)  [argument]
      96  return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
      97  crash_is_nosave(pfn);
     220  static int save_tags(struct page *page, unsigned long pfn)  [argument]
     230  ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
     258  unsigned long pfn, max_zone_pfn;  [local]
     267  for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
     299  unsigned long pfn = xa_state.xa_index;  [local]
    [all...]
/linux-master/arch/arm/mm/

  fault-armv.c
      36  do_adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn, pte_t *ptep)  [argument]
      52  flush_cache_page(vma, address, pfn);
      53  outer_flush_range((pfn << PAGE_SHIFT),
      54  (pfn << PAGE_SHIFT) + PAGE_SIZE);
      88  adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)  [argument]
     126  ret = do_adjust_pte(vma, address, pfn, pte);
     135  make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned long pfn)  [argument]
     163  aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
     167  do_adjust_pte(vma, addr, pfn, ptep);
     186  unsigned long pfn ...  [local]
    [all...]
/linux-master/virt/kvm/

  pfncache.c
      35  if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
      48  if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
      99  static void *gpc_map(kvm_pfn_t pfn)  [argument]
     101  if (pfn_valid(pfn))
     102  return kmap(pfn_to_page(pfn));
     105  return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
     111  static void gpc_unmap(kvm_pfn_t pfn, void *khva)  [argument]
     113  /* Unmap the old pfn/page if it was mapped before. */
     114  if (is_error_noslot_pfn(pfn) || !khva)
     117  if (pfn_valid(pfn)) {
    [all...]
/linux-master/arch/microblaze/include/asm/

  page.h
      15  #include <linux/pfn.h>
      74  * Conversions for virtual address, physical address, pfn, and struct
      80  * | linux/pfn.h
      81  * pfn -+
      97  extern int page_is_ram(unsigned long pfn);
     100  # define pfn_to_phys(pfn) (PFN_PHYS(pfn))
     130  static inline const void *pfn_to_virt(unsigned long pfn)  [argument]
     132  return __va(pfn_to_phys((pfn)));