/linux-master/include/linux/iova.h

    20: unsigned long pfn_hi;    /* Highest allocated pfn */
    21: unsigned long pfn_lo;    /* Lowest allocated pfn */
    33: unsigned long granule;   /* pfn granularity for this domain */
    89: void free_iova(struct iova_domain *iovad, unsigned long pfn);
    94: void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size);
   103: struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
   115: static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
   131: static inline void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
   158: static inline struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
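Hits 89-103 are the IOVA range allocator API; the static inline variants at 115/131/158 are the fallback stubs used when the allocator is compiled out. A minimal kernel-context sketch of one allocate/free cycle with this API; the domain name, granule, and pfn limit below are illustrative, not taken from the hits:

#include <linux/iova.h>
#include <linux/sizes.h>

static void iova_demo(void)
{
	struct iova_domain demo_domain;	/* hypothetical domain */
	struct iova *range;

	iova_cache_get();		/* backs struct iova allocations */
	/* 4 KiB granule; hand out pfns starting at 1 */
	init_iova_domain(&demo_domain, SZ_4K, 1);

	/* 16 pages, size-aligned, below the 32-bit pfn limit */
	range = alloc_iova(&demo_domain, 16,
			   (1UL << (32 - PAGE_SHIFT)) - 1, true);
	if (range)
		free_iova(&demo_domain, range->pfn_lo);	/* free by low pfn */

	put_iova_domain(&demo_domain);
	iova_cache_put();
}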
/linux-master/include/linux/crash_dump.h

    24: unsigned long from, unsigned long pfn,
    27: ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
    29: ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
   111: bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
/linux-master/scripts/gdb/linux/mm.py

   133: def pfn_to_section_nr(self, pfn):
   134:     return pfn >> self.PFN_SECTION_SHIFT
   139: def __pfn_to_section(self, pfn):
   140:     return self.__nr_to_section(self.pfn_to_section_nr(pfn))
   142: def pfn_to_section(self, pfn):
   143:     return self.__pfn_to_section(pfn)
   145: def subsection_map_index(self, pfn):
   146:     return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION
   148: def pfn_section_valid(self, ms, pfn):
   150:     idx = self.subsection_map_index(pfn)
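These gdb helpers mirror the kernel's sparsemem macros (pfn_to_section_nr() and friends in include/linux/mmzone.h). A runnable userspace rendering of the same arithmetic; the section and page sizes are assumptions matching a common x86-64 configuration, not values from the script:

#include <stdio.h>

/* Assumed sparsemem geometry (typical x86-64): 128 MiB sections */
#define SECTION_SIZE_BITS  27
#define PAGE_SHIFT         12
#define PFN_SECTION_SHIFT  (SECTION_SIZE_BITS - PAGE_SHIFT)

int main(void)
{
	unsigned long pfn = 0x123456;

	/* The same shift pfn_to_section_nr() performs above */
	printf("pfn %#lx -> section %lu\n", pfn, pfn >> PFN_SECTION_SHIFT);
	return 0;
}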
/linux-master/arch/arm/mm/ioremap.c

   188: remap_area_sections(unsigned long virt, unsigned long pfn,
   201:     pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
   202:     pfn += SZ_1M >> PAGE_SHIFT;
   203:     pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
   204:     pfn += SZ_1M >> PAGE_SHIFT;
   215: remap_area_supersections(unsigned long virt, unsigned long pfn,
   229:     super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
   231:     super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
   242:     pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
   249: static void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn,
   332:     unsigned long pfn = __phys_to_pfn(phys_addr);
   355: __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype)
   493:     unsigned long pfn = PHYS_PFN(offset);
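Hit 231 is the notable one: for ARM supersections with 36-bit addressing, physical-address bits [35:32] are stored in descriptor bits [23:20]. A runnable model of just that packing step; PAGE_SHIFT and the sample address are assumptions, and the protection bits are ignored:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t phys = 0x3ABC00000ULL;	/* sample 36-bit address */
	unsigned long pfn = phys >> PAGE_SHIFT;
	uint32_t desc = (uint32_t)phys & 0xFF000000;	/* PA[31:24] base */

	/* Same expression as hit 231: PA[35:32] -> descriptor[23:20] */
	desc |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
	printf("phys %#llx -> descriptor base %#x\n",
	       (unsigned long long)phys, desc);
	return 0;
}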
/linux-master/mm/mm_init.c

   456: /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
   565: void __meminit __init_single_page(struct page *page, unsigned long pfn,
   569:     set_page_links(page, zone, nid, pfn);
   579:     set_page_address(page, __va(pfn << PAGE_SHIFT));
   600: static int __meminit __early_pfn_to_nid(unsigned long pfn,
   606:     if (state->last_start <= pfn && pfn < state->last_end)
   609:     nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
   619: int __meminit early_pfn_to_nid(unsigned long pfn)
   625:     nid = __early_pfn_to_nid(pfn,
   660: early_page_initialised(unsigned long pfn, int nid)
   673: defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
   707: init_reserved_page(unsigned long pfn, int nid)
   723:     __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
   728: early_page_initialised(unsigned long pfn, int nid)
   733: defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
   738: init_reserved_page(unsigned long pfn, int nid)
   776: overlap_memmap_init(unsigned long zone, unsigned long *pfn)
   823:     unsigned long pfn;
   831:     __init_single_page(pfn_to_page(pfn), pfn, zone, node);
   855:     unsigned long pfn, end_pfn = start_pfn + size;
   973: __init_zone_device_page(struct page *page, unsigned long pfn, unsigned long zone_idx, int nid, struct dev_pagemap *pgmap)
  1044:     unsigned long pfn, end_pfn = head_pfn + nr_pages;
  1070:     unsigned long pfn, end_pfn = start_pfn + nr_pages;
  1916: deferred_free_range(unsigned long pfn, unsigned long nr_pages)
  1961: deferred_pfn_valid(unsigned long pfn)
  1972: deferred_free_pages(unsigned long pfn, unsigned long end_pfn)
  1997: deferred_init_pages(struct zone *zone, unsigned long pfn, unsigned long end_pfn)
  2492: memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order)
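Most of the callers above funnel into the per-page initializer __init_single_page(), which links a struct page to its zone/node (hit 569) and gives it a linear-map address (hit 579). A kernel-context sketch of the loop shape shared by the memmap-init and deferred-init paths, as if written inside mm_init.c; the function name and parameter values are illustrative:

/* Kernel-context sketch, not a standalone program. */
static void demo_init_pfn_range(unsigned long start_pfn,
				unsigned long end_pfn,
				unsigned long zone, int nid)
{
	unsigned long pfn;

	/* One struct page per pfn, as hits 723/831 show */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}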
/linux-master/arch/xtensa/mm/highmem.c

    38: enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
    40:     return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
/linux-master/arch/arm/include/asm/kfence.h

    14: unsigned long pfn = PFN_DOWN(__pa(addr));
    21: set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
/linux-master/arch/arm64/kernel/crash_dump.c

    15: ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
    23:     vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
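The two hits bracket the whole function: map the crashed kernel's pfn write-back cacheable, copy into the iterator, unmap. A sketch of how they plausibly fit together; this is a reconstruction for illustration, not the verbatim file:

ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
			 size_t csize, unsigned long offset)
{
	void *vaddr;

	if (!csize)
		return 0;

	/* Hit 23: temporarily map the old kernel's page */
	vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
	if (!vaddr)
		return -ENOMEM;

	csize = copy_to_iter(vaddr + offset, csize, iter);

	memunmap(vaddr);
	return csize;
}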
/linux-master/mm/damon/ops-common.h

    10: struct folio *damon_get_folio(unsigned long pfn);
/linux-master/arch/m68k/include/asm/page_no.h

    27: static inline void *pfn_to_virt(unsigned long pfn)
    29:     return __va(pfn << PAGE_SHIFT);
/linux-master/arch/m68k/include/asm/page_mm.h

   120: * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
   128: static inline void *pfn_to_virt(unsigned long pfn)
   130:     return __va(pfn << PAGE_SHIFT);
   145: #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
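Both m68k variants translate a pfn through the linear map: shift to a physical address, then __va(). A runnable userspace model of that arithmetic; PAGE_OFFSET here is an arbitrary stand-in, and the real __va() on m68k can be more involved than a plain offset:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT  12
#define PAGE_OFFSET 0xC0000000UL	/* assumed linear-map base */

/* Models __va(): physical address -> linear-map virtual address */
static uintptr_t model_va(uintptr_t phys)
{
	return phys + PAGE_OFFSET;
}

int main(void)
{
	unsigned long pfn = 0x1234;

	printf("pfn %#lx -> virt %#lx\n", pfn,
	       (unsigned long)model_va((uintptr_t)pfn << PAGE_SHIFT));
	return 0;
}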
/linux-master/arch/openrisc/mm/cache.c

    45: unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT;
    46: struct folio *folio = page_folio(pfn_to_page(pfn));
/linux-master/drivers/gpu/drm/i915/gt/shmem_utils.c

    98: unsigned long pfn;
   100: for (pfn = off >> PAGE_SHIFT; len; pfn++) {
   106:     page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
   133: unsigned long pfn;
   135: for (pfn = off >> PAGE_SHIFT; len; pfn++) {
   141:     page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
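Despite the name, pfn in both loops is a page index into the shmem file, not a physical frame number. A kernel-context sketch of the walk the two hits share; names are illustrative and the handling of a non-page-aligned offset is abbreviated:

#include <linux/shmem_fs.h>
#include <linux/pagemap.h>

/* Kernel-context sketch of the per-page shmem walk above. */
static int demo_shmem_walk(struct file *file, loff_t off, size_t len)
{
	unsigned long idx;

	for (idx = off >> PAGE_SHIFT; len; idx++) {
		size_t chunk = min_t(size_t, len, PAGE_SIZE);
		struct page *page;

		page = shmem_read_mapping_page_gfp(file->f_mapping, idx,
						   GFP_KERNEL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		/* kmap_local_page(page) here to touch the contents */
		put_page(page);
		len -= chunk;
	}
	return 0;
}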
/linux-master/arch/powerpc/include/asm/ultravisor.h

    34: static inline int uv_share_page(u64 pfn, u64 npages)
    36:     return ucall_norets(UV_SHARE_PAGE, pfn, npages);
    39: static inline int uv_unshare_page(u64 pfn, u64 npages)
    41:     return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
/linux-master/arch/arm/mach-s3c/mach-s3c64xx-dt.c

    24: .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
/linux-master/arch/x86/include/asm/iomap.h

    16: void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
/linux-master/arch/arm/mach-spear/spear13xx.c

    61: .pfn = __phys_to_pfn(PERIP_GRP2_BASE),
    66: .pfn = __phys_to_pfn(PERIP_GRP1_BASE),
    71: .pfn = __phys_to_pfn(A9SM_AND_MPMC_BASE),
    76: .pfn = __phys_to_pfn(L2CC_BASE),
/linux-master/arch/riscv/include/asm/kfence.h

     7: #include <linux/pfn.h>
/linux-master/arch/x86/mm/init_32.c

    25: #include <linux/pfn.h>
   259: unsigned long pfn;
   290: pfn = start_pfn;
   291: pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
   296: if (pfn >= end_pfn)
   299: pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
   304: for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
   306: unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
   323: pfn &= PMD_MASK >> PAGE_SHIFT;
   324: addr2 = (pfn
   405: unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
   437: unsigned long pfn, va;
/linux-master/arch/powerpc/kvm/e500_mmu_host.c

   164: kvm_pfn_t pfn;
   166: pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
   167: get_page(pfn_to_page(pfn));
   175: magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
   245: kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, kvm_pfn_t pfn, unsigned int wimg)
   249:     ref->pfn = pfn;
   256:     kvm_set_pfn_accessed(pfn);
   259:     kvm_set_pfn_dirty(pfn);
   265:     /* FIXME: don't log bogus pfn fo
   310: kvm_pfn_t pfn = ref->pfn;
   328: unsigned long pfn = 0; /* silence GCC warning */
   630: hfn_t pfn;
/linux-master/arch/x86/include/asm/xen/interface_32.h

   100: #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
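The macro is a 12-bit left rotate: the low 20 pfn bits land in cr3 bits [31:12] and the high 12 bits wrap into [11:0], so a PAE frame number wider than 20 bits still fits a 32-bit cr3 slot. A runnable round-trip check; the inverse macro is reproduced here from the same header, and the sample pfn is arbitrary:

#include <stdio.h>

/* Hit 100, plus its inverse from the same header */
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

int main(void)
{
	unsigned pfn = 0xABCDE;	/* arbitrary sample */
	unsigned cr3 = xen_pfn_to_cr3(pfn);

	/* The rotate is lossless over all 32 bits */
	printf("pfn %#x -> cr3 %#x -> pfn %#x\n",
	       pfn, cr3, xen_cr3_to_pfn(cr3));
	return 0;
}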
/linux-master/arch/x86/xen/mmu_hvm.c

    12: * The kdump kernel has to check whether a pfn of the crashed kernel
    14: * whether to access a pfn of the crashed kernel.
    15: * Returns "false" if the pfn is not backed by a RAM page, the caller may
    16: * handle the pfn special in this case.
    18: static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
    22:     .pfn = pfn,
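xen_vmcore_pfn_is_ram() is one implementer of the pfn_is_ram hook declared in crash_dump.h (hit 111 above). A kernel-context sketch of wiring up such a callback with register_vmcore_cb(); the cutoff value and names are invented for illustration:

#include <linux/crash_dump.h>
#include <linux/sizes.h>

/* Hypothetical policy: treat everything below 4 GiB as RAM. */
static bool demo_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
{
	return pfn < (SZ_4G >> PAGE_SHIFT);
}

static struct vmcore_cb demo_cb = {
	.pfn_is_ram = demo_pfn_is_ram,
};

static void demo_setup(void)
{
	register_vmcore_cb(&demo_cb);
}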
/linux-master/arch/powerpc/platforms/pseries/svm.c

    85: unsigned long pfn = PHYS_PFN(__pa(addr));
    86: struct page *page = pfn_to_page(pfn);
    92: uv_share_page(pfn, 1);
/linux-master/arch/loongarch/power/hibernate.c

    39: int pfn_is_nosave(unsigned long pfn)
    44:     return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
/linux-master/arch/arm/kernel/hibernate.c

    26: int pfn_is_nosave(unsigned long pfn)
    31:     return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
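Note the boundary difference between the last two entries: loongarch treats nosave_end_pfn as exclusive, arm as inclusive, so each arch must compute its end value to match its predicate. A runnable check showing that the same boundary pfn is classified differently by the two forms; the bounds are made up:

#include <stdio.h>

int main(void)
{
	/* Hypothetical nosave bounds; pfn sits exactly at the end value */
	unsigned long begin = 0x100, end = 0x1ff, pfn = 0x1ff;

	/* loongarch-style exclusive end vs arm-style inclusive end */
	printf("exclusive: %d\n", pfn >= begin && pfn < end);
	printf("inclusive: %d\n", pfn >= begin && pfn <= end);
	return 0;
}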