Searched refs:pfn (Results 76 - 100 of 620) sorted by relevance


/linux-master/kernel/power/
snapshot.c 745 * Walk the radix tree to find the page containing the bit that represents @pfn
748 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, argument
757 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
764 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
781 * pfn falls into the current node then we do not need to walk
786 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
790 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
805 bm->cur.node_pfn = (pfn
815 memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) argument
826 mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn) argument
839 memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) argument
863 memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn) argument
874 memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) argument
931 unsigned long bits, pfn, pages; local
1093 unsigned long pfn; local
1201 unsigned long pfn; local
1250 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; local
1321 saveable_highmem_page(struct zone *zone, unsigned long pfn) argument
1355 unsigned long pfn, max_zone_pfn; local
1385 saveable_page(struct zone *zone, unsigned long pfn) argument
1420 unsigned long pfn, max_zone_pfn; local
1477 page_is_saveable(struct zone *zone, unsigned long pfn) argument
1535 unsigned long pfn, copy_pfn; local
1761 unsigned long pfn = memory_bm_next_pfn(&copy_bm); local
2296 unsigned long pfn; local
2314 unsigned long pfn; local
2428 unsigned long pfn; local
2730 unsigned long pfn = memory_bm_next_pfn(bm); local
[all...]
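
The snapshot.c hits above (lines 786 and 790) split a pfn into a bitmap block number and a bit offset relative to the zone's start_pfn. A minimal userspace sketch of that arithmetic follows; PAGE_SHIFT, the BM_BLOCK_SHIFT definition, and the zone start are assumptions made purely for illustration, not values taken from snapshot.c.

    /* Sketch of the block/bit split behind memory_bm_find_bit(); constants assumed. */
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define BM_BLOCK_SHIFT  (PAGE_SHIFT + 3)   /* assumed: one bitmap page = PAGE_SIZE * 8 bits */
    #define BM_BLOCK_MASK   ((1UL << BM_BLOCK_SHIFT) - 1)

    int main(void)
    {
        unsigned long zone_start_pfn = 0x10000;   /* hypothetical zone start */
        unsigned long pfn = 0x12345;

        unsigned long block_nr = (pfn - zone_start_pfn) >> BM_BLOCK_SHIFT;
        unsigned long bit      = (pfn - zone_start_pfn) & BM_BLOCK_MASK;

        printf("pfn 0x%lx -> block %lu, bit %lu\n", pfn, block_nr, bit);
        return 0;
    }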
/linux-master/scripts/gdb/linux/
mm.py 133 def pfn_to_section_nr(self, pfn):
134 return pfn >> self.PFN_SECTION_SHIFT
139 def __pfn_to_section(self, pfn):
140 return self.__nr_to_section(self.pfn_to_section_nr(pfn))
142 def pfn_to_section(self, pfn):
143 return self.__pfn_to_section(pfn)
145 def subsection_map_index(self, pfn):
146 return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION
148 def pfn_section_valid(self, ms, pfn):
150 idx = self.subsection_map_index(pfn)
[all...]
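
The mm.py helpers above mirror the kernel's sparsemem section arithmetic: a section number is the pfn shifted right by PFN_SECTION_SHIFT, and the subsection index comes from the pfn's offset within its section. A small C sketch of the same conversions; the constants are assumed x86-64-style defaults, not values read from the script.

    /* Sketch of pfn -> section / subsection index arithmetic; constants assumed. */
    #include <stdio.h>

    #define PAGE_SHIFT            12
    #define SECTION_SIZE_BITS     27   /* assumed */
    #define SUBSECTION_SHIFT      21   /* assumed */
    #define PFN_SECTION_SHIFT     (SECTION_SIZE_BITS - PAGE_SHIFT)
    #define PAGES_PER_SECTION     (1UL << PFN_SECTION_SHIFT)
    #define PAGE_SECTION_MASK     (~(PAGES_PER_SECTION - 1))
    #define PAGES_PER_SUBSECTION  (1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))

    int main(void)
    {
        unsigned long pfn = 0x123456;

        unsigned long section_nr = pfn >> PFN_SECTION_SHIFT;                          /* pfn_to_section_nr() */
        unsigned long subsec_idx = (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION; /* subsection_map_index() */

        printf("pfn 0x%lx -> section %lu, subsection index %lu\n",
               pfn, section_nr, subsec_idx);
        return 0;
    }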
/linux-master/arch/powerpc/mm/
mem.c 38 pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size, argument
42 return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);
44 if (!page_is_ram(pfn))
299 unsigned long pfn, highmem_mapnr; local
302 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
303 phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
304 struct page *page = pfn_to_page(pfn);
392 int devmem_is_allowed(unsigned long pfn) argument
[all...]
/linux-master/arch/arm/mm/
ioremap.c 188 remap_area_sections(unsigned long virt, unsigned long pfn, argument
201 pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
202 pfn += SZ_1M >> PAGE_SHIFT;
203 pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
204 pfn += SZ_1M >> PAGE_SHIFT;
215 remap_area_supersections(unsigned long virt, unsigned long pfn, argument
229 super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
231 super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
242 pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
249 static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, argument
332 unsigned long pfn = __phys_to_pfn(phys_addr); local
355 __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype) argument
493 unsigned long pfn = PHYS_PFN(offset); local
[all...]
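
remap_area_supersections() (lines 229-231 above) packs a pfn into an ARM supersection descriptor: the low 32 bits of the physical address come from pfn << PAGE_SHIFT, and physical address bits [35:32] are folded into descriptor bits [23:20]. A hedged sketch of just that packing, with the protection bits omitted and PAGE_SHIFT assumed to be 12; the pfn used is 16 MiB aligned so the two fields do not overlap.

    /* Sketch of the supersection address packing; prot bits omitted, constants assumed. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    static uint32_t supersection_base(uint64_t pfn)
    {
        uint32_t desc = (uint32_t)(pfn << PAGE_SHIFT);                  /* phys[31:0] */
        desc |= (uint32_t)(((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20);   /* phys[35:32] -> bits [23:20] */
        return desc;
    }

    int main(void)
    {
        uint64_t pfn = 0x110000;   /* 16 MiB aligned pfn above the 4 GiB line */
        printf("pfn 0x%llx -> descriptor base 0x%08x\n",
               (unsigned long long)pfn, supersection_base(pfn));
        return 0;
    }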
/linux-master/mm/
H A Dmm_init.c455 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
564 void __meminit __init_single_page(struct page *page, unsigned long pfn, argument
568 set_page_links(page, zone, nid, pfn);
578 set_page_address(page, __va(pfn << PAGE_SHIFT));
599 static int __meminit __early_pfn_to_nid(unsigned long pfn, argument
605 if (state->last_start <= pfn && pfn < state->last_end)
608 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
618 int __meminit early_pfn_to_nid(unsigned long pfn) argument
624 nid = __early_pfn_to_nid(pfn,
659 early_page_initialised(unsigned long pfn, int nid) argument
672 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) argument
706 init_reserved_page(unsigned long pfn, int nid) argument
722 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); local
727 early_page_initialised(unsigned long pfn, int nid) argument
732 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) argument
737 init_reserved_page(unsigned long pfn, int nid) argument
775 overlap_memmap_init(unsigned long zone, unsigned long *pfn) argument
822 unsigned long pfn; local
830 __init_single_page(pfn_to_page(pfn), pfn, zone, node); local
854 unsigned long pfn, end_pfn = start_pfn + size; local
972 __init_zone_device_page(struct page *page, unsigned long pfn, unsigned long zone_idx, int nid, struct dev_pagemap *pgmap) argument
1043 unsigned long pfn, end_pfn = head_pfn + nr_pages; local
1069 unsigned long pfn, end_pfn = start_pfn + nr_pages; local
1958 deferred_free_range(unsigned long pfn, unsigned long nr_pages) argument
2003 deferred_pfn_valid(unsigned long pfn) argument
2014 deferred_free_pages(unsigned long pfn, unsigned long end_pfn) argument
2039 deferred_init_pages(struct zone *zone, unsigned long pfn, unsigned long end_pfn) argument
2566 memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order) argument
[all...]
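
__early_pfn_to_nid() (line 605 above) caches the last pfn range it resolved, so repeated queries in the same range avoid a memblock search. A small sketch of that last-range cache; lookup_pfn_nid() below is a hypothetical stand-in for memblock_search_pfn_nid() and simply pretends everything is on node 0.

    /* Sketch of a last-range pfn -> nid cache; the lookup is a hypothetical stub. */
    #include <stdio.h>

    struct nid_cache {
        unsigned long last_start, last_end;
        int last_nid;
    };

    /* Hypothetical stand-in for memblock_search_pfn_nid(). */
    static int lookup_pfn_nid(unsigned long pfn, unsigned long *start, unsigned long *end)
    {
        *start = pfn & ~0xffffUL;   /* pretend ranges are 64Ki pfns wide */
        *end   = *start + 0x10000;
        return 0;                   /* ... and all on node 0 */
    }

    static int cached_pfn_to_nid(struct nid_cache *c, unsigned long pfn)
    {
        if (c->last_start <= pfn && pfn < c->last_end)
            return c->last_nid;     /* cache hit, as at line 605 */

        c->last_nid = lookup_pfn_nid(pfn, &c->last_start, &c->last_end);
        return c->last_nid;
    }

    int main(void)
    {
        struct nid_cache c = { 0, 0, -1 };
        printf("nid(0x12345) = %d\n", cached_pfn_to_nid(&c, 0x12345));
        printf("nid(0x12346) = %d\n", cached_pfn_to_nid(&c, 0x12346));   /* served from cache */
        return 0;
    }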
/linux-master/arch/xtensa/mm/
highmem.c 38 enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn) argument
40 return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
/linux-master/arch/arm/include/asm/
kfence.h 14 unsigned long pfn = PFN_DOWN(__pa(addr)); local
21 set_pte_ext(pte + i, pfn_pte(pfn + i, PAGE_KERNEL), 0);
/linux-master/arch/arm64/kernel/
crash_dump.c 15 ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, argument
23 vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
/linux-master/mm/damon/
ops-common.h 10 struct folio *damon_get_folio(unsigned long pfn);
/linux-master/arch/m68k/include/asm/
page_no.h 27 static inline void *pfn_to_virt(unsigned long pfn) argument
29 return __va(pfn << PAGE_SHIFT);
page_mm.h 120 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
128 static inline void *pfn_to_virt(unsigned long pfn) argument
130 return __va(pfn << PAGE_SHIFT);
145 #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
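
Both page_no.h and page_mm.h above implement pfn_to_virt() as __va(pfn << PAGE_SHIFT), i.e. a pfn is turned into a physical address and then offset into the kernel's linear map. A userspace sketch of that round trip; PAGE_SHIFT and the linear-map base are assumptions for illustration only.

    /* Sketch of pfn <-> virtual conversion over an assumed linear map. */
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_OFFSET  0xc0000000UL   /* hypothetical linear-map base */

    static unsigned long pfn_to_virt(unsigned long pfn)
    {
        return (pfn << PAGE_SHIFT) + PAGE_OFFSET;    /* models __va(pfn << PAGE_SHIFT) */
    }

    static unsigned long virt_to_pfn(unsigned long vaddr)
    {
        return (vaddr - PAGE_OFFSET) >> PAGE_SHIFT;  /* models __pa(vaddr) >> PAGE_SHIFT */
    }

    int main(void)
    {
        unsigned long pfn = 0x1234;
        unsigned long va  = pfn_to_virt(pfn);

        printf("pfn 0x%lx -> va 0x%lx -> pfn 0x%lx\n", pfn, va, virt_to_pfn(va));
        return 0;
    }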
/linux-master/arch/openrisc/mm/
cache.c 45 unsigned long pfn = pte_val(*pte) >> PAGE_SHIFT; local
46 struct folio *folio = page_folio(pfn_to_page(pfn));
/linux-master/drivers/gpu/drm/i915/gt/
shmem_utils.c 97 unsigned long pfn; local
99 for (pfn = off >> PAGE_SHIFT; len; pfn++) {
105 page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
132 unsigned long pfn; local
134 for (pfn = off >> PAGE_SHIFT; len; pfn++) {
140 page = shmem_read_mapping_page_gfp(file->f_mapping, pfn,
/linux-master/arch/powerpc/include/asm/
ultravisor.h 34 static inline int uv_share_page(u64 pfn, u64 npages) argument
36 return ucall_norets(UV_SHARE_PAGE, pfn, npages);
39 static inline int uv_unshare_page(u64 pfn, u64 npages) argument
41 return ucall_norets(UV_UNSHARE_PAGE, pfn, npages);
/linux-master/arch/arm/mach-s3c/
mach-s3c64xx-dt.c 24 .pfn = __phys_to_pfn(S3C64XX_PA_SYSCON),
/linux-master/arch/x86/include/asm/
iomap.h 16 void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
/linux-master/arch/arm/mach-spear/
spear13xx.c 61 .pfn = __phys_to_pfn(PERIP_GRP2_BASE),
66 .pfn = __phys_to_pfn(PERIP_GRP1_BASE),
71 .pfn = __phys_to_pfn(A9SM_AND_MPMC_BASE),
76 .pfn = __phys_to_pfn(L2CC_BASE),
/linux-master/include/linux/
crash_dump.h 24 unsigned long from, unsigned long pfn,
27 ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
29 ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
111 bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
/linux-master/arch/riscv/include/asm/
kfence.h 7 #include <linux/pfn.h>
/linux-master/arch/powerpc/kvm/
e500_mmu_host.c 164 kvm_pfn_t pfn; local
166 pfn = (kvm_pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
167 get_page(pfn_to_page(pfn));
175 magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
247 kvm_pfn_t pfn, unsigned int wimg)
249 ref->pfn = pfn;
256 kvm_set_pfn_accessed(pfn);
259 kvm_set_pfn_dirty(pfn);
265 /* FIXME: don't log bogus pfn fo
245 kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, kvm_pfn_t pfn, unsigned int wimg) argument
310 kvm_pfn_t pfn = ref->pfn; local
328 unsigned long pfn = 0; /* silence GCC warning */ local
630 hfn_t pfn; local
[all...]
/linux-master/arch/x86/mm/
init_32.c 25 #include <linux/pfn.h>
259 unsigned long pfn; local
290 pfn = start_pfn;
291 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
296 if (pfn >= end_pfn)
299 pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
304 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
306 unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
323 pfn &= PMD_MASK >> PAGE_SHIFT;
324 addr2 = (pfn
405 unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), local
437 unsigned long pfn, va; local
[all...]
/linux-master/arch/x86/include/asm/xen/
interface_32.h 100 #define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
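
xen_pfn_to_cr3() above packs a 32-bit pfn into the PAE cr3 layout: pfn bits [19:0] land in cr3 bits [31:12] and pfn bits [31:20] land in cr3 bits [11:0]. A sketch of that packing plus an illustrative inverse; the inverse below is derived from the macro shown and is not the kernel's own helper.

    /* Sketch of the xen_pfn_to_cr3() bit packing plus an illustrative inverse. */
    #include <stdio.h>

    static unsigned int pfn_to_cr3(unsigned int pfn)
    {
        return (pfn << 12) | (pfn >> 20);             /* as in the macro above */
    }

    static unsigned int cr3_to_pfn(unsigned int cr3)  /* illustrative inverse */
    {
        return (cr3 >> 12) | ((cr3 & 0xfff) << 20);
    }

    int main(void)
    {
        unsigned int pfn = 0x00123456;
        unsigned int cr3 = pfn_to_cr3(pfn);

        printf("pfn 0x%08x -> cr3 0x%08x -> pfn 0x%08x\n", pfn, cr3, cr3_to_pfn(cr3));
        return 0;
    }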
/linux-master/arch/x86/xen/
mmu_hvm.c 12 * The kdump kernel has to check whether a pfn of the crashed kernel
14 * whether to access a pfn of the crashed kernel.
15 * Returns "false" if the pfn is not backed by a RAM page, the caller may
16 * handle the pfn special in this case.
18 static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn) argument
22 .pfn = pfn,
/linux-master/arch/powerpc/platforms/pseries/
svm.c 85 unsigned long pfn = PHYS_PFN(__pa(addr)); local
86 struct page *page = pfn_to_page(pfn);
92 uv_share_page(pfn, 1);
/linux-master/arch/loongarch/power/
hibernate.c 39 int pfn_is_nosave(unsigned long pfn) argument
44 return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);

Completed in 326 milliseconds
