Searched refs:pfn (Results 1 - 25 of 620) sorted by last modified time


/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
718 /* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
2427 unsigned long pfn; local
2439 pfn = addr >> PAGE_SHIFT;
2440 if (!pfn_valid(pfn))
2443 p = pfn_to_page(pfn);
2482 unsigned long pfn; local
2490 pfn = addr >> PAGE_SHIFT;
2491 if (!pfn_valid(pfn))
2494 p = pfn_to_page(pfn);
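
The amdgpu_ttm.c hits above (2439-2443 and 2490-2494) show the standard way to turn a CPU physical address into a struct page: shift by PAGE_SHIFT, reject pfns with no memmap entry, then convert. A minimal sketch of that pattern, assuming ordinary system RAM (the helper name is hypothetical, not from amdgpu):

    #include <linux/mm.h>

    /* Resolve a physical address to its struct page, or NULL if it has none. */
    static struct page *phys_addr_to_page(phys_addr_t addr)
    {
            unsigned long pfn = addr >> PAGE_SHIFT;

            if (!pfn_valid(pfn))            /* no memmap entry backs this pfn */
                    return NULL;
            return pfn_to_page(pfn);        /* only safe after pfn_valid() */
    }
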
/linux-master/drivers/block/
ublk_drv.c
1304 unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT; local
1332 pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
1333 return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
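
ublk_drv.c maps a kernel command buffer to user space by converting its virtual address to a pfn and handing the whole range to remap_pfn_range(). A hedged sketch of that mmap-handler shape (my_buf and my_mmap are placeholder names; my_buf is assumed to be a page-aligned, physically contiguous allocation):

    #include <linux/mm.h>
    #include <linux/io.h>

    static void *my_buf;    /* assumed: page-aligned, physically contiguous */

    static int my_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            unsigned long sz = vma->vm_end - vma->vm_start;
            unsigned long pfn = virt_to_phys(my_buf) >> PAGE_SHIFT;

            /* Real code must also bound 'sz' against the buffer size. */
            return remap_pfn_range(vma, vma->vm_start, pfn, sz,
                                   vma->vm_page_prot);
    }
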
/linux-master/arch/xtensa/include/asm/
cacheflush.h
139 unsigned long address, unsigned long pfn);
/linux-master/drivers/dma/idxd/
cdev.c
399 unsigned long pfn; local
408 pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
413 return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
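
The idxd cdev.c hit does the same for device MMIO: the portal's physical address becomes a pfn and is mapped with io_remap_pfn_range(), typically with an uncached protection. A sketch under those assumptions (the handler name and portal_phys parameter are placeholders):

    #include <linux/mm.h>

    static int map_portal(struct vm_area_struct *vma, phys_addr_t portal_phys)
    {
            unsigned long pfn = portal_phys >> PAGE_SHIFT;

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
                                      vma->vm_page_prot);
    }
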
/linux-master/arch/x86/kernel/
sev-shared.c
136 unsigned long pfn = paddr >> PAGE_SHIFT; local
139 sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
146 (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
/linux-master/arch/x86/include/asm/
pgtable_types.h
529 /* Indicate that x86 has its own track and untrack pfn vma functions */
534 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
572 extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
/linux-master/arch/riscv/include/asm/
pgtable.h
253 static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot) argument
259 return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
334 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) argument
340 return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
page.h
12 #include <linux/pfn.h>
176 #define pfn_to_phys(pfn) (PFN_PHYS(pfn))
179 #define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
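
The riscv page.h entries show the conversion helpers built on <linux/pfn.h>: a pfn is just the physical address shifted right by PAGE_SHIFT, so pfn_to_phys() is PFN_PHYS() and pfn_to_virt() goes back through the linear map with __va(). A short sketch of the round trips, assuming kaddr lies in the kernel's linear mapping:

    #include <linux/pfn.h>
    #include <linux/mm.h>

    static struct page *pfn_round_trips(void *kaddr)
    {
            unsigned long pfn  = PHYS_PFN(__pa(kaddr)); /* virt -> phys -> pfn */
            phys_addr_t   phys = PFN_PHYS(pfn);         /* pfn  -> phys        */
            void         *virt = __va(phys);            /* back to kaddr       */

            WARN_ON(virt != kaddr);
            return pfn_to_page(pfn);                    /* pfn -> memmap entry */
    }
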
/linux-master/mm/
hugetlb.c
2477 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2486 unsigned long pfn; local
2499 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2500 page = pfn_to_page(pfn);
3342 unsigned long pfn, end_pfn = head_pfn + end_page_number; local
3345 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn
7629 get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) argument
[all...]
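
The hugetlb.c loop above (2499-2500) walks a pfn range in huge-page-sized steps, 1 << order pfns at a time, and resolves each step to its head page. A stripped-down sketch of that scan shape (the real dissolve_free_huge_pages() adds locking and folio state checks omitted here):

    #include <linux/mm.h>

    /* Visit one head page per 2^order-page step of [start_pfn, end_pfn). */
    static void scan_huge_range(unsigned long start_pfn, unsigned long end_pfn,
                                unsigned int order)
    {
            unsigned long pfn;

            for (pfn = start_pfn; pfn < end_pfn; pfn += 1UL << order) {
                    struct page *page = pfn_to_page(pfn);
                    /* ... inspect or dissolve the folio behind 'page' ... */
            }
    }
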
page_owner.c
424 unsigned long pfn, block_end_pfn; local
431 pfn = zone->zone_start_pfn;
438 for (; pfn < end_pfn; ) {
439 page = pfn_to_online_page(pfn);
441 pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
445 block_end_pfn = pageblock_end_pfn(pfn);
450 for (; pfn < block_end_pfn; pfn++) {
452 page = pfn_to_page(pfn);
541 print_page_owner(char __user *buf, size_t count, unsigned long pfn, struct page *page, struct page_owner *page_owner, depot_stack_handle_t handle) argument
655 unsigned long pfn; local
767 unsigned long pfn = zone->zone_start_pfn; local
[all...]
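
page_owner.c walks every pfn of a zone, skipping holes and offline sections a MAX_ORDER block at a time and then scanning the surviving pageblocks pfn by pfn. A condensed sketch of that walk (error handling and the per-page work are elided):

    #include <linux/mmzone.h>
    #include <linux/memory_hotplug.h>

    static void walk_zone_pfns(struct zone *zone)
    {
            unsigned long pfn = zone->zone_start_pfn;
            unsigned long end_pfn = zone_end_pfn(zone);

            while (pfn < end_pfn) {
                    unsigned long block_end_pfn;
                    struct page *page = pfn_to_online_page(pfn);

                    if (!page) {    /* hole or offline section: skip ahead */
                            pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                            continue;
                    }

                    block_end_pfn = min(pageblock_end_pfn(pfn), end_pfn);
                    for (; pfn < block_end_pfn; pfn++) {
                            page = pfn_to_page(pfn);
                            /* ... read the page_owner record for 'page' ... */
                    }
            }
    }
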
memory-failure.c
77 void num_poisoned_pages_inc(unsigned long pfn) argument
80 memblk_nr_poison_inc(pfn);
83 void num_poisoned_pages_sub(unsigned long pfn, long i) argument
86 if (pfn != -1UL)
87 memblk_nr_poison_sub(pfn, i);
340 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags) argument
347 pfn, t->comm, t->pid);
523 unsigned long pfn, int flags)
536 pfn, tk->tsk->comm, tk->tsk->pid);
547 else if (kill_proc(tk, pfn, flag
522 kill_procs(struct list_head *to_kill, int forcekill, bool fail, unsigned long pfn, int flags) argument
740 unsigned long pfn; member in struct:hwpoison_walk
753 unsigned long pfn = 0; local
776 unsigned long pfn; local
864 kill_accessing_process(struct task_struct *p, unsigned long pfn, int flags) argument
948 truncate_error_folio(struct folio *folio, unsigned long pfn, struct address_space *mapping) argument
1300 update_per_node_mf_stats(unsigned long pfn, enum mf_result result) argument
1337 action_result(unsigned long pfn, enum mf_action_page_type type, enum mf_result result) argument
1352 page_action(struct page_state *ps, struct page *p, unsigned long pfn) argument
1570 hwpoison_user_mappings(struct page *p, unsigned long pfn, int flags, struct page *hpage) argument
1674 identify_page_state(unsigned long pfn, struct page *p, unsigned long page_flags) argument
1711 unmap_and_kill(struct list_head *to_kill, unsigned long pfn, struct address_space *mapping, pgoff_t index, int flags) argument
1745 mf_generic_kill_procs(unsigned long long pfn, int flags, struct dev_pagemap *pgmap) argument
1999 __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) argument
2052 try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) argument
2120 try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) argument
2132 put_ref_page(unsigned long pfn, int flags) argument
2144 memory_failure_dev_pagemap(unsigned long pfn, int flags, struct dev_pagemap *pgmap) argument
2197 memory_failure(unsigned long pfn, int flags) argument
2406 unsigned long pfn; member in struct:memory_failure_entry
2435 memory_failure_queue(unsigned long pfn, int flags) argument
2528 unpoison_memory(unsigned long pfn) argument
2673 unsigned long pfn = page_to_pfn(page); local
2764 soft_offline_page(unsigned long pfn, int flags) argument
[all...]
madvise.c
1107 unsigned long pfn; local
1114 pfn = page_to_pfn(page);
1124 pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
1125 pfn, start);
1126 ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
1128 pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
1129 pfn, start);
1130 ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
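
The madvise.c hits are the MADV_HWPOISON / MADV_SOFT_OFFLINE injection path: the target page's pfn is looked up and passed to memory_failure() or soft_offline_page(). From user space the same path is reached through madvise(); a hedged test sketch (needs CAP_SYS_ADMIN and a kernel built with CONFIG_MEMORY_FAILURE):

    #include <sys/mman.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long pagesize = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            p[0] = 1;                                  /* populate the page   */
            if (madvise(p, pagesize, MADV_HWPOISON))   /* inject a fake error */
                    perror("madvise(MADV_HWPOISON)");
            return 0;
    }
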
internal.h
443 * @pfn: The pfn of the page, it saves a call to page_to_pfn() when the
446 * @buddy_pfn: The output pointer to the buddy pfn, it also saves a call to
455 unsigned long pfn, unsigned int order, unsigned long *buddy_pfn)
457 unsigned long __buddy_pfn = __find_buddy_pfn(pfn, order);
460 buddy = page + (__buddy_pfn - pfn);
491 extern void memblock_free_pages(struct page *page, unsigned long pfn,
587 * isolate_migratepages_block will update the value to the next pfn
591 unsigned long fast_start_pfn; /* a pfn to start linear scan from */
1324 void __meminit __init_single_page(struct page *page, unsigned long pfn,
454 find_buddy_page_pfn(struct page *page, unsigned long pfn, unsigned int order, unsigned long *buddy_pfn) argument
[all...]
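
The internal.h hit is the buddy-allocator lookup: the buddy of a 2^order block differs from it only in pfn bit 'order', so __find_buddy_pfn() is a single XOR (e.g. pfn 0x1200 with order 3 gives buddy pfn 0x1208) and, as the snippet shows, the buddy's struct page is then reached by pointer arithmetic rather than a second pfn_to_page(). A small sketch of that relationship (the helper name is mine):

    /* Locate the buddy of the 2^order block that starts at 'page' / 'pfn'. */
    static struct page *buddy_of(struct page *page, unsigned long pfn,
                                 unsigned int order)
    {
            unsigned long buddy_pfn = pfn ^ (1UL << order); /* flip bit 'order' */

            /* Same trick as find_buddy_page_pfn(): offset from the known page. */
            return page + (buddy_pfn - pfn);
    }
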
huge_memory.c
1058 pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write,
1068 if (pmd_pfn(*pmd) != pfn_t_to_pfn(pfn)) {
1081 entry = pmd_mkhuge(pfn_t_pmd(pfn, prot));
1082 if (pfn_t_devmap(pfn))
1105 * vmf_insert_pfn_pmd - insert a pmd size pfn
1107 * @pfn: pfn to insert
1110 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
1114 vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write) argument
1127 !pfn_t_devmap(pfn));
1057 insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write, pgtable_t pgtable) argument
1156 insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pud, pfn_t pfn, bool write) argument
1203 vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write) argument
1247 unsigned long pfn = pmd_pfn(*pmd); local
1404 unsigned long pfn = pud_pfn(*pud); local
3374 unsigned long pfn, max_zone_pfn; local
[all...]
/linux-master/include/linux/
mm.h
17 #include <linux/pfn.h>
1161 extern int page_is_ram(unsigned long pfn);
1924 static inline struct folio *pfn_folio(unsigned long pfn) argument
1926 return page_folio(pfn_to_page(pfn));
2057 unsigned long node, unsigned long pfn)
2062 set_page_section(page, pfn_to_section_nr(pfn));
2399 unsigned long *pfn);
3206 static inline int early_pfn_to_nid(unsigned long pfn) argument
3212 extern int __meminit early_pfn_to_nid(unsigned long pfn);
3565 unsigned long pfn, unsigne
2056 set_page_links(struct page *page, enum zone_type zone, unsigned long node, unsigned long pfn) argument
3599 io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
3989 memory_failure_queue(unsigned long pfn, int flags) argument
3993 __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) argument
3999 num_poisoned_pages_inc(unsigned long pfn) argument
4003 num_poisoned_pages_sub(unsigned long pfn, long i) argument
4018 memblk_nr_poison_inc(unsigned long pfn) argument
4022 memblk_nr_poison_sub(unsigned long pfn, long i) argument
4028 arch_memory_failure(unsigned long pfn, int flags) argument
4200 pfn_is_unaccepted_memory(unsigned long pfn) argument
[all...]
/linux-master/fs/proc/
page.c
52 unsigned long pfn; local
56 pfn = src / KPMSIZE;
68 ppage = pfn_to_online_page(pfn);
80 pfn++;
233 unsigned long pfn; local
236 pfn = src / KPMSIZE;
248 ppage = pfn_to_online_page(pfn);
255 pfn++;
282 unsigned long pfn; local
286 pfn
[all...]
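
fs/proc/page.c backs /proc/kpagecount, /proc/kpageflags and /proc/kpagecgroup, which export one u64 per pfn (KPMSIZE bytes), so user space reads the entry for a pfn at byte offset pfn * 8. A hedged user-space sketch (requires root):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Read the /proc/kpagecount entry (mapcount) for one pfn. */
    static int read_kpagecount(unsigned long pfn, uint64_t *count)
    {
            int fd = open("/proc/kpagecount", O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = pread(fd, count, sizeof(*count), pfn * sizeof(*count));
            close(fd);
            return n == sizeof(*count) ? 0 : -1;
    }
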
/linux-master/drivers/md/
dm.c
1234 pfn_t *pfn)
1252 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1232 dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, enum dax_access_mode mode, void **kaddr, pfn_t *pfn) argument
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_migrate.c
213 svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn) argument
217 page = pfn_to_page(pfn);
333 pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
640 pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
/linux-master/virt/kvm/
pfncache.c
35 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
48 if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
99 static void *gpc_map(kvm_pfn_t pfn) argument
101 if (pfn_valid(pfn))
102 return kmap(pfn_to_page(pfn));
105 return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
111 static void gpc_unmap(kvm_pfn_t pfn, void *khva) argument
113 /* Unmap the old pfn/page if it was mapped before. */
114 if (is_error_noslot_pfn(pfn) || !khva)
117 if (pfn_valid(pfn)) {
[all...]
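
The pfncache.c snippet shows how KVM maps an arbitrary host pfn into kernel address space: pfns backed by the memmap go through kmap(), anything else (reserved or MMIO ranges) falls back to memremap() on the pfn's physical address. The same logic restated with comments (map_any_pfn is my name for it; pfn_to_hpa() is KVM's pfn-to-physical helper):

    static void *map_any_pfn(kvm_pfn_t pfn)
    {
            /* Backed by a struct page: an ordinary kernel mapping suffices. */
            if (pfn_valid(pfn))
                    return kmap(pfn_to_page(pfn));

            /* No struct page: map the physical range directly, write-back. */
            return memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
    }
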
kvm_main.c
174 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
179 struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn) argument
183 if (!pfn_valid(pfn))
186 page = pfn_to_page(pfn);
191 if (is_zero_pfn(pfn))
826 * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
831 * Because this runs without holding mmu_lock, the pfn caches must use
2793 * The fast path to get the writable pfn which will be stored in @pfn,
2798 bool *writable, kvm_pfn_t *pfn)
2797 hva_to_pfn_fast(unsigned long addr, bool write_fault, bool *writable, kvm_pfn_t *pfn) argument
2825 hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, bool interruptible, bool *writable, kvm_pfn_t *pfn) argument
2884 kvm_try_get_pfn(kvm_pfn_t pfn) argument
2898 kvm_pfn_t pfn; local
2981 kvm_pfn_t pfn; local
3126 kvm_pfn_t pfn; local
3141 kvm_release_pfn(kvm_pfn_t pfn, bool dirty) argument
3151 kvm_pfn_t pfn; local
3238 kvm_release_pfn_clean(kvm_pfn_t pfn) argument
3262 kvm_release_pfn_dirty(kvm_pfn_t pfn) argument
3282 kvm_set_pfn_dirty(kvm_pfn_t pfn) argument
3292 kvm_set_pfn_accessed(kvm_pfn_t pfn) argument
[all...]
/linux-master/arch/x86/kvm/
x86.c
8816 kvm_pfn_t pfn; local
8846 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
8849 * If the instruction failed on the error pfn, it can not be fixed,
8852 if (is_error_noslot_pfn(pfn))
8855 kvm_release_pfn_clean(pfn);
/linux-master/arch/x86/kvm/vmx/
vmx.c
6769 kvm_pfn_t pfn; local
6793 * retrieves the pfn from the primary MMU. Note, the memslot is
6805 pfn = gfn_to_pfn_memslot(slot, gfn);
6806 if (is_error_noslot_pfn(pfn))
6816 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
6828 kvm_release_pfn_clean(pfn);
/linux-master/arch/x86/kvm/svm/
sev.c
3184 unsigned long pfn; local
3205 pfn = page_to_pfn(p);
3206 if (IS_ALIGNED(pfn, PTRS_PER_PMD))
/linux-master/arch/x86/kvm/mmu/
tdp_mmu.c
1009 fault->pfn, iter->old_spte, fault->prefetch, true,
mmu.c
512 * Update the state bits, it means the mapped pfn is not changed.
563 kvm_pfn_t pfn; local
579 pfn = spte_to_pfn(old_spte);
584 * before they are reclaimed. Sanity check that, if the pfn is backed
587 page = kvm_pfn_to_refcounted_page(pfn);
591 kvm_set_pfn_accessed(pfn);
594 kvm_set_pfn_dirty(pfn);
2908 kvm_pfn_t pfn, struct kvm_page_fault *fault)
2918 /* Prefetching always gets a writable pfn. */
2923 if (unlikely(is_noslot_pfn(pfn))) {
2906 mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) argument
[all...]

Completed in 411 milliseconds
