Lines matching refs: pfn (mm/memory-failure.c)

77 void num_poisoned_pages_inc(unsigned long pfn)
80 memblk_nr_poison_inc(pfn);
83 void num_poisoned_pages_sub(unsigned long pfn, long i)
86 if (pfn != -1UL)
87 memblk_nr_poison_sub(pfn, i);
340 static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
347 pfn, t->comm, t->pid);
523 unsigned long pfn, int flags)
536 pfn, tk->tsk->comm, tk->tsk->pid);
547 else if (kill_proc(tk, pfn, flags) < 0)
549 pfn, tk->tsk->comm, tk->tsk->pid);
740 unsigned long pfn;
753 unsigned long pfn = 0;
756 pfn = pte_pfn(pte);
761 pfn = swp_offset_pfn(swp);
764 if (!pfn || pfn != poisoned_pfn)
776 unsigned long pfn;
781 pfn = pmd_pfn(pmd);
782 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) {
783 hwpoison_vaddr = addr + ((hwp->pfn - pfn) << PAGE_SHIFT);
819 hwp->pfn, &hwp->tk);
839 hwp->pfn, &hwp->tk);
864 static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
869 .pfn = pfn,
880 kill_proc(&priv.tk, pfn, flags);
948 static int truncate_error_folio(struct folio *folio, unsigned long pfn,
957 pr_info("%#lx: Failed to punch page: %d\n", pfn, err);
959 pr_info("%#lx: failed to release buffers\n", pfn);
970 pr_info("%#lx: Failed to invalidate\n", pfn);
1300 static void update_per_node_mf_stats(unsigned long pfn,
1306 nid = pfn_to_nid(pfn);
1308 WARN_ONCE(1, "Memory failure: pfn=%#lx, invalid nid=%d", pfn, nid);
1337 static int action_result(unsigned long pfn, enum mf_action_page_type type,
1340 trace_memory_failure_event(pfn, type, result);
1342 num_poisoned_pages_inc(pfn);
1344 update_per_node_mf_stats(pfn, result);
1347 pfn, action_page_types[type], action_name[result]);
1353 unsigned long pfn)
1365 return action_result(pfn, ps->type, result);
1570 static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
1598 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
1616 pfn);
1640 pr_info("%#lx: could not lock mapping for mapped huge page\n", pfn);
1648 pfn, page_mapcount(p));
1669 kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
1674 static int identify_page_state(unsigned long pfn, struct page *p,
1694 return page_action(ps, p, pfn);
1711 static void unmap_and_kill(struct list_head *to_kill, unsigned long pfn,
1733 kill_procs(to_kill, flags & MF_MUST_KILL, false, pfn, flags);
1745 static int mf_generic_kill_procs(unsigned long long pfn, int flags,
1748 struct folio *folio = pfn_folio(pfn);
1757 * also prevents changes to the mapping of this pfn until
1797 unmap_and_kill(&to_kill, pfn, folio->mapping, folio->index, flags);
1999 int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
2002 struct page *page = pfn_to_page(pfn);
2052 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2055 struct page *p = pfn_to_page(pfn);
2062 res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
2067 pr_err("%#lx: already hardware poisoned\n", pfn);
2078 return action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2106 return action_result(pfn, MF_MSG_FREE_HUGE, res);
2111 if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
2113 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2116 return identify_page_state(pfn, p, page_flags);
2120 static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
2132 static void put_ref_page(unsigned long pfn, int flags)
2139 page = pfn_to_page(pfn);
2144 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
2150 if (!pgmap_pfn_valid(pgmap, pfn))
2158 rc = pgmap->ops->memory_failure(pgmap, pfn, 1, flags);
2167 rc = mf_generic_kill_procs(pfn, flags, pgmap);
2172 action_result(pfn, MF_MSG_DAX, rc ? MF_FAILED : MF_RECOVERED);
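memory_failure_dev_pagemap() (lines 2144-2172 above) hands poisoned ZONE_DEVICE pfns to the owning driver through pgmap->ops->memory_failure() before falling back to mf_generic_kill_procs(). Below is a minimal sketch of a driver-side callback, assuming the dev_pagemap_ops layout implied by the call at line 2158; the example_* names are hypothetical.

        #include <linux/errno.h>
        #include <linux/memremap.h>

        /*
         * Hypothetical driver callback: memory_failure() forwards poisoned
         * device pfns here (the pgmap->ops->memory_failure() call at line
         * 2158) so the driver can notify its own consumers. Returning
         * -EOPNOTSUPP makes the core fall back to mf_generic_kill_procs().
         */
        static int example_pgmap_memory_failure(struct dev_pagemap *pgmap,
                                                unsigned long pfn,
                                                unsigned long nr_pages,
                                                int mf_flags)
        {
                /* Driver-specific recovery would go here; this sketch declines. */
                return -EOPNOTSUPP;
        }

        static const struct dev_pagemap_ops example_pgmap_ops = {
                .memory_failure = example_pgmap_memory_failure,
        };
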
2178 * @pfn: Page Number of the corrupted page
2197 int memory_failure(unsigned long pfn, int flags)
2208 panic("Memory failure on page %lx", pfn);
2215 p = pfn_to_online_page(pfn);
2217 res = arch_memory_failure(pfn, flags);
2221 if (pfn_valid(pfn)) {
2222 pgmap = get_dev_pagemap(pfn, NULL);
2223 put_ref_page(pfn, flags);
2225 res = memory_failure_dev_pagemap(pfn, flags,
2230 pr_err("%#lx: memory outside kernel control\n", pfn);
2236 res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
2241 pr_err("%#lx: already hardware poisoned\n", pfn);
2244 res = kill_accessing_process(current, pfn, flags);
2277 res = action_result(pfn, MF_MSG_BUDDY, res);
2279 res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
2283 res = action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
2305 res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
2338 res = action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
2377 if (!hwpoison_user_mappings(p, pfn, flags, p)) {
2378 res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
2386 res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
2391 res = identify_page_state(pfn, p, page_flags);
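memory_failure() (lines 2197-2391) is the exported entry point that most of the references above feed into. A hedged sketch of a caller, for example an architecture machine-check handler reporting one corrupted pfn; report_corrupted_pfn() is an illustrative name, only memory_failure() and MF_ACTION_REQUIRED are kernel API.

        #include <linux/mm.h>
        #include <linux/printk.h>

        /*
         * Hypothetical helper: report one corrupted page frame to the MM
         * layer. MF_ACTION_REQUIRED signals a synchronous (consumed) error,
         * so memory_failure() may SIGBUS the task that touched the page.
         */
        static void report_corrupted_pfn(unsigned long pfn, bool consumed)
        {
                int ret = memory_failure(pfn, consumed ? MF_ACTION_REQUIRED : 0);

                if (ret)
                        pr_err("memory_failure(%#lx) failed: %d\n", pfn, ret);
        }
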
2406 unsigned long pfn;
2421 * @pfn: Page Number of the corrupted page
2435 void memory_failure_queue(unsigned long pfn, int flags)
2440 .pfn = pfn,
2450 pfn);
2471 soft_offline_page(entry.pfn, entry.flags);
2473 memory_failure(entry.pfn, entry.flags);
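memory_failure_queue() (line 2435) defers handling to a per-CPU queue; the drain at lines 2471/2473 then calls soft_offline_page() or memory_failure() for each entry. A sketch of its use from a context that cannot sleep; the wrapper name is an assumption.

        #include <linux/mm.h>

        /*
         * Hypothetical wrapper for NMI/interrupt context, where calling
         * memory_failure() directly is not allowed: queue the pfn and let
         * the per-CPU work function handle it later.
         */
        static void queue_poisoned_pfn(unsigned long pfn, bool action_required)
        {
                memory_failure_queue(pfn, action_required ? MF_ACTION_REQUIRED : 0);
        }
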
2510 #define unpoison_pr_info(fmt, pfn, rs) \
2513 pr_info(fmt, pfn); \
2518 * @pfn: Page number of the to be unpoisoned page
2528 int unpoison_memory(unsigned long pfn)
2538 if (!pfn_valid(pfn))
2541 p = pfn_to_page(pfn);
2548 pfn, &unpoison_rs);
2555 pfn, &unpoison_rs);
2561 pfn, &unpoison_rs);
2575 pfn, &unpoison_rs);
2581 pfn, &unpoison_rs);
2600 pfn, &unpoison_rs);
2623 num_poisoned_pages_sub(pfn, 1);
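unpoison_memory() (lines 2528-2623) reverses a software-injected poison and, via num_poisoned_pages_sub() at line 2623, drops the counter raised by num_poisoned_pages_inc(). A sketch assuming a hwpoison-inject-style debugfs caller; clear_injected_poison() is hypothetical.

        #include <linux/errno.h>
        #include <linux/mm.h>

        /*
         * Hypothetical injector callback: clear a software-injected poison
         * on @pfn. unpoison_memory() refuses pages backed by a real
         * hardware error or still referenced elsewhere.
         */
        static int clear_injected_poison(unsigned long pfn)
        {
                if (!pfn_valid(pfn))
                        return -ENXIO;

                return unpoison_memory(pfn);
        }
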
2673 unsigned long pfn = page_to_pfn(page);
2685 pr_info("soft offline: %#lx: thp split failed\n", pfn);
2697 pr_info("soft offline: %#lx page already poisoned\n", pfn);
2710 pr_info("soft_offline: %#lx: invalidated\n", pfn);
2728 pfn, msg_page[huge], ret, &page->flags);
2734 pfn, msg_page[huge], page_count(page), &page->flags);
2742 * @pfn: pfn to soft-offline
2764 int soft_offline_page(unsigned long pfn, int flags)
2770 if (!pfn_valid(pfn)) {
2776 page = pfn_to_online_page(pfn);
2778 put_ref_page(pfn, flags);
2785 pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
2786 put_ref_page(pfn, flags);
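soft_offline_page() (line 2764 onward) migrates or invalidates the page instead of killing its users; MF_COUNT_INCREASED in flags tells it the caller already holds a page reference, which put_ref_page() (lines 2778/2786) drops on the early-exit paths. A sketch of a caller that already pinned the page, loosely mirroring the MADV_SOFT_OFFLINE path; the helper name is illustrative.

        #include <linux/mm.h>

        /*
         * Hypothetical caller that already elevated the refcount of @page
         * (e.g. via get_user_pages()); MF_COUNT_INCREASED hands that
         * reference over to soft_offline_page().
         */
        static int soft_offline_held_page(struct page *page)
        {
                return soft_offline_page(page_to_pfn(page), MF_COUNT_INCREASED);
        }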