1/* 2 * Copyright (C) 2008, 2009 Intel Corporation 3 * Authors: Andi Kleen, Fengguang Wu 4 * 5 * This software may be redistributed and/or modified under the terms of 6 * the GNU General Public License ("GPL") version 2 only as published by the 7 * Free Software Foundation. 8 * 9 * High level machine check handler. Handles pages reported by the 10 * hardware as being corrupted usually due to a 2bit ECC memory or cache 11 * failure. 12 * 13 * Handles page cache pages in various states. The tricky part 14 * here is that we can access any page asynchronous to other VM 15 * users, because memory failures could happen anytime and anywhere, 16 * possibly violating some of their assumptions. This is why this code 17 * has to be extremely careful. Generally it tries to use normal locking 18 * rules, as in get the standard locks, even if that means the 19 * error handling takes potentially a long time. 20 * 21 * The operation to map back from RMAP chains to processes has to walk 22 * the complete process list and has non linear complexity with the number 23 * mappings. In short it can be quite slow. But since memory corruptions 24 * are rare we hope to get away with this. 25 */ 26 27/* 28 * Notebook: 29 * - hugetlb needs more code 30 * - kcore/oldmem/vmcore/mem/kmem check for hwpoison pages 31 * - pass bad pages to kdump next kernel 32 */ 33#define DEBUG 1 /* remove me in 2.6.34 */ 34#include <linux/kernel.h> 35#include <linux/mm.h> 36#include <linux/page-flags.h> 37#include <linux/kernel-page-flags.h> 38#include <linux/sched.h> 39#include <linux/ksm.h> 40#include <linux/rmap.h> 41#include <linux/pagemap.h> 42#include <linux/swap.h> 43#include <linux/backing-dev.h> 44#include <linux/migrate.h> 45#include <linux/page-isolation.h> 46#include <linux/suspend.h> 47#include <linux/slab.h> 48#include <linux/swapops.h> 49#include <linux/hugetlb.h> 50#include "internal.h" 51 52int sysctl_memory_failure_early_kill __read_mostly = 0; 53 54int sysctl_memory_failure_recovery __read_mostly = 1; 55 56atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); 57 58#if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) 59 60u32 hwpoison_filter_enable = 0; 61u32 hwpoison_filter_dev_major = ~0U; 62u32 hwpoison_filter_dev_minor = ~0U; 63u64 hwpoison_filter_flags_mask; 64u64 hwpoison_filter_flags_value; 65EXPORT_SYMBOL_GPL(hwpoison_filter_enable); 66EXPORT_SYMBOL_GPL(hwpoison_filter_dev_major); 67EXPORT_SYMBOL_GPL(hwpoison_filter_dev_minor); 68EXPORT_SYMBOL_GPL(hwpoison_filter_flags_mask); 69EXPORT_SYMBOL_GPL(hwpoison_filter_flags_value); 70 71static int hwpoison_filter_dev(struct page *p) 72{ 73 struct address_space *mapping; 74 dev_t dev; 75 76 if (hwpoison_filter_dev_major == ~0U && 77 hwpoison_filter_dev_minor == ~0U) 78 return 0; 79 80 /* 81 * page_mapping() does not accept slab page 82 */ 83 if (PageSlab(p)) 84 return -EINVAL; 85 86 mapping = page_mapping(p); 87 if (mapping == NULL || mapping->host == NULL) 88 return -EINVAL; 89 90 dev = mapping->host->i_sb->s_dev; 91 if (hwpoison_filter_dev_major != ~0U && 92 hwpoison_filter_dev_major != MAJOR(dev)) 93 return -EINVAL; 94 if (hwpoison_filter_dev_minor != ~0U && 95 hwpoison_filter_dev_minor != MINOR(dev)) 96 return -EINVAL; 97 98 return 0; 99} 100 101static int hwpoison_filter_flags(struct page *p) 102{ 103 if (!hwpoison_filter_flags_mask) 104 return 0; 105 106 if ((stable_page_flags(p) & hwpoison_filter_flags_mask) == 107 hwpoison_filter_flags_value) 108 return 0; 109 else 110 return -EINVAL; 111} 112 113/* 114 * This allows stress tests to limit test scope to a collection of tasks 115 * by putting them under some memcg. This prevents killing unrelated/important 116 * processes such as /sbin/init. Note that the target task may share clean 117 * pages with init (eg. libc text), which is harmless. If the target task 118 * share _dirty_ pages with another task B, the test scheme must make sure B 119 * is also included in the memcg. At last, due to race conditions this filter 120 * can only guarantee that the page either belongs to the memcg tasks, or is 121 * a freed page. 122 */ 123#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 124u64 hwpoison_filter_memcg; 125EXPORT_SYMBOL_GPL(hwpoison_filter_memcg); 126static int hwpoison_filter_task(struct page *p) 127{ 128 struct mem_cgroup *mem; 129 struct cgroup_subsys_state *css; 130 unsigned long ino; 131 132 if (!hwpoison_filter_memcg) 133 return 0; 134 135 mem = try_get_mem_cgroup_from_page(p); 136 if (!mem) 137 return -EINVAL; 138 139 css = mem_cgroup_css(mem); 140 /* root_mem_cgroup has NULL dentries */ 141 if (!css->cgroup->dentry) 142 return -EINVAL; 143 144 ino = css->cgroup->dentry->d_inode->i_ino; 145 css_put(css); 146 147 if (ino != hwpoison_filter_memcg) 148 return -EINVAL; 149 150 return 0; 151} 152#else 153static int hwpoison_filter_task(struct page *p) { return 0; } 154#endif 155 156int hwpoison_filter(struct page *p) 157{ 158 if (!hwpoison_filter_enable) 159 return 0; 160 161 if (hwpoison_filter_dev(p)) 162 return -EINVAL; 163 164 if (hwpoison_filter_flags(p)) 165 return -EINVAL; 166 167 if (hwpoison_filter_task(p)) 168 return -EINVAL; 169 170 return 0; 171} 172#else 173int hwpoison_filter(struct page *p) 174{ 175 return 0; 176} 177#endif 178 179EXPORT_SYMBOL_GPL(hwpoison_filter); 180 181/* 182 * Send all the processes who have the page mapped an ``action optional'' 183 * signal. 184 */ 185static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno, 186 unsigned long pfn, struct page *page) 187{ 188 struct siginfo si; 189 int ret; 190 191 printk(KERN_ERR 192 "MCE %#lx: Killing %s:%d early due to hardware memory corruption\n", 193 pfn, t->comm, t->pid); 194 si.si_signo = SIGBUS; 195 si.si_errno = 0; 196 si.si_code = BUS_MCEERR_AO; 197 si.si_addr = (void *)addr; 198#ifdef __ARCH_SI_TRAPNO 199 si.si_trapno = trapno; 200#endif 201 si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT; 202 /* 203 * Don't use force here, it's convenient if the signal 204 * can be temporarily blocked. 205 * This could cause a loop when the user sets SIGBUS 206 * to SIG_IGN, but hopefully noone will do that? 207 */ 208 ret = send_sig_info(SIGBUS, &si, t); /* synchronous? */ 209 if (ret < 0) 210 printk(KERN_INFO "MCE: Error sending signal to %s:%d: %d\n", 211 t->comm, t->pid, ret); 212 return ret; 213} 214 215/* 216 * When a unknown page type is encountered drain as many buffers as possible 217 * in the hope to turn the page into a LRU or free page, which we can handle. 218 */ 219void shake_page(struct page *p, int access) 220{ 221 if (!PageSlab(p)) { 222 lru_add_drain_all(); 223 if (PageLRU(p)) 224 return; 225 drain_all_pages(); 226 if (PageLRU(p) || is_free_buddy_page(p)) 227 return; 228 } 229 230 /* 231 * Only all shrink_slab here (which would also 232 * shrink other caches) if access is not potentially fatal. 233 */ 234 if (access) { 235 int nr; 236 do { 237 nr = shrink_slab(1000, GFP_KERNEL, 1000); 238 if (page_count(p) == 1) 239 break; 240 } while (nr > 10); 241 } 242} 243EXPORT_SYMBOL_GPL(shake_page); 244 245/* 246 * Kill all processes that have a poisoned page mapped and then isolate 247 * the page. 248 * 249 * General strategy: 250 * Find all processes having the page mapped and kill them. 251 * But we keep a page reference around so that the page is not 252 * actually freed yet. 253 * Then stash the page away 254 * 255 * There's no convenient way to get back to mapped processes 256 * from the VMAs. So do a brute-force search over all 257 * running processes. 258 * 259 * Remember that machine checks are not common (or rather 260 * if they are common you have other problems), so this shouldn't 261 * be a performance issue. 262 * 263 * Also there are some races possible while we get from the 264 * error detection to actually handle it. 265 */ 266 267struct to_kill { 268 struct list_head nd; 269 struct task_struct *tsk; 270 unsigned long addr; 271 unsigned addr_valid:1; 272}; 273 274/* 275 * Failure handling: if we can't find or can't kill a process there's 276 * not much we can do. We just print a message and ignore otherwise. 277 */ 278 279/* 280 * Schedule a process for later kill. 281 * Uses GFP_ATOMIC allocations to avoid potential recursions in the VM. 282 * TBD would GFP_NOIO be enough? 283 */ 284static void add_to_kill(struct task_struct *tsk, struct page *p, 285 struct vm_area_struct *vma, 286 struct list_head *to_kill, 287 struct to_kill **tkc) 288{ 289 struct to_kill *tk; 290 291 if (*tkc) { 292 tk = *tkc; 293 *tkc = NULL; 294 } else { 295 tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC); 296 if (!tk) { 297 printk(KERN_ERR 298 "MCE: Out of memory while machine check handling\n"); 299 return; 300 } 301 } 302 tk->addr = page_address_in_vma(p, vma); 303 tk->addr_valid = 1; 304 305 /* 306 * In theory we don't have to kill when the page was 307 * munmaped. But it could be also a mremap. Since that's 308 * likely very rare kill anyways just out of paranoia, but use 309 * a SIGKILL because the error is not contained anymore. 310 */ 311 if (tk->addr == -EFAULT) { 312 pr_debug("MCE: Unable to find user space address %lx in %s\n", 313 page_to_pfn(p), tsk->comm); 314 tk->addr_valid = 0; 315 } 316 get_task_struct(tsk); 317 tk->tsk = tsk; 318 list_add_tail(&tk->nd, to_kill); 319} 320 321/* 322 * Kill the processes that have been collected earlier. 323 * 324 * Only do anything when DOIT is set, otherwise just free the list 325 * (this is used for clean pages which do not need killing) 326 * Also when FAIL is set do a force kill because something went 327 * wrong earlier. 328 */ 329static void kill_procs_ao(struct list_head *to_kill, int doit, int trapno, 330 int fail, struct page *page, unsigned long pfn) 331{ 332 struct to_kill *tk, *next; 333 334 list_for_each_entry_safe (tk, next, to_kill, nd) { 335 if (doit) { 336 /* 337 * In case something went wrong with munmapping 338 * make sure the process doesn't catch the 339 * signal and then access the memory. Just kill it. 340 */ 341 if (fail || tk->addr_valid == 0) { 342 printk(KERN_ERR 343 "MCE %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", 344 pfn, tk->tsk->comm, tk->tsk->pid); 345 force_sig(SIGKILL, tk->tsk); 346 } 347 348 /* 349 * In theory the process could have mapped 350 * something else on the address in-between. We could 351 * check for that, but we need to tell the 352 * process anyways. 353 */ 354 else if (kill_proc_ao(tk->tsk, tk->addr, trapno, 355 pfn, page) < 0) 356 printk(KERN_ERR 357 "MCE %#lx: Cannot send advisory machine check signal to %s:%d\n", 358 pfn, tk->tsk->comm, tk->tsk->pid); 359 } 360 put_task_struct(tk->tsk); 361 kfree(tk); 362 } 363} 364 365static int task_early_kill(struct task_struct *tsk) 366{ 367 if (!tsk->mm) 368 return 0; 369 if (tsk->flags & PF_MCE_PROCESS) 370 return !!(tsk->flags & PF_MCE_EARLY); 371 return sysctl_memory_failure_early_kill; 372} 373 374/* 375 * Collect processes when the error hit an anonymous page. 376 */ 377static void collect_procs_anon(struct page *page, struct list_head *to_kill, 378 struct to_kill **tkc) 379{ 380 struct vm_area_struct *vma; 381 struct task_struct *tsk; 382 struct anon_vma *av; 383 384 read_lock(&tasklist_lock); 385 av = page_lock_anon_vma(page); 386 if (av == NULL) /* Not actually mapped anymore */ 387 goto out; 388 for_each_process (tsk) { 389 struct anon_vma_chain *vmac; 390 391 if (!task_early_kill(tsk)) 392 continue; 393 list_for_each_entry(vmac, &av->head, same_anon_vma) { 394 vma = vmac->vma; 395 if (!page_mapped_in_vma(page, vma)) 396 continue; 397 if (vma->vm_mm == tsk->mm) 398 add_to_kill(tsk, page, vma, to_kill, tkc); 399 } 400 } 401 page_unlock_anon_vma(av); 402out: 403 read_unlock(&tasklist_lock); 404} 405 406/* 407 * Collect processes when the error hit a file mapped page. 408 */ 409static void collect_procs_file(struct page *page, struct list_head *to_kill, 410 struct to_kill **tkc) 411{ 412 struct vm_area_struct *vma; 413 struct task_struct *tsk; 414 struct prio_tree_iter iter; 415 struct address_space *mapping = page->mapping; 416 417 /* 418 * A note on the locking order between the two locks. 419 * We don't rely on this particular order. 420 * If you have some other code that needs a different order 421 * feel free to switch them around. Or add a reverse link 422 * from mm_struct to task_struct, then this could be all 423 * done without taking tasklist_lock and looping over all tasks. 424 */ 425 426 read_lock(&tasklist_lock); 427 spin_lock(&mapping->i_mmap_lock); 428 for_each_process(tsk) { 429 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 430 431 if (!task_early_kill(tsk)) 432 continue; 433 434 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, 435 pgoff) { 436 /* 437 * Send early kill signal to tasks where a vma covers 438 * the page but the corrupted page is not necessarily 439 * mapped it in its pte. 440 * Assume applications who requested early kill want 441 * to be informed of all such data corruptions. 442 */ 443 if (vma->vm_mm == tsk->mm) 444 add_to_kill(tsk, page, vma, to_kill, tkc); 445 } 446 } 447 spin_unlock(&mapping->i_mmap_lock); 448 read_unlock(&tasklist_lock); 449} 450 451/* 452 * Collect the processes who have the corrupted page mapped to kill. 453 * This is done in two steps for locking reasons. 454 * First preallocate one tokill structure outside the spin locks, 455 * so that we can kill at least one process reasonably reliable. 456 */ 457static void collect_procs(struct page *page, struct list_head *tokill) 458{ 459 struct to_kill *tk; 460 461 if (!page->mapping) 462 return; 463 464 tk = kmalloc(sizeof(struct to_kill), GFP_NOIO); 465 if (!tk) 466 return; 467 if (PageAnon(page)) 468 collect_procs_anon(page, tokill, &tk); 469 else 470 collect_procs_file(page, tokill, &tk); 471 kfree(tk); 472} 473 474/* 475 * Error handlers for various types of pages. 476 */ 477 478enum outcome { 479 IGNORED, /* Error: cannot be handled */ 480 FAILED, /* Error: handling failed */ 481 DELAYED, /* Will be handled later */ 482 RECOVERED, /* Successfully recovered */ 483}; 484 485static const char *action_name[] = { 486 [IGNORED] = "Ignored", 487 [FAILED] = "Failed", 488 [DELAYED] = "Delayed", 489 [RECOVERED] = "Recovered", 490}; 491 492static int delete_from_lru_cache(struct page *p) 493{ 494 if (!isolate_lru_page(p)) { 495 /* 496 * Clear sensible page flags, so that the buddy system won't 497 * complain when the page is unpoison-and-freed. 498 */ 499 ClearPageActive(p); 500 ClearPageUnevictable(p); 501 /* 502 * drop the page count elevated by isolate_lru_page() 503 */ 504 page_cache_release(p); 505 return 0; 506 } 507 return -EIO; 508} 509 510/* 511 * Error hit kernel page. 512 * Do nothing, try to be lucky and not touch this instead. For a few cases we 513 * could be more sophisticated. 514 */ 515static int me_kernel(struct page *p, unsigned long pfn) 516{ 517 return IGNORED; 518} 519 520/* 521 * Page in unknown state. Do nothing. 522 */ 523static int me_unknown(struct page *p, unsigned long pfn) 524{ 525 printk(KERN_ERR "MCE %#lx: Unknown page state\n", pfn); 526 return FAILED; 527} 528 529/* 530 * Clean (or cleaned) page cache page. 531 */ 532static int me_pagecache_clean(struct page *p, unsigned long pfn) 533{ 534 int err; 535 int ret = FAILED; 536 struct address_space *mapping; 537 538 delete_from_lru_cache(p); 539 540 /* 541 * For anonymous pages we're done the only reference left 542 * should be the one m_f() holds. 543 */ 544 if (PageAnon(p)) 545 return RECOVERED; 546 547 /* 548 * Now truncate the page in the page cache. This is really 549 * more like a "temporary hole punch" 550 * Don't do this for block devices when someone else 551 * has a reference, because it could be file system metadata 552 * and that's not safe to truncate. 553 */ 554 mapping = page_mapping(p); 555 if (!mapping) { 556 /* 557 * Page has been teared down in the meanwhile 558 */ 559 return FAILED; 560 } 561 562 /* 563 * Truncation is a bit tricky. Enable it per file system for now. 564 * 565 * Open: to take i_mutex or not for this? Right now we don't. 566 */ 567 if (mapping->a_ops->error_remove_page) { 568 err = mapping->a_ops->error_remove_page(mapping, p); 569 if (err != 0) { 570 printk(KERN_INFO "MCE %#lx: Failed to punch page: %d\n", 571 pfn, err); 572 } else if (page_has_private(p) && 573 !try_to_release_page(p, GFP_NOIO)) { 574 pr_debug("MCE %#lx: failed to release buffers\n", pfn); 575 } else { 576 ret = RECOVERED; 577 } 578 } else { 579 /* 580 * If the file system doesn't support it just invalidate 581 * This fails on dirty or anything with private pages 582 */ 583 if (invalidate_inode_page(p)) 584 ret = RECOVERED; 585 else 586 printk(KERN_INFO "MCE %#lx: Failed to invalidate\n", 587 pfn); 588 } 589 return ret; 590} 591 592/* 593 * Dirty cache page page 594 * Issues: when the error hit a hole page the error is not properly 595 * propagated. 596 */ 597static int me_pagecache_dirty(struct page *p, unsigned long pfn) 598{ 599 struct address_space *mapping = page_mapping(p); 600 601 SetPageError(p); 602 /* TBD: print more information about the file. */ 603 if (mapping) { 604 /* 605 * IO error will be reported by write(), fsync(), etc. 606 * who check the mapping. 607 * This way the application knows that something went 608 * wrong with its dirty file data. 609 * 610 * There's one open issue: 611 * 612 * The EIO will be only reported on the next IO 613 * operation and then cleared through the IO map. 614 * Normally Linux has two mechanisms to pass IO error 615 * first through the AS_EIO flag in the address space 616 * and then through the PageError flag in the page. 617 * Since we drop pages on memory failure handling the 618 * only mechanism open to use is through AS_AIO. 619 * 620 * This has the disadvantage that it gets cleared on 621 * the first operation that returns an error, while 622 * the PageError bit is more sticky and only cleared 623 * when the page is reread or dropped. If an 624 * application assumes it will always get error on 625 * fsync, but does other operations on the fd before 626 * and the page is dropped inbetween then the error 627 * will not be properly reported. 628 * 629 * This can already happen even without hwpoisoned 630 * pages: first on metadata IO errors (which only 631 * report through AS_EIO) or when the page is dropped 632 * at the wrong time. 633 * 634 * So right now we assume that the application DTRT on 635 * the first EIO, but we're not worse than other parts 636 * of the kernel. 637 */ 638 mapping_set_error(mapping, EIO); 639 } 640 641 return me_pagecache_clean(p, pfn); 642} 643 644/* 645 * Clean and dirty swap cache. 646 * 647 * Dirty swap cache page is tricky to handle. The page could live both in page 648 * cache and swap cache(ie. page is freshly swapped in). So it could be 649 * referenced concurrently by 2 types of PTEs: 650 * normal PTEs and swap PTEs. We try to handle them consistently by calling 651 * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs, 652 * and then 653 * - clear dirty bit to prevent IO 654 * - remove from LRU 655 * - but keep in the swap cache, so that when we return to it on 656 * a later page fault, we know the application is accessing 657 * corrupted data and shall be killed (we installed simple 658 * interception code in do_swap_page to catch it). 659 * 660 * Clean swap cache pages can be directly isolated. A later page fault will 661 * bring in the known good data from disk. 662 */ 663static int me_swapcache_dirty(struct page *p, unsigned long pfn) 664{ 665 ClearPageDirty(p); 666 /* Trigger EIO in shmem: */ 667 ClearPageUptodate(p); 668 669 if (!delete_from_lru_cache(p)) 670 return DELAYED; 671 else 672 return FAILED; 673} 674 675static int me_swapcache_clean(struct page *p, unsigned long pfn) 676{ 677 delete_from_swap_cache(p); 678 679 if (!delete_from_lru_cache(p)) 680 return RECOVERED; 681 else 682 return FAILED; 683} 684 685/* 686 * Huge pages. Needs work. 687 * Issues: 688 * - Error on hugepage is contained in hugepage unit (not in raw page unit.) 689 * To narrow down kill region to one page, we need to break up pmd. 690 * - To support soft-offlining for hugepage, we need to support hugepage 691 * migration. 692 */ 693static int me_huge_page(struct page *p, unsigned long pfn) 694{ 695 struct page *hpage = compound_head(p); 696 /* 697 * We can safely recover from error on free or reserved (i.e. 698 * not in-use) hugepage by dequeuing it from freelist. 699 * To check whether a hugepage is in-use or not, we can't use 700 * page->lru because it can be used in other hugepage operations, 701 * such as __unmap_hugepage_range() and gather_surplus_pages(). 702 * So instead we use page_mapping() and PageAnon(). 703 * We assume that this function is called with page lock held, 704 * so there is no race between isolation and mapping/unmapping. 705 */ 706 if (!(page_mapping(hpage) || PageAnon(hpage))) { 707 __isolate_hwpoisoned_huge_page(hpage); 708 return RECOVERED; 709 } 710 return DELAYED; 711} 712 713/* 714 * Various page states we can handle. 715 * 716 * A page state is defined by its current page->flags bits. 717 * The table matches them in order and calls the right handler. 718 * 719 * This is quite tricky because we can access page at any time 720 * in its live cycle, so all accesses have to be extremly careful. 721 * 722 * This is not complete. More states could be added. 723 * For any missing state don't attempt recovery. 724 */ 725 726#define dirty (1UL << PG_dirty) 727#define sc (1UL << PG_swapcache) 728#define unevict (1UL << PG_unevictable) 729#define mlock (1UL << PG_mlocked) 730#define writeback (1UL << PG_writeback) 731#define lru (1UL << PG_lru) 732#define swapbacked (1UL << PG_swapbacked) 733#define head (1UL << PG_head) 734#define tail (1UL << PG_tail) 735#define compound (1UL << PG_compound) 736#define slab (1UL << PG_slab) 737#define reserved (1UL << PG_reserved) 738 739static struct page_state { 740 unsigned long mask; 741 unsigned long res; 742 char *msg; 743 int (*action)(struct page *p, unsigned long pfn); 744} error_states[] = { 745 { reserved, reserved, "reserved kernel", me_kernel }, 746 /* 747 * free pages are specially detected outside this table: 748 * PG_buddy pages only make a small fraction of all free pages. 749 */ 750 751 /* 752 * Could in theory check if slab page is free or if we can drop 753 * currently unused objects without touching them. But just 754 * treat it as standard kernel for now. 755 */ 756 { slab, slab, "kernel slab", me_kernel }, 757 758#ifdef CONFIG_PAGEFLAGS_EXTENDED 759 { head, head, "huge", me_huge_page }, 760 { tail, tail, "huge", me_huge_page }, 761#else 762 { compound, compound, "huge", me_huge_page }, 763#endif 764 765 { sc|dirty, sc|dirty, "swapcache", me_swapcache_dirty }, 766 { sc|dirty, sc, "swapcache", me_swapcache_clean }, 767 768 { unevict|dirty, unevict|dirty, "unevictable LRU", me_pagecache_dirty}, 769 { unevict, unevict, "unevictable LRU", me_pagecache_clean}, 770 771 { mlock|dirty, mlock|dirty, "mlocked LRU", me_pagecache_dirty }, 772 { mlock, mlock, "mlocked LRU", me_pagecache_clean }, 773 774 { lru|dirty, lru|dirty, "LRU", me_pagecache_dirty }, 775 { lru|dirty, lru, "clean LRU", me_pagecache_clean }, 776 777 /* 778 * Catchall entry: must be at end. 779 */ 780 { 0, 0, "unknown page state", me_unknown }, 781}; 782 783#undef dirty 784#undef sc 785#undef unevict 786#undef mlock 787#undef writeback 788#undef lru 789#undef swapbacked 790#undef head 791#undef tail 792#undef compound 793#undef slab 794#undef reserved 795 796static void action_result(unsigned long pfn, char *msg, int result) 797{ 798 struct page *page = pfn_to_page(pfn); 799 800 printk(KERN_ERR "MCE %#lx: %s%s page recovery: %s\n", 801 pfn, 802 PageDirty(page) ? "dirty " : "", 803 msg, action_name[result]); 804} 805 806static int page_action(struct page_state *ps, struct page *p, 807 unsigned long pfn) 808{ 809 int result; 810 int count; 811 812 result = ps->action(p, pfn); 813 action_result(pfn, ps->msg, result); 814 815 count = page_count(p) - 1; 816 if (ps->action == me_swapcache_dirty && result == DELAYED) 817 count--; 818 if (count != 0) { 819 printk(KERN_ERR 820 "MCE %#lx: %s page still referenced by %d users\n", 821 pfn, ps->msg, count); 822 result = FAILED; 823 } 824 825 /* Could do more checks here if page looks ok */ 826 /* 827 * Could adjust zone counters here to correct for the missing page. 828 */ 829 830 return (result == RECOVERED || result == DELAYED) ? 0 : -EBUSY; 831} 832 833#define N_UNMAP_TRIES 5 834 835/* 836 * Do all that is necessary to remove user space mappings. Unmap 837 * the pages and send SIGBUS to the processes if the data was dirty. 838 */ 839static int hwpoison_user_mappings(struct page *p, unsigned long pfn, 840 int trapno) 841{ 842 enum ttu_flags ttu = TTU_UNMAP | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS; 843 struct address_space *mapping; 844 LIST_HEAD(tokill); 845 int ret; 846 int i; 847 int kill = 1; 848 struct page *hpage = compound_head(p); 849 850 if (PageReserved(p) || PageSlab(p)) 851 return SWAP_SUCCESS; 852 853 /* 854 * This check implies we don't kill processes if their pages 855 * are in the swap cache early. Those are always late kills. 856 */ 857 if (!page_mapped(hpage)) 858 return SWAP_SUCCESS; 859 860 if (PageKsm(p)) 861 return SWAP_FAIL; 862 863 if (PageSwapCache(p)) { 864 printk(KERN_ERR 865 "MCE %#lx: keeping poisoned page in swap cache\n", pfn); 866 ttu |= TTU_IGNORE_HWPOISON; 867 } 868 869 mapping = page_mapping(hpage); 870 if (!PageDirty(hpage) && mapping && 871 mapping_cap_writeback_dirty(mapping)) { 872 if (page_mkclean(hpage)) { 873 SetPageDirty(hpage); 874 } else { 875 kill = 0; 876 ttu |= TTU_IGNORE_HWPOISON; 877 printk(KERN_INFO 878 "MCE %#lx: corrupted page was clean: dropped without side effects\n", 879 pfn); 880 } 881 } 882 883 /* 884 * First collect all the processes that have the page 885 * mapped in dirty form. This has to be done before try_to_unmap, 886 * because ttu takes the rmap data structures down. 887 * 888 * Error handling: We ignore errors here because 889 * there's nothing that can be done. 890 */ 891 if (kill) 892 collect_procs(hpage, &tokill); 893 894 /* 895 * try_to_unmap can fail temporarily due to races. 896 * Try a few times (RED-PEN better strategy?) 897 */ 898 for (i = 0; i < N_UNMAP_TRIES; i++) { 899 ret = try_to_unmap(hpage, ttu); 900 if (ret == SWAP_SUCCESS) 901 break; 902 pr_debug("MCE %#lx: try_to_unmap retry needed %d\n", pfn, ret); 903 } 904 905 if (ret != SWAP_SUCCESS) 906 printk(KERN_ERR "MCE %#lx: failed to unmap page (mapcount=%d)\n", 907 pfn, page_mapcount(hpage)); 908 909 /* 910 * Now that the dirty bit has been propagated to the 911 * struct page and all unmaps done we can decide if 912 * killing is needed or not. Only kill when the page 913 * was dirty, otherwise the tokill list is merely 914 * freed. When there was a problem unmapping earlier 915 * use a more force-full uncatchable kill to prevent 916 * any accesses to the poisoned memory. 917 */ 918 kill_procs_ao(&tokill, !!PageDirty(hpage), trapno, 919 ret != SWAP_SUCCESS, p, pfn); 920 921 return ret; 922} 923 924static void set_page_hwpoison_huge_page(struct page *hpage) 925{ 926 int i; 927 int nr_pages = 1 << compound_order(hpage); 928 for (i = 0; i < nr_pages; i++) 929 SetPageHWPoison(hpage + i); 930} 931 932static void clear_page_hwpoison_huge_page(struct page *hpage) 933{ 934 int i; 935 int nr_pages = 1 << compound_order(hpage); 936 for (i = 0; i < nr_pages; i++) 937 ClearPageHWPoison(hpage + i); 938} 939 940int __memory_failure(unsigned long pfn, int trapno, int flags) 941{ 942 struct page_state *ps; 943 struct page *p; 944 struct page *hpage; 945 int res; 946 unsigned int nr_pages; 947 948 if (!sysctl_memory_failure_recovery) 949 panic("Memory failure from trap %d on page %lx", trapno, pfn); 950 951 if (!pfn_valid(pfn)) { 952 printk(KERN_ERR 953 "MCE %#lx: memory outside kernel control\n", 954 pfn); 955 return -ENXIO; 956 } 957 958 p = pfn_to_page(pfn); 959 hpage = compound_head(p); 960 if (TestSetPageHWPoison(p)) { 961 printk(KERN_ERR "MCE %#lx: already hardware poisoned\n", pfn); 962 return 0; 963 } 964 965 nr_pages = 1 << compound_order(hpage); 966 atomic_long_add(nr_pages, &mce_bad_pages); 967 968 /* 969 * We need/can do nothing about count=0 pages. 970 * 1) it's a free page, and therefore in safe hand: 971 * prep_new_page() will be the gate keeper. 972 * 2) it's part of a non-compound high order page. 973 * Implies some kernel user: cannot stop them from 974 * R/W the page; let's pray that the page has been 975 * used and will be freed some time later. 976 * In fact it's dangerous to directly bump up page count from 0, 977 * that may make page_freeze_refs()/page_unfreeze_refs() mismatch. 978 */ 979 if (!(flags & MF_COUNT_INCREASED) && 980 !get_page_unless_zero(hpage)) { 981 if (is_free_buddy_page(p)) { 982 action_result(pfn, "free buddy", DELAYED); 983 return 0; 984 } else { 985 action_result(pfn, "high order kernel", IGNORED); 986 return -EBUSY; 987 } 988 } 989 990 /* 991 * We ignore non-LRU pages for good reasons. 992 * - PG_locked is only well defined for LRU pages and a few others 993 * - to avoid races with __set_page_locked() 994 * - to avoid races with __SetPageSlab*() (and more non-atomic ops) 995 * The check (unnecessarily) ignores LRU pages being isolated and 996 * walked by the page reclaim code, however that's not a big loss. 997 */ 998 if (!PageLRU(p) && !PageHuge(p)) 999 shake_page(p, 0); 1000 if (!PageLRU(p) && !PageHuge(p)) { 1001 /* 1002 * shake_page could have turned it free. 1003 */ 1004 if (is_free_buddy_page(p)) { 1005 action_result(pfn, "free buddy, 2nd try", DELAYED); 1006 return 0; 1007 } 1008 action_result(pfn, "non LRU", IGNORED); 1009 put_page(p); 1010 return -EBUSY; 1011 } 1012 1013 /* 1014 * Lock the page and wait for writeback to finish. 1015 * It's very difficult to mess with pages currently under IO 1016 * and in many cases impossible, so we just avoid it here. 1017 */ 1018 lock_page_nosync(hpage); 1019 1020 /* 1021 * unpoison always clear PG_hwpoison inside page lock 1022 */ 1023 if (!PageHWPoison(p)) { 1024 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1025 res = 0; 1026 goto out; 1027 } 1028 if (hwpoison_filter(p)) { 1029 if (TestClearPageHWPoison(p)) 1030 atomic_long_sub(nr_pages, &mce_bad_pages); 1031 unlock_page(hpage); 1032 put_page(hpage); 1033 return 0; 1034 } 1035 1036 /* 1037 * For error on the tail page, we should set PG_hwpoison 1038 * on the head page to show that the hugepage is hwpoisoned 1039 */ 1040 if (PageTail(p) && TestSetPageHWPoison(hpage)) { 1041 action_result(pfn, "hugepage already hardware poisoned", 1042 IGNORED); 1043 unlock_page(hpage); 1044 put_page(hpage); 1045 return 0; 1046 } 1047 /* 1048 * Set PG_hwpoison on all pages in an error hugepage, 1049 * because containment is done in hugepage unit for now. 1050 * Since we have done TestSetPageHWPoison() for the head page with 1051 * page lock held, we can safely set PG_hwpoison bits on tail pages. 1052 */ 1053 if (PageHuge(p)) 1054 set_page_hwpoison_huge_page(hpage); 1055 1056 wait_on_page_writeback(p); 1057 1058 /* 1059 * Now take care of user space mappings. 1060 * Abort on fail: __remove_from_page_cache() assumes unmapped page. 1061 */ 1062 if (hwpoison_user_mappings(p, pfn, trapno) != SWAP_SUCCESS) { 1063 printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn); 1064 res = -EBUSY; 1065 goto out; 1066 } 1067 1068 /* 1069 * Torn down by someone else? 1070 */ 1071 if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { 1072 action_result(pfn, "already truncated LRU", IGNORED); 1073 res = -EBUSY; 1074 goto out; 1075 } 1076 1077 res = -EBUSY; 1078 for (ps = error_states;; ps++) { 1079 if ((p->flags & ps->mask) == ps->res) { 1080 res = page_action(ps, p, pfn); 1081 break; 1082 } 1083 } 1084out: 1085 unlock_page(hpage); 1086 return res; 1087} 1088EXPORT_SYMBOL_GPL(__memory_failure); 1089 1090/** 1091 * memory_failure - Handle memory failure of a page. 1092 * @pfn: Page Number of the corrupted page 1093 * @trapno: Trap number reported in the signal to user space. 1094 * 1095 * This function is called by the low level machine check code 1096 * of an architecture when it detects hardware memory corruption 1097 * of a page. It tries its best to recover, which includes 1098 * dropping pages, killing processes etc. 1099 * 1100 * The function is primarily of use for corruptions that 1101 * happen outside the current execution context (e.g. when 1102 * detected by a background scrubber) 1103 * 1104 * Must run in process context (e.g. a work queue) with interrupts 1105 * enabled and no spinlocks hold. 1106 */ 1107void memory_failure(unsigned long pfn, int trapno) 1108{ 1109 __memory_failure(pfn, trapno, 0); 1110} 1111 1112/** 1113 * unpoison_memory - Unpoison a previously poisoned page 1114 * @pfn: Page number of the to be unpoisoned page 1115 * 1116 * Software-unpoison a page that has been poisoned by 1117 * memory_failure() earlier. 1118 * 1119 * This is only done on the software-level, so it only works 1120 * for linux injected failures, not real hardware failures 1121 * 1122 * Returns 0 for success, otherwise -errno. 1123 */ 1124int unpoison_memory(unsigned long pfn) 1125{ 1126 struct page *page; 1127 struct page *p; 1128 int freeit = 0; 1129 unsigned int nr_pages; 1130 1131 if (!pfn_valid(pfn)) 1132 return -ENXIO; 1133 1134 p = pfn_to_page(pfn); 1135 page = compound_head(p); 1136 1137 if (!PageHWPoison(p)) { 1138 pr_debug("MCE: Page was already unpoisoned %#lx\n", pfn); 1139 return 0; 1140 } 1141 1142 nr_pages = 1 << compound_order(page); 1143 1144 if (!get_page_unless_zero(page)) { 1145 if (TestClearPageHWPoison(p)) 1146 atomic_long_sub(nr_pages, &mce_bad_pages); 1147 pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn); 1148 return 0; 1149 } 1150 1151 lock_page_nosync(page); 1152 /* 1153 * This test is racy because PG_hwpoison is set outside of page lock. 1154 * That's acceptable because that won't trigger kernel panic. Instead, 1155 * the PG_hwpoison page will be caught and isolated on the entrance to 1156 * the free buddy page pool. 1157 */ 1158 if (TestClearPageHWPoison(page)) { 1159 pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn); 1160 atomic_long_sub(nr_pages, &mce_bad_pages); 1161 freeit = 1; 1162 } 1163 if (PageHuge(p)) 1164 clear_page_hwpoison_huge_page(page); 1165 unlock_page(page); 1166 1167 put_page(page); 1168 if (freeit) 1169 put_page(page); 1170 1171 return 0; 1172} 1173EXPORT_SYMBOL(unpoison_memory); 1174 1175static struct page *new_page(struct page *p, unsigned long private, int **x) 1176{ 1177 int nid = page_to_nid(p); 1178 return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0); 1179} 1180 1181/* 1182 * Safely get reference count of an arbitrary page. 1183 * Returns 0 for a free page, -EIO for a zero refcount page 1184 * that is not free, and 1 for any other page type. 1185 * For 1 the page is returned with increased page count, otherwise not. 1186 */ 1187static int get_any_page(struct page *p, unsigned long pfn, int flags) 1188{ 1189 int ret; 1190 1191 if (flags & MF_COUNT_INCREASED) 1192 return 1; 1193 1194 /* 1195 * The lock_system_sleep prevents a race with memory hotplug, 1196 * because the isolation assumes there's only a single user. 1197 * This is a big hammer, a better would be nicer. 1198 */ 1199 lock_system_sleep(); 1200 1201 /* 1202 * Isolate the page, so that it doesn't get reallocated if it 1203 * was free. 1204 */ 1205 set_migratetype_isolate(p); 1206 if (!get_page_unless_zero(compound_head(p))) { 1207 if (is_free_buddy_page(p)) { 1208 pr_debug("get_any_page: %#lx free buddy page\n", pfn); 1209 /* Set hwpoison bit while page is still isolated */ 1210 SetPageHWPoison(p); 1211 ret = 0; 1212 } else { 1213 pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n", 1214 pfn, p->flags); 1215 ret = -EIO; 1216 } 1217 } else { 1218 /* Not a free page */ 1219 ret = 1; 1220 } 1221 unset_migratetype_isolate(p); 1222 unlock_system_sleep(); 1223 return ret; 1224} 1225 1226/** 1227 * soft_offline_page - Soft offline a page. 1228 * @page: page to offline 1229 * @flags: flags. Same as memory_failure(). 1230 * 1231 * Returns 0 on success, otherwise negated errno. 1232 * 1233 * Soft offline a page, by migration or invalidation, 1234 * without killing anything. This is for the case when 1235 * a page is not corrupted yet (so it's still valid to access), 1236 * but has had a number of corrected errors and is better taken 1237 * out. 1238 * 1239 * The actual policy on when to do that is maintained by 1240 * user space. 1241 * 1242 * This should never impact any application or cause data loss, 1243 * however it might take some time. 1244 * 1245 * This is not a 100% solution for all memory, but tries to be 1246 * ``good enough'' for the majority of memory. 1247 */ 1248int soft_offline_page(struct page *page, int flags) 1249{ 1250 int ret; 1251 unsigned long pfn = page_to_pfn(page); 1252 1253 ret = get_any_page(page, pfn, flags); 1254 if (ret < 0) 1255 return ret; 1256 if (ret == 0) 1257 goto done; 1258 1259 /* 1260 * Page cache page we can handle? 1261 */ 1262 if (!PageLRU(page)) { 1263 /* 1264 * Try to free it. 1265 */ 1266 put_page(page); 1267 shake_page(page, 1); 1268 1269 /* 1270 * Did it turn free? 1271 */ 1272 ret = get_any_page(page, pfn, 0); 1273 if (ret < 0) 1274 return ret; 1275 if (ret == 0) 1276 goto done; 1277 } 1278 if (!PageLRU(page)) { 1279 pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n", 1280 pfn, page->flags); 1281 return -EIO; 1282 } 1283 1284 lock_page(page); 1285 wait_on_page_writeback(page); 1286 1287 /* 1288 * Synchronized using the page lock with memory_failure() 1289 */ 1290 if (PageHWPoison(page)) { 1291 unlock_page(page); 1292 put_page(page); 1293 pr_debug("soft offline: %#lx page already poisoned\n", pfn); 1294 return -EBUSY; 1295 } 1296 1297 /* 1298 * Try to invalidate first. This should work for 1299 * non dirty unmapped page cache pages. 1300 */ 1301 ret = invalidate_inode_page(page); 1302 unlock_page(page); 1303 1304 /* 1305 * Drop count because page migration doesn't like raised 1306 * counts. The page could get re-allocated, but if it becomes 1307 * LRU the isolation will just fail. 1308 * RED-PEN would be better to keep it isolated here, but we 1309 * would need to fix isolation locking first. 1310 */ 1311 put_page(page); 1312 if (ret == 1) { 1313 ret = 0; 1314 pr_debug("soft_offline: %#lx: invalidated\n", pfn); 1315 goto done; 1316 } 1317 1318 /* 1319 * Simple invalidation didn't work. 1320 * Try to migrate to a new page instead. migrate.c 1321 * handles a large number of cases for us. 1322 */ 1323 ret = isolate_lru_page(page); 1324 if (!ret) { 1325 LIST_HEAD(pagelist); 1326 1327 list_add(&page->lru, &pagelist); 1328 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0); 1329 if (ret) { 1330 pr_debug("soft offline: %#lx: migration failed %d, type %lx\n", 1331 pfn, ret, page->flags); 1332 if (ret > 0) 1333 ret = -EIO; 1334 } 1335 } else { 1336 pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", 1337 pfn, ret, page_count(page), page->flags); 1338 } 1339 if (ret) 1340 return ret; 1341 1342done: 1343 atomic_long_add(1, &mce_bad_pages); 1344 SetPageHWPoison(page); 1345 /* keep elevated page count for bad page */ 1346 return ret; 1347} 1348 1349/* 1350 * The caller must hold current->mm->mmap_sem in read mode. 1351 */ 1352int is_hwpoison_address(unsigned long addr) 1353{ 1354 pgd_t *pgdp; 1355 pud_t pud, *pudp; 1356 pmd_t pmd, *pmdp; 1357 pte_t pte, *ptep; 1358 swp_entry_t entry; 1359 1360 pgdp = pgd_offset(current->mm, addr); 1361 if (!pgd_present(*pgdp)) 1362 return 0; 1363 pudp = pud_offset(pgdp, addr); 1364 pud = *pudp; 1365 if (!pud_present(pud) || pud_large(pud)) 1366 return 0; 1367 pmdp = pmd_offset(pudp, addr); 1368 pmd = *pmdp; 1369 if (!pmd_present(pmd) || pmd_large(pmd)) 1370 return 0; 1371 ptep = pte_offset_map(pmdp, addr); 1372 pte = *ptep; 1373 pte_unmap(ptep); 1374 if (!is_swap_pte(pte)) 1375 return 0; 1376 entry = pte_to_swp_entry(pte); 1377 return is_hwpoison_entry(entry); 1378} 1379EXPORT_SYMBOL_GPL(is_hwpoison_address); 1380