/*
 * linux/mm/oom_kill.c
 *
 * Copyright (C) 1998,2000 Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 * Copyright (C) 2010 Google, Inc.
 *	Rewritten by David Rientjes
 *
 * The routines in this file are used to kill a process when
 * we're seriously out of memory. This gets called from __alloc_pages()
 * in mm/page_alloc.c when we really run out of memory.
 *
 * Since we won't call these routines often (on a well-configured
 * machine) this file will double as a 'coding guide' and a signpost
 * for newbie kernel hackers. It features several pointers to major
 * kernel subsystems and hints as to where to find out what things do.
 */

#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
static DEFINE_SPINLOCK(zone_scan_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        struct task_struct *start = tsk;

        do {
                if (mask) {
                        /*
                         * If this is a mempolicy constrained oom, tsk's
                         * cpuset is irrelevant.  Only return true if its
                         * mempolicy intersects current, otherwise it may be
                         * needlessly killed.
                         */
                        if (mempolicy_nodemask_intersects(tsk, mask))
                                return true;
                } else {
                        /*
                         * This is not a mempolicy constrained oom, so only
                         * check the mems of tsk's cpuset.
                         */
                        if (cpuset_mems_allowed_intersects(current, tsk))
                                return true;
                }
        } while_each_thread(start, tsk);

        return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
                                        const nodemask_t *mask)
{
        return true;
}
#endif /* CONFIG_NUMA */

/*
 * If this is a system OOM (not a memcg OOM) and the task selected to be
 * killed is not already running at high (RT) priorities, speed up the
 * recovery by boosting the dying task to the lowest FIFO priority.
 * That helps with the recovery and avoids interfering with RT tasks.
 */
static void boost_dying_task_prio(struct task_struct *p,
                                  struct mem_cgroup *mem)
{
        struct sched_param param = { .sched_priority = 1 };

        if (mem)
                return;

        if (!rt_task(p))
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
        struct task_struct *t = p;

        do {
                task_lock(t);
                if (likely(t->mm))
                        return t;
                task_unlock(t);
        } while_each_thread(p, t);

        return NULL;
}

/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
                const const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
        if (is_global_init(p))
                return true;
        if (p->flags & PF_KTHREAD)
                return true;

        /* When mem_cgroup_out_of_memory() and p is not member of the group */
        if (mem && !task_in_mem_cgroup(p, mem))
                return true;

        /* p may not have freeable memory in nodemask */
        if (!has_intersects_mems_allowed(p, nodemask))
                return true;

        return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @mem: target memory controller for a memcg oom, or NULL for a system oom
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                         const nodemask_t *nodemask, unsigned long totalpages)
{
        int points;

        if (oom_unkillable_task(p, mem, nodemask))
                return 0;

        p = find_lock_task_mm(p);
        if (!p)
                return 0;

        /*
         * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't
         * need to be executed for something that cannot be killed.
         */
        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
                task_unlock(p);
                return 0;
        }

        /*
         * When the PF_OOM_ORIGIN bit is set, it indicates the task should have
         * priority for oom killing.
         */
        if (p->flags & PF_OOM_ORIGIN) {
                task_unlock(p);
                return 1000;
        }

        /*
         * The memory controller may have a limit of 0 bytes, so avoid a divide
         * by zero, if necessary.
         */
        if (!totalpages)
                totalpages = 1;

        /*
         * The baseline for the badness score is the proportion of RAM that each
         * task's rss and swap space use.
         */
        points = (get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS)) * 1000 /
                        totalpages;
        task_unlock(p);

        /*
         * Root processes get 3% bonus, just like the __vm_enough_memory()
         * implementation used by LSMs.
         */
        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
                points -= 30;

        /*
         * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
         * either completely disable oom killing or always prefer a certain
         * task.
         */
        points += p->signal->oom_score_adj;

        /*
         * Never return 0 for an eligible task that may be killed since it's
         * possible that no single user task uses more than 0.1% of memory and
         * no single admin task uses more than 3.0%.
         */
        if (points <= 0)
                return 1;
        return (points < 1000) ? points : 1000;
}

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                gfp_t gfp_mask, nodemask_t *nodemask,
                                unsigned long *totalpages)
{
        struct zone *zone;
        struct zoneref *z;
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        bool cpuset_limited = false;
        int nid;

        /* Default to all available memory */
        *totalpages = totalram_pages + total_swap_pages;

        if (!zonelist)
                return CONSTRAINT_NONE;
        /*
         * Reach here only when __GFP_NOFAIL is used.  So we should avoid
         * killing current; we have to kill some random task in this case.
         * Hopefully this would be CONSTRAINT_THISNODE, but there is no way
         * to handle it yet.
         */
        if (gfp_mask & __GFP_THISNODE)
                return CONSTRAINT_NONE;

        /*
         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
         * the page allocator means a mempolicy is in effect.  Cpuset policy
         * is enforced in get_page_from_freelist().
         */
        if (nodemask && !nodes_subset(node_states[N_HIGH_MEMORY], *nodemask)) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, *nodemask)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_MEMORY_POLICY;
        }

        /* Check whether this allocation failure is caused by the cpuset's wall function */
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        high_zoneidx, nodemask)
                if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
                        cpuset_limited = true;

        if (cpuset_limited) {
                *totalpages = total_swap_pages;
                for_each_node_mask(nid, cpuset_current_mems_allowed)
                        *totalpages += node_spanned_pages(nid);
                return CONSTRAINT_CPUSET;
        }
        return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                gfp_t gfp_mask, nodemask_t *nodemask,
                                unsigned long *totalpages)
{
        *totalpages = totalram_pages + total_swap_pages;
        return CONSTRAINT_NONE;
}
#endif

/*
 * Simple selection loop. We choose the process with the highest
 * number of 'points'. We expect the caller will lock the tasklist.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
                unsigned long totalpages, struct mem_cgroup *mem,
                const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *chosen = NULL;
        *ppoints = 0;

        for_each_process(p) {
                unsigned int points;

                if (oom_unkillable_task(p, mem, nodemask))
                        continue;

                /*
                 * This task already has access to memory reserves and is
                 * being killed. Don't allow any other task access to the
                 * memory reserve.
                 *
                 * Note: this may have a chance of deadlock if it gets
                 * blocked waiting for another task which itself is waiting
                 * for memory. Is there a better alternative?
                 */
                if (test_tsk_thread_flag(p, TIF_MEMDIE))
                        return ERR_PTR(-1UL);

                /*
                 * This is in the process of releasing memory so wait for it
                 * to finish before killing some other task by mistake.
                 *
                 * However, if p is the current task, we allow the 'kill' to
                 * go ahead if it is exiting: this will simply set TIF_MEMDIE,
                 * which will allow it to gain access to memory reserves in
                 * the process of exiting and releasing its resources.
                 * Otherwise we could get an easy OOM deadlock.
                 */
                if (thread_group_empty(p) && (p->flags & PF_EXITING) && p->mm) {
                        if (p != current)
                                return ERR_PTR(-1UL);

                        chosen = p;
                        *ppoints = 1000;
                }

                points = oom_badness(p, mem, nodemask, totalpages);
                if (points > *ppoints) {
                        chosen = p;
                        *ppoints = points;
                }
        }

        return chosen;
}

/**
 * dump_tasks - dump current memory state of all system tasks
 * @mem: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
 * value, oom_score_adj value, and name.
 *
 * Call with tasklist_lock read-locked.
 */
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
        struct task_struct *p;
        struct task_struct *task;

        pr_info("[ pid ]   uid  tgid total_vm      rss cpu oom_adj oom_score_adj name\n");
        for_each_process(p) {
                if (oom_unkillable_task(p, mem, nodemask))
                        continue;

                task = find_lock_task_mm(p);
                if (!task) {
                        /*
                         * This is a kthread or all of p's threads have already
                         * detached their mm's.  There's no need to report
                         * them; they can't be oom killed anyway.
                         */
                        continue;
                }

                pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n",
                        task->pid, task_uid(task), task->tgid,
                        task->mm->total_vm, get_mm_rss(task->mm),
                        task_cpu(task), task->signal->oom_adj,
                        task->signal->oom_score_adj, task->comm);
                task_unlock(task);
        }
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
                        struct mem_cgroup *mem, const nodemask_t *nodemask)
{
        task_lock(current);
        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
                "oom_adj=%d, oom_score_adj=%d\n",
                current->comm, gfp_mask, order, current->signal->oom_adj,
                current->signal->oom_score_adj);
        cpuset_print_task_mems_allowed(current);
        task_unlock(current);
        dump_stack();
        mem_cgroup_print_oom_info(mem, p);
        show_mem();
        if (sysctl_oom_dump_tasks)
                dump_tasks(mem, nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
{
        p = find_lock_task_mm(p);
        if (!p)
                return 1;

        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
                task_pid_nr(p), p->comm, K(p->mm->total_vm),
                K(get_mm_counter(p->mm, MM_ANONPAGES)),
                K(get_mm_counter(p->mm, MM_FILEPAGES)));
        task_unlock(p);

        set_tsk_thread_flag(p, TIF_MEMDIE);
        force_sig(SIGKILL, p);

        /*
         * We give our sacrificial lamb high priority and access to
         * all the memory it needs. That way it should be able to
         * exit() and clear out its resources quickly...
         */
        boost_dying_task_prio(p, mem);

        return 0;
}
#undef K

static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                            unsigned int points, unsigned long totalpages,
                            struct mem_cgroup *mem, nodemask_t *nodemask,
                            const char *message)
{
        struct task_struct *victim = p;
        struct task_struct *child;
        struct task_struct *t = p;
        unsigned int victim_points = 0;

        if (printk_ratelimit())
                dump_header(p, gfp_mask, order, mem, nodemask);

        /*
         * If the task is already exiting, don't alarm the sysadmin or kill
         * its children or threads, just set TIF_MEMDIE so it can die quickly
         */
        if (p->flags & PF_EXITING) {
                set_tsk_thread_flag(p, TIF_MEMDIE);
                boost_dying_task_prio(p, mem);
                return 0;
        }

        task_lock(p);
        pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
                message, task_pid_nr(p), p->comm, points);
        task_unlock(p);

        /*
         * If any of p's children has a different mm and is eligible for kill,
         * the one with the highest badness() score is sacrificed for its
         * parent.  This attempts to lose the minimal amount of work done while
         * still freeing memory.
         */
        do {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;

                        /*
                         * oom_badness() returns 0 if the thread is unkillable
                         */
                        child_points = oom_badness(child, mem, nodemask,
                                                        totalpages);
                        if (child_points > victim_points) {
                                victim = child;
                                victim_points = child_points;
                        }
                }
        } while_each_thread(p, t);

        return oom_kill_task(victim, mem);
}

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                                int order, const nodemask_t *nodemask)
{
        if (likely(!sysctl_panic_on_oom))
                return;
        if (sysctl_panic_on_oom != 2) {
                /*
                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
                 * does not panic for cpuset, mempolicy, or memcg allocation
                 * failures.
                 */
                if (constraint != CONSTRAINT_NONE)
                        return;
        }
        read_lock(&tasklist_lock);
        dump_header(NULL, gfp_mask, order, NULL, nodemask);
        read_unlock(&tasklist_lock);
        panic("Out of memory: %s panic_on_oom is enabled\n",
                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
"compulsory" : "system-wide"); 506} 507 508#ifdef CONFIG_CGROUP_MEM_RES_CTLR 509void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask) 510{ 511 unsigned long limit; 512 unsigned int points = 0; 513 struct task_struct *p; 514 515 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL); 516 limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT; 517 read_lock(&tasklist_lock); 518retry: 519 p = select_bad_process(&points, limit, mem, NULL); 520 if (!p || PTR_ERR(p) == -1UL) 521 goto out; 522 523 if (oom_kill_process(p, gfp_mask, 0, points, limit, mem, NULL, 524 "Memory cgroup out of memory")) 525 goto retry; 526out: 527 read_unlock(&tasklist_lock); 528} 529#endif 530 531static BLOCKING_NOTIFIER_HEAD(oom_notify_list); 532 533int register_oom_notifier(struct notifier_block *nb) 534{ 535 return blocking_notifier_chain_register(&oom_notify_list, nb); 536} 537EXPORT_SYMBOL_GPL(register_oom_notifier); 538 539int unregister_oom_notifier(struct notifier_block *nb) 540{ 541 return blocking_notifier_chain_unregister(&oom_notify_list, nb); 542} 543EXPORT_SYMBOL_GPL(unregister_oom_notifier); 544 545/* 546 * Try to acquire the OOM killer lock for the zones in zonelist. Returns zero 547 * if a parallel OOM killing is already taking place that includes a zone in 548 * the zonelist. Otherwise, locks all zones in the zonelist and returns 1. 549 */ 550int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) 551{ 552 struct zoneref *z; 553 struct zone *zone; 554 int ret = 1; 555 556 spin_lock(&zone_scan_lock); 557 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { 558 if (zone_is_oom_locked(zone)) { 559 ret = 0; 560 goto out; 561 } 562 } 563 564 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { 565 /* 566 * Lock each zone in the zonelist under zone_scan_lock so a 567 * parallel invocation of try_set_zonelist_oom() doesn't succeed 568 * when it shouldn't. 569 */ 570 zone_set_flag(zone, ZONE_OOM_LOCKED); 571 } 572 573out: 574 spin_unlock(&zone_scan_lock); 575 return ret; 576} 577 578/* 579 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed 580 * allocation attempts with zonelists containing them may now recall the OOM 581 * killer, if necessary. 582 */ 583void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) 584{ 585 struct zoneref *z; 586 struct zone *zone; 587 588 spin_lock(&zone_scan_lock); 589 for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { 590 zone_clear_flag(zone, ZONE_OOM_LOCKED); 591 } 592 spin_unlock(&zone_scan_lock); 593} 594 595/* 596 * Try to acquire the oom killer lock for all system zones. Returns zero if a 597 * parallel oom killing is taking place, otherwise locks all zones and returns 598 * non-zero. 599 */ 600static int try_set_system_oom(void) 601{ 602 struct zone *zone; 603 int ret = 1; 604 605 spin_lock(&zone_scan_lock); 606 for_each_populated_zone(zone) 607 if (zone_is_oom_locked(zone)) { 608 ret = 0; 609 goto out; 610 } 611 for_each_populated_zone(zone) 612 zone_set_flag(zone, ZONE_OOM_LOCKED); 613out: 614 spin_unlock(&zone_scan_lock); 615 return ret; 616} 617 618/* 619 * Clears ZONE_OOM_LOCKED for all system zones so that failed allocation 620 * attempts or page faults may now recall the oom killer, if necessary. 
 */
static void clear_system_oom(void)
{
        struct zone *zone;

        spin_lock(&zone_scan_lock);
        for_each_populated_zone(zone)
                zone_clear_flag(zone, ZONE_OOM_LOCKED);
        spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *nodemask)
{
        const nodemask_t *mpol_mask;
        struct task_struct *p;
        unsigned long totalpages;
        unsigned long freed = 0;
        unsigned int points;
        enum oom_constraint constraint = CONSTRAINT_NONE;
        int killed = 0;

        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
        if (freed > 0)
                /* Got some memory back in the last second. */
                return;

        /*
         * If current has a pending SIGKILL, then automatically select it.  The
         * goal is to allow it to allocate so that it may quickly exit and free
         * its memory.
         */
        if (fatal_signal_pending(current)) {
                set_thread_flag(TIF_MEMDIE);
                boost_dying_task_prio(current, NULL);
                return;
        }

        /*
         * Check if there were limitations on the allocation (only relevant for
         * NUMA) that may require different handling.
         */
        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
                                                &totalpages);
        mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
        check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

        read_lock(&tasklist_lock);
        if (sysctl_oom_kill_allocating_task &&
            !oom_unkillable_task(current, NULL, nodemask) &&
            (current->signal->oom_adj != OOM_DISABLE)) {
                /*
                 * oom_kill_process() needs tasklist_lock held.  If it returns
                 * non-zero, current could not be killed so we must fall back
                 * to the tasklist scan.
                 */
                if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
                                NULL, nodemask,
                                "Out of memory (oom_kill_allocating_task)"))
                        goto out;
        }

retry:
        p = select_bad_process(&points, totalpages, NULL, mpol_mask);
        if (PTR_ERR(p) == -1UL)
                goto out;

        /* Found nothing?!?! Either we hang forever, or we panic. */
        if (!p) {
                dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
                read_unlock(&tasklist_lock);
                panic("Out of memory and no killable processes...\n");
        }

        if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
                                nodemask, "Out of memory"))
                goto retry;
        killed = 1;
out:
        read_unlock(&tasklist_lock);

        /*
         * Give "p" a good chance of killing itself before we
         * retry the allocation, unless "p" is current.
         */
        if (killed && !test_thread_flag(TIF_MEMDIE))
                schedule_timeout_uninterruptible(1);
}

/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If a populated zone has ZONE_OOM_LOCKED set, a parallel
 * oom killing is already in progress so do nothing.  If a task is found with
 * TIF_MEMDIE set, it has been killed so do nothing and allow it to exit.
 */
void pagefault_out_of_memory(void)
{
        if (try_set_system_oom()) {
                out_of_memory(NULL, 0, 0, NULL);
                clear_system_oom();
        }
        if (!test_thread_flag(TIF_MEMDIE))
                schedule_timeout_uninterruptible(1);
}