linux/mm/oom_kill.c
   1/*
   2 *  linux/mm/oom_kill.c
   3 * 
   4 *  Copyright (C)  1998,2000  Rik van Riel
   5 *      Thanks go out to Claus Fischer for some serious inspiration and
   6 *      for goading me into coding this file...
   7 *  Copyright (C)  2010  Google, Inc.
   8 *      Rewritten by David Rientjes
   9 *
  10 *  The routines in this file are used to kill a process when
  11 *  we're seriously out of memory. This gets called from __alloc_pages()
  12 *  in mm/page_alloc.c when we really run out of memory.
  13 *
  14 *  Since we won't call these routines often (on a well-configured
  15 *  machine) this file will double as a 'coding guide' and a signpost
  16 *  for newbie kernel hackers. It features several pointers to major
  17 *  kernel subsystems and hints as to where to find out what things do.
  18 */
  19
  20#include <linux/oom.h>
  21#include <linux/mm.h>
  22#include <linux/err.h>
  23#include <linux/gfp.h>
  24#include <linux/sched.h>
  25#include <linux/swap.h>
  26#include <linux/timex.h>
  27#include <linux/jiffies.h>
  28#include <linux/cpuset.h>
  29#include <linux/export.h>
  30#include <linux/notifier.h>
  31#include <linux/memcontrol.h>
  32#include <linux/mempolicy.h>
  33#include <linux/security.h>
  34#include <linux/ptrace.h>
  35#include <linux/freezer.h>
  36#include <linux/ftrace.h>
  37#include <linux/ratelimit.h>
  38
  39#define CREATE_TRACE_POINTS
  40#include <trace/events/oom.h>
  41
  42int sysctl_panic_on_oom;
  43int sysctl_oom_kill_allocating_task;
  44int sysctl_oom_dump_tasks = 1;
  45static DEFINE_SPINLOCK(zone_scan_lock);
  46
  47#ifdef CONFIG_NUMA
  48/**
  49 * has_intersects_mems_allowed() - check task eligibility for kill
  50 * @start: task struct of the task to consider
  51 * @mask: nodemask passed to page allocator for mempolicy ooms
  52 *
  53 * Task eligibility is determined by whether or not a candidate task, @tsk,
  54 * shares the same mempolicy nodes as current if it is bound by such a policy
  55 * and whether or not it has the same set of allowed cpuset nodes.
  56 */
  57static bool has_intersects_mems_allowed(struct task_struct *start,
  58                                        const nodemask_t *mask)
  59{
  60        struct task_struct *tsk;
  61        bool ret = false;
  62
  63        rcu_read_lock();
  64        for_each_thread(start, tsk) {
  65                if (mask) {
  66                        /*
  67                         * If this is a mempolicy constrained oom, tsk's
  68                         * cpuset is irrelevant.  Only return true if its
  69                         * mempolicy intersects current, otherwise it may be
  70                         * needlessly killed.
  71                         */
  72                        ret = mempolicy_nodemask_intersects(tsk, mask);
  73                } else {
  74                        /*
  75                         * This is not a mempolicy constrained oom, so only
  76                         * check the mems of tsk's cpuset.
  77                         */
  78                        ret = cpuset_mems_allowed_intersects(current, tsk);
  79                }
  80                if (ret)
  81                        break;
  82        }
  83        rcu_read_unlock();
  84
  85        return ret;
  86}
  87#else
  88static bool has_intersects_mems_allowed(struct task_struct *tsk,
  89                                        const nodemask_t *mask)
  90{
  91        return true;
  92}
  93#endif /* CONFIG_NUMA */
  94
  95/*
  96 * The process p may have detached its own ->mm while exiting or through
  97 * use_mm(), but one or more of its subthreads may still have a valid
  98 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
  99 * task_lock() held.
 100 */
 101struct task_struct *find_lock_task_mm(struct task_struct *p)
 102{
 103        struct task_struct *t;
 104
 105        rcu_read_lock();
 106
 107        for_each_thread(p, t) {
 108                task_lock(t);
 109                if (likely(t->mm))
 110                        goto found;
 111                task_unlock(t);
 112        }
 113        t = NULL;
 114found:
 115        rcu_read_unlock();
 116
 117        return t;
 118}
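/*
 * Illustrative sketch (not part of the original file): the typical caller
 * pattern for find_lock_task_mm().  The thread returned, if any, has
 * task_lock() held, so its ->mm can be inspected safely and the lock must
 * be dropped afterwards.  example_task_rss() is a hypothetical helper used
 * only for illustration.
 */
static unsigned long example_task_rss(struct task_struct *p)
{
        struct task_struct *t;
        unsigned long rss = 0;

        t = find_lock_task_mm(p);
        if (t) {
                rss = get_mm_rss(t->mm);
                task_unlock(t);
        }
        return rss;
}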
 119
 120/* return true if the task is not adequate as candidate victim task. */
 121static bool oom_unkillable_task(struct task_struct *p,
 122                const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 123{
 124        if (is_global_init(p))
 125                return true;
 126        if (p->flags & PF_KTHREAD)
 127                return true;
 128
 129        /* When called from mem_cgroup_out_of_memory() and p is not a member of the memcg */
 130        if (memcg && !task_in_mem_cgroup(p, memcg))
 131                return true;
 132
 133        /* p may not have freeable memory in nodemask */
 134        if (!has_intersects_mems_allowed(p, nodemask))
 135                return true;
 136
 137        return false;
 138}
 139
 140/**
 141 * oom_badness - heuristic function to determine which candidate task to kill
 142 * @p: task struct of the task whose badness score we calculate
 143 * @totalpages: total present RAM allowed for page allocation
 144 *
 145 * The heuristic for determining which task to kill is made to be as simple and
 146 * predictable as possible.  The goal is to return the highest value for the
 147 * task consuming the most memory to avoid subsequent oom failures.
 148 */
 149unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 150                          const nodemask_t *nodemask, unsigned long totalpages)
 151{
 152        long points;
 153        long adj;
 154
 155        if (oom_unkillable_task(p, memcg, nodemask))
 156                return 0;
 157
 158        p = find_lock_task_mm(p);
 159        if (!p)
 160                return 0;
 161
 162        adj = (long)p->signal->oom_score_adj;
 163        if (adj == OOM_SCORE_ADJ_MIN) {
 164                task_unlock(p);
 165                return 0;
 166        }
 167
 168        /*
 169         * The baseline for the badness score is the proportion of RAM that each
 170         * task's rss, pagetable and swap space use.
 171         */
 172        points = get_mm_rss(p->mm) + atomic_long_read(&p->mm->nr_ptes) +
 173                 get_mm_counter(p->mm, MM_SWAPENTS);
 174        task_unlock(p);
 175
 176        /*
 177         * Root processes get a 3% bonus, just like the __vm_enough_memory()
 178         * implementation used by LSMs.
 179         */
 180        if (has_capability_noaudit(p, CAP_SYS_ADMIN))
 181                points -= (points * 3) / 100;
 182
 183        /* Normalize to oom_score_adj units */
 184        adj *= totalpages / 1000;
 185        points += adj;
 186
 187        /*
 188         * Never return 0 for an eligible task regardless of the root bonus and
 189         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
 190         */
 191        return points > 0 ? points : 1;
 192}
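/*
 * Worked example with hypothetical numbers: on a machine where
 * totalpages = 1,000,000 (RAM plus swap), a task whose rss, page tables
 * and swap entries add up to 100,000 pages starts at points = 100000.
 * If it runs as root it loses 3%, leaving 97000.  With oom_score_adj = 500
 * the adjustment becomes 500 * (1000000 / 1000) = 500000, so oom_badness()
 * returns 597000 -- roughly "10% of memory used plus a 50% penalty"
 * expressed in pages.
 */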
 193
 194/*
 195 * Determine the type of allocation constraint.
 196 */
 197#ifdef CONFIG_NUMA
 198static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 199                                gfp_t gfp_mask, nodemask_t *nodemask,
 200                                unsigned long *totalpages)
 201{
 202        struct zone *zone;
 203        struct zoneref *z;
 204        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 205        bool cpuset_limited = false;
 206        int nid;
 207
 208        /* Default to all available memory */
 209        *totalpages = totalram_pages + total_swap_pages;
 210
 211        if (!zonelist)
 212                return CONSTRAINT_NONE;
 213        /*
 214         * We reach here only when __GFP_NOFAIL is used, so we should avoid
 215         * killing current; a more or less random task has to be killed instead.
 216         * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle that for now.
 217         */
 218        if (gfp_mask & __GFP_THISNODE)
 219                return CONSTRAINT_NONE;
 220
 221        /*
 222         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
 223         * the page allocator means a mempolicy is in effect.  Cpuset policy
 224         * is enforced in get_page_from_freelist().
 225         */
 226        if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
 227                *totalpages = total_swap_pages;
 228                for_each_node_mask(nid, *nodemask)
 229                        *totalpages += node_spanned_pages(nid);
 230                return CONSTRAINT_MEMORY_POLICY;
 231        }
 232
 233        /* Check whether this allocation failure is caused by cpuset's wall function */
 234        for_each_zone_zonelist_nodemask(zone, z, zonelist,
 235                        high_zoneidx, nodemask)
 236                if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 237                        cpuset_limited = true;
 238
 239        if (cpuset_limited) {
 240                *totalpages = total_swap_pages;
 241                for_each_node_mask(nid, cpuset_current_mems_allowed)
 242                        *totalpages += node_spanned_pages(nid);
 243                return CONSTRAINT_CPUSET;
 244        }
 245        return CONSTRAINT_NONE;
 246}
 247#else
 248static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
 249                                gfp_t gfp_mask, nodemask_t *nodemask,
 250                                unsigned long *totalpages)
 251{
 252        *totalpages = totalram_pages + total_swap_pages;
 253        return CONSTRAINT_NONE;
 254}
 255#endif
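/*
 * Illustration with hypothetical numbers: for a mempolicy-constrained
 * allocation whose nodemask covers only node 1 with 2,000,000 spanned
 * pages on a system with 500,000 pages of swap, constrained_alloc()
 * reports CONSTRAINT_MEMORY_POLICY and sets *totalpages = 2,500,000.
 * Without any nodemask or cpuset restriction it stays at
 * totalram_pages + total_swap_pages and returns CONSTRAINT_NONE.
 */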
 256
 257enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
 258                unsigned long totalpages, const nodemask_t *nodemask,
 259                bool force_kill)
 260{
 261        if (task->exit_state)
 262                return OOM_SCAN_CONTINUE;
 263        if (oom_unkillable_task(task, NULL, nodemask))
 264                return OOM_SCAN_CONTINUE;
 265
 266        /*
 267         * This task already has access to memory reserves and is being killed.
 268         * Don't allow any other task to have access to the reserves.
 269         */
 270        if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
 271                if (unlikely(frozen(task)))
 272                        __thaw_task(task);
 273                if (!force_kill)
 274                        return OOM_SCAN_ABORT;
 275        }
 276        if (!task->mm)
 277                return OOM_SCAN_CONTINUE;
 278
 279        /*
 280         * If task is allocating a lot of memory and has been marked to be
 281         * killed first if it triggers an oom, then select it.
 282         */
 283        if (oom_task_origin(task))
 284                return OOM_SCAN_SELECT;
 285
 286        if (task->flags & PF_EXITING && !force_kill) {
 287                /*
 288                 * If this task is not being ptraced on exit, then wait for it
 289                 * to finish before killing some other task unnecessarily.
 290                 */
 291                if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
 292                        return OOM_SCAN_ABORT;
 293        }
 294        return OOM_SCAN_OK;
 295}
 296
 297/*
 298 * Simple selection loop. We choose the process with the highest
 299 * number of 'points'.
 300 *
 301 * (not docbooked, we don't want this one cluttering up the manual)
 302 */
 303static struct task_struct *select_bad_process(unsigned int *ppoints,
 304                unsigned long totalpages, const nodemask_t *nodemask,
 305                bool force_kill)
 306{
 307        struct task_struct *g, *p;
 308        struct task_struct *chosen = NULL;
 309        unsigned long chosen_points = 0;
 310
 311        rcu_read_lock();
 312        for_each_process_thread(g, p) {
 313                unsigned int points;
 314
 315                switch (oom_scan_process_thread(p, totalpages, nodemask,
 316                                                force_kill)) {
 317                case OOM_SCAN_SELECT:
 318                        chosen = p;
 319                        chosen_points = ULONG_MAX;
 320                        /* fall through */
 321                case OOM_SCAN_CONTINUE:
 322                        continue;
 323                case OOM_SCAN_ABORT:
 324                        rcu_read_unlock();
 325                        return ERR_PTR(-1UL);
 326                case OOM_SCAN_OK:
 327                        break;
 328                }
 329                points = oom_badness(p, NULL, nodemask, totalpages);
 330                if (points > chosen_points) {
 331                        chosen = p;
 332                        chosen_points = points;
 333                }
 334        }
 335        if (chosen)
 336                get_task_struct(chosen);
 337        rcu_read_unlock();
 338
 339        *ppoints = chosen_points * 1000 / totalpages;
 340        return chosen;
 341}
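/*
 * Note on the normalization above: continuing the worked example from
 * oom_badness(), a chosen_points of 597000 with totalpages = 1,000,000 is
 * reported back as *ppoints = 597, i.e. the victim's score in thousandths
 * of the available memory.
 */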
 342
 343/**
 344 * dump_tasks - dump current memory state of all system tasks
 345 * @memcg: current's memory controller, if constrained
 346 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 347 *
 348 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 349 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 350 * are not shown.
 351 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 352 * swapents, oom_score_adj value, and name.
 353 */
 354static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
 355{
 356        struct task_struct *p;
 357        struct task_struct *task;
 358
 359        pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name\n");
 360        rcu_read_lock();
 361        for_each_process(p) {
 362                if (oom_unkillable_task(p, memcg, nodemask))
 363                        continue;
 364
 365                task = find_lock_task_mm(p);
 366                if (!task) {
 367                        /*
 368                         * This is a kthread or all of p's threads have already
 369                         * detached their mm's.  There's no need to report
 370                         * them; they can't be oom killed anyway.
 371                         */
 372                        continue;
 373                }
 374
 375                pr_info("[%5d] %5d %5d %8lu %8lu %7ld %8lu         %5hd %s\n",
 376                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
 377                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
 378                        atomic_long_read(&task->mm->nr_ptes),
 379                        get_mm_counter(task->mm, MM_SWAPENTS),
 380                        task->signal->oom_score_adj, task->comm);
 381                task_unlock(task);
 382        }
 383        rcu_read_unlock();
 384}
 385
 386static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 387                        struct mem_cgroup *memcg, const nodemask_t *nodemask)
 388{
 389        task_lock(current);
 390        pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
 391                "oom_score_adj=%hd\n",
 392                current->comm, gfp_mask, order,
 393                current->signal->oom_score_adj);
 394        cpuset_print_task_mems_allowed(current);
 395        task_unlock(current);
 396        dump_stack();
 397        if (memcg)
 398                mem_cgroup_print_oom_info(memcg, p);
 399        else
 400                show_mem(SHOW_MEM_FILTER_NODES);
 401        if (sysctl_oom_dump_tasks)
 402                dump_tasks(memcg, nodemask);
 403}
 404
 405#define K(x) ((x) << (PAGE_SHIFT-10))
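/*
 * K() converts a page count to kilobytes: with 4 KiB pages (PAGE_SHIFT = 12)
 * K(x) is x << 2, so e.g. K(100) = 400 kB.
 */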
 406/*
 407 * Must be called while holding a reference to p, which will be released upon
 408 * returning.
 409 */
 410void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 411                      unsigned int points, unsigned long totalpages,
 412                      struct mem_cgroup *memcg, nodemask_t *nodemask,
 413                      const char *message)
 414{
 415        struct task_struct *victim = p;
 416        struct task_struct *child;
 417        struct task_struct *t;
 418        struct mm_struct *mm;
 419        unsigned int victim_points = 0;
 420        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 421                                              DEFAULT_RATELIMIT_BURST);
 422
 423        /*
 424         * If the task is already exiting, don't alarm the sysadmin or kill
 425         * its children or threads, just set TIF_MEMDIE so it can die quickly
 426         */
 427        if (p->flags & PF_EXITING) {
 428                set_tsk_thread_flag(p, TIF_MEMDIE);
 429                put_task_struct(p);
 430                return;
 431        }
 432
 433        if (__ratelimit(&oom_rs))
 434                dump_header(p, gfp_mask, order, memcg, nodemask);
 435
 436        task_lock(p);
 437        pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
 438                message, task_pid_nr(p), p->comm, points);
 439        task_unlock(p);
 440
 441        /*
 442         * If any of p's children has a different mm and is eligible for kill,
 443         * the one with the highest oom_badness() score is sacrificed for its
 444         * parent.  This attempts to lose the minimal amount of work done while
 445         * still freeing memory.
 446         */
 447        read_lock(&tasklist_lock);
 448        for_each_thread(p, t) {
 449                list_for_each_entry(child, &t->children, sibling) {
 450                        unsigned int child_points;
 451
 452                        if (child->mm == p->mm)
 453                                continue;
 454                        /*
 455                         * oom_badness() returns 0 if the thread is unkillable
 456                         */
 457                        child_points = oom_badness(child, memcg, nodemask,
 458                                                                totalpages);
 459                        if (child_points > victim_points) {
 460                                put_task_struct(victim);
 461                                victim = child;
 462                                victim_points = child_points;
 463                                get_task_struct(victim);
 464                        }
 465                }
 466        }
 467        read_unlock(&tasklist_lock);
 468
 469        p = find_lock_task_mm(victim);
 470        if (!p) {
 471                put_task_struct(victim);
 472                return;
 473        } else if (victim != p) {
 474                get_task_struct(p);
 475                put_task_struct(victim);
 476                victim = p;
 477        }
 478
 479        /* mm cannot safely be dereferenced after task_unlock(victim) */
 480        mm = victim->mm;
 481        pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 482                task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
 483                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 484                K(get_mm_counter(victim->mm, MM_FILEPAGES)),
 485                K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
 486        task_unlock(victim);
 487
 488        /*
 489         * Kill all user processes sharing victim->mm in other thread groups, if
 490         * any.  They don't get access to memory reserves, though, to avoid
 491         * depletion of all memory.  This prevents mm->mmap_sem livelock when an
 492         * oom killed thread cannot exit because it requires the semaphore and
 493 * it is contended by another thread trying to allocate memory itself.
 494         * That thread will now get access to memory reserves since it has a
 495         * pending fatal signal.
 496         */
 497        rcu_read_lock();
 498        for_each_process(p)
 499                if (p->mm == mm && !same_thread_group(p, victim) &&
 500                    !(p->flags & PF_KTHREAD)) {
 501                        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
 502                                continue;
 503
 504                        task_lock(p);   /* Protect ->comm from prctl() */
 505                        pr_err("Kill process %d (%s) sharing same memory\n",
 506                                task_pid_nr(p), p->comm);
 507                        task_unlock(p);
 508                        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
 509                }
 510        rcu_read_unlock();
 511
 512        set_tsk_thread_flag(victim, TIF_MEMDIE);
 513        do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
 514        put_task_struct(victim);
 515}
 516#undef K
 517
 518/*
 519 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 520 */
 521void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 522                        int order, const nodemask_t *nodemask)
 523{
 524        if (likely(!sysctl_panic_on_oom))
 525                return;
 526        if (sysctl_panic_on_oom != 2) {
 527                /*
 528                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
 529                 * does not panic for cpuset, mempolicy, or memcg allocation
 530                 * failures.
 531                 */
 532                if (constraint != CONSTRAINT_NONE)
 533                        return;
 534        }
 535        dump_header(NULL, gfp_mask, order, NULL, nodemask);
 536        panic("Out of memory: %s panic_on_oom is enabled\n",
 537                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
 538}
 539
 540static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
 541
 542int register_oom_notifier(struct notifier_block *nb)
 543{
 544        return blocking_notifier_chain_register(&oom_notify_list, nb);
 545}
 546EXPORT_SYMBOL_GPL(register_oom_notifier);
 547
 548int unregister_oom_notifier(struct notifier_block *nb)
 549{
 550        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
 551}
 552EXPORT_SYMBOL_GPL(unregister_oom_notifier);
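/*
 * Illustrative sketch (not part of this file): how another kernel component
 * might hook the oom_notify_list declared above.  The callback runs from
 * out_of_memory() before a victim is chosen; any pages it can release are
 * added to the count passed through the void * argument, and a non-zero
 * total makes out_of_memory() return without killing anything.  The freed
 * count added here is only a placeholder.
 */
static int example_oom_notify(struct notifier_block *nb,
                              unsigned long action, void *arg)
{
        unsigned long *freed = arg;

        /* Drop reclaimable private caches here and add the page count. */
        *freed += 0;
        return NOTIFY_OK;
}

static struct notifier_block example_oom_nb = {
        .notifier_call = example_oom_notify,
};

/* e.g. from an init path: register_oom_notifier(&example_oom_nb); */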
 553
 554/*
 555 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 556 * if a parallel OOM killing is already taking place that includes a zone in
 557 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 558 */
 559int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 560{
 561        struct zoneref *z;
 562        struct zone *zone;
 563        int ret = 1;
 564
 565        spin_lock(&zone_scan_lock);
 566        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 567                if (zone_is_oom_locked(zone)) {
 568                        ret = 0;
 569                        goto out;
 570                }
 571        }
 572
 573        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 574                /*
 575                 * Lock each zone in the zonelist under zone_scan_lock so a
 576                 * parallel invocation of try_set_zonelist_oom() doesn't succeed
 577                 * when it shouldn't.
 578                 */
 579                zone_set_flag(zone, ZONE_OOM_LOCKED);
 580        }
 581
 582out:
 583        spin_unlock(&zone_scan_lock);
 584        return ret;
 585}
 586
 587/*
 588 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 589 * allocation attempts with zonelists containing them may now invoke the
 590 * OOM killer again, if necessary.
 591 */
 592void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 593{
 594        struct zoneref *z;
 595        struct zone *zone;
 596
 597        spin_lock(&zone_scan_lock);
 598        for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 599                zone_clear_flag(zone, ZONE_OOM_LOCKED);
 600        }
 601        spin_unlock(&zone_scan_lock);
 602}
 603
 604/**
 605 * out_of_memory - kill the "best" process when we run out of memory
 606 * @zonelist: zonelist pointer
 607 * @gfp_mask: memory allocation flags
 608 * @order: amount of memory being requested as a power of 2
 609 * @nodemask: nodemask passed to page allocator
 610 * @force_kill: true if a task must be killed, even if others are exiting
 611 *
 612 * If we run out of memory, we have the choice between killing a
 613 * random task (bad), letting the system crash (worse), or trying
 614 * to be smart about which process to kill. Note that we don't
 615 * have to be perfect here, we just have to be good.
 616 */
 617void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 618                int order, nodemask_t *nodemask, bool force_kill)
 619{
 620        const nodemask_t *mpol_mask;
 621        struct task_struct *p;
 622        unsigned long totalpages;
 623        unsigned long freed = 0;
 624        unsigned int uninitialized_var(points);
 625        enum oom_constraint constraint = CONSTRAINT_NONE;
 626        int killed = 0;
 627
 628        blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
 629        if (freed > 0)
 630                /* Got some memory back in the last second. */
 631                return;
 632
 633        /*
 634         * If current has a pending SIGKILL or is exiting, then automatically
 635         * select it.  The goal is to allow it to allocate so that it may
 636         * quickly exit and free its memory.
 637         */
 638        if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
 639                set_thread_flag(TIF_MEMDIE);
 640                return;
 641        }
 642
 643        /*
 644         * Check if there were limitations on the allocation (only relevant for
 645         * NUMA) that may require different handling.
 646         */
 647        constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
 648                                                &totalpages);
 649        mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
 650        check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
 651
 652        if (sysctl_oom_kill_allocating_task && current->mm &&
 653            !oom_unkillable_task(current, NULL, nodemask) &&
 654            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
 655                get_task_struct(current);
 656                oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
 657                                 nodemask,
 658                                 "Out of memory (oom_kill_allocating_task)");
 659                goto out;
 660        }
 661
 662        p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
 663        /* Found nothing?!?! Either we hang forever, or we panic. */
 664        if (!p) {
 665                dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
 666                panic("Out of memory and no killable processes...\n");
 667        }
 668        if (PTR_ERR(p) != -1UL) {
 669                oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
 670                                 nodemask, "Out of memory");
 671                killed = 1;
 672        }
 673out:
 674        /*
 675         * Give the killed threads a good chance of exiting before trying to
 676         * allocate memory again.
 677         */
 678        if (killed)
 679                schedule_timeout_killable(1);
 680}
 681
 682/*
 683 * The pagefault handler calls here because it is out of memory, so kill a
 684 * memory-hogging task.  If any populated zone has ZONE_OOM_LOCKED set, a
 685 * parallel oom killing is already in progress so do nothing.
 686 */
 687void pagefault_out_of_memory(void)
 688{
 689        struct zonelist *zonelist;
 690
 691        if (mem_cgroup_oom_synchronize(true))
 692                return;
 693
 694        zonelist = node_zonelist(first_memory_node, GFP_KERNEL);
 695        if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
 696                out_of_memory(NULL, 0, 0, NULL, false);
 697                clear_zonelist_oom(zonelist, GFP_KERNEL);
 698        }
 699}
 700
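/*
 * Userspace illustration (not part of the kernel sources): a process can
 * tune its own exposure to the OOM killer by writing to
 * /proc/self/oom_score_adj, which is the oom_score_adj value read by
 * oom_badness() above.  Valid values range from OOM_SCORE_ADJ_MIN (-1000,
 * never kill) to 1000 (preferred victim).  A minimal sketch:
 */
#include <stdio.h>

static int example_set_oom_score_adj(int adj)
{
        FILE *f = fopen("/proc/self/oom_score_adj", "w");

        if (!f)
                return -1;
        fprintf(f, "%d\n", adj);
        return fclose(f);
}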