   1/*
   2 *  linux/mm/oom_kill.c
   3 * 
   4 *  Copyright (C)  1998,2000  Rik van Riel
   5 *      Thanks go out to Claus Fischer for some serious inspiration and
   6 *      for goading me into coding this file...
   7 *  Copyright (C)  2010  Google, Inc.
   8 *      Rewritten by David Rientjes
   9 *
  10 *  The routines in this file are used to kill a process when
  11 *  we're seriously out of memory. This gets called from __alloc_pages()
  12 *  in mm/page_alloc.c when we really run out of memory.
  13 *
  14 *  Since we won't call these routines often (on a well-configured
  15 *  machine) this file will double as a 'coding guide' and a signpost
  16 *  for newbie kernel hackers. It features several pointers to major
  17 *  kernel subsystems and hints as to where to find out what things do.
  18 */
  19
  20#include <linux/oom.h>
  21#include <linux/mm.h>
  22#include <linux/err.h>
  23#include <linux/gfp.h>
  24#include <linux/sched.h>
  25#include <linux/sched/mm.h>
  26#include <linux/sched/coredump.h>
  27#include <linux/sched/task.h>
  28#include <linux/swap.h>
  29#include <linux/timex.h>
  30#include <linux/jiffies.h>
  31#include <linux/cpuset.h>
  32#include <linux/export.h>
  33#include <linux/notifier.h>
  34#include <linux/memcontrol.h>
  35#include <linux/mempolicy.h>
  36#include <linux/security.h>
  37#include <linux/ptrace.h>
  38#include <linux/freezer.h>
  39#include <linux/ftrace.h>
  40#include <linux/ratelimit.h>
  41#include <linux/kthread.h>
  42#include <linux/init.h>
  43#include <linux/mmu_notifier.h>
  44
  45#include <asm/tlb.h>
  46#include "internal.h"
  47#include "slab.h"
  48
  49#define CREATE_TRACE_POINTS
  50#include <trace/events/oom.h>
  51
  52int sysctl_panic_on_oom;
  53int sysctl_oom_kill_allocating_task;
  54int sysctl_oom_dump_tasks = 1;
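/*
 * These are exposed to userspace as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks;
 * the initializers above are only the built-in defaults.
 */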
  55
  56/*
  57 * Serializes oom killer invocations (out_of_memory()) from all contexts to
  58 * prevent overeager oom killing (e.g. when the oom killer is invoked
  59 * from different domains).
  60 *
  61 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
  62 * and mark_oom_victim().
  63 */
  64DEFINE_MUTEX(oom_lock);
  65
  66#ifdef CONFIG_NUMA
  67/**
  68 * has_intersects_mems_allowed() - check task eligibility for kill
  69 * @start: task struct of the task to consider
  70 * @mask: nodemask passed to page allocator for mempolicy ooms
  71 *
  72 * Task eligibility is determined by whether or not any thread of @start
  73 * shares the same mempolicy nodes as current if it is bound by such a policy
  74 * and whether or not it has the same set of allowed cpuset nodes.
  75 */
  76static bool has_intersects_mems_allowed(struct task_struct *start,
  77                                        const nodemask_t *mask)
  78{
  79        struct task_struct *tsk;
  80        bool ret = false;
  81
  82        rcu_read_lock();
  83        for_each_thread(start, tsk) {
  84                if (mask) {
  85                        /*
  86                         * If this is a mempolicy constrained oom, tsk's
  87                         * cpuset is irrelevant.  Only return true if its
  88                         * mempolicy intersects current, otherwise it may be
  89                         * needlessly killed.
  90                         */
  91                        ret = mempolicy_nodemask_intersects(tsk, mask);
  92                } else {
  93                        /*
  94                         * This is not a mempolicy constrained oom, so only
  95                         * check the mems of tsk's cpuset.
  96                         */
  97                        ret = cpuset_mems_allowed_intersects(current, tsk);
  98                }
  99                if (ret)
 100                        break;
 101        }
 102        rcu_read_unlock();
 103
 104        return ret;
 105}
 106#else
 107static bool has_intersects_mems_allowed(struct task_struct *tsk,
 108                                        const nodemask_t *mask)
 109{
 110        return true;
 111}
 112#endif /* CONFIG_NUMA */
 113
 114/*
 115 * The process p may have detached its own ->mm while exiting or through
 116 * use_mm(), but one or more of its subthreads may still have a valid
 117 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 118 * task_lock() held.
 119 */
 120struct task_struct *find_lock_task_mm(struct task_struct *p)
 121{
 122        struct task_struct *t;
 123
 124        rcu_read_lock();
 125
 126        for_each_thread(p, t) {
 127                task_lock(t);
 128                if (likely(t->mm))
 129                        goto found;
 130                task_unlock(t);
 131        }
 132        t = NULL;
 133found:
 134        rcu_read_unlock();
 135
 136        return t;
 137}
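
/*
 * Typical calling pattern (illustrative sketch only): the thread returned
 * above is still locked, so callers are expected to drop the lock once they
 * are done with ->mm, e.g.:
 *
 *      p = find_lock_task_mm(tsk);
 *      if (p) {
 *              ... use p->mm safely here ...
 *              task_unlock(p);
 *      }
 */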
 138
 139/*
 140 * order == -1 means the oom kill was requested via sysrq; any other value
 141 * of order is only used for display purposes.
 142 */
 143static inline bool is_sysrq_oom(struct oom_control *oc)
 144{
 145        return oc->order == -1;
 146}
 147
 148static inline bool is_memcg_oom(struct oom_control *oc)
 149{
 150        return oc->memcg != NULL;
 151}
 152
 153/* return true if the task is not adequate as candidate victim task. */
 154static bool oom_unkillable_task(struct task_struct *p,
 155                struct mem_cgroup *memcg, const nodemask_t *nodemask)
 156{
 157        if (is_global_init(p))
 158                return true;
 159        if (p->flags & PF_KTHREAD)
 160                return true;
 161
 162        /* When called from mem_cgroup_out_of_memory() and p is not a member of the group */
 163        if (memcg && !task_in_mem_cgroup(p, memcg))
 164                return true;
 165
 166        /* p may not have freeable memory in nodemask */
 167        if (!has_intersects_mems_allowed(p, nodemask))
 168                return true;
 169
 170        return false;
 171}
 172
 173/*
 174 * Print out unreclaimable slab info when the amount of unreclaimable slab
 175 * memory is greater than all user memory (LRU pages)
 176 */
 177static bool is_dump_unreclaim_slabs(void)
 178{
 179        unsigned long nr_lru;
 180
 181        nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
 182                 global_node_page_state(NR_INACTIVE_ANON) +
 183                 global_node_page_state(NR_ACTIVE_FILE) +
 184                 global_node_page_state(NR_INACTIVE_FILE) +
 185                 global_node_page_state(NR_ISOLATED_ANON) +
 186                 global_node_page_state(NR_ISOLATED_FILE) +
 187                 global_node_page_state(NR_UNEVICTABLE);
 188
 189        return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
 190}
 191
 192/**
 193 * oom_badness - heuristic function to determine which candidate task to kill
 194 * @p: task struct of the task whose badness we should calculate
 195 * @totalpages: total present RAM allowed for page allocation
 196 * @memcg: task's memory controller, if constrained
 197 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 198 *
 199 * The heuristic for determining which task to kill is made to be as simple and
 200 * predictable as possible.  The goal is to return the highest value for the
 201 * task consuming the most memory to avoid subsequent oom failures.
 202 */
 203unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
 204                          const nodemask_t *nodemask, unsigned long totalpages)
 205{
 206        long points;
 207        long adj;
 208
 209        if (oom_unkillable_task(p, memcg, nodemask))
 210                return 0;
 211
 212        p = find_lock_task_mm(p);
 213        if (!p)
 214                return 0;
 215
 216        /*
 217         * Do not even consider tasks which are explicitly marked oom
 218         * unkillable, have already been oom reaped, or are in
 219         * the middle of a vfork.
 220         */
 221        adj = (long)p->signal->oom_score_adj;
 222        if (adj == OOM_SCORE_ADJ_MIN ||
 223                        test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
 224                        in_vfork(p)) {
 225                task_unlock(p);
 226                return 0;
 227        }
 228
 229        /*
 230         * The baseline for the badness score is the proportion of RAM that each
 231         * task's rss, pagetable and swap space use.
 232         */
 233        points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
 234                mm_pgtables_bytes(p->mm) / PAGE_SIZE;
 235        task_unlock(p);
 236
 237        /* Normalize to oom_score_adj units */
 238        adj *= totalpages / 1000;
 239        points += adj;
 240
 241        /*
 242         * Never return 0 for an eligible task regardless of the root bonus and
 243         * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
 244         */
 245        return points > 0 ? points : 1;
 246}
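
/*
 * Worked example of the score above (illustrative numbers only): with
 * totalpages = 4,000,000 pages (roughly 16GB of RAM plus swap with 4KiB
 * pages), a task whose rss + page tables + swap entries come to 1,000,000
 * pages starts with points = 1,000,000.  An oom_score_adj of +300 adds
 * 300 * (4,000,000 / 1000) = 1,200,000 for a final score of 2,200,000;
 * an oom_score_adj of -300 would subtract the same amount and the result
 * would be clamped to the minimum of 1.
 */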
 247
 248static const char * const oom_constraint_text[] = {
 249        [CONSTRAINT_NONE] = "CONSTRAINT_NONE",
 250        [CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
 251        [CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
 252        [CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
 253};
 254
 255/*
 256 * Determine the type of allocation constraint.
 257 */
 258static enum oom_constraint constrained_alloc(struct oom_control *oc)
 259{
 260        struct zone *zone;
 261        struct zoneref *z;
 262        enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
 263        bool cpuset_limited = false;
 264        int nid;
 265
 266        if (is_memcg_oom(oc)) {
 267                oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
 268                return CONSTRAINT_MEMCG;
 269        }
 270
 271        /* Default to all available memory */
 272        oc->totalpages = totalram_pages() + total_swap_pages;
 273
 274        if (!IS_ENABLED(CONFIG_NUMA))
 275                return CONSTRAINT_NONE;
 276
 277        if (!oc->zonelist)
 278                return CONSTRAINT_NONE;
 279        /*
 280         * This is reached only when __GFP_NOFAIL is used, so we should avoid
 281         * killing current; a random task has to be killed in this case.
 282         * Ideally this would be CONSTRAINT_THISNODE, but there is no way to handle it now.
 283         */
 284        if (oc->gfp_mask & __GFP_THISNODE)
 285                return CONSTRAINT_NONE;
 286
 287        /*
 288         * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
 289         * the page allocator means a mempolicy is in effect.  Cpuset policy
 290         * is enforced in get_page_from_freelist().
 291         */
 292        if (oc->nodemask &&
 293            !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
 294                oc->totalpages = total_swap_pages;
 295                for_each_node_mask(nid, *oc->nodemask)
 296                        oc->totalpages += node_spanned_pages(nid);
 297                return CONSTRAINT_MEMORY_POLICY;
 298        }
 299
 300        /* Check whether this allocation failure is caused by the cpuset's wall function */
 301        for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
 302                        high_zoneidx, oc->nodemask)
 303                if (!cpuset_zone_allowed(zone, oc->gfp_mask))
 304                        cpuset_limited = true;
 305
 306        if (cpuset_limited) {
 307                oc->totalpages = total_swap_pages;
 308                for_each_node_mask(nid, cpuset_current_mems_allowed)
 309                        oc->totalpages += node_spanned_pages(nid);
 310                return CONSTRAINT_CPUSET;
 311        }
 312        return CONSTRAINT_NONE;
 313}
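
/*
 * Illustration of the constraint types above (hypothetical two-node box):
 * an allocation under an MPOL_BIND policy restricted to node 1 arrives with
 * oc->nodemask being a strict subset of N_MEMORY, so the oom is classed as
 * CONSTRAINT_MEMORY_POLICY and totalpages is limited to node 1's spanned
 * pages plus swap.  The same failure caused by a cpuset confined to node 1
 * (full nodemask, but cpuset_zone_allowed() rejecting node 0's zones) is
 * classed as CONSTRAINT_CPUSET instead.
 */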
 314
 315static int oom_evaluate_task(struct task_struct *task, void *arg)
 316{
 317        struct oom_control *oc = arg;
 318        unsigned long points;
 319
 320        if (oom_unkillable_task(task, NULL, oc->nodemask))
 321                goto next;
 322
 323        /*
 324         * This task already has access to memory reserves and is being killed.
 325         * Don't allow any other task to have access to the reserves unless
 326         * the task has MMF_OOM_SKIP because chances that it would release
 327         * any memory is quite low.
 328         */
 329        if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
 330                if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
 331                        goto next;
 332                goto abort;
 333        }
 334
 335        /*
 336         * If task is allocating a lot of memory and has been marked to be
 337         * killed first if it triggers an oom, then select it.
 338         */
 339        if (oom_task_origin(task)) {
 340                points = ULONG_MAX;
 341                goto select;
 342        }
 343
 344        points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
 345        if (!points || points < oc->chosen_points)
 346                goto next;
 347
 348        /* Prefer thread group leaders for display purposes */
 349        if (points == oc->chosen_points && thread_group_leader(oc->chosen))
 350                goto next;
 351select:
 352        if (oc->chosen)
 353                put_task_struct(oc->chosen);
 354        get_task_struct(task);
 355        oc->chosen = task;
 356        oc->chosen_points = points;
 357next:
 358        return 0;
 359abort:
 360        if (oc->chosen)
 361                put_task_struct(oc->chosen);
 362        oc->chosen = (void *)-1UL;
 363        return 1;
 364}
 365
 366/*
 367 * Simple selection loop. We choose the process with the highest number of
 368 * 'points'. If the scan was aborted, oc->chosen is set to -1.
 369 */
 370static void select_bad_process(struct oom_control *oc)
 371{
 372        if (is_memcg_oom(oc))
 373                mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
 374        else {
 375                struct task_struct *p;
 376
 377                rcu_read_lock();
 378                for_each_process(p)
 379                        if (oom_evaluate_task(p, oc))
 380                                break;
 381                rcu_read_unlock();
 382        }
 383
 384        oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
 385}
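
/*
 * The final line above rescales the victim's raw badness (a page count) to
 * a 0..1000 range relative to totalpages.  Continuing the worked example
 * from oom_badness(): 2,200,000 raw points against totalpages = 4,000,000
 * becomes 2,200,000 * 1000 / 4,000,000 = 550.
 */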
 386
 387/**
 388 * dump_tasks - dump current memory state of all system tasks
 389 * @memcg: current's memory controller, if constrained
 390 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 391 *
 392 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 393 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 394 * are not shown.
 395 * State information includes task's pid, uid, tgid, vm size, rss,
 396 * pgtables_bytes, swapents, oom_score_adj value, and name.
 397 */
 398static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
 399{
 400        struct task_struct *p;
 401        struct task_struct *task;
 402
 403        pr_info("Tasks state (memory values in pages):\n");
 404        pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
 405        rcu_read_lock();
 406        for_each_process(p) {
 407                if (oom_unkillable_task(p, memcg, nodemask))
 408                        continue;
 409
 410                task = find_lock_task_mm(p);
 411                if (!task) {
 412                        /*
 413                         * This is a kthread or all of p's threads have already
 414                         * detached their mm's.  There's no need to report
 415                         * them; they can't be oom killed anyway.
 416                         */
 417                        continue;
 418                }
 419
 420                pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
 421                        task->pid, from_kuid(&init_user_ns, task_uid(task)),
 422                        task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
 423                        mm_pgtables_bytes(task->mm),
 424                        get_mm_counter(task->mm, MM_SWAPENTS),
 425                        task->signal->oom_score_adj, task->comm);
 426                task_unlock(task);
 427        }
 428        rcu_read_unlock();
 429}
 430
 431static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
 432{
 433        /* one line summary of the oom killer context. */
 434        pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
 435                        oom_constraint_text[oc->constraint],
 436                        nodemask_pr_args(oc->nodemask));
 437        cpuset_print_current_mems_allowed();
 438        mem_cgroup_print_oom_context(oc->memcg, victim);
 439        pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
 440                from_kuid(&init_user_ns, task_uid(victim)));
 441}
 442
 443static void dump_header(struct oom_control *oc, struct task_struct *p)
 444{
 445        pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
 446                current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
 447                        current->signal->oom_score_adj);
 448        if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
 449                pr_warn("COMPACTION is disabled!!!\n");
 450
 451        dump_stack();
 452        if (is_memcg_oom(oc))
 453                mem_cgroup_print_oom_meminfo(oc->memcg);
 454        else {
 455                show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
 456                if (is_dump_unreclaim_slabs())
 457                        dump_unreclaimable_slab();
 458        }
 459        if (sysctl_oom_dump_tasks)
 460                dump_tasks(oc->memcg, oc->nodemask);
 461        if (p)
 462                dump_oom_summary(oc, p);
 463}
 464
 465/*
 466 * Number of OOM victims in flight
 467 */
 468static atomic_t oom_victims = ATOMIC_INIT(0);
 469static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);
 470
 471static bool oom_killer_disabled __read_mostly;
 472
 473#define K(x) ((x) << (PAGE_SHIFT-10))
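/* e.g. with 4KiB pages (PAGE_SHIFT == 12), K(25) == 25 << 2 == 100 (kB) */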
 474
 475/*
 476 * task->mm can be NULL if the task is the exited group leader.  So to
 477 * determine whether the task is using a particular mm, we examine all the
 478 * task's threads: if one of those is using this mm then this task was also
 479 * using it.
 480 */
 481bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
 482{
 483        struct task_struct *t;
 484
 485        for_each_thread(p, t) {
 486                struct mm_struct *t_mm = READ_ONCE(t->mm);
 487                if (t_mm)
 488                        return t_mm == mm;
 489        }
 490        return false;
 491}
 492
 493#ifdef CONFIG_MMU
 494/*
 495 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 496 * victim (if that is possible) to help the OOM killer to move on.
 497 */
 498static struct task_struct *oom_reaper_th;
 499static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
 500static struct task_struct *oom_reaper_list;
 501static DEFINE_SPINLOCK(oom_reaper_lock);
 502
 503bool __oom_reap_task_mm(struct mm_struct *mm)
 504{
 505        struct vm_area_struct *vma;
 506        bool ret = true;
 507
 508        /*
 509         * Tell all users of get_user/copy_from_user etc... that the content
 510         * is no longer stable. No barriers really needed because unmapping
 511         * should imply barriers already and the reader would hit a page fault
 512         * if it stumbled over reaped memory.
 513         */
 514        set_bit(MMF_UNSTABLE, &mm->flags);
 515
 516        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
 517                if (!can_madv_dontneed_vma(vma))
 518                        continue;
 519
 520                /*
 521                 * Only anonymous pages have a good chance to be dropped
 522                 * without additional steps which we cannot afford as we
 523                 * are OOM already.
 524                 *
 525                 * We do not even care about fs backed pages because all
 526                 * which are reclaimable have already been reclaimed and
 527                 * we do not want to block exit_mmap by keeping mm ref
 528                 * count elevated without a good reason.
 529                 */
 530                if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
 531                        struct mmu_notifier_range range;
 532                        struct mmu_gather tlb;
 533
 534                        mmu_notifier_range_init(&range, mm, vma->vm_start,
 535                                                vma->vm_end);
 536                        tlb_gather_mmu(&tlb, mm, range.start, range.end);
 537                        if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
 538                                tlb_finish_mmu(&tlb, range.start, range.end);
 539                                ret = false;
 540                                continue;
 541                        }
 542                        unmap_page_range(&tlb, vma, range.start, range.end, NULL);
 543                        mmu_notifier_invalidate_range_end(&range);
 544                        tlb_finish_mmu(&tlb, range.start, range.end);
 545                }
 546        }
 547
 548        return ret;
 549}
 550
 551/*
 552 * Reaps the address space of the given task.
 553 *
 554 * Returns true on success and false if none or only part of the address space
 555 * could be reclaimed, in which case the caller should retry later.
 556 */
 557static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
 558{
 559        bool ret = true;
 560
 561        if (!down_read_trylock(&mm->mmap_sem)) {
 562                trace_skip_task_reaping(tsk->pid);
 563                return false;
 564        }
 565
 566        /*
 567         * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
 568         * work on the mm anymore. The check for MMF_OOM_SKIP must run
 569         * under mmap_sem for reading because it serializes against the
 570         * down_write();up_write() cycle in exit_mmap().
 571         */
 572        if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
 573                trace_skip_task_reaping(tsk->pid);
 574                goto out_unlock;
 575        }
 576
 577        trace_start_task_reaping(tsk->pid);
 578
 579        /* failed to reap part of the address space. Try again later */
 580        ret = __oom_reap_task_mm(mm);
 581        if (!ret)
 582                goto out_finish;
 583
 584        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 585                        task_pid_nr(tsk), tsk->comm,
 586                        K(get_mm_counter(mm, MM_ANONPAGES)),
 587                        K(get_mm_counter(mm, MM_FILEPAGES)),
 588                        K(get_mm_counter(mm, MM_SHMEMPAGES)));
 589out_finish:
 590        trace_finish_task_reaping(tsk->pid);
 591out_unlock:
 592        up_read(&mm->mmap_sem);
 593
 594        return ret;
 595}
 596
 597#define MAX_OOM_REAP_RETRIES 10
 598static void oom_reap_task(struct task_struct *tsk)
 599{
 600        int attempts = 0;
 601        struct mm_struct *mm = tsk->signal->oom_mm;
 602
 603        /* Retry the down_read_trylock(mmap_sem) a few times */
 604        while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
 605                schedule_timeout_idle(HZ/10);
 606
 607        if (attempts <= MAX_OOM_REAP_RETRIES ||
 608            test_bit(MMF_OOM_SKIP, &mm->flags))
 609                goto done;
 610
 611        pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
 612                task_pid_nr(tsk), tsk->comm);
 613        debug_show_all_locks();
 614
 615done:
 616        tsk->oom_reaper_list = NULL;
 617
 618        /*
 619         * Hide this mm from the OOM killer because it has either been reaped or
 620         * its mmap_sem holder is stuck and cannot call up_write(mmap_sem).
 621         */
 622        set_bit(MMF_OOM_SKIP, &mm->flags);
 623
 624        /* Drop a reference taken by wake_oom_reaper */
 625        put_task_struct(tsk);
 626}
 627
 628static int oom_reaper(void *unused)
 629{
 630        while (true) {
 631                struct task_struct *tsk = NULL;
 632
 633                wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
 634                spin_lock(&oom_reaper_lock);
 635                if (oom_reaper_list != NULL) {
 636                        tsk = oom_reaper_list;
 637                        oom_reaper_list = tsk->oom_reaper_list;
 638                }
 639                spin_unlock(&oom_reaper_lock);
 640
 641                if (tsk)
 642                        oom_reap_task(tsk);
 643        }
 644
 645        return 0;
 646}
 647
 648static void wake_oom_reaper(struct task_struct *tsk)
 649{
 650        /* mm is already queued? */
 651        if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
 652                return;
 653
 654        get_task_struct(tsk);
 655
 656        spin_lock(&oom_reaper_lock);
 657        tsk->oom_reaper_list = oom_reaper_list;
 658        oom_reaper_list = tsk;
 659        spin_unlock(&oom_reaper_lock);
 660        trace_wake_reaper(tsk->pid);
 661        wake_up(&oom_reaper_wait);
 662}
 663
 664static int __init oom_init(void)
 665{
 666        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
 667        return 0;
 668}
 669subsys_initcall(oom_init)
 670#else
 671static inline void wake_oom_reaper(struct task_struct *tsk)
 672{
 673}
 674#endif /* CONFIG_MMU */
 675
 676/**
 677 * mark_oom_victim - mark the given task as OOM victim
 678 * @tsk: task to mark
 679 *
 680 * Has to be called with oom_lock held and never after
 681 * the oom killer has already been disabled.
 682 *
 683 * tsk->mm has to be non-NULL and the caller has to guarantee it is stable
 684 * (either by holding task_lock or by operating on current).
 685 */
 686static void mark_oom_victim(struct task_struct *tsk)
 687{
 688        struct mm_struct *mm = tsk->mm;
 689
 690        WARN_ON(oom_killer_disabled);
 691        /* OOM killer might race with memcg OOM */
 692        if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
 693                return;
 694
 695        /* oom_mm is bound to the signal struct lifetime. */
 696        if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
 697                mmgrab(tsk->signal->oom_mm);
 698                set_bit(MMF_OOM_VICTIM, &mm->flags);
 699        }
 700
 701        /*
 702         * Make sure that the task is woken up from uninterruptible sleep
 703         * if it is frozen, because the OOM killer would otherwise be unable to
 704         * free any memory and would livelock. freezing_slow_path will tell the freezer
 705         * that TIF_MEMDIE tasks should be ignored.
 706         */
 707        __thaw_task(tsk);
 708        atomic_inc(&oom_victims);
 709        trace_mark_victim(tsk->pid);
 710}
 711
 712/**
 713 * exit_oom_victim - note the exit of an OOM victim
 714 */
 715void exit_oom_victim(void)
 716{
 717        clear_thread_flag(TIF_MEMDIE);
 718
 719        if (!atomic_dec_return(&oom_victims))
 720                wake_up_all(&oom_victims_wait);
 721}
 722
 723/**
 724 * oom_killer_enable - enable OOM killer
 725 */
 726void oom_killer_enable(void)
 727{
 728        oom_killer_disabled = false;
 729        pr_info("OOM killer enabled.\n");
 730}
 731
 732/**
 733 * oom_killer_disable - disable OOM killer
 734 * @timeout: maximum timeout to wait for oom victims in jiffies
 735 *
 736 * Forces all page allocations to fail rather than trigger OOM killer.
 737 * Will block and wait until all OOM victims are killed or the given
 738 * timeout expires.
 739 *
 740 * The function cannot be called when there are runnable user tasks because
 741 * userspace would see unexpected allocation failures as a result. Any
 742 * new usage of this function should be discussed with the MM people.
 743 *
 744 * Returns true if successful and false if the OOM killer cannot be
 745 * disabled.
 746 */
 747bool oom_killer_disable(signed long timeout)
 748{
 749        signed long ret;
 750
 751        /*
 752         * Make sure not to race with an ongoing OOM killer invocation. Check
 753         * that current has not been killed (possibly due to sharing the victim's memory).
 754         */
 755        if (mutex_lock_killable(&oom_lock))
 756                return false;
 757        oom_killer_disabled = true;
 758        mutex_unlock(&oom_lock);
 759
 760        ret = wait_event_interruptible_timeout(oom_victims_wait,
 761                        !atomic_read(&oom_victims), timeout);
 762        if (ret <= 0) {
 763                oom_killer_enable();
 764                return false;
 765        }
 766        pr_info("OOM killer disabled.\n");
 767
 768        return true;
 769}
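
/*
 * Usage sketch (hedged; exact call sites vary by kernel version): the
 * suspend/hibernation freezer is the intended kind of caller here, e.g.
 * something like
 *
 *      if (!oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
 *              error = -EBUSY;
 *
 * after userspace has been frozen, with oom_killer_enable() called again on
 * the thaw path.
 */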
 770
 771static inline bool __task_will_free_mem(struct task_struct *task)
 772{
 773        struct signal_struct *sig = task->signal;
 774
 775        /*
 776         * A coredumping process may sleep for an extended period in exit_mm(),
 777         * so the oom killer cannot assume that the process will promptly exit
 778         * and release memory.
 779         */
 780        if (sig->flags & SIGNAL_GROUP_COREDUMP)
 781                return false;
 782
 783        if (sig->flags & SIGNAL_GROUP_EXIT)
 784                return true;
 785
 786        if (thread_group_empty(task) && (task->flags & PF_EXITING))
 787                return true;
 788
 789        return false;
 790}
 791
 792/*
 793 * Checks whether the given task is dying or exiting and likely to
 794 * release its address space. This means that all threads and processes
 795 * sharing the same mm have to be killed or exiting.
 796 * The caller has to make sure that task->mm is stable (hold task_lock or
 797 * operate on current).
 798 */
 799static bool task_will_free_mem(struct task_struct *task)
 800{
 801        struct mm_struct *mm = task->mm;
 802        struct task_struct *p;
 803        bool ret = true;
 804
 805        /*
 806         * Skip tasks without an mm because they might have already passed exit_mm
 807         * and exit_oom_victim. The oom_reaper could have rescued that but do not
 808         * rely on that for now. We can consider using find_lock_task_mm in the future.
 809         */
 810        if (!mm)
 811                return false;
 812
 813        if (!__task_will_free_mem(task))
 814                return false;
 815
 816        /*
 817         * This task has already been drained by the oom reaper so there is
 818         * only a small chance it will free any more.
 819         */
 820        if (test_bit(MMF_OOM_SKIP, &mm->flags))
 821                return false;
 822
 823        if (atomic_read(&mm->mm_users) <= 1)
 824                return true;
 825
 826        /*
 827         * Make sure that all tasks which share the mm with the given task
 828         * are dying as well, so that a) nobody pins its mm and
 829         * b) the task is also reapable by the oom reaper.
 830         */
 831        rcu_read_lock();
 832        for_each_process(p) {
 833                if (!process_shares_mm(p, mm))
 834                        continue;
 835                if (same_thread_group(task, p))
 836                        continue;
 837                ret = __task_will_free_mem(p);
 838                if (!ret)
 839                        break;
 840        }
 841        rcu_read_unlock();
 842
 843        return ret;
 844}
 845
 846static void __oom_kill_process(struct task_struct *victim, const char *message)
 847{
 848        struct task_struct *p;
 849        struct mm_struct *mm;
 850        bool can_oom_reap = true;
 851
 852        p = find_lock_task_mm(victim);
 853        if (!p) {
 854                put_task_struct(victim);
 855                return;
 856        } else if (victim != p) {
 857                get_task_struct(p);
 858                put_task_struct(victim);
 859                victim = p;
 860        }
 861
 862        /* Get a reference to safely compare mm after task_unlock(victim) */
 863        mm = victim->mm;
 864        mmgrab(mm);
 865
 866        /* Raise event before sending signal: task reaper must see this */
 867        count_vm_event(OOM_KILL);
 868        memcg_memory_event_mm(mm, MEMCG_OOM_KILL);
 869
 870        /*
 871         * We should send SIGKILL before granting access to memory reserves
 872         * in order to prevent the OOM victim from depleting the memory
 873         * reserves from the user space under its control.
 874         */
 875        do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
 876        mark_oom_victim(victim);
 877        pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
 878                message, task_pid_nr(victim), victim->comm,
 879                K(victim->mm->total_vm),
 880                K(get_mm_counter(victim->mm, MM_ANONPAGES)),
 881                K(get_mm_counter(victim->mm, MM_FILEPAGES)),
 882                K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
 883        task_unlock(victim);
 884
 885        /*
 886         * Kill all user processes sharing victim->mm in other thread groups, if
 887         * any.  They don't get access to memory reserves, though, to avoid
 888         * depletion of all memory.  This prevents mm->mmap_sem livelock when an
 889         * oom killed thread cannot exit because it requires the semaphore and
 890         * it is contended by another thread trying to allocate memory itself.
 891         * That thread will now get access to memory reserves since it has a
 892         * pending fatal signal.
 893         */
 894        rcu_read_lock();
 895        for_each_process(p) {
 896                if (!process_shares_mm(p, mm))
 897                        continue;
 898                if (same_thread_group(p, victim))
 899                        continue;
 900                if (is_global_init(p)) {
 901                        can_oom_reap = false;
 902                        set_bit(MMF_OOM_SKIP, &mm->flags);
 903                        pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
 904                                        task_pid_nr(victim), victim->comm,
 905                                        task_pid_nr(p), p->comm);
 906                        continue;
 907                }
 908                /*
 909                 * No use_mm() user needs to read from userspace, so we are
 910                 * ok to reap it.
 911                 */
 912                if (unlikely(p->flags & PF_KTHREAD))
 913                        continue;
 914                do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
 915        }
 916        rcu_read_unlock();
 917
 918        if (can_oom_reap)
 919                wake_oom_reaper(victim);
 920
 921        mmdrop(mm);
 922        put_task_struct(victim);
 923}
 924#undef K
 925
 926/*
 927 * Kill the provided task unless it is protected by setting
 928 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 929 */
 930static int oom_kill_memcg_member(struct task_struct *task, void *message)
 931{
 932        if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
 933            !is_global_init(task)) {
 934                get_task_struct(task);
 935                __oom_kill_process(task, message);
 936        }
 937        return 0;
 938}
 939
 940static void oom_kill_process(struct oom_control *oc, const char *message)
 941{
 942        struct task_struct *victim = oc->chosen;
 943        struct mem_cgroup *oom_group;
 944        static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 945                                              DEFAULT_RATELIMIT_BURST);
 946
 947        /*
 948         * If the task is already exiting, don't alarm the sysadmin or kill
 949         * its children or threads, just give it access to memory reserves
 950         * so it can die quickly
 951         */
 952        task_lock(victim);
 953        if (task_will_free_mem(victim)) {
 954                mark_oom_victim(victim);
 955                wake_oom_reaper(victim);
 956                task_unlock(victim);
 957                put_task_struct(victim);
 958                return;
 959        }
 960        task_unlock(victim);
 961
 962        if (__ratelimit(&oom_rs))
 963                dump_header(oc, victim);
 964
 965        /*
 966         * Do we need to kill the entire memory cgroup?
 967         * Or even one of the ancestor memory cgroups?
 968         * Check this out before killing the victim task.
 969         */
 970        oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 971
 972        __oom_kill_process(victim, message);
 973
 974        /*
 975         * If necessary, kill all tasks in the selected memory cgroup.
 976         */
 977        if (oom_group) {
 978                mem_cgroup_print_oom_group(oom_group);
 979                mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
 980                                      (void*)message);
 981                mem_cgroup_put(oom_group);
 982        }
 983}
 984
 985/*
 986 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 987 */
 988static void check_panic_on_oom(struct oom_control *oc,
 989                               enum oom_constraint constraint)
 990{
 991        if (likely(!sysctl_panic_on_oom))
 992                return;
 993        if (sysctl_panic_on_oom != 2) {
 994                /*
 995                 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
 996                 * does not panic for cpuset, mempolicy, or memcg allocation
 997                 * failures.
 998                 */
 999                if (constraint != CONSTRAINT_NONE)
1000                        return;
1001        }
1002        /* Do not panic for oom kills triggered by sysrq */
1003        if (is_sysrq_oom(oc))
1004                return;
1005        dump_header(oc, NULL);
1006        panic("Out of memory: %s panic_on_oom is enabled\n",
1007                sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
1008}
1009
1010static BLOCKING_NOTIFIER_HEAD(oom_notify_list);
1011
1012int register_oom_notifier(struct notifier_block *nb)
1013{
1014        return blocking_notifier_chain_register(&oom_notify_list, nb);
1015}
1016EXPORT_SYMBOL_GPL(register_oom_notifier);
1017
1018int unregister_oom_notifier(struct notifier_block *nb)
1019{
1020        return blocking_notifier_chain_unregister(&oom_notify_list, nb);
1021}
1022EXPORT_SYMBOL_GPL(unregister_oom_notifier);
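
/*
 * Usage sketch for the notifier chain (names below are made up for
 * illustration, not taken from this file): a subsystem that can give back
 * pages when memory is tight registers a callback; out_of_memory() passes a
 * pointer to an unsigned long 'freed' counter as the data argument and skips
 * the kill if anything was reclaimed.
 *
 *      static int example_oom_notify(struct notifier_block *nb,
 *                                    unsigned long unused, void *parm)
 *      {
 *              unsigned long *freed = parm;
 *
 *              *freed += example_drop_caches();  // pages handed back
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block example_oom_nb = {
 *              .notifier_call = example_oom_notify,
 *      };
 *
 *      register_oom_notifier(&example_oom_nb);
 */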
1023
1024/**
1025 * out_of_memory - kill the "best" process when we run out of memory
1026 * @oc: pointer to struct oom_control
1027 *
1028 * If we run out of memory, we have the choice between either
1029 * killing a random task (bad), letting the system crash (worse)
1030 * or trying to be smart about which process to kill. Note that we
1031 * don't have to be perfect here, we just have to be good.
1032 */
1033bool out_of_memory(struct oom_control *oc)
1034{
1035        unsigned long freed = 0;
1036        enum oom_constraint constraint = CONSTRAINT_NONE;
1037
1038        if (oom_killer_disabled)
1039                return false;
1040
1041        if (!is_memcg_oom(oc)) {
1042                blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
1043                if (freed > 0)
1044                        /* Got some memory back in the last second. */
1045                        return true;
1046        }
1047
1048        /*
1049         * If current has a pending SIGKILL or is exiting, then automatically
1050         * select it.  The goal is to allow it to allocate so that it may
1051         * quickly exit and free its memory.
1052         */
1053        if (task_will_free_mem(current)) {
1054                mark_oom_victim(current);
1055                wake_oom_reaper(current);
1056                return true;
1057        }
1058
1059        /*
1060         * The OOM killer does not compensate for IO-less reclaim.
1061         * pagefault_out_of_memory lost its gfp context so we have to
1062         * make sure to exclude a 0 mask - all other users should have at least
1063         * ___GFP_DIRECT_RECLAIM to get here.
1064         */
1065        if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
1066                return true;
1067
1068        /*
1069         * Check if there were limitations on the allocation (only relevant for
1070         * NUMA and memcg) that may require different handling.
1071         */
1072        constraint = constrained_alloc(oc);
1073        if (constraint != CONSTRAINT_MEMORY_POLICY)
1074                oc->nodemask = NULL;
1075        check_panic_on_oom(oc, constraint);
1076
1077        if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
1078            current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
1079            current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
1080                get_task_struct(current);
1081                oc->chosen = current;
1082                oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
1083                return true;
1084        }
1085
1086        select_bad_process(oc);
1087        /* Found nothing?!?! */
1088        if (!oc->chosen) {
1089                dump_header(oc, NULL);
1090                pr_warn("Out of memory and no killable processes...\n");
1091                /*
1092                 * If we got here due to an actual allocation at the
1093                 * system level, we cannot survive this and will enter
1094                 * an endless loop in the allocator. Bail out now.
1095                 */
1096                if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
1097                        panic("System is deadlocked on memory\n");
1098        }
1099        if (oc->chosen && oc->chosen != (void *)-1UL)
1100                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
1101                                 "Memory cgroup out of memory");
1102        return !!oc->chosen;
1103}
1104
1105/*
1106 * The pagefault handler calls here because it is out of memory, so kill a
1107 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
1108 * killing is already in progress so do nothing.
1109 */
1110void pagefault_out_of_memory(void)
1111{
1112        struct oom_control oc = {
1113                .zonelist = NULL,
1114                .nodemask = NULL,
1115                .memcg = NULL,
1116                .gfp_mask = 0,
1117                .order = 0,
1118        };
1119
1120        if (mem_cgroup_oom_synchronize(true))
1121                return;
1122
1123        if (!mutex_trylock(&oom_lock))
1124                return;
1125        out_of_memory(&oc);
1126        mutex_unlock(&oom_lock);
1127}
1128