/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;

DEFINE_MUTEX(oom_lock);

#ifdef CONFIG_NUMA
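/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @start,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */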
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */
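
/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */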
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
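
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */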
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}
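
/* return true if the task is not adequate as a candidate victim task. */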
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}
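
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */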
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		atomic_long_read(&p->mm->nr_ptes) + mm_nr_pmds(p->mm);
	task_unlock(p);

	/*
	 * Root processes get a 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		points -= (points * 3) / 100;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
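
/*
 * Determine the type of allocation constraint.
 */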
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * Reach here only when __GFP_NOFAIL is used, so we should avoid
	 * killing current; we have to do a random task kill in this case.
	 * CONSTRAINT_THISNODE would be ideal, but there is no way to
	 * handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct oom_control *oc,
					     unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif

enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
			struct task_struct *task, unsigned long totalpages)
{
	if (oom_unkillable_task(task, NULL, oc->nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves.
	 */
	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
		if (!is_sysrq_oom(oc))
			return OOM_SCAN_ABORT;
	}
	if (!task->mm)
		return OOM_SCAN_CONTINUE;

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task))
		return OOM_SCAN_SELECT;

	if (task_will_free_mem(task) && !is_sysrq_oom(oc))
		return OOM_SCAN_ABORT;

	return OOM_SCAN_OK;
}
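
/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. Returns -1 on scan abort.
 */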
static struct task_struct *select_bad_process(struct oom_control *oc,
		unsigned int *ppoints, unsigned long totalpages)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		unsigned int points;

		switch (oom_scan_process_thread(oc, p, totalpages)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return (struct task_struct *)(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, oc->nodemask, totalpages);
		if (!points || points < chosen_points)
			continue;
		/* Prefer thread group leaders for display purposes */
		if (points == chosen_points && thread_group_leader(chosen))
			continue;

		chosen = p;
		chosen_points = points;
	}
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}
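
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */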
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7ld %7ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			atomic_long_read(&task->mm->nr_ptes),
			mm_nr_pmds(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct oom_control *oc, struct task_struct *p,
			struct mem_cgroup *memcg)
{
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, oc->order,
		current->signal->oom_score_adj);
	cpuset_print_current_mems_allowed();
	dump_stack();
	if (memcg)
		mem_cgroup_print_oom_info(memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(memcg, oc->nodemask);
}
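
/*
 * Number of OOM victims in flight
 */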
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

bool oom_killer_disabled __read_mostly;
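
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 */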
void mark_oom_victim(struct task_struct *tsk)
{
	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;
	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen because OOM killer wouldn't be able to free
	 * any memory and livelock. freezing_slow_path will tell the freezer
	 * that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
}
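
/**
 * exit_oom_victim - note the exit of an OOM victim
 */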
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
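
/**
 * oom_killer_disable - disable OOM killer
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new usage of this function should be consulted with MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */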
bool oom_killer_disable(void)
{
	/*
	 * Make sure to not race with an ongoing OOM killer
	 * and that the current task is not the victim.
	 */
	mutex_lock(&oom_lock);
	if (test_thread_flag(TIF_MEMDIE)) {
		mutex_unlock(&oom_lock);
		return false;
	}

	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	wait_event(oom_victims_wait, !atomic_read(&oom_victims));

	return true;
}
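
/**
 * oom_killer_enable - enable OOM killer
 */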
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
}
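
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */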
static bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}

#define K(x) ((x) << (PAGE_SHIFT-10))
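/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */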
void oom_kill_process(struct oom_control *oc, struct task_struct *p,
		      unsigned int points, unsigned long totalpages,
		      struct mem_cgroup *memcg, const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly
	 */
	task_lock(p);
	if (p->mm && task_will_free_mem(p)) {
		mark_oom_victim(p);
		task_unlock(p);
		put_task_struct(p);
		return;
	}
	task_unlock(p);

	if (__ratelimit(&oom_rs))
		dump_header(oc, p, memcg);

	pr_err("%s: Kill process %d (%s) score %u or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	for_each_thread(p, t) {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (process_shares_mm(child, p->mm))
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, memcg, oc->nodemask,
								totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	}
	read_unlock(&tasklist_lock);

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	atomic_inc(&mm->mm_count);
	/*
	 * We should send SIGKILL before setting TIF_MEMDIE in order to prevent
	 * the OOM victim from depleting the memory reserves from the user
	 * space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	mark_oom_victim(victim);
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		if (is_global_init(p))
			continue;
		if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
			continue;

		do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
	}
	rcu_read_unlock();

	mmdrop(mm);
	put_task_struct(victim);
}
#undef K
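
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */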
void check_panic_on_oom(struct oom_control *oc, enum oom_constraint constraint,
			struct mem_cgroup *memcg)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL, memcg);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
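
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */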
bool out_of_memory(struct oom_control *oc)
{
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int uninitialized_var(points);
	enum oom_constraint constraint = CONSTRAINT_NONE;

	if (oom_killer_disabled)
		return false;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return true;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 *
	 * But don't select if current has already released its mm and cleared
	 * TIF_MEMDIE flag at exit_mm(), otherwise an OOM livelock may occur.
	 */
	if (current->mm &&
	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
		mark_oom_victim(current);
		return true;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(oc, &totalpages);
	if (constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc, constraint, NULL);

	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oom_kill_process(oc, current, 0, totalpages, NULL,
				 "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	p = select_bad_process(oc, &points, totalpages);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p && !is_sysrq_oom(oc)) {
		dump_header(oc, NULL, NULL);
		panic("Out of memory and no killable processes...\n");
	}
	if (p && p != (struct task_struct *)(-1UL)) {
		oom_kill_process(oc, p, points, totalpages, NULL,
				 "Out of memory");
		/*
		 * Give the killed process a good chance to exit before trying
		 * to allocate memory again.
		 */
		schedule_timeout_killable(1);
	}
	return true;
}
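
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task.  If oom_lock is held by somebody else, a parallel
 * oom killing is already in progress so do nothing.
 */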
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;

	if (!out_of_memory(&oc)) {
		/*
		 * There shouldn't be any user tasks runnable while the
		 * OOM killer is disabled, so the current task has to
		 * be a racing OOM victim that oom_killer_disable()
		 * is waiting for.
		 */
		WARN_ON(test_thread_flag(TIF_MEMDIE));
	}

	mutex_unlock(&oom_lock);
}