/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	code from the linux-mm archive, quoted below.
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine rarely at all) I enjoy the bonus of not having to clean up
 *  when something goes wrong. Life would be much easier if we had a
 *  common kernel errno handling code. (We hope :-))
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>

int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
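
/*
 * The three knobs above are exposed as /proc/sys/vm/panic_on_oom,
 * /proc/sys/vm/oom_kill_allocating_task and /proc/sys/vm/oom_dump_tasks
 * (registered in kernel/sysctl.c).
 */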
static DEFINE_SPINLOCK(zone_scan_lock);

#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @tsk: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	struct task_struct *start = tsk;

	do {
		if (mask) {
			/*
			 * If this is a mempolicy-constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current's, otherwise it may
			 * be needlessly killed.
			 */
			if (mempolicy_nodemask_intersects(tsk, mask))
				return true;
		} else {
			/*
			 * This is not a mempolicy-constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			if (cpuset_mems_allowed_intersects(current, tsk))
				return true;
		}
	} while_each_thread(start, tsk);

	return false;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */

/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t = p;

	do {
		task_lock(t);
		if (likely(t->mm))
			return t;
		task_unlock(t);
	} while_each_thread(p, t);

	return NULL;
}
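
/*
 * Callers of find_lock_task_mm() (e.g. oom_badness() and dump_tasks()
 * below) must pair it with task_unlock() on the returned thread once they
 * are done dereferencing ->mm.
 */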

/* return true if the task is not adequate as a candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When mem_cgroup_out_of_memory() and p is not member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}

/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of which task we should calculate
 * @totalpages: total present RAM allowed for page allocation
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that
	 * each task's rss, page table and swap space use.
	 */
	points = get_mm_rss(p->mm) + p->mm->nr_ptes +
		 get_mm_counter(p->mm, MM_SWAPENTS);
	task_unlock(p);

	/*
	 * Root processes get a 3% bonus, just like the __vm_enough_memory()
	 * implementation used by LSMs.
	 */
	if (has_capability_noaudit(p, CAP_SYS_ADMIN))
		adj -= 30;

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
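
/*
 * Worked example with made-up numbers: on a machine where totalpages is
 * 1,000,000 (RAM + swap), a task using 100,000 pages of rss, page tables and
 * swap with oom_score_adj = 0 scores 100,000 points, i.e. 10% of memory.  If
 * root owns it, adj becomes -30, subtracting 30 * (1,000,000 / 1000) = 30,000
 * points, for a final score of 70,000.  An oom_score_adj of +1000 instead
 * adds 1,000,000 points, making the task the near-certain victim.
 */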

/*
 * Determine the type of allocation constraint.
 */
#ifdef CONFIG_NUMA
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	bool cpuset_limited = false;
	int nid;

	/* Default to all available memory */
	*totalpages = totalram_pages + total_swap_pages;

	if (!zonelist)
		return CONSTRAINT_NONE;

	/*
	 * Reach here only when __GFP_NOFAIL is used, since a __GFP_THISNODE
	 * allocation would otherwise fail rather than invoke the OOM killer.
	 * There is no way to express a per-node constraint here, so fall back
	 * to an unconstrained kill.
	 */
	if (gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (nodemask && !nodes_subset(node_states[N_MEMORY], *nodemask)) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, *nodemask)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check if this allocation failure is caused by a cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			high_zoneidx, nodemask)
		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		*totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			*totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
#else
static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
				gfp_t gfp_mask, nodemask_t *nodemask,
				unsigned long *totalpages)
{
	*totalpages = totalram_pages + total_swap_pages;
	return CONSTRAINT_NONE;
}
#endif
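
/*
 * For illustration, with invented numbers: on a two-node machine with
 * 1,000,000 pages of RAM per node and 500,000 pages of swap, an unconstrained
 * allocation gives *totalpages = 2,500,000, while one bound by a mempolicy to
 * node 1 gives roughly 1,500,000 (node 1's spanned pages plus swap).
 */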

enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
		unsigned long totalpages, const nodemask_t *nodemask,
		bool force_kill)
{
	if (task->exit_state)
		return OOM_SCAN_CONTINUE;
	if (oom_unkillable_task(task, NULL, nodemask))
		return OOM_SCAN_CONTINUE;

	/*
	 * This task already has access to memory reserves and is being
	 * killed.  Don't allow any other task to have access to the
	 * reserves.
	 */
	if (test_tsk_thread_flag(task, TIF_MEMDIE)) {
		if (unlikely(frozen(task)))
			__thaw_task(task);
		if (!force_kill)
			return OOM_SCAN_ABORT;
	}
	if (!task->mm)
		return OOM_SCAN_CONTINUE;

	/*
	 * If the task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task))
		return OOM_SCAN_SELECT;

	if (task->flags & PF_EXITING && !force_kill) {
		/*
		 * If this task is not being ptraced on exit, then wait for it
		 * to finish before killing some other task unnecessarily.
		 */
		if (!(task->group_leader->ptrace & PT_TRACE_EXIT))
			return OOM_SCAN_ABORT;
	}
	return OOM_SCAN_OK;
}
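
/*
 * select_bad_process() below maps these return values as follows:
 * OOM_SCAN_SELECT picks the task unconditionally, OOM_SCAN_CONTINUE skips it,
 * OOM_SCAN_ABORT cancels the whole scan (a kill is already in flight), and
 * OOM_SCAN_OK lets the task be scored by oom_badness().
 */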

/*
 * Simple selection loop.  We choose the process with the highest number of
 * 'points'.  Returns ERR_PTR(-1UL) if the scan was aborted.
 *
 * (not docbooked, we don't want this one cluttering up the manual)
 */
static struct task_struct *select_bad_process(unsigned int *ppoints,
		unsigned long totalpages, const nodemask_t *nodemask,
		bool force_kill)
{
	struct task_struct *g, *p;
	struct task_struct *chosen = NULL;
	unsigned long chosen_points = 0;

	rcu_read_lock();
	do_each_thread(g, p) {
		unsigned int points;

		switch (oom_scan_process_thread(p, totalpages, nodemask,
						force_kill)) {
		case OOM_SCAN_SELECT:
			chosen = p;
			chosen_points = ULONG_MAX;
			/* fall through */
		case OOM_SCAN_CONTINUE:
			continue;
		case OOM_SCAN_ABORT:
			rcu_read_unlock();
			return ERR_PTR(-1UL);
		case OOM_SCAN_OK:
			break;
		}
		points = oom_badness(p, NULL, nodemask, totalpages);
		if (points > chosen_points) {
			chosen = p;
			chosen_points = points;
		}
	} while_each_thread(g, p);
	if (chosen)
		get_task_struct(chosen);
	rcu_read_unlock();

	*ppoints = chosen_points * 1000 / totalpages;
	return chosen;
}
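
/*
 * Note on reference counting: select_bad_process() returns the chosen task
 * with a reference taken via get_task_struct(); oom_kill_process() below
 * drops it with put_task_struct() on every exit path.
 */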

/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes,
 * swapents, oom_score_adj value, and name.
 */
static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ]   uid  tgid total_vm      rss nr_ptes swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%5d] %5d %5d %8lu %8lu %7lu %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			task->mm->nr_ptes,
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}

static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
			struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	task_lock(current);
	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
		"oom_score_adj=%hd\n",
		current->comm, gfp_mask, order,
		current->signal->oom_score_adj);
	cpuset_print_task_mems_allowed(current);
	task_unlock(current);
	dump_stack();
	if (memcg)
		mem_cgroup_print_oom_info(memcg, p);
	else
		show_mem(SHOW_MEM_FILTER_NODES);
	if (sysctl_oom_dump_tasks)
		dump_tasks(memcg, nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))
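/*
 * K() converts a page count to kilobytes: with 4 KiB pages (PAGE_SHIFT == 12)
 * it is x << 2, i.e. K(25) == 100 kB.
 */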

/*
 * Must be called while holding a reference to p, which will be released upon
 * returning.
 */
void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
		      unsigned int points, unsigned long totalpages,
		      struct mem_cgroup *memcg, nodemask_t *nodemask,
		      const char *message)
{
	struct task_struct *victim = p;
	struct task_struct *child;
	struct task_struct *t = p;
	struct mm_struct *mm;
	unsigned int victim_points = 0;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just set TIF_MEMDIE so it can die quickly.
	 */
	if (p->flags & PF_EXITING) {
		set_tsk_thread_flag(p, TIF_MEMDIE);
		put_task_struct(p);
		return;
	}

	if (__ratelimit(&oom_rs))
		dump_header(p, gfp_mask, order, memcg, nodemask);

	task_lock(p);
	pr_err("%s: Kill process %d (%s) score %d or sacrifice child\n",
		message, task_pid_nr(p), p->comm, points);
	task_unlock(p);

	/*
	 * If any of p's children has a different mm and is eligible for kill,
	 * the one with the highest oom_badness() score is sacrificed for its
	 * parent.  This attempts to lose the minimal amount of work done while
	 * still freeing memory.
	 */
	read_lock(&tasklist_lock);
	do {
		list_for_each_entry(child, &t->children, sibling) {
			unsigned int child_points;

			if (child->mm == p->mm)
				continue;
			/*
			 * oom_badness() returns 0 if the thread is unkillable
			 */
			child_points = oom_badness(child, memcg, nodemask,
						   totalpages);
			if (child_points > victim_points) {
				put_task_struct(victim);
				victim = child;
				victim_points = child_points;
				get_task_struct(victim);
			}
		}
	} while_each_thread(p, t);
	read_unlock(&tasklist_lock);

	rcu_read_lock();
	p = find_lock_task_mm(victim);
	if (!p) {
		rcu_read_unlock();
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* mm cannot safely be dereferenced after task_unlock(victim) */
	mm = victim->mm;
	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
		task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups,
	 * if any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when
	 * an oom killed thread cannot exit because it requires the semaphore
	 * and it is contended by another thread trying to allocate memory
	 * itself.  That thread will now get access to memory reserves since
	 * it has a pending fatal signal.
	 */
	for_each_process(p)
		if (p->mm == mm && !same_thread_group(p, victim) &&
		    !(p->flags & PF_KTHREAD)) {
			if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
				continue;

			task_lock(p);	/* Protect ->comm from prctl() */
			pr_err("Kill process %d (%s) sharing same memory\n",
				task_pid_nr(p), p->comm);
			task_unlock(p);
			do_send_sig_info(SIGKILL, SEND_SIG_FORCED, p, true);
		}
	rcu_read_unlock();

	set_tsk_thread_flag(victim, TIF_MEMDIE);
	do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
	put_task_struct(victim);
}
#undef K

/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
			int order, const nodemask_t *nodemask)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (constraint != CONSTRAINT_NONE)
			return;
	}
	dump_header(NULL, gfp_mask, order, NULL, nodemask);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}

static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
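
/*
 * A minimal usage sketch (hypothetical callback and helper names): a driver
 * that can shed cache on OOM reports the number of pages it freed through
 * the unsigned long that out_of_memory() passes as the notifier argument:
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_cache();	// hypothetical helper
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call	= my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);	// e.g. from module init
 */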

/*
 * Try to acquire the OOM killer lock for the zones in zonelist.  Returns zero
 * if a parallel OOM killing is already taking place that includes a zone in
 * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
 */
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		if (zone_is_oom_locked(zone)) {
			ret = 0;
			goto out;
		}
	}

	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		/*
		 * Lock each zone in the zonelist under zone_scan_lock so a
		 * parallel invocation of try_set_zonelist_oom() doesn't
		 * succeed when it shouldn't.
		 */
		zone_set_flag(zone, ZONE_OOM_LOCKED);
	}

out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

/*
 * Clears the ZONE_OOM_LOCKED flag for all zones in the zonelist so that failed
 * allocation attempts with zonelists containing them may now recall the OOM
 * killer, if necessary.
 */
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
{
	struct zoneref *z;
	struct zone *zone;

	spin_lock(&zone_scan_lock);
	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
		zone_clear_flag(zone, ZONE_OOM_LOCKED);
	}
	spin_unlock(&zone_scan_lock);
}

/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @zonelist: zonelist pointer
 * @gfp_mask: memory allocation flags
 * @order: amount of memory being requested as a power of 2
 * @nodemask: nodemask passed to page allocator
 * @force_kill: true if a task must be killed, even if others are exiting
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
		int order, nodemask_t *nodemask, bool force_kill)
{
	const nodemask_t *mpol_mask;
	struct task_struct *p;
	unsigned long totalpages;
	unsigned long freed = 0;
	unsigned int uninitialized_var(points);
	enum oom_constraint constraint = CONSTRAINT_NONE;
	int killed = 0;

	blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
	if (freed > 0)
		/* Got some memory back in the last second. */
		return;

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it.  The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
		set_thread_flag(TIF_MEMDIE);
		return;
	}

	/*
	 * Check if there were limitations on the allocation (only relevant
	 * for NUMA) that may require different handling.
	 */
	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
				       &totalpages);
	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);

	if (sysctl_oom_kill_allocating_task && current->mm &&
	    !oom_unkillable_task(current, NULL, nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
				 nodemask,
				 "Out of memory (oom_kill_allocating_task)");
		goto out;
	}

	p = select_bad_process(&points, totalpages, mpol_mask, force_kill);
	/* Found nothing?!?! Either we hang forever, or we panic. */
	if (!p) {
		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
		panic("Out of memory and no killable processes...\n");
	}
	if (PTR_ERR(p) != -1UL) {
		oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
				 nodemask, "Out of memory");
		killed = 1;
	}
out:
	/*
	 * Give the killed threads a good chance of exiting before trying to
	 * allocate memory again.
	 */
	if (killed)
		schedule_timeout_killable(1);
}

/*
 * The pagefault handler calls here because it is out of memory, i.e.
 * no pages are available.  Kill the "best" process unless a parallel
 * oom killing is already in progress.
 */
void pagefault_out_of_memory(void)
{
	struct zonelist *zonelist = node_zonelist(first_online_node,
						  GFP_KERNEL);

	if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
		out_of_memory(NULL, 0, 0, NULL, false);
		clear_zonelist_oom(zonelist, GFP_KERNEL);
	}
}