linux/include/linux/oom.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer, used to
 * determine what should be killed.
 */
struct oom_control {
        /* Used to determine cpuset */
        struct zonelist *zonelist;

        /* Used to determine mempolicy */
        nodemask_t *nodemask;

        /* Memory cgroup in which oom is invoked, or NULL for global oom */
        struct mem_cgroup *memcg;

        /* Used to determine cpuset and node locality requirement */
        const gfp_t gfp_mask;

        /*
         * order == -1 means the oom kill was requested by sysrq; any other
         * value is the order of the failing allocation, used only for
         * display purposes.
         */
        const int order;

        /* Used by oom implementation, do not set */
        unsigned long totalpages;
        struct task_struct *chosen;
        unsigned long chosen_points;
};
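
/*
 * Example (sketch, not part of this header): the global OOM path in
 * mm/page_alloc.c builds an oom_control on the stack and calls
 * out_of_memory() under oom_lock, roughly:
 *
 *      struct oom_control oc = {
 *              .zonelist = ac->zonelist,
 *              .nodemask = ac->nodemask,
 *              .memcg = NULL,
 *              .gfp_mask = gfp_mask,
 *              .order = order,
 *      };
 *
 *      if (mutex_trylock(&oom_lock)) {
 *              *did_some_progress = out_of_memory(&oc);
 *              mutex_unlock(&oom_lock);
 *      }
 */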

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
        current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
        current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
        return p->signal->oom_flag_origin;
}
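
/*
 * Example (sketch): swapoff marks itself as the OOM "origin" while it
 * brings pages back in from the swap device, so that if its allocations
 * trigger the OOM killer it is selected first (see mm/swapfile.c):
 *
 *      set_current_oom_origin();
 *      err = try_to_unuse(type, false, 0);
 *      clear_current_oom_origin();
 */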

static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
        return tsk->signal->oom_mm;
}

/*
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. This is guaranteed to remain true once it has been set.
 */
static inline bool mm_is_oom_victim(struct mm_struct *mm)
{
        return test_bit(MMF_OOM_VICTIM, &mm->flags);
}
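
/*
 * Example (sketch): exit_mmap() in mm/mmap.c uses mm_is_oom_victim() to
 * synchronize with the oom reaper before tearing down the address space,
 * roughly:
 *
 *      if (unlikely(mm_is_oom_victim(mm))) {
 *              (void)__oom_reap_task_mm(mm);
 *              set_bit(MMF_OOM_SKIP, &mm->flags);
 *      }
 */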

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set in
 * the mm. At that point any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Callers should invoke this before establishing a page table entry
 * for a !shared mapping, and must hold the proper page table lock.
 *
 * Returns 0 when the page fault is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
        if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
                return VM_FAULT_SIGBUS;
        return 0;
}
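
/*
 * Example (sketch): an anonymous fault handler checks stability under
 * the page table lock before installing a new PTE, in the spirit of
 * do_anonymous_page() in mm/memory.c:
 *
 *      vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 *                                     vmf->address, &vmf->ptl);
 *      ret = check_stable_address_space(vma->vm_mm);
 *      if (ret)
 *              goto unlock;
 *      set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 */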

bool __oom_reap_task_mm(struct mm_struct *mm);

extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);
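
/*
 * Example (simplified sketch): victim selection scores each candidate
 * and keeps the highest-scoring task, in the spirit of
 * oom_evaluate_task() in mm/oom_kill.c:
 *
 *      points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
 *      if (points > oc->chosen_points) {
 *              oc->chosen_points = points;
 *              oc->chosen = task;
 *      }
 */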

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

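/*
 * Example (sketch, hypothetical names): an OOM notifier receives a
 * pointer to an unsigned long and adds the number of pages it managed
 * to free, the pattern used by e.g. drivers/virtio/virtio_balloon.c:
 *
 *      static int my_oom_notify(struct notifier_block *nb,
 *                               unsigned long unused, void *parm)
 *      {
 *              unsigned long *freed = parm;
 *
 *              *freed += my_cache_shrink();
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_oom_nb = {
 *              .notifier_call = my_oom_notify,
 *      };
 *
 *      err = register_oom_notifier(&my_oom_nb);
 */
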
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);

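/*
 * Example (sketch): suspend disables the OOM killer while processes are
 * frozen and re-enables it on thaw (see kernel/power/process.c):
 *
 *      if (!oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
 *              return -EBUSY;
 *      ...
 *      oom_killer_enable();
 */
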
extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */