linux/include/linux/oom.h
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>
#include <linux/sched/coredump.h> /* MMF_* */
#include <linux/mm.h> /* VM_FAULT* */

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer; these are
 * used to determine what should be killed.
 */
struct oom_control {
        /* Used to determine cpuset */
        struct zonelist *zonelist;

        /* Used to determine mempolicy */
        nodemask_t *nodemask;

        /* Memory cgroup in which oom is invoked, or NULL for global oom */
        struct mem_cgroup *memcg;

        /* Used to determine cpuset and node locality requirement */
        const gfp_t gfp_mask;

        /*
         * order == -1 means the oom kill is required by sysrq, otherwise
         * the order is used only for display purposes.
         */
        const int order;

        /* Used by oom implementation, do not set */
        unsigned long totalpages;
        struct task_struct *chosen;
        unsigned long chosen_points;
};
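
/*
 * Example (an illustrative sketch, not part of this header): the page
 * allocator builds an oom_control on the stack and hands it to
 * out_of_memory() while holding oom_lock; a NULL memcg means a global
 * (not memcg-local) oom. The gfp_mask/order values below are hypothetical.
 *
 *      struct oom_control oc = {
 *              .zonelist = node_zonelist(numa_node_id(), GFP_KERNEL),
 *              .nodemask = NULL,
 *              .memcg = NULL,
 *              .gfp_mask = GFP_KERNEL,
 *              .order = 0,
 *      };
 *
 *      mutex_lock(&oom_lock);
 *      out_of_memory(&oc);
 *      mutex_unlock(&oom_lock);
 */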

extern struct mutex oom_lock;

/*
 * A task that marks itself as an oom "origin" (e.g. swapoff, which can
 * release a large amount of memory when it exits) is preferred as the
 * next oom victim.
 */
static inline void set_current_oom_origin(void)
{
        current->signal->oom_flag_origin = true;
}

static inline void clear_current_oom_origin(void)
{
        current->signal->oom_flag_origin = false;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
        return p->signal->oom_flag_origin;
}
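
/*
 * Example (a sketch modelled on sys_swapoff(), arguments elided): swapoff
 * marks itself as the oom origin while it pulls pages back out of swap,
 * since killing it would release the swap space being drained.
 *
 *      set_current_oom_origin();
 *      err = try_to_unuse(...);
 *      clear_current_oom_origin();
 */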

/* signal->oom_mm is set once the task has been selected as an oom victim. */
static inline bool tsk_is_oom_victim(struct task_struct *tsk)
{
        return tsk->signal->oom_mm;
}

/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set in
 * the mm. At that moment any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Callers should invoke this before establishing a page table entry for
 * a !shared mapping and under the proper page table lock.
 *
 * Returns 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
 */
static inline int check_stable_address_space(struct mm_struct *mm)
{
        if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
                return VM_FAULT_SIGBUS;
        return 0;
}
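
/*
 * Example (an illustrative sketch modelled on do_anonymous_page() in
 * mm/memory.c): the fault handler performs the check with the page table
 * lock held, right before installing the new pte:
 *
 *      vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 *                                     &vmf->ptl);
 *      ...
 *      ret = check_stable_address_space(vma->vm_mm);
 *      if (ret)
 *              goto unlock;    (drops the lock, returns VM_FAULT_SIGBUS)
 *      set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
 */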

extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
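
/*
 * Example (a hypothetical driver, not part of the kernel): oom notifiers
 * run before a victim is selected; a callback may free caches and report
 * the number of reclaimed pages through the void pointer argument. If
 * anything was freed, the oom kill is skipped.
 *
 *      static int shrink_my_caches(struct notifier_block *nb,
 *                                  unsigned long unused, void *parm)
 *      {
 *              unsigned long *freed = parm;
 *
 *              *freed += my_drop_caches();     (hypothetical helper)
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_oom_nb = {
 *              .notifier_call = shrink_my_caches,
 *      };
 *
 *      register_oom_notifier(&my_oom_nb);
 */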

extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
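
/*
 * Example (a sketch modelled on freeze_processes() in
 * kernel/power/process.c): suspend disables the oom killer so a kill
 * cannot race with frozen tasks. oom_killer_disable() waits up to the
 * given timeout for existing oom victims to exit and returns false if
 * they did not make it.
 *
 *      if (!oom_killer_disable(msecs_to_jiffies(freeze_timeout_msecs)))
 *              return -EBUSY;
 *      ...
 *      oom_killer_enable();
 */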

extern struct task_struct *find_lock_task_mm(struct task_struct *p);

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */