linux/include/linux/oom.h
#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
        CONSTRAINT_NONE,                /* unconstrained: any node may be used */
        CONSTRAINT_CPUSET,              /* allocation restricted by a cpuset */
        CONSTRAINT_MEMORY_POLICY,       /* restricted by a mempolicy nodemask */
        CONSTRAINT_MEMCG,               /* a memory cgroup hit its limit */
};

enum oom_scan_t {
        OOM_SCAN_OK,            /* scan thread and find its badness */
        OOM_SCAN_CONTINUE,      /* do not consider thread for oom kill */
        OOM_SCAN_ABORT,         /* abort the iteration and return */
        OOM_SCAN_SELECT,        /* always select this thread first */
};

/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN         ((__force oom_flags_t)0x1)

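/*
 * set_current_oom_origin()/clear_current_oom_origin() bracket an operation
 * that is expected to consume large amounts of memory (swapoff, for example),
 * so that the current task is sacrificed first if the system runs out of
 * memory in the meantime.
 */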
static inline void set_current_oom_origin(void)
{
        current->signal->oom_flags |= OOM_FLAG_ORIGIN;
}

static inline void clear_current_oom_origin(void)
{
        current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
        return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}

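/*
 * Mark @tsk as the victim of an OOM kill: a marked victim gets access to
 * memory reserves so it can exit and free its memory quickly.
 * unmark_oom_victim() clears that state for current once its address
 * space has been released.
 */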
extern void mark_tsk_oom_victim(struct task_struct *tsk);

extern void unmark_oom_victim(void);

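/*
 * Heuristic "badness" score for @p: roughly the fraction of @totalpages the
 * task is using, adjusted by its oom_score_adj. The task with the highest
 * score is the preferred OOM victim.
 */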
extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);

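/*
 * note_oom_kill() bumps a count of OOM kills and oom_kills_count() reads it
 * back, so allocators can notice that a kill happened while they were
 * waiting. oom_kill_process() performs the actual kill and logs the reason
 * given in @message.
 */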
extern int oom_kills_count(void);
extern void note_oom_kill(void);
extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
                             unsigned int points, unsigned long totalpages,
                             struct mem_cgroup *memcg, nodemask_t *nodemask,
                             const char *message);

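/*
 * Serialize OOM killing per zonelist: the trylock fails while another OOM
 * kill is already in progress for a zone in @zonelist, so concurrent
 * allocators do not trigger redundant kills.
 */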
extern bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_flags);
extern void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_flags);

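/* Panic instead of killing a task if the panic_on_oom sysctl requires it. */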
extern void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
                               int order, const nodemask_t *nodemask,
                               struct mem_cgroup *memcg);

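/* Decide how the OOM victim scan should treat @task; see enum oom_scan_t. */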
extern enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
                unsigned long totalpages, const nodemask_t *nodemask,
                bool force_kill);

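/*
 * Main entry point of the OOM killer: selects the worst task allowed by
 * @mask and kills it. Notifiers registered below are invoked first and may
 * free enough memory to make the kill unnecessary.
 */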
extern bool out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
                int order, nodemask_t *mask, bool force_kill);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);

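/*
 * Disable/re-enable the OOM killer, e.g. while processes are frozen for
 * suspend or hibernation; oom_killer_disable() returns false if the killer
 * could not be disabled.
 */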
extern bool oom_killer_disabled;
extern bool oom_killer_disable(void);
extern void oom_killer_enable(void);

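/*
 * Return a thread of @p's thread group that still has a usable ->mm, with
 * task_lock() held on it, or NULL if all threads have released their mm.
 */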
extern struct task_struct *find_lock_task_mm(struct task_struct *p);

static inline bool task_will_free_mem(struct task_struct *task)
{
        /*
         * A coredumping process may sleep for an extended period in exit_mm(),
         * so the oom killer cannot assume that the process will promptly exit
         * and release memory.
         */
        return (task->flags & PF_EXITING) &&
                !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
}

/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */