linux/include/linux/cpuset.h
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
        return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
        /* jump label reference count + the top-level cpuset */
        return static_key_count(&cpusets_enabled_key) + 1;
}

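/*
 * cpuset_inc()/cpuset_dec() adjust the cpusets_enabled_key reference count
 * as cpusets beyond the top-level one come and go, so nr_cpusets() above
 * stays accurate and cpusets_enabled() only reports true while extra
 * cpusets actually exist.
 */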
static inline void cpuset_inc(void)
{
        static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

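/*
 * With only the root cpuset present (nr_cpusets() <= 1) every node is
 * allowed, so the out-of-line __cpuset_node_allowed() check is skipped.
 */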
static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

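/*
 * Account a memory-pressure event against the current task's cpuset, but
 * only when the (disabled by default) pressure accounting has been enabled.
 */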
#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!cpusets_enabled())
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!cpusets_enabled())
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
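
/*
 * Illustrative sketch of the begin/retry pattern a caller might use;
 * try_alloc_from_nodes() and gfp_mask are stand-ins for whatever allocation
 * step the caller performs against cpuset_current_mems_allowed:
 *
 *        struct page *page;
 *        unsigned int cpuset_mems_cookie;
 *
 *        do {
 *                cpuset_mems_cookie = read_mems_allowed_begin();
 *                page = try_alloc_from_nodes(&cpuset_current_mems_allowed,
 *                                            gfp_mask);
 *        } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * The write side, set_mems_allowed() below, takes task_lock() and disables
 * interrupts around the seqcount update so readers observe a consistent
 * nodemask.
 */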

static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

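/*
 * Without cpusets, CPU hotplug simply rebuilds the single default
 * scheduling domain.
 */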
static inline void cpuset_update_active_cpus(bool cpu_online)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                                struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
        return 0;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */