linux/include/linux/cpuset.h
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

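/*
 * cpusets_enabled() is backed by a static key so that, as long as only
 * the root cpuset exists, the check compiles down to a patched-out
 * branch and the hot paths skip the cpuset work entirely.
 */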
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
        return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
        /* jump label reference count + the top-level cpuset */
        return static_key_count(&cpusets_enabled_key) + 1;
}

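/*
 * cpuset_inc()/cpuset_dec() bump the static key when a cpuset other
 * than the root is created or destroyed, keeping nr_cpusets() and
 * cpusets_enabled() in sync with the number of cpusets in use.
 */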
static inline void cpuset_inc(void)
{
        static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

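/*
 * Fast path wrapper: with only the root cpuset present (nr_cpusets()
 * <= 1) every node is allowed, so the full check in
 * __cpuset_node_allowed() can be skipped.
 */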
static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

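/*
 * Record a memory pressure event for current's cpuset, but only when
 * per-cpuset memory pressure accounting has been enabled; the common
 * (disabled) case costs just a test of a global flag.
 */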
#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

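/*
 * When memory spreading is enabled for a cpuset, page cache and slab
 * allocations of its tasks are spread over the cpuset's memory nodes;
 * these helpers return the node to use for the next such allocation.
 */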
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel and, depending on the new value, an operation can
 * fail, potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!cpusets_enabled())
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!cpusets_enabled())
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
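
/*
 * A minimal sketch of the intended retry pattern; the allocator call
 * and the local variable names are illustrative only, and error
 * handling is elided:
 *
 *      unsigned int cpuset_mems_cookie;
 *      struct page *page;
 *
 *      do {
 *              cpuset_mems_cookie = read_mems_allowed_begin();
 *              page = __alloc_pages_node(nid, gfp_mask, order);
 *      } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */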
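/*
 * Writer side of mems_allowed_seq: update current->mems_allowed under
 * task_lock() inside a write_seqcount section, so that readers which
 * sampled the count with read_mems_allowed_begin() notice the change
 * and retry.
 */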
static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

extern void cpuset_post_attach_flush(void);

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                              struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
        return 0;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

static inline void cpuset_post_attach_flush(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */