linux/include/linux/cpuset.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
        return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
        static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
        static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
        static_branch_dec_cpuslocked(&cpusets_enabled_key);
        static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_read_lock(void);
extern void cpuset_read_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_node_allowed(node, gfp_mask);
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        if (cpusets_enabled())
                return __cpuset_zone_allowed(z, gfp_mask);
        return true;
}
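
/*
 * Example usage (illustrative sketch only, not a verbatim caller): an
 * allocator-style loop can skip zones whose node is not allowed by the
 * current task's cpuset. When no cpusets are in use, the static branch in
 * cpuset_zone_allowed() keeps the check effectively free.
 *
 *      struct zone *zone;
 *
 *      for_each_zone(zone) {
 *              if (!cpuset_zone_allowed(zone, GFP_KERNEL))
 *                      continue;
 *              ... try to satisfy the allocation from this zone ...
 *      }
 */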

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                          const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()                           \
        do {                                                    \
                if (cpuset_memory_pressure_enabled)             \
                        __cpuset_memory_pressure_bump();        \
        } while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
                                        struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
                            struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
        return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return task_spread_slab(current);
}
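
/*
 * Example usage (illustrative sketch only): a page-cache style allocation
 * can honour the cpuset memory-spreading policy by picking a spread node
 * when the policy is enabled for the current task:
 *
 *      struct page *page;
 *      int nid = numa_node_id();
 *
 *      if (cpuset_do_page_mem_spread())
 *              nid = cpuset_mem_spread_node();
 *      page = __alloc_pages_node(nid, GFP_KERNEL, 0);
 */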

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel and, depending on the new value, an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
        if (!static_branch_unlikely(&cpusets_pre_enable_key))
                return 0;

        return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
        if (!static_branch_unlikely(&cpusets_enabled_key))
                return false;

        return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
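
/*
 * Example usage (illustrative sketch of the intended pattern; try_alloc()
 * is a hypothetical helper, not a real kernel function): callers that make
 * decisions based on mems_allowed wrap the operation so that a concurrent
 * cpuset update leads to a retry instead of a spurious failure:
 *
 *      unsigned int cookie;
 *      struct page *page;
 *
 *      do {
 *              cookie = read_mems_allowed_begin();
 *              page = try_alloc(gfp_mask, order);
 *      } while (!page && read_mems_allowed_retry(cookie));
 */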

/*
 * Publish a new mems_allowed for the current task. The seqcount write
 * section pairs with read_mems_allowed_begin()/read_mems_allowed_retry()
 * above, so concurrent readers either see a consistent nodemask or retry
 * across the update. Interrupts are disabled around the write section so
 * that a reader running from interrupt context on this CPU cannot spin on
 * the sequence counter while the update is in flight.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
        unsigned long flags;

        task_lock(current);
        local_irq_save(flags);
        write_seqcount_begin(&current->mems_allowed_seq);
        current->mems_allowed = nodemask;
        write_seqcount_end(&current->mems_allowed_seq);
        local_irq_restore(flags);
        task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_read_lock(void) { }
static inline void cpuset_read_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
                                       struct cpumask *mask)
{
        cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
        return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
        return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
        return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
        return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
                                                 const struct task_struct *tsk2)
{
        return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
                                                struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
        return 0;
}

static inline int cpuset_slab_spread_node(void)
{
        return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
        return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
        return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
        return false;
}

static inline void rebuild_sched_domains(void)
{
        partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
        return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
        return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */