#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

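/*
 * cpusets_enabled_key is a jump label that is raised only while cpusets are
 * actually in use, so the inline wrappers below reduce to a static branch
 * and stay off the allocator fast path otherwise.
 */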
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

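/*
 * Pairwise helpers that bump/drop the reference count behind
 * cpusets_enabled_key as cpusets come into and go out of use.
 */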
static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
}

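/* Out-of-line entry points provided by the cpuset core. */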
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

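/*
 * cpuset_node_allowed()/cpuset_zone_allowed() only take the out-of-line
 * __cpuset_node_allowed() path when cpusets are enabled; otherwise they
 * report the node or zone as allowed without further checks.
 */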
extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

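/*
 * cpuset_memory_pressure_bump() notes memory pressure for the current
 * task's cpuset and is a no-op unless cpuset_memory_pressure_enabled has
 * been switched on.
 */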
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

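/*
 * cpuset_do_page_mem_spread()/cpuset_do_slab_mem_spread() report whether
 * the current task has the corresponding memory-spread policy flag set.
 */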
static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated
 * in parallel and, depending on the new value, an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!cpusets_enabled())
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!cpusets_enabled())
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
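/*
 * Typical usage is a retry loop around whatever consumes mems_allowed; a
 * sketch, with attempt_operation() standing in for the caller's own
 * allocation or scan path:
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		ret = attempt_operation();
 *	} while (!ret && read_mems_allowed_retry(seq));
 */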

/*
 * Update current->mems_allowed under the task lock and the mems_allowed
 * seqcount so that readers in read_mems_allowed_begin()/_retry() sections
 * observe a consistent nodemask.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

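/*
 * With CONFIG_CPUSETS disabled, the interfaces above become stubs that
 * either do nothing or report "no restriction", so callers need no #ifdefs.
 */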
static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */