#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

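/*
 * Static-key fast path: patched to a no-op when no cpuset beyond the
 * root is in use, so the common case costs nothing at runtime.
 */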
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

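/*
 * Called as cpusets are created and destroyed; the static key above
 * stays disabled until at least one cpuset beyond the root exists.
 */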
static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

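/*
 * Softwall checks may also admit nodes from the nearest hardwalled
 * ancestor cpuset for !__GFP_HARDWALL allocations; hardwall checks are
 * confined to the task's own mems_allowed.  The full rules live in
 * kernel/cpuset.c.
 */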
extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

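/*
 * The wrappers below short-circuit to "allowed" while only the
 * top-level cpuset exists, avoiding the slow-path call entirely.
 */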
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

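/*
 * Only pay for the function call once per-cpuset memory pressure
 * accounting has been switched on via the root cpuset.
 */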
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

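/*
 * When page or slab spreading is enabled for the current task, these
 * pick the next allocation node, rotating through mems_allowed.
 */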
extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}

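/*
 * Writer side of the mems_allowed seqcount.  Interrupts are disabled
 * across the update: readers may run from interrupt context, and one
 * that interrupted the writer on the same CPU would spin forever.
 */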
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else

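/*
 * !CONFIG_CPUSETS: no-op stubs so callers need no #ifdef clutter.
 */
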
static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */