// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
#include <linux/sched/isolation.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);
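
/*
 * A minimal sketch (not from this file): the open-coded equivalent of the
 * for_each_cpu() iterator, built on cpumask_next().  do_work() is a
 * hypothetical helper:
 *
 *	unsigned int cpu;
 *
 *	for (cpu = cpumask_next(-1, mask); cpu < nr_cpu_ids;
 *	     cpu = cpumask_next(cpu, mask))
 *		do_work(cpu);
 */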

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);
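
/*
 * A minimal usage sketch (not from this file; wq and work are hypothetical):
 * push work to any online CPU other than the current one:
 *
 *	int target = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *
 *	if (target < nr_cpu_ids)
 *		queue_work_on(target, wq, work);
 */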

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion.
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
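
/*
 * A minimal sketch of how this helper is used: for_each_cpu_wrap() expands
 * to essentially the loop below, visiting every cpu set in @mask exactly
 * once, starting at @start and wrapping around.  do_work() is hypothetical:
 *
 *	int cpu;
 *
 *	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
 *	     cpu < nr_cpumask_bits;
 *	     cpu = cpumask_next_wrap(cpu, mask, start, true))
 *		do_work(cpu);
 */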

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);
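
/*
 * A minimal sketch of the usual allocate/use/free pattern for off-stack
 * cpumasks (not from this file; some_mask and frob_cpus() are hypothetical):
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_and(tmp, cpu_online_mask, some_mask);
 *	frob_cpus(tmp);
 *	free_cpumask_var(tmp);
 */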

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpu's first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu, hk_flags;
	const struct cpumask *mask;

	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
	mask = housekeeping_cpumask(hk_flags);
	/* Wrap: we always want a cpu. */
	i %= cpumask_weight(mask);

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, mask) {
			if (i-- == 0)
				return cpu;
		}
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
			if (i-- == 0)
				return cpu;
		}

		for_each_cpu(cpu, mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
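
/*
 * A minimal sketch (hypothetical driver code, not from this file): spreading
 * per-queue interrupts across housekeeping CPUs, NUMA-local ones first.
 * queue_irq[] and nr_queues are assumptions:
 *
 *	for (q = 0; q < nr_queues; q++) {
 *		unsigned int cpu = cpumask_local_spread(q, dev_to_node(dev));
 *
 *		irq_set_affinity_hint(queue_irq[q], cpumask_of(cpu));
 *	}
 */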

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu from *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.  Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);