/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "sched.h"

/*
 * Bitmask made from a "or" of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	\
	(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE	\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK	0
#endif

#define MEMBARRIER_CMD_BITMASK	\
	(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED	\
	| MEMBARRIER_CMD_PRIVATE_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	\
	| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)

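/* IPI handler: issue a full memory barrier on the interrupted CPU. */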
static void ipi_mb(void *info)
{
	smp_mb();
}

static int membarrier_global_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

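	/*
	 * With a single online CPU there is no other running thread to
	 * order against: the scheduler already provides the required
	 * ordering when threads of this process are scheduled in.
	 */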
	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a barrier. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

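	/*
	 * Take the CPU hotplug read lock so the set of online CPUs
	 * cannot change while we inspect each CPU's current task.
	 */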
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;

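		/*
		 * Only CPUs currently running a task whose mm has
		 * registered for MEMBARRIER_CMD_GLOBAL_EXPEDITED need an
		 * IPI; CPUs running kernel threads (no mm) are skipped.
		 */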
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm && (atomic_read(&p->mm->membarrier_state) &
				   MEMBARRIER_STATE_GLOBAL_EXPEDITED)) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
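	/*
	 * Send the collected IPIs in one batch. Preemption is disabled
	 * around smp_call_function_many() as that API requires; the call
	 * waits until every target CPU has executed ipi_mb().
	 */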
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a barrier */
	return 0;
}

static int membarrier_private_expedited(int flags)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

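	/*
	 * The calling process must have registered the matching private
	 * expedited command beforehand; otherwise the command is
	 * rejected with -EPERM.
	 */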
	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		if (!(atomic_read(&current->mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
			return -EPERM;
	} else {
		if (!(atomic_read(&current->mm->membarrier_state) &
		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
			return -EPERM;
	}

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a barrier. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
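		/*
		 * Only CPUs currently running a thread of the calling
		 * process (same mm) need the IPI; all other CPUs are
		 * left undisturbed.
		 */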
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm == current->mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
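	/* Send all pending IPIs in one batch and wait for completion. */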
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a barrier */

	return 0;
}

static int membarrier_register_global_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;

	if (atomic_read(&mm->membarrier_state) &
	    MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY)
		return 0;
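	/*
	 * First registration for this mm: set the GLOBAL_EXPEDITED bit,
	 * ensure it is visible to the scheduler, then mark it READY.
	 */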
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED, &mm->membarrier_state);
	if (atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1) {
		/*
		 * For a single mm user, single threaded process, we can
		 * simply issue a memory barrier after setting
		 * MEMBARRIER_STATE_GLOBAL_EXPEDITED to guarantee that
		 * no memory access following registration is reordered
		 * before registration.
		 */
		smp_mb();
	} else {
		/*
		 * For multi-threaded or multi-mm-user processes, we need
		 * to ensure all future scheduler executions will observe
		 * the new thread flag state for this mm.
		 */
		synchronize_rcu();
	}
	atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
		  &mm->membarrier_state);

	return 0;
}

static int membarrier_register_private_expedited(int flags)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	int state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY;

	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
			return -EINVAL;
		state = MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY;
	}

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if (atomic_read(&mm->membarrier_state) & state)
		return 0;
	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED, &mm->membarrier_state);
	if (flags & MEMBARRIER_FLAG_SYNC_CORE)
		atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE,
			  &mm->membarrier_state);
	if (!(atomic_read(&mm->mm_users) == 1 && get_nr_threads(p) == 1)) {
		/*
		 * Ensure all future scheduler executions will observe the
		 * new thread flag state for this process.
		 */
		synchronize_rcu();
	}
	atomic_or(state, &mm->membarrier_state);

	return 0;
}

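/*
 * Userspace usage sketch (not part of this file): a process registers
 * the private expedited command once, then issues expedited barriers as
 * needed. Assumes <linux/membarrier.h> and <sys/syscall.h>; error
 * handling elided.
 *
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0);
 *	...
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
 */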
/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot time.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pairing of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                        barrier()   smp_mb() sys_membarrier()
 *        barrier()          X           X            O
 *        smp_mb()           X           O            O
 *        sys_membarrier()   O           O            O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_GLOBAL;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_GLOBAL:
		/* MEMBARRIER_CMD_GLOBAL is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_rcu();
		return 0;
	case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
		return membarrier_global_expedited();
	case MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED:
		return membarrier_register_global_expedited();
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited(0);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		return membarrier_register_private_expedited(0);
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE:
		return membarrier_register_private_expedited(MEMBARRIER_FLAG_SYNC_CORE);
	default:
		return -EINVAL;
	}
}