/*
 * Generic helpers for smp ipi calls
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
/* Global RCU-protected queue of pending smp_call_function_many() requests. */
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

/* Per-CPU queue of pending single-CPU function call requests. */
struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	};

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by a
 * previous function call. For multi-cpu calls we also have to ensure
 * no other cpu is still observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * Prevent the CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * Ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before the IPI handler locks
	 * the list to pull the entry off it, because of the normal cache
	 * coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order with respect to the cache coherency
	 * protocol on an architecture, sufficient synchronisation should be
	 * added to the arch code to make it appear to obey cache coherency
	 * WRT locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entries are visible on call_function.queue after we have
	 * entered the IPI. See the comment in smp_call_function_many.
	 * Without this we may miss an entry on the list and never get
	 * another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next.
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;
		void (*func) (void *info);

		/*
		 * Since we walk the list without any locks, we might
		 * see an entry that was completed, removed from the
		 * list and is in the process of being reused.
		 *
		 * We must check that the cpu is in the cpumask before
		 * checking the refs, and both must be set before
		 * executing the callback on this cpu.
		 */
		if (!cpumask_test_cpu(cpu, data->cpumask))
			continue;

		smp_rmb();

		if (atomic_read(&data->refs) == 0)
			continue;

		func = data->csd.func;		/* save for the warning below */
		data->csd.func(data->csd.info);

		/*
		 * If the cpu mask is no longer set then func enabled
		 * interrupts (BUG), this cpu took another smp call
		 * function interrupt and executed func(info) twice,
		 * and that nested execution decremented refs.
		 */
		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
			WARN(1, "%pS enabled interrupts and double executed\n",
			     func);
			continue;
		}

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);

		if (refs)
			continue;

		/* We are the last CPU: take the entry off the queue. */
		WARN_ON(!cpumask_empty(data->cpumask));

		raw_spin_lock(&call_function.lock);
		list_del_rcu(&data->csd.list);
		raw_spin_unlock(&call_function.lock);

		csd_unlock(&data->csd);
	}
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save the flags away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/**
 * smp_call_function_single(): Run a function on a specific CPU.
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until @func has completed on @cpu.
 *
 * Returns 0 on success, else a negative status code (-ENXIO if @cpu
 * is not online).
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * Prevent preemption and rescheduling on another processor,
	 * as well as CPU removal.
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
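
/*
 * Illustrative usage sketch (not part of the original file): a caller can
 * run a short handler on one particular CPU and wait for it to finish.
 * struct cpu_state_sample, read_cpu_state() and sample_cpu_state() below
 * are hypothetical names, not kernel APIs.
 *
 *	struct cpu_state_sample {
 *		int cpu;
 *		unsigned long jiffies_seen;
 *	};
 *
 *	static void read_cpu_state(void *info)
 *	{
 *		struct cpu_state_sample *s = info;
 *
 *		s->cpu = smp_processor_id();
 *		s->jiffies_seen = jiffies;
 *	}
 *
 *	static int sample_cpu_state(int cpu, struct cpu_state_sample *s)
 *	{
 *		return smp_call_function_single(cpu, read_cpu_state, s, 1);
 *	}
 */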

/**
 * smp_call_function_any(): Run a function on any of the given cpus.
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of the current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online cpu will do; smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
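
/*
 * Illustrative usage sketch (not part of the original file): run a handler
 * on whichever CPU of a device's affinity mask is cheapest to reach; the
 * current CPU is preferred, then the local node, then any online CPU in the
 * mask.  poke_device() and poke_on_affine_cpu() are hypothetical names.
 *
 *	static void poke_device(void *info)
 *	{
 *		pr_debug("poked on CPU %d\n", smp_processor_id());
 *	}
 *
 *	static int poke_on_affine_cpu(const struct cpumask *dev_mask)
 *	{
 *		return smp_call_function_any(dev_mask, poke_device, NULL, 1);
 *	}
 */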

/**
 * __smp_call_function_single(): Run a function on a specific CPU.
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and set-up data structure.
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}
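
/*
 * Illustrative usage sketch (not part of the original file): embed the
 * call_single_data in a longer-lived, zero-initialized object so that an
 * asynchronous (wait == 0) request needs no stack storage; csd_lock() above
 * serializes reuse of the embedded csd.  struct remote_work, remote_work_fn()
 * and kick_remote_work() are hypothetical names.
 *
 *	struct remote_work {
 *		struct call_single_data csd;
 *		int payload;
 *	};
 *
 *	static void remote_work_fn(void *info)
 *	{
 *		struct remote_work *rw = info;
 *
 *		pr_debug("payload %d handled on CPU %d\n",
 *			 rw->payload, smp_processor_id());
 *	}
 *
 *	static void kick_remote_work(int cpu, struct remote_work *rw)
 *	{
 *		rw->csd.func = remote_work_fn;
 *		rw->csd.info = rw;
 *		__smp_call_function_single(cpu, &rw->csd, 0);
 *	}
 */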

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on the online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on the other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);

	/*
	 * To ensure the interrupt handler gets a complete view, order the
	 * cpumask and refs writes here and order the reads of them in the
	 * interrupt handler.  In addition, each cpu may only clear its own
	 * bit from the mask.
	 */
	smp_wmb();

	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place the entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the IPI.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see the comment in generic_exec_single.)
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
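
/*
 * Illustrative usage sketch (not part of the original file): run a handler
 * on every online CPU in a mask except the calling CPU, which handles itself
 * directly; preemption must be disabled around the call, as noted above.
 * flush_local_state() and flush_state_on_mask() are hypothetical names.
 *
 *	static void flush_local_state(void *info)
 *	{
 *		pr_debug("flushing on CPU %d\n", smp_processor_id());
 *	}
 *
 *	static void flush_state_on_mask(const struct cpumask *mask)
 *	{
 *		preempt_disable();
 *		smp_call_function_many(mask, flush_local_state, NULL, true);
 *		if (cpumask_test_cpu(smp_processor_id(), mask))
 *			flush_local_state(NULL);
 *		preempt_enable();
 *	}
 */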

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on the other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpus call @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

void ipi_call_lock(void)
{
	raw_spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	raw_spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	raw_spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	raw_spin_unlock_irq(&call_function.lock);
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */

/*
 * on_each_cpu - Call a function on all processors, including the current one.
 *
 * May be used during early boot while early_boot_irqs_disabled is set;
 * uses local_irq_save/restore() rather than local_irq_disable/enable()
 * around the local invocation.
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
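
/*
 * Illustrative usage sketch (not part of the original file): on_each_cpu()
 * runs a handler on every online CPU, including the current one.  The
 * per-CPU counter my_stat and the helpers bump_stat()/bump_all_stats() are
 * hypothetical names.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_stat);
 *
 *	static void bump_stat(void *info)
 *	{
 *		__this_cpu_inc(my_stat);
 *	}
 *
 *	static void bump_all_stats(void)
 *	{
 *		on_each_cpu(bump_stat, NULL, 1);
 *	}
 */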