/*
 * Generic helpers for SMP cross-CPU function calls (IPI based).
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
					     cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		/* Fall-through to the CPU_DEAD[_FROZEN] case. */

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_percpu(cfd->csd);
		break;

	case CPU_DYING:
	case CPU_DYING_FROZEN:
		/*
		 * The IPIs for the smp-call-function callbacks queued by other
		 * CPUs might arrive late, either due to hardware latencies or
		 * because this CPU disabled interrupts (inside stop-machine)
		 * before the IPIs were sent. So flush out any pending callbacks
		 * explicitly (without waiting for the IPIs to arrive), to
		 * ensure that the outgoing CPU doesn't go offline with work
		 * still pending.
		 */
		flush_smp_call_function_queue(false);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * Smp functions above are typically synchronous so they
	 * better run first since some other CPUs may be busy waiting
	 * for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
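
/*
 * Illustrative usage sketch (not part of this file): read a per-CPU value
 * on a specific CPU and wait for the result.  The helper names
 * example_read_counter(), example_counter_on_cpu() and the per-CPU read
 * are hypothetical; the block is kept under #if 0 so it is never built.
 */
#if 0
static void example_read_counter(void *info)
{
	unsigned long *val = info;

	/* Runs on the target CPU, in hard-IRQ context, interrupts disabled. */
	*val = example_read_this_cpu_counter();	/* hypothetical per-CPU read */
}

static unsigned long example_counter_on_cpu(int cpu)
{
	unsigned long val = 0;

	/* wait == 1: block until the callback has completed on @cpu. */
	smp_call_function_single(cpu, example_read_counter, &val, 1);
	return val;
}
#endif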

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *			             specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
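
/*
 * Illustrative usage sketch (not part of this file): fire an asynchronous
 * cross-CPU call from a context that must not block.  The caller owns the
 * call_single_data embedded in its object and must not reuse it until the
 * previous callback has run.  struct example_dev, example_poke() and
 * example_kick() are hypothetical names; the block is under #if 0.
 */
#if 0
struct example_dev {
	struct call_single_data csd;
	int target_cpu;
};

static void example_poke(void *info)
{
	struct example_dev *dev = info;

	/* Runs on dev->target_cpu in hard-IRQ context. */
	(void)dev;
}

static void example_kick(struct example_dev *dev)
{
	dev->csd.func = example_poke;
	dev->csd.info = dev;

	/* Returns immediately; do not touch dev->csd until it has fired. */
	smp_call_function_single_async(dev->target_cpu, &dev->csd);
}
#endif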

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online CPU will do: smp_call_function_single() handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
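
/*
 * Illustrative usage sketch (not part of this file): run a callback on
 * whichever CPU of a device's affinity mask is cheapest to reach (current
 * CPU first, then the local node, then any online CPU in the mask).
 * example_sync() and example_flush_on_affine_cpu() are hypothetical names;
 * the block is under #if 0.
 */
#if 0
static void example_sync(void *info)
{
	/* Fast, non-blocking work done on the selected CPU. */
}

static int example_flush_on_affine_cpu(const struct cpumask *affinity)
{
	/* wait == 1: return only after example_sync() has completed. */
	return smp_call_function_any(affinity, example_sync, NULL, 1);
}
#endif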

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
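
/*
 * Illustrative usage sketch (not part of this file): invalidate a software
 * cache on a set of CPUs other than the current one.  Preemption must be
 * disabled across the call, as documented above.  example_invalidate() and
 * example_invalidate_mask() are hypothetical names; the block is under #if 0.
 */
#if 0
static void example_invalidate(void *info)
{
	/* Runs on each targeted remote CPU in hard-IRQ context. */
}

static void example_invalidate_mask(const struct cpumask *mask)
{
	preempt_disable();
	/* wait == true: return only after all remote callbacks completed. */
	smp_call_function_many(mask, example_invalidate, NULL, true);
	preempt_enable();
}
#endif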

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely.
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	/* Bring up any remaining present CPUs, up to the configured maximum. */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Announce the bring-up and let the architecture finish up. */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
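
/*
 * Illustrative usage sketch (not part of this file): run a quick,
 * non-blocking callback on every online CPU, including the local one, and
 * wait for completion.  example_drain() and example_drain_all() are
 * hypothetical names; the block is under #if 0.
 */
#if 0
static void example_drain(void *info)
{
	/* Runs with interrupts disabled on every online CPU. */
}

static void example_drain_all(void)
{
	/* wait == 1: return only after every CPU has run example_drain(). */
	on_each_cpu(example_drain, NULL, 1);
}
#endif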

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
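
/*
 * Illustrative usage sketch (not part of this file): same idea as the
 * previous example, but restricted to the online CPUs of one NUMA node.
 * example_node_drain() and example_drain_node() are hypothetical names;
 * the block is under #if 0.
 */
#if 0
static void example_node_drain(void *info)
{
	/* Runs on each online CPU of the node, interrupts disabled. */
}

static void example_drain_node(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), example_node_drain, NULL, true);
}
#endif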

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	Whether to wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a blocking
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
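
/*
 * Illustrative usage sketch (not part of this file): send an IPI only to
 * CPUs that actually have work pending, as decided by a per-CPU predicate.
 * example_has_work, example_cpu_has_work(), example_flush() and
 * example_flush_busy_cpus() are hypothetical names; the block is under #if 0.
 */
#if 0
static DEFINE_PER_CPU(bool, example_has_work);

static bool example_cpu_has_work(int cpu, void *info)
{
	/* Called with preemption disabled; decide whether to IPI @cpu. */
	return per_cpu(example_has_work, cpu);
}

static void example_flush(void *info)
{
	/* Runs only on CPUs for which the predicate returned true. */
}

static void example_flush_busy_cpus(void)
{
	on_each_cpu_cond(example_cpu_has_work, example_flush, NULL,
			 true, GFP_KERNEL);
}
#endif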

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Sends a dummy IPI to all other online CPUs and waits for them to handle
 * it.  On return, every CPU is guaranteed to have taken an interrupt since
 * this function was called, which is useful for synchronizing updates that
 * remote CPUs must observe before they continue (for example a changed
 * idle path).
 */
void kick_all_cpus_sync(void)
{
	/* Make the changes visible to all cpus first */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake up every online CPU that is currently idle, including CPUs that are
 * idle polling.  CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);