/*
 * Generic helpers for SMP IPI calls (cross-CPU function calls).
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
        CSD_FLAG_LOCK = 0x01,
        CSD_FLAG_SYNCHRONOUS = 0x02,
};

struct call_function_data {
        call_single_data_t __percpu *csd;
        cpumask_var_t cpumask;
        cpumask_var_t cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);
int smpcfd_prepare_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                     cpu_to_node(cpu)))
                return -ENOMEM;
        if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                     cpu_to_node(cpu))) {
                free_cpumask_var(cfd->cpumask);
                return -ENOMEM;
        }
        cfd->csd = alloc_percpu(call_single_data_t);
        if (!cfd->csd) {
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                return -ENOMEM;
        }

        return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        free_cpumask_var(cfd->cpumask);
        free_cpumask_var(cfd->cpumask_ipi);
        free_percpu(cfd->csd);
        return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
        /*
         * The IPIs for smp-call-function callbacks queued by other CPUs
         * might arrive late, either because of hardware latencies or
         * because this CPU has already disabled interrupts (inside
         * stop-machine) before the IPIs were sent. Flush any pending
         * callbacks explicitly, without waiting for the IPIs to arrive,
         * so that neither this outgoing CPU nor a synchronous caller on
         * another CPU gets stuck on work that would otherwise never run.
         */
        flush_smp_call_function_queue(false);
        return 0;
}

void __init call_function_init(void)
{
        int i;

        for_each_possible_cpu(i)
                init_llist_head(&per_cpu(call_single_queue, i));

        smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous IPI calls the csd can still be in use by a previous
 * function call; for multi-cpu calls we additionally have to ensure that no
 * other CPU is still observing our csd before we reuse it.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
        smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * Prevent the CPU from reordering the above assignment to ->flags
         * with any subsequent assignments to other fields of the specified
         * call_single_data_t structure:
         */
        smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * Ensure we're all done with the csd before releasing it:
         */
        smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

/*
 * Insert a previously allocated call_single_data_t element for execution
 * on the given CPU. The csd must already have ->flags set (locked); ->func
 * and ->info are filled in here.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd,
                               smp_call_func_t func, void *info)
{
        if (cpu == smp_processor_id()) {
                unsigned long flags;

                /*
                 * We can unlock early even for the synchronous on-stack
                 * case, since we're doing this from the same CPU.
                 */
                csd_unlock(csd);
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
                return 0;
        }

        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                csd_unlock(csd);
                return -ENXIO;
        }

        csd->func = func;
        csd->info = info;

        /*
         * The list addition must be visible to the IPI handler before the
         * IPI is sent; normal cache coherency between the local and remote
         * CPUs takes care of that once llist_add() has completed.
         *
         * Only send an IPI when the queue was previously empty: a non-empty
         * queue means an earlier sender's IPI is still in flight and its
         * handler will drain our entry as well.
         */
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);

        return 0;
}

/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by the architecture to handle a call-function-single IPI.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *                    offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go
 * offline, to ensure that all pending IPI callbacks are run before the CPU
 * goes completely offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
        struct llist_head *head;
        struct llist_node *entry;
        call_single_data_t *csd, *csd_next;
        static bool warned;

        lockdep_assert_irqs_disabled();

        head = this_cpu_ptr(&call_single_queue);
        entry = llist_del_all(head);
        entry = llist_reverse_order(entry);

        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
                     !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

                /*
                 * We don't have to use the _safe() variant here because we
                 * are not invoking the IPI handlers yet, only printing them.
                 */
                llist_for_each_entry(csd, entry, llist)
                        pr_warn("IPI callback %pS sent to offline CPU\n",
                                csd->func);
        }

        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
                smp_call_func_t func = csd->func;
                void *info = csd->info;

                /* Do we wait until *after* the callback? */
                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
                        func(info);
                        csd_unlock(csd);
                } else {
                        csd_unlock(csd);
                        func(info);
                }
        }

        /*
         * Handle irq works queued remotely by irq_work_queue_on(): remote
         * irq_work is delivered via the call-function-single IPI, so run
         * any pending irq_work here as well.
         */
        irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        call_single_data_t *csd;
        call_single_data_t csd_stack = {
                .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
        };
        int this_cpu;
        int err;

        /*
         * Prevent preemption and rescheduling on another processor,
         * as well as CPU removal.
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        csd = &csd_stack;
        if (!wait) {
                csd = this_cpu_ptr(&csd_data);
                csd_lock(csd);
        }

        err = generic_exec_single(cpu, csd, func, info);

        if (wait)
                csd_lock_wait(csd);

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
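
/*
 * Illustrative usage sketch (not part of the original file; "my_counter"
 * and "counter_on" are hypothetical): read a per-CPU variable on a remote
 * CPU with a fast, non-blocking callback and wait for the result.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	static void read_counter(void *info)
 *	{
 *		// runs in hardirq context on the target CPU
 *		*(unsigned long *)info = __this_cpu_read(my_counter);
 *	}
 *
 *	unsigned long counter_on(int cpu)
 *	{
 *		unsigned long val = 0;
 *
 *		smp_call_function_single(cpu, read_counter, &val, 1);
 *		return val;
 *	}
 */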

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *                                   specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and set up data structure.
 *
 * Like smp_call_function_single(), but the call is asynchronous and can
 * therefore be made from contexts with interrupts disabled.
 *
 * The caller passes its own pre-allocated data structure (i.e. embedded in
 * an object) and is responsible for synchronizing it such that the IPIs
 * performed on the @csd are strictly serialized.
 *
 * NOTE: there is currently no debugging facility to validate the
 * correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
        int err = 0;

        preempt_disable();

        /* We could deadlock if we have to wait here with interrupts disabled! */
        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
                csd_lock_wait(csd);

        csd->flags = CSD_FLAG_LOCK;
        smp_wmb();

        err = generic_exec_single(cpu, csd, csd->func, csd->info);
        preempt_enable();

        return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);
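
/*
 * Illustrative usage sketch (not part of the original file; "struct my_dev"
 * and its functions are hypothetical): the caller owns the csd, sets
 * ->func/->info itself, and must not reuse the csd until the previous
 * callback has run and CSD_FLAG_LOCK has been cleared again.
 *
 *	struct my_dev {
 *		call_single_data_t csd;
 *		// ... other fields ...
 *	};
 *
 *	static void my_dev_remote_kick(void *info)
 *	{
 *		struct my_dev *dev = info;
 *		// fast, non-blocking; runs in hardirq context on @cpu
 *	}
 *
 *	static void my_dev_kick(struct my_dev *dev, int cpu)
 *	{
 *		dev->csd.func = my_dev_remote_kick;
 *		dev->csd.info = dev;
 *		smp_call_function_single_async(cpu, &dev->csd);
 *	}
 */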

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
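
/*
 * Illustrative usage sketch (not part of the original file; "flush_cb" and
 * "dev_mask" are hypothetical): run a callback on whichever CPU of a
 * device's affinity mask is cheapest to reach, preferring the local CPU
 * and then the local node.
 *
 *	smp_call_function_any(dev_mask, flush_cb, NULL, 1);
 */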

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow cpu's that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = this_cpu_ptr(&cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        __cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        cpumask_clear(cfd->cpumask_ipi);
        for_each_cpu(cpu, cfd->cpumask) {
                call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

                csd_lock(csd);
                if (wait)
                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
                csd->func = func;
                csd->info = info;
                if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                        __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        call_single_data_t *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
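
/*
 * Illustrative usage sketch (not part of the original file; "flush_cb" and
 * "target_mask" are hypothetical): broadcast a TLB-flush style callback to
 * a set of CPUs and wait for completion. Note that the calling CPU is never
 * included; the caller handles its own CPU separately if needed, and
 * preemption must already be disabled.
 *
 *	int cpu = get_cpu();	// disables preemption, as required
 *
 *	smp_call_function_many(target_mask, flush_cb, NULL, true);
 *	put_cpu();
 */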

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
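
/*
 * Illustrative usage sketch (not part of the original file;
 * "drain_local_cache" is hypothetical): ask every other online CPU to drain
 * a per-CPU cache and wait until they have all done so.
 *
 *	static void drain_local_cache(void *unused)
 *	{
 *		// fast, non-blocking; runs in hardirq context
 *	}
 *
 *	smp_call_function(drain_local_cache, NULL, 1);
 */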

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routines for controlling SMP activation.
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely.
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is a hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);
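
/*
 * Illustrative kernel command line (hypothetical values, not part of the
 * original file): cap the number of usable CPU ids at 4 and bring only 2
 * CPUs online during boot; "nosmp" or "maxcpus=0" would disable SMP
 * activation entirely.
 *
 *	nr_cpus=4 maxcpus=2
 */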

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by the boot processor to activate the rest. */
void __init smp_init(void)
{
        int num_nodes, num_cpus;
        unsigned int cpu;

        idle_threads_init();
        cpuhp_threads_init();

        pr_info("Bringing up secondary CPUs ...\n");

        /* Bring up every present CPU, up to the configured maximum. */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        num_nodes = num_online_nodes();
        num_cpus = num_online_cpus();
        pr_info("Brought up %d node%s, %d CPU%s\n",
                num_nodes, (num_nodes > 1 ? "s" : ""),
                num_cpus, (num_cpus > 1 ? "s" : ""));

        /* Any cleanup work */
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Uses local_irq_save/restore() instead
 * of local_irq_disable/enable() for that reason.
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
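
/*
 * Illustrative usage sketch (not part of the original file;
 * "disable_feature_local" is hypothetical): run a callback on every online
 * CPU, including the calling one, and wait for completion.
 *
 *	static void disable_feature_local(void *unused)
 *	{
 *		// poke per-CPU state or registers; must not sleep
 *	}
 *
 *	on_each_cpu(disable_feature_local, NULL, 1);
 */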

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
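
/*
 * Illustrative usage sketch (not part of the original file; "drain_cb" and
 * "nid" are hypothetical): run a callback on all CPUs of one NUMA node,
 * including the local CPU if it belongs to that node.
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), drain_cb, NULL, true);
 */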

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until the function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate that a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not
 * online. CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                      smp_call_func_t func, void *info, bool wait,
                      gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfpflags_allow_blocking(gfp_flags));

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                               info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
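
/*
 * Illustrative usage sketch (not part of the original file; "pending_count",
 * "has_pending" and "drain_pending" are hypothetical): only interrupt CPUs
 * that actually have work queued, as judged by a cheap predicate that runs
 * with preemption disabled.
 *
 *	static DEFINE_PER_CPU(unsigned int, pending_count);
 *
 *	static bool has_pending(int cpu, void *info)
 *	{
 *		return per_cpu(pending_count, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(has_pending, drain_pending, NULL, true, GFP_KERNEL);
 */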

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of the pm_idle function pointer. It is
 * called after the pointer is updated and returns after the dummy callback
 * has been executed on all cpus. The callback can only run on the remote
 * cpus after they have left the idle function that was reached through the
 * old pointer, so it is guaranteed that nothing uses the previous pointer
 * anymore once this function returns.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
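
/*
 * Illustrative usage sketch (not part of the original file; "my_idle_hook",
 * "new_hook" and "old_hook" are hypothetical): after publishing a new
 * function pointer that idle CPUs may still be executing through, kick all
 * CPUs so each of them takes an interrupt and re-reads the pointer before
 * the old target is released.
 *
 *	rcu_assign_pointer(my_idle_hook, new_hook);
 *	kick_all_cpus_sync();
 *	kfree(old_hook);
 */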

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wake up every online CPU that is currently idle (including CPUs that are
 * idle-polling); CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
        int cpu;

        preempt_disable();
        for_each_online_cpu(cpu) {
                if (cpu == smp_processor_id())
                        continue;

                wake_up_if_idle(cpu);
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via
 * vcpu pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
        struct work_struct work;
        struct completion done;
        int (*func)(void *);
        void *data;
        int ret;
        int cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
        struct smp_call_on_cpu_struct *sscs;

        sscs = container_of(work, struct smp_call_on_cpu_struct, work);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(sscs->cpu);
        sscs->ret = sscs->func(sscs->data);
        if (sscs->cpu >= 0)
                hypervisor_pin_vcpu(-1);

        complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
        struct smp_call_on_cpu_struct sscs = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
                .func = func,
                .data = par,
                .cpu = phys ? cpu : -1,
        };

        INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

        if (cpu >= nr_cpu_ids || !cpu_online(cpu))
                return -ENXIO;

        queue_work_on(cpu, system_wq, &sscs.work);
        wait_for_completion(&sscs.done);

        return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
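
/*
 * Illustrative usage sketch (not part of the original file;
 * "read_temp_sensor" and "read_platform_register" are hypothetical): unlike
 * the IPI-based calls above, the function runs in process context via the
 * target CPU's workqueue and may therefore sleep.
 *
 *	static int read_temp_sensor(void *arg)
 *	{
 *		return read_platform_register();	// may sleep
 *	}
 *
 *	int temp = smp_call_on_cpu(0, read_temp_sensor, NULL, false);
 */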