/*
 * Generic helpers for SMP IPI calls.
 */
#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/hypervisor.h>

#include "smpboot.h"

enum {
	CSD_FLAG_LOCK		= 0x01,
	CSD_FLAG_SYNCHRONOUS	= 0x02,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	cfd->csd = alloc_percpu(struct call_single_data);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for smp-call-function callbacks queued by other CPUs
	 * might arrive late, either because the hardware is slow or because
	 * this CPU already disabled interrupts (inside stop-machine) before
	 * they were sent.  Flush any pending callbacks explicitly, without
	 * waiting for the IPIs to arrive, so that the outgoing CPU does not
	 * leave senders stuck in csd_lock_wait().
	 */
	flush_smp_call_function_queue(false);
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

/*
 * csd_lock/csd_unlock serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call.  For multi-cpu calls we also have to make
 * sure no other CPU is still observing our csd before we reuse it.
 */
static __always_inline void csd_lock_wait(struct call_single_data *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * Prevent the CPU from reordering the above assignment to ->flags
	 * with any subsequent assignments to other fields of the
	 * call_single_data structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * Ensure all stores to the csd are visible before releasing it:
	 */
	smp_store_release(&csd->flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * Queue a call_single_data element for execution on the given CPU, or run
 * it directly when the target is the local CPU.  The csd must already be
 * locked by the caller.
 */
static int generic_exec_single(int cpu, struct call_single_data *csd,
			       smp_call_func_t func, void *info)
{
	if (cpu == smp_processor_id()) {
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack
		 * case, since we invoke the callback right here on the
		 * same CPU with interrupts disabled:
		 */
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
		return 0;
	}

	/* Bail out cleanly if the target CPU does not exist or is offline. */
	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	csd->func = func;
	csd->info = info;

	/*
	 * The list addition must be visible to the target CPU by the time
	 * it handles the IPI; normal cache coherency plus the
	 * architecture's IPI delivery guarantees take care of that.
	 *
	 * llist_add() returns true only when the queue was previously
	 * empty, i.e. we are the first producer and have to send the IPI;
	 * otherwise an IPI for this queue is already on its way.
	 */
	if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return 0;
}

/*
 * Invoked by arch code to handle the IPI used for smp-call-function
 * requests.  Runs with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU.  Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU.  This is
 * invoked by the generic IPI handler, as well as by a CPU about to go
 * offline, to ensure that all pending IPI callbacks are run before the CPU
 * goes completely offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	struct call_single_data *csd, *csd_next;
	static bool warned;

	WARN_ON(!irqs_disabled());

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* the callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}

	/*
	 * Handle irq works queued remotely by irq_work_queue_on().
	 * The smp-call-function callbacks above are typically synchronous,
	 * so they run first; other CPUs may be busy-waiting for them.
	 */
	irq_work_run();
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data *csd;
	struct call_single_data csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS,
	};
	int this_cpu;
	int err;

	/*
	 * Prevent preemption and reschedule on another processor,
	 * as well as CPU removal:
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	err = generic_exec_single(cpu, csd, func, info);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

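/*
 * Illustrative usage sketch only (not part of this file): a caller that
 * synchronously runs a callback on another CPU and reads back a value.
 * read_remote(), cpu and val are hypothetical.
 *
 *	static void read_remote(void *info)
 *	{
 *		*(int *)info = smp_processor_id();
 *	}
 *
 *	int val, err;
 *
 *	err = smp_call_function_single(cpu, read_remote, &val, 1);
 *	if (!err)
 *		pr_info("CPU %d reported %d\n", cpu, val);
 */
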
/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * NOTE: Be careful, there is unfortunately no current debugging facility to
 * validate the correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	/* We could deadlock if we have to wait here with interrupts disabled! */
	if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
		csd_lock_wait(csd);

	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd, csd->func, csd->info);
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

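/*
 * Illustrative usage sketch only: fire-and-forget use of
 * smp_call_function_single_async() with a caller-owned csd.  my_func,
 * my_csd and target_cpu are hypothetical; the caller must guarantee that
 * my_csd is not reused until the previous invocation has completed.
 *
 *	static void my_func(void *info)
 *	{
 *		// fast, non-blocking work
 *	}
 *
 *	static struct call_single_data my_csd = {
 *		.func = my_func,
 *	};
 *
 *	smp_call_function_single_async(target_cpu, &my_csd);
 */
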
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try the fastpath: which online CPU do they want, other than us? */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_FLAG_SYNCHRONOUS;
		csd->func = func;
		csd->info = info;
		llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);

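/*
 * Illustrative usage sketch only: calling smp_call_function_many() with
 * preemption disabled, as its kernel-doc requires.  flush_local_state()
 * and mask are hypothetical.
 *
 *	get_cpu();					// disables preemption
 *	smp_call_function_many(mask, flush_local_state, NULL, true);
 *	put_cpu();
 *
 * Note that the calling CPU is deliberately skipped; callers that also
 * need the callback to run locally should use on_each_cpu_mask() below.
 */
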
/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routines for controlling SMP activation.
 *
 * Command-line option of "nosmp" or "maxcpus=0" disables SMP
 * activation entirely.
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* This is the hard limit on the number of CPU ids. */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

void __weak smp_announce(void)
{
	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
}

/* Called by the boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();
	cpuhp_threads_init();

	/* Bring up any remaining present CPUs, up to the configured maximum. */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Announce the result and let the arch do any final cleanup. */
	smp_announce();
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set, which is why local_irq_save/restore()
 * is used around the local invocation instead of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until the function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags allow a blocking allocation.
 *
 * Preemption is disabled to protect against CPUs going offline but not
 * online.  CPUs going online during the call will not be seen or sent
 * an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
							       info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);

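/*
 * Illustrative usage sketch only: invoke a callback on just the CPUs for
 * which a predicate holds.  cpu_needs_flush(), do_flush() and the per-cpu
 * pending_flush flag are hypothetical.
 *
 *	static bool cpu_needs_flush(int cpu, void *info)
 *	{
 *		return per_cpu(pending_flush, cpu) != 0;
 *	}
 *
 *	on_each_cpu_cond(cpu_needs_flush, do_flush, NULL, true, GFP_KERNEL);
 */
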
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Sends a dummy IPI to every other online CPU and waits for it to be
 * handled.  This guarantees that all CPUs have taken an interrupt (and
 * thus left any idle loop) after a preceding update made by the caller
 * has become visible.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Wakes every online CPU that is currently idle, including CPUs that are
 * idle-polling.  CPUs that are not idle are left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * smp_call_on_cpu - Call a function on a specific cpu
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via vcpu
 * pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
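
/*
 * Illustrative usage sketch only: run a function that may sleep on a
 * specific CPU and collect its return value.  read_config() and cfg are
 * hypothetical.  Unlike the IPI-based helpers above, the callback here
 * runs from a workqueue worker and is therefore allowed to block.
 *
 *	static int read_config(void *arg)
 *	{
 *		// may sleep; runs on the requested CPU
 *		return 0;
 *	}
 *
 *	int ret = smp_call_on_cpu(cpu, read_config, &cfg, false);
 */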