/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
/* Global queue of pending smp_call_function_many() requests. */
static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

/* Per-cpu state describing a multi-CPU function call issued by that cpu. */
struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

/* Per-cpu queue of pending single-CPU function calls targeting that cpu. */
struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls we have to ensure no
 * other cpu is still observing our csd before we reuse it.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info etc. set up by the caller.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency implied by the spinlock.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear ordered to the remote cpu.
	 *
	 * We only raise the IPI if the destination queue was empty;
	 * otherwise an IPI is already on its way and its handler will
	 * pick up this new entry as well.
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = smp_processor_id();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function.queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'pos', since list_del_rcu() doesn't clear ->next.
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;
		smp_call_func_t func;

		/*
		 * Since we walk the list without any locks, we might
		 * see an entry that was completed, removed from the
		 * list and is in the process of being reused.
		 *
		 * We must check that the cpu is in the cpumask before
		 * checking the refs, and both must be set before
		 * executing the callback on this cpu.
		 */
		if (!cpumask_test_cpu(cpu, data->cpumask))
			continue;

		smp_rmb();

		if (atomic_read(&data->refs) == 0)
			continue;

		func = data->csd.func;		/* save for later warn */
		func(data->csd.info);

		/*
		 * If the cpu mask is not still set then func enabled
		 * interrupts (BUG), and this cpu took another smp call
		 * function interrupt and executed func(info) twice
		 * on this cpu.  That nested execution decremented refs.
		 */
		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
			WARN(1, "%pf enabled interrupts and double executed\n", func);
			continue;
		}

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);

		if (refs)
			continue;

		WARN_ON(!cpumask_empty(data->cpumask));

		raw_spin_lock(&call_function.lock);
		list_del_rcu(&data->csd.list);
		raw_spin_unlock(&call_function.lock);

		csd_unlock(&data->csd);
	}
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save the flags away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			/*
			 * If we won't wait for completion, use the per-cpu
			 * csd instead of the on-stack one:
			 */
			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
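
/*
 * Usage sketch (illustrative only, not part of this file): run a callback
 * on one specific CPU and wait for it to finish.  collect_sample(),
 * struct my_sample and target_cpu are made-up names; any fast,
 * non-blocking function with the smp_call_func_t signature
 * (void fn(void *info)) that is safe in hard-irq context would do.
 *
 *	static void collect_sample(void *info)
 *	{
 *		struct my_sample *s = info;
 *
 *		s->cpu = smp_processor_id();
 *	}
 *
 *	struct my_sample s;
 *	int err = smp_call_function_single(target_cpu, collect_sample, &s, 1);
 *	if (err)
 *		pr_warn("target cpu not online\n");
 */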

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do; smp_call_function_single() handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
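
/*
 * Usage sketch (illustrative only): run a callback on whichever suitable
 * CPU is cheapest, preferring the local CPU and then the local node.
 * drain_dev_queue() and dev are made-up names.
 *
 *	ret = smp_call_function_any(cpumask_of_node(dev_to_node(dev)),
 *				    drain_dev_queue, dev, 1);
 */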

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		data->func(data->info);
		local_irq_restore(flags);
	} else {
		csd_lock(data);
		generic_exec_single(cpu, data, wait);
	}
	put_cpu();
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int refs, cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	/* This BUG_ON verifies our reuse assertions and can be removed */
	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));

	/*
	 * The global call function queue list add and delete are protected
	 * by a lock, but the list is traversed without any lock, relying
	 * on the rcu list add and delete to allow safe concurrent traversal.
	 * We reuse the call function data without waiting for any grace
	 * period after some other cpu removes it from the global queue.
	 * This means a cpu might find our data block as it is being
	 * filled out.
	 *
	 * We hold off the interrupt handler on the other cpu by
	 * ordering our writes to the cpu mask vs our setting of the
	 * refs counter.  Only the cpu owning the data block will set
	 * a bit in cpumask, and each bit will only be cleared by the
	 * subject cpu.  Only the cpu owning the block can set the
	 * refs counter.
	 *
	 * The interrupt handler waits until it sees refs filled out
	 * while its cpu mask bit is set; here we may only clear our
	 * own cpu mask bit, and must wait to set refs until we are sure
	 * previous writes are complete and we have obtained the lock to
	 * add the element to the queue.
	 */
	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */

	data->csd.func = func;
	data->csd.info = info;

	/* Ensure 0 refs is visible before mask.  Also orders func and info */
	smp_wmb();

	/* We rely on the "and" being processed before the store */
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	refs = cpumask_weight(data->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!refs)) {
		csd_unlock(&data->csd);
		return;
	}

	/*
	 * After we put an entry into the list, data->cpumask may be
	 * cleared again as CPUs process the request, so keep a separate
	 * copy of the mask for sending the IPI below.
	 */
	cpumask_copy(data->cpumask_ipi, data->cpumask);
	raw_spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	/*
	 * We rely on the wmb() in list_add_rcu to complete our writes
	 * to the cpumask before this write to refs, which indicates
	 * data is on the list and is ready to be processed.
	 */
	atomic_set(&data->refs, refs);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask_ipi);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
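
/*
 * Usage sketch (illustrative only): fire-and-forget a cache flush on a set
 * of CPUs.  flush_local_cache() and target_mask are made-up names; the
 * caller must have preemption disabled (get_cpu() below does that) and
 * interrupts enabled.
 *
 *	get_cpu();
 *	smp_call_function_many(target_mask, flush_local_cache, NULL, false);
 *	put_cpu();
 */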

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
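
/*
 * Usage sketch (illustrative only): ask every other online CPU to
 * synchronize some local state and wait until they have all done so.
 * sync_local_state() is a made-up callback; note that it is NOT run on
 * the calling CPU - use on_each_cpu() if that is also required.
 *
 *	smp_call_function(sync_local_state, NULL, 1);
 */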
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */

/* Setup configuration for the number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);
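
/*
 * Example kernel command lines using the parameters above (illustrative):
 *
 *	nosmp		- boot with only the boot CPU
 *	nr_cpus=4	- cap nr_cpu_ids, i.e. the number of possible CPUs
 *	maxcpus=2	- bring up at most 2 CPUs during boot; the remaining
 *			  present CPUs can still be onlined later
 */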

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
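
/*
 * Usage sketch (illustrative only): run a callback on every online CPU,
 * including the calling one, and wait for all of them.
 * reset_local_counters() is a made-up callback; interrupts must be
 * enabled in the caller.
 *
 *	on_each_cpu(reset_local_counters, NULL, 1);
 */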

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
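
/*
 * Usage sketch (illustrative only): run a callback on a caller-supplied
 * cpumask that may include the current CPU, and wait. update_freq() and
 * policy (with its cpus mask) are made-up names.
 *
 *	on_each_cpu_mask(policy->cpus, update_freq, policy, true);
 */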

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	Whether to wait (atomically) until function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non
 * atomic allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);
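
/*
 * Usage sketch (illustrative only): IPI only the CPUs that actually have
 * pending work. Both callbacks and the per-cpu variable pending_work are
 * made-up names; cond_has_work() must not sleep (it runs with preemption
 * disabled) and drain_work() has the usual smp_call_func_t constraints.
 *
 *	static bool cond_has_work(int cpu, void *info)
 *	{
 *		return per_cpu(pending_work, cpu) != 0;
 *	}
 *
 *	static void drain_work(void *info)
 *	{
 *		...
 *	}
 *
 *	on_each_cpu_cond(cond_has_work, drain_work, NULL, true, GFP_KERNEL);
 */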

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing after this point is
 * executed before the IPI function returned on all cpus.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
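
/*
 * Usage sketch (illustrative only): after replacing a function pointer
 * that other CPUs may still be executing through, make sure every CPU has
 * passed through an interrupt before relying on the new value.
 * my_handler and new_handler are made-up names.
 *
 *	rcu_assign_pointer(my_handler, new_handler);
 *	kick_all_cpus_sync();
 */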