/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
	struct list_head	list;
	raw_spinlock_t		lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return notifier_from_errno(-ENOMEM);
		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				cpu_to_node(cpu))) {
			/* don't leak the first mask on failure: */
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		cfd->csd = alloc_percpu(struct call_single_data);
		if (!cfd->csd) {
			/* free both masks allocated above: */
			free_cpumask_var(cfd->cpumask_ipi);
			free_cpumask_var(cfd->cpumask);
			return notifier_from_errno(-ENOMEM);
		}
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		free_percpu(cfd->csd);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

void __init call_function_init(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		raw_spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);
}
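
/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it is even more interesting
 * as we have to ensure no other cpu is observing our csd.
 */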
static void csd_lock_wait(struct call_single_data *csd)
{
	while (csd->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	csd->flags &= ~CSD_FLAG_LOCK;
}
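
/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */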
static
void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	raw_spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&csd->list, &dst->list);
	raw_spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency implications.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(csd);
}
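
/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */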
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	raw_spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *csd;
		unsigned int csd_flags;

		csd = list_entry(list.next, struct call_single_data, list);
		list_del(&csd->list);

		/*
		 * 'csd' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save the flags away before making the call:
		 */
		csd_flags = csd->flags;

		csd->func(csd->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (csd_flags & CSD_FLAG_LOCK)
			csd_unlock(csd);
	}
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
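
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 *
 * An illustrative (hypothetical) use, running my_func on CPU 3 and
 * waiting for it to finish:
 *
 *	err = smp_call_function_single(3, my_func, NULL, 1);
 */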
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *csd = &d;

			if (!wait)
				csd = &__get_cpu_var(csd_data);

			csd_lock(csd);

			csd->func = func;
			csd->info = info;
			generic_exec_single(cpu, csd, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
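
/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */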
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
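
/*
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and set up data structure
 * @wait: If true, wait until the function has completed on the specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @csd inside
 * other structures, for instance.
 */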
void __smp_call_function_single(int cpu, struct call_single_data *csd,
				int wait)
{
	unsigned int this_cpu;
	unsigned long flags;

	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && wait && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		csd->func(csd->info);
		local_irq_restore(flags);
	} else {
		csd_lock(csd);
		generic_exec_single(cpu, csd, wait);
	}
	put_cpu();
}
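
/*
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */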
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = &__get_cpu_var(cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	/*
	 * After we put an entry into the list, cfd->cpumask may be cleared
	 * again when another CPU sends another IPI for a SMP function call,
	 * so snapshot the mask to send the IPIs with:
	 */
	cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);

	for_each_cpu(cpu, cfd->cpumask) {
		struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
		struct call_single_queue *dst =
					&per_cpu(call_single_queue, cpu);
		unsigned long flags;

		csd_lock(csd);
		csd->func = func;
		csd->info = info;

		raw_spin_lock_irqsave(&dst->lock, flags);
		list_add_tail(&csd->list, &dst->list);
		raw_spin_unlock_irqrestore(&dst->lock, flags);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			struct call_single_data *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}
EXPORT_SYMBOL(smp_call_function_many);
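
/*
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */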
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
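
/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */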
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	get_option(&str, &nr_cpus);
	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	unsigned int cpu;

	idle_threads_init();

	/* FIXME: This should be done in userspace --RR */
	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}

	/* Any cleanup work */
	printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
	smp_cpus_done(setup_max_cpus);
}
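
/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */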
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	unsigned long flags;
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
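
/*
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */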
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
			void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
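
/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The callback is called with
 *		preemption disabled and should return a boolean
 *		value indicating whether to IPI the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until the function has
 *		completed on other CPUs.
 * @gfp_flags:	GFP flags to use when allocating the cpumask
 *		used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not
 * online. CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */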
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
			smp_call_func_t func, void *info, bool wait,
			gfp_t gfp_flags)
{
	cpumask_var_t cpus;
	int cpu, ret;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info))
				cpumask_set_cpu(cpu, cpus);
		on_each_cpu_mask(cpus, func, info, wait);
		preempt_enable();
		free_cpumask_var(cpus);
	} else {
		/*
		 * No free cpumask, bother. No matter, we'll
		 * just have to IPI them one by one.
		 */
		preempt_disable();
		for_each_online_cpu(cpu)
			if (cond_func(cpu, info)) {
				ret = smp_call_function_single(cpu, func,
								info, wait);
				/* returns 0 on success; warn on failure */
				WARN_ON_ONCE(ret);
			}
		preempt_enable();
	}
}
EXPORT_SYMBOL(on_each_cpu_cond);

static void do_nothing(void *unused)
{
}
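
/*
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing after kick_all_cpus_sync()
 * is executed on the remote cpus.
 */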
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);