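/*
 * Generic helpers for smp ipi calls
 */
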
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>
#include <linux/jump_label.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
union cfd_seq_cnt {
	u64		val;
	struct {
		u64	src:16;
		u64	dst:16;
#define CFD_SEQ_NOCPU	0xffff
		u64	type:4;
#define CFD_SEQ_QUEUE	0
#define CFD_SEQ_IPI	1
#define CFD_SEQ_NOIPI	2
#define CFD_SEQ_PING	3
#define CFD_SEQ_PINGED	4
#define CFD_SEQ_HANDLE	5
#define CFD_SEQ_DEQUEUE	6
#define CFD_SEQ_IDLE	7
#define CFD_SEQ_GOTIPI	8
#define CFD_SEQ_HDLEND	9
		u64	cnt:28;
	} u;
};

static char *seq_type[] = {
	[CFD_SEQ_QUEUE]		= "queue",
	[CFD_SEQ_IPI]		= "ipi",
	[CFD_SEQ_NOIPI]		= "noipi",
	[CFD_SEQ_PING]		= "ping",
	[CFD_SEQ_PINGED]	= "pinged",
	[CFD_SEQ_HANDLE]	= "handle",
	[CFD_SEQ_DEQUEUE]	= "dequeue (src CPU 0 == empty)",
	[CFD_SEQ_IDLE]		= "idle",
	[CFD_SEQ_GOTIPI]	= "gotipi",
	[CFD_SEQ_HDLEND]	= "hdlend (src CPU 0 == early)",
};

struct cfd_seq_local {
	u64	ping;
	u64	pinged;
	u64	handle;
	u64	dequeue;
	u64	idle;
	u64	gotipi;
	u64	hdlend;
};
#endif

struct cfd_percpu {
	call_single_data_t	csd;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	u64	seq_queue;
	u64	seq_ipi;
	u64	seq_noipi;
#endif
};

struct call_function_data {
	struct cfd_percpu	__percpu *pcpu;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->pcpu = alloc_percpu(struct cfd_percpu);
	if (!cfd->pcpu) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->pcpu);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
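	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent.  So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to ensure
	 * that the outgoing CPU doesn't go offline with work still pending.
	 */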
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);

static int __init csdlock_debug(char *str)
{
	unsigned int val = 0;

	if (str && !strcmp(str, "ext")) {
		val = 1;
		static_branch_enable(&csdlock_debug_extended);
	} else
		get_option(&str, &val);

	if (val)
		static_branch_enable(&csdlock_debug_enabled);

	return 0;
}
early_param("csdlock_debug", csdlock_debug);

static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);
static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);

#define CSD_LOCK_TIMEOUT (5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);
static u64 cfd_seq;

#define CFD_SEQ(s, d, t, c)	\
	(union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }

static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
{
	union cfd_seq_cnt new, old;

	new = CFD_SEQ(src, dst, type, 0);

	do {
		old.val = READ_ONCE(cfd_seq);
		new.u.cnt = old.u.cnt + 1;
	} while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);

	return old.val;
}

#define cfd_seq_store(var, src, dst, type)				\
	do {								\
		if (static_branch_unlikely(&csdlock_debug_extended))	\
			var = cfd_seq_inc(src, dst, type);		\
	} while (0)

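/* Record current CSD work for current CPU, NULL to erase. */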
static void __csd_lock_record(struct __call_single_data *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline void csd_lock_record(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled))
		__csd_lock_record(csd);
}

static int csd_lock_wait_getcpu(struct __call_single_data *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->node.dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
			     unsigned int type, union cfd_seq_cnt *data,
			     unsigned int *n_data, unsigned int now)
{
	union cfd_seq_cnt new[2];
	unsigned int i, j, k;

	new[0].val = val;
	new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);

	for (i = 0; i < 2; i++) {
		if (new[i].u.cnt <= now)
			new[i].u.cnt |= 0x80000000U;
		for (j = 0; j < *n_data; j++) {
			if (new[i].u.cnt == data[j].u.cnt) {
				/* Direct read value trumps generated one. */
				if (i == 0)
					data[j].val = new[i].val;
				break;
			}
			if (new[i].u.cnt < data[j].u.cnt) {
				for (k = *n_data; k > j; k--)
					data[k].val = data[k - 1].val;
				data[j].val = new[i].val;
				(*n_data)++;
				break;
			}
		}
		if (j == *n_data) {
			data[j].val = new[i].val;
			(*n_data)++;
		}
	}
}

static const char *csd_lock_get_type(unsigned int type)
{
	return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
}

static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
{
	struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
	unsigned int srccpu = csd->node.src;
	struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
	unsigned int now;
	union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
	unsigned int n_data = 0, i;

	data[0].val = READ_ONCE(cfd_seq);
	now = data[0].u.cnt;

	cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
	cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);

	cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
	cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);

	cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
	cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
	cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
	cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
	cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);

	for (i = 0; i < n_data; i++) {
		pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
			 data[i].u.cnt & ~0x80000000U, data[i].u.src,
			 data[i].u.dst, csd_lock_get_type(data[i].u.type));
	}
	pr_alert("\tcsd: cnt now: %07x\n", now);
}

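/*
 * Complain if too much time spent waiting.  Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */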
static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->node.u_flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux));
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (static_branch_unlikely(&csdlock_debug_extended))
			csd_lock_print_extended(csd, cpu);
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

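/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls its even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */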
static void __csd_lock_wait(struct __call_single_data *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	if (static_branch_unlikely(&csdlock_debug_enabled)) {
		__csd_lock_wait(csd);
		return;
	}

	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}

static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
{
	unsigned int this_cpu = smp_processor_id();
	struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
	struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
	struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);

	cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
	if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
		cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
		cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
		send_call_function_single_ipi(cpu);
		cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
	} else {
		cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
	}
}
#else
#define cfd_seq_store(var, src, dst, type)

static void csd_lock_record(struct __call_single_data *csd)
{
}

static __always_inline void csd_lock_wait(struct __call_single_data *csd)
{
	smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(struct __call_single_data *csd)
{
	csd_lock_wait(csd);
	csd->node.u_flags |= CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(struct __call_single_data *csd)
{
	WARN_ON(!(csd->node.u_flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_store_release(&csd->node.u_flags, 0);
}

static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	if (static_branch_unlikely(&csdlock_debug_extended)) {
		unsigned int type;

		type = CSD_TYPE(container_of(node, call_single_data_t,
					     node.llist));
		if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
			__smp_call_single_queue_debug(cpu, node);
			return;
		}
	}
#endif

	/*
	 * The list addition must be visible to the target CPU by the time
	 * it takes the IPI and walks the queue; normal cache coherency and
	 * memory ordering rules provide that.  Only send an IPI when this
	 * addition made the list non-empty, otherwise the CPU that emptied
	 * the list is already on its way to process this entry too.
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

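/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. data must already have
 * ->func, ->info etc. set.
 */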
static int generic_exec_single(int cpu, struct __call_single_data *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack case,
		 * since we're doing this from the same CPU..
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->node.llist);

	return 0;
}

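/**
 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 *
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */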
void generic_smp_call_function_single_interrupt(void)
{
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_GOTIPI);
	flush_smp_call_function_queue(true);
}

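/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */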
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HANDLE);
	entry = llist_del_all(head);
	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
		      /* "src CPU 0" in the trace means the queue was empty: */
		      entry ? CFD_SEQ_NOCPU : 0,
		      smp_processor_id(), CFD_SEQ_DEQUEUE);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, node.llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		/* Do we wait until *after* callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->node.llist;
		}
	}

	if (!entry) {
		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
			      0, smp_processor_id(),
			      CFD_SEQ_HDLEND);
		return;
	}

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, node.llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->node.llist;
			} else {
				entry = &csd_next->node.llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->node.llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_HDLEND);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
		      smp_processor_id(), CFD_SEQ_IDLE);
	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq();

	local_irq_restore(flags);
}

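/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */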
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.node = { .u_flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC, },
	};
	int this_cpu;
	int err;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->node.src = smp_processor_id();
	csd->node.dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

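/**
 * smp_call_function_single_async() - Run an asynchronous function on a
 *				      specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but the call is asynchronous and can
 * thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure (i.e. embedded in
 * an object) and is responsible for synchronizing it such that the IPIs
 * performed on the @csd are strictly serialized.
 *
 * If @csd has not yet finished being processed by a previous call to this
 * function, the call returns -EBUSY immediately.
 *
 * Return: %0 on success or negative errno value on error
 */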
int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->node.u_flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

	csd->node.u_flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

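/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */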
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

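/*
 * Flags to be used as scf_flags argument of smp_call_function_many_cond().
 *
 * %SCF_WAIT:		Wait until function execution is completed
 * %SCF_RUN_LOCAL:	Run also locally if local cpu is set in cpumask
 */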
#define SCF_WAIT	(1U << 0)
#define SCF_RUN_LOCAL	(1U << 1)

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					unsigned int scf_flags,
					smp_cond_func_t cond_func)
{
	int cpu, last_cpu, this_cpu = smp_processor_id();
	struct call_function_data *cfd;
	bool wait = scf_flags & SCF_WAIT;
	bool run_remote = false;
	bool run_local = false;
	int nr_cpus = 0;

	lockdep_assert_preemption_disabled();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpu's that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	if (cpu_online(this_cpu) && !oops_in_progress &&
	    !early_boot_irqs_disabled)
		lockdep_assert_irqs_enabled();

	/*
	 * When @wait we can deadlock when we interrupt between llist_add() and
	 * arch_send_call_function_ipi*(); when !@wait we can deadlock due to
	 * csd_lock() because the interrupt context uses the same csd storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Check if we need local execution. */
	if ((scf_flags & SCF_RUN_LOCAL) && cpumask_test_cpu(this_cpu, mask))
		run_local = true;

	/* Check if we need remote execution, i.e., any CPU excluding this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (cpu < nr_cpu_ids)
		run_remote = true;

	if (run_remote) {
		cfd = this_cpu_ptr(&cfd_data);
		cpumask_and(cfd->cpumask, mask, cpu_online_mask);
		__cpumask_clear_cpu(this_cpu, cfd->cpumask);

		cpumask_clear(cfd->cpumask_ipi);
		for_each_cpu(cpu, cfd->cpumask) {
			struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
			call_single_data_t *csd = &pcpu->csd;

			if (cond_func && !cond_func(cpu, info))
				continue;

			csd_lock(csd);
			if (wait)
				csd->node.u_flags |= CSD_TYPE_SYNC;
			csd->func = func;
			csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
			csd->node.src = smp_processor_id();
			csd->node.dst = cpu;
#endif
			cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
				nr_cpus++;
				last_cpu = cpu;

				cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
			} else {
				cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
			}
		}

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);

		/*
		 * Choose the most efficient way to send an IPI. Note that the
		 * number of CPUs might be zero due to concurrent changes to the
		 * provided mask.
		 */
		if (nr_cpus == 1)
			send_call_function_single_ipi(last_cpu);
		else if (likely(nr_cpus > 1))
			arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

		cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
	}

	if (run_local && (!cond_func || cond_func(this_cpu, info))) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}

	if (run_remote && wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
			csd_lock_wait(csd);
		}
	}
}

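/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */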
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait * SCF_WAIT, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

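/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */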
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routines for controlling SMP activation:
 *
 * Command-line option of "nosmp" or "maxcpus=0" disables SMP
 * activation entirely.
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus  = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus,  (num_cpus  > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

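/*
 * on_each_cpu_cond_mask(): Call a function on each CPU in @mask for which
 * @cond_func returns true, optionally waiting for all the required CPUs to
 * finish. This may include the local processor.
 * @cond_func:	A callback function that is passed a cpu id and the info
 *		parameter. The function is called with preemption disabled
 *		and should return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until function has completed
 *		on other CPUs.
 * @mask:	The set of cpus to run on (only runs on online subset).
 *
 * Preemption is disabled to protect against CPUs going offline but not
 * online. CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */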
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned int scf_flags = SCF_RUN_LOCAL;

	if (wait)
		scf_flags |= SCF_WAIT;

	preempt_disable();
	smp_call_function_many_cond(mask, func, info, scf_flags, cond_func);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

static void do_nothing(void *unused)
{
}

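/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Runs a dummy IPI on every other online CPU and waits for completion,
 * which forces each CPU out of any idle/low-power state and guarantees
 * that stores made before the call are visible before it returns.
 */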
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

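/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break all online cpus out of idle, including idle-polling
 * ones; CPUs that are not idle are left alone.
 */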
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

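/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: &work_struct
 * @done: &completion for synchronization
 * @func: function to call
 * @data: function's data argument
 * @ret: return value from @func
 * @cpu: target CPU (%-1 for any CPU)
 */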
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);