// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>
#include <linux/kexec.h>
#include <linux/kvm_host.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
static int cpus_stuck_in_kernel;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP,
	NR_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;

static void ipi_setup(int cpu);

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu);
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops->cpu_boot)
		return ops->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.task = idle;
	update_cpu_boot_status(CPU_MMU_OFF);

	/* Now bring the CPU into our world */
	ret = boot_secondary(cpu, idle);
	if (ret) {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	/*
	 * CPU was successfully started, wait for it to come online or
	 * time out.
	 */
	wait_for_completion_timeout(&cpu_running,
				    msecs_to_jiffies(5000));
	if (cpu_online(cpu))
		return 0;

	pr_crit("CPU%u: failed to come online\n", cpu);
	secondary_data.task = NULL;
	status = READ_ONCE(secondary_data.status);
	if (status == CPU_MMU_OFF)
		status = READ_ONCE(__early_cpu_boot_status);

	switch (status & CPU_BOOT_STATUS_MASK) {
	default:
		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
		       cpu, status);
		cpus_stuck_in_kernel++;
		break;
	case CPU_KILL_ME:
		if (!op_cpu_kill(cpu)) {
			pr_crit("CPU%u: died during early boot\n", cpu);
			break;
		}
		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		fallthrough;
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in kernel\n", cpu);
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
		if (status & CPU_STUCK_REASON_NO_GRAN) {
			pr_crit("CPU%u: does not support %luK granule\n",
				cpu, PAGE_SIZE / SZ_1K);
		}
		cpus_stuck_in_kernel++;
		break;
	case CPU_PANIC_KERNEL:
		panic("CPU%u detected unsupported configuration\n", cpu);
	}

	return -EIO;
}

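/*
 * Initialise GICv3 priority masking on the calling CPU. Expects to run
 * with DAIF interrupts masked; enables GIC system register access and
 * programs the PMR to reflect that masked state.
 */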
static void init_gic_priority_masking(void)
{
	u32 cpuflags;

	if (WARN_ON(!gic_enable_sre()))
		return;

	cpuflags = read_sysreg(daif);

	WARN_ON(!(cpuflags & PSR_I_BIT));
	WARN_ON(!(cpuflags & PSR_F_BIT));

	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	const struct cpu_operations *ops;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	rcu_cpu_starting(cpu);
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	ops = get_cpu_ops(cpu);
	if (ops->cpu_postboot)
		ops->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	ipi_setup(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr,
		read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!ops || !ops->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (ops->cpu_disable)
		return ops->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!ops->cpu_kill)
		return 0;

	return ops->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 * Does not return.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	ops->cpu_die(cpu);

	BUG();
}
#endif

static void __cpu_try_die(int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_die)
		ops->cpu_die(cpu);
#endif
}

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);
	rcu_report_dead(cpu);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		update_cpu_boot_status(CPU_KILL_ME);
		__cpu_try_die(cpu);
	}

	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}

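/*
 * Check which exception level the CPUs booted at: warn and taint if it
 * is inconsistent, and finalise the KVM hyp VA layout when the kernel
 * itself does not run at EL2.
 */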
static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
		kvm_compute_layout();
		kvm_apply_hyp_relocations();
	}
}

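/*
 * Called by the core SMP code once all CPUs have been brought up, to
 * finalise system-wide CPU features and apply remaining alternatives.
 */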
void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
	/*
	 * The runtime per-cpu areas have been allocated by
	 * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
	 * freed shortly, so we must move over to the runtime per-cpu area.
	 */
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();

	/*
	 * We now know enough about the boot CPU to apply the
	 * alternatives that cannot wait until interrupt handling
	 * and/or scheduling is enabled.
	 */
	apply_boot_alternatives();

	/* Conditionally switch to GIC PMR for interrupt masking */
	if (system_uses_irq_prio_masking())
		init_gic_priority_masking();

	kasan_init_hw_tags();
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * CPU. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize CPU operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	const struct cpu_operations *ops;

	if (init_cpu_ops(cpu))
		return -ENODEV;

	ops = get_cpu_ops(cpu);
	if (ops->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	set_cpu_logical_map(cpu_count, hwid);

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * supported).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(&header->common);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * do a walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_of_cpu_node(dn) {
		u64 hwid = of_get_cpu_hwid(dn, 0);

		if (hwid & ~MPIDR_HWID_BITMASK)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
			       dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
				       dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		set_cpu_logical_map(cpu_count, hwid);

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				set_cpu_logical_map(i, INVALID_HWID);
		}
	}
}

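/*
 * Called once on the boot CPU to set up topology and NUMA data and to
 * mark as present every possible CPU whose enable method is prepared
 * successfully.
 */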
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	const struct cpu_operations *ops;
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"),
	 * there is nothing further to prepare.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		ops = get_cpu_ops(cpu);
		if (!ops)
			continue;

		err = ops->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_WAKEUP]		= "CPU wake-up interrupts",
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

unsigned long irq_err_count;

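/* Show the IPI counts in /proc/interrupts on behalf of the core IRQ code. */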
int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
		seq_printf(p, "      %s\n", ipi_types[i]);
	}

	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

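/*
 * Take the calling CPU offline and park it, never to return. Used for
 * IPI_CPU_STOP and for parallel panic() handling.
 */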
static void local_cpu_stop(void)
{
	set_cpu_online(smp_processor_id(), false);

	local_daif_mask();
	sdei_mask_local_cpu();
	cpu_park_loop();
}

/*
 * Called by panic() when a second CPU panics in parallel: stop and park
 * this CPU in the same way as a CPU stopped by smp_send_stop().
 */
void panic_smp_self_stop(void)
{
	local_cpu_stop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

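/*
 * When CONFIG_KEXEC_CORE is enabled, save this CPU's registers for the
 * crash dump, acknowledge the crash stop request, and then park or kill
 * the CPU without returning.
 */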
static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		__cpu_try_die(cpu);

	/* just in case */
	cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		local_cpu_stop();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			ipi_cpu_crash_stop(cpu, get_irq_regs());

			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}

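/* Per-CPU IRQ handler: map the IRQ number back to an IPI index. */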
static irqreturn_t ipi_handler(int irq, void *data)
{
	do_handle_IPI(irq - ipi_irq_base);
	return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__ipi_send_mask(ipi_desc[ipinr], target);
}

static void ipi_setup(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}

#ifdef CONFIG_HOTPLUG_CPU
static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}
#endif

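/*
 * Called by the interrupt controller driver to hand over a range of
 * per-CPU IRQs for use as IPIs. Each one is requested, recorded in
 * ipi_desc[] and hidden from the generic /proc/interrupts output,
 * then enabled on the boot CPU.
 */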
void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < NR_IPI);
	nr_ipi = min(n, NR_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &cpu_number);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	ipi_irq_base = ipi_base;

	/* Setup the IPIs for CPU0 */
	ipi_setup(smp_processor_id());
}

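/* Kick a remote CPU so that it reschedules at the next opportunity. */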
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

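/*
 * Stop all other CPUs, waiting up to one second for them to go offline,
 * then mask SDEI events on the local CPU.
 */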
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_other_online_cpus() && timeout--)
		udelay(1);

	if (num_other_online_cpus())
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

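/*
 * Whether this platform provides a cpu_die method, i.e. whether CPUs
 * can really be taken offline rather than parked in a spin-table loop.
 */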
static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();
	const struct cpu_operations *ops = get_cpu_ops(any_cpu);

	if (ops && ops->cpu_die)
		return true;
#endif
	return false;
}

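/*
 * Tell callers such as kexec and hibernate whether some CPUs may still
 * be running kernel text and can never be taken offline.
 */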
bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables ||
	       is_protected_kvm_enabled();
}
1107