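/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */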
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

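/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */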
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE,
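	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */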
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;
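	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */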
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

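	/*
	 * Now bring the CPU into our world.
	 */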
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
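		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */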
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

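/* platform specific SMP operations */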
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
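	/* cpu_die must be specified to support hotplug */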
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

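	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */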
	return cpu != 0;
}

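/*
 * __cpu_disable runs on the processor to be shutdown.
 */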
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

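	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */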
	set_cpu_online(cpu, false);

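	/*
	 * OK - migrate IRQs away from this CPU
	 */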
	irq_migrate_all_off_this_cpu();

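	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */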
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

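/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */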
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
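	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */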
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

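/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */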
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

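	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * done before the completion below, so that data is safely
	 * written out before power and/or clocks can be removed by
	 * platform_cpu_kill().
	 */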
	flush_cache_louis();

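	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */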
	complete(&cpu_died);

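	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */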
	flush_cache_louis();

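	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */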
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

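	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */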
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

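/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */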
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

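/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */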
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	secondary_biglittle_init();

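	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */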
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

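	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */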
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

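	/*
	 * Give the platform a chance to do its own initialisation.
	 */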
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

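	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */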
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

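	/*
	 * OK, it's off to the idle thread for us
	 */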
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

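	/*
	 * are we trying to boot more cores than exist?
	 */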
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
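		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platforms smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */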
		init_cpu_present(cpu_possible_mask);

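		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */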
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

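/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */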
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1) {
		cpu_relax();
		wfe();
	}
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

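/*
 * Main handler for inter-processor interrupts
 */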
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	case IPI_CPU_BACKTRACE:
		printk_nmi_enter();
		irq_enter();
		nmi_cpu_backtrace(regs);
		irq_exit();
		printk_nmi_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

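	/* Wait up to one second for other CPUs to stop */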
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

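/*
 * If two CPUs panic() at the same time, the CPU which loses the race
 * ends up here and can no longer receive the stop IPI sent by
 * crash_smp_send_stop() on the winning CPU.  Mark it offline as well,
 * so that kdump is not left waiting for a CPU which will never stop.
 */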
void panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
		 smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

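/*
 * not supported here
 */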
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

static void raise_nmi(cpumask_t *mask)
{
	smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}