// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>
#include <linux/kernel_stat.h>

#include <linux/atomic.h>
#include <asm/bugs.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * secondary_data is the bootstrap information handed to a newly
 * started secondary core: where to find its initial stack and page
 * tables.  It is written by __cpu_up() and consumed by the secondary
 * boot path.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	NR_IPI,
	/*
	 * CPU_BACKTRACE is special and not included in NR_IPI
	 * or tracable with trace_ipi_*
	 */
	IPI_CPU_BACKTRACE = NR_IPI,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please keep the above limited
	 * to at most 8 entries.
	 */
	MAX_IPI
};

static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly = NR_IPI;
static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;

static void ipi_setup(int cpu);

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops __ro_after_init;

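/* Install the platform's SMP operations; called once during early boot. */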
void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

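/*
 * Return swapper_pg_dir in the form the secondary boot code expects:
 * with LPAE the physical address may not fit in 32 bits, so hand over
 * a PFN; otherwise hand over the physical address directly.
 */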
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

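/*
 * On big.LITTLE systems with branch predictor hardening, each CPU may
 * need its own processor vtable so that the right Spectre workaround is
 * used; allocate it before the secondary CPU is brought up.
 */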
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
static int secondary_biglittle_prepare(unsigned int cpu)
{
	if (!cpu_vtable[cpu])
		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);

	return cpu_vtable[cpu] ? 0 : -ENOMEM;
}

static void secondary_biglittle_init(void)
{
	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
}
#else
static int secondary_biglittle_prepare(unsigned int cpu)
{
	return 0;
}

static void secondary_biglittle_init(void)
{
}
#endif

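/*
 * Boot a secondary CPU: publish its stack and page tables in
 * secondary_data, ask the platform to start it, then wait up to one
 * second for it to come online.
 */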
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	ret = secondary_biglittle_prepare(cpu);
	if (ret)
		return ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

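/* Let the platform enumerate the CPUs that could ever be brought up. */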
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

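/* Disable this CPU's IPIs before it is taken offline. */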
static void ipi_teardown(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		disable_percpu_irq(ipi_irq_base + i);
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
	remove_cpu_topology(cpu);
#endif

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);
	ipi_teardown(cpu);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_debug("CPU%u: shutdown\n", cpu);

	clear_tasks_mm_cpumask(cpu);
	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	(void)cpu_report_death();

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
	check_cpu_icache_size(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	secondary_biglittle_init();

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

#ifndef CONFIG_MMU
	setup_vectors_base();
#endif
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	ipi_setup(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);

	check_other_bugs();

	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	/* BogoMIPS = lpj * HZ / 500000; print with two decimal places */
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

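/* Point the boot CPU's per-CPU offset at its per-CPU data area. */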
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_WAKEUP]		= "CPU wakeup interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_COMPLETION]	= "completion interrupts",
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		if (!ipi_desc[i])
			continue;

		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1) {
		cpu_relax();
		wfe();
	}
}

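/*
 * IPI_COMPLETION lets one CPU complete() a completion on behalf of
 * another: a caller registers a completion against a target CPU and
 * then arranges for that CPU to receive IPI_COMPLETION, whose handler
 * completes it.
 */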
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

static void do_handle_IPI(int ipinr)
{
	unsigned int cpu = smp_processor_id();

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		tick_receive_broadcast();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop(cpu);
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_work_run();
		break;
#endif

	case IPI_COMPLETION:
		ipi_complete(cpu);
		break;

	case IPI_CPU_BACKTRACE:
		printk_deferred_enter();
		nmi_cpu_backtrace(get_irq_regs());
		printk_deferred_exit();
		break;

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n",
			cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
}

/* Legacy version, should go away once all irqchips have been converted */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	do_handle_IPI(ipinr);
	irq_exit();

	set_irq_regs(old_regs);
}

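/*
 * When IPIs are delivered as ordinary per-CPU interrupts, this handler
 * maps the Linux IRQ number back to an IPI number and dispatches it.
 */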
static irqreturn_t ipi_handler(int irq, void *data)
{
	do_handle_IPI(irq - ipi_irq_base);
	return IRQ_HANDLED;
}

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
	__ipi_send_mask(ipi_desc[ipinr], target);
}

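/* Enable this CPU's IPIs; called on each CPU as it comes up. */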
static void ipi_setup(int cpu)
{
	int i;

	if (WARN_ON_ONCE(!ipi_irq_base))
		return;

	for (i = 0; i < nr_ipi; i++)
		enable_percpu_irq(ipi_irq_base + i, 0);
}

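/*
 * Called by the interrupt controller driver to hand the kernel a range
 * of per-CPU IRQs to use as IPIs.  The range is expected to cover all
 * MAX_IPI interrupts; anything less is warned about.
 */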
void __init set_smp_ipi_range(int ipi_base, int n)
{
	int i;

	WARN_ON(n < MAX_IPI);
	nr_ipi = min(n, MAX_IPI);

	for (i = 0; i < nr_ipi; i++) {
		int err;

		err = request_percpu_irq(ipi_base + i, ipi_handler,
					 "IPI", &irq_stat);
		WARN_ON(err);

		ipi_desc[i] = irq_to_desc(ipi_base + i);
		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
	}

	ipi_irq_base = ipi_base;

	/* Setup the boot CPU immediately */
	ipi_setup(smp_processor_id());
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * If two CPUs panic at the same time, one of them may call
 * panic_smp_self_stop() before the other issues crash_smp_send_stop().
 * With interrupts off it would never see the stop IPI and would remain
 * marked online, which makes kdump fail.  Mark this CPU offline here
 * before spinning.
 */
void panic_smp_self_stop(void)
{
	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
		 smp_processor_id());
	set_cpu_online(smp_processor_id(), false);
	while (1)
		cpu_relax();
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

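/*
 * loops_per_jiffy is calibrated against the CPU clock.  When cpufreq
 * changes the frequency (and the platform lacks CPUFREQ_CONST_LOOPS),
 * rescale the global and per-CPU values from reference snapshots taken
 * at the first transition.  Scaling is done before a frequency increase
 * and after a decrease, so udelay() never runs with a value that is too
 * small for the current clock.
 */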
static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpumask *cpus = freq->policy->cpus;
	int cpu, first = cpumask_first(cpus);
	unsigned int lpj;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, first)) {
		for_each_cpu(cpu, cpus) {
			per_cpu(l_p_j_ref, cpu) =
				per_cpu(cpu_data, cpu).loops_per_jiffy;
			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		}

		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);

		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
				    per_cpu(l_p_j_ref_freq, first), freq->new);
		for_each_cpu(cpu, cpus)
			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
		CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

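/*
 * IPI_CPU_BACKTRACE sits outside NR_IPI, so it is raised directly on
 * its irq_desc rather than through smp_cross_call() and its tracing.
 */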
static void raise_nmi(cpumask_t *mask)
{
	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
}