/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;
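/*
 * Illustrative sketch only (hypothetical "foo" platform code, not part of
 * this file): a classic holding-pen smp_boot_secondary() releases one core
 * at a time by publishing its logical ID here, then waits for the core to
 * acknowledge by writing -1 back:
 *
 *	static int foo_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		unsigned long timeout;
 *
 *		pen_release = cpu_logical_map(cpu);
 *		sync_cache_w(&pen_release);
 *
 *		arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 *
 *		timeout = jiffies + (1 * HZ);
 *		while (time_before(jiffies, timeout)) {
 *			smp_rmb();
 *			if (pen_release == -1)
 *				break;
 *			udelay(10);
 *		}
 *
 *		return pen_release != -1 ? -ENOSYS : 0;
 *	}
 */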

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
	IPI_CPU_BACKTRACE,
	/*
	 * SGI8-15 can be reserved by secure firmware, and thus may
	 * not be usable by the kernel. Please refer to the
	 * arch_send_call_function_ipi_mask() documentation.
	 */
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

void __init smp_set_ops(const struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}
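/*
 * Illustrative sketch only (hypothetical "foo" platform, not defined in
 * this file): smp_set_ops() is normally reached from setup_arch() via the
 * machine descriptor, so a platform typically wires its operations up as:
 *
 *	static const struct smp_operations foo_smp_ops __initconst = {
 *		.smp_init_cpus		= foo_smp_init_cpus,
 *		.smp_prepare_cpus	= foo_smp_prepare_cpus,
 *		.smp_boot_secondary	= foo_boot_secondary,
 *	};
 *
 *	DT_MACHINE_START(FOO_DT, "Foo Platform (Device Tree)")
 *		.smp	= smp_ops(foo_smp_ops),
 *	MACHINE_END
 *
 * DT-only platforms can use CPU_METHOD_OF_DECLARE() instead, selected via
 * the "enable-method" property in the device tree.
 */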

/*
 * On LPAE the secondary core is handed a PFN, since the page directory may
 * live above the 4GiB physical boundary; classic MMUs take a plain
 * physical address.
 */
static unsigned long get_arch_pgd(pgd_t *pgd)
{
#ifdef CONFIG_ARM_LPAE
	return __phys_to_pfn(virt_to_phys(pgd));
#else
	return virt_to_phys(pgd);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	if (!smp_ops.smp_boot_secondary)
		return -ENOSYS;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = virt_to_phys(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = smp_ops.smp_boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int platform_can_secondary_boot(void)
{
	return !!smp_ops.smp_boot_secondary;
}

int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	return 0;
}

int platform_can_hotplug_cpu(unsigned int cpu)
{
	/* cpu_die must be specified to support hotplug */
	if (!smp_ops.cpu_die)
		return 0;

	if (smp_ops.cpu_can_disable)
		return smp_ops.cpu_can_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu != 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		pr_err("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void arch_cpu_idle_dead(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * unless a failure is detected.  If the platform returns, it must
	 * cope with this CPU coming back online and re-entering the kernel
	 * via the code below.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back into the secondary
	 * startup path, on a freshly reset SVC stack.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();
	local_abt_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	/* BogoMIPS = loops_per_jiffy * HZ / 500000, printed to 2 decimals */
	pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum / (500000/HZ),
		(bogosum / (5000/HZ)) % 100);

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time. A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there are more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}
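/*
 * Note: the cross-call backend is normally registered by the interrupt
 * controller driver; e.g. the GIC driver calls
 * set_smp_cross_call(gic_raise_softirq) so that IPIs are delivered as
 * software-generated interrupts (SGIs).
 */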

struct ipi {
	const char *desc;
	void (*handler)(void);
};

static void ipi_cpu_stop(void);
static void ipi_complete(void);

#define IPI_DESC_STRING_IPI_WAKEUP	"CPU wakeup interrupts"
#define IPI_DESC_STRING_IPI_TIMER	"Timer broadcast interrupts"
#define IPI_DESC_STRING_IPI_RESCHEDULE	"Rescheduling interrupts"
#define IPI_DESC_STRING_IPI_CALL_FUNC	"Function call interrupts"
#define IPI_DESC_STRING_IPI_CPU_STOP	"CPU stop interrupts"
#define IPI_DESC_STRING_IPI_IRQ_WORK	"IRQ work interrupts"
#define IPI_DESC_STRING_IPI_COMPLETION	"completion interrupts"

#define IPI_DESC_STR(x)	IPI_DESC_STRING_ ## x

static const char *ipi_desc_strings[] __tracepoint_string = {
	[IPI_WAKEUP] = IPI_DESC_STR(IPI_WAKEUP),
	[IPI_TIMER] = IPI_DESC_STR(IPI_TIMER),
	[IPI_RESCHEDULE] = IPI_DESC_STR(IPI_RESCHEDULE),
	[IPI_CALL_FUNC] = IPI_DESC_STR(IPI_CALL_FUNC),
	[IPI_CPU_STOP] = IPI_DESC_STR(IPI_CPU_STOP),
	[IPI_IRQ_WORK] = IPI_DESC_STR(IPI_IRQ_WORK),
	[IPI_COMPLETION] = IPI_DESC_STR(IPI_COMPLETION),
};

static void tick_receive_broadcast_local(void)
{
	tick_receive_broadcast();
}

static struct ipi ipi_types[NR_IPI] = {
#define S(x, f)	[x].desc = IPI_DESC_STR(x), [x].handler = f
	S(IPI_WAKEUP, NULL),
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	S(IPI_TIMER, tick_receive_broadcast_local),
#endif
	S(IPI_RESCHEDULE, scheduler_ipi),
	S(IPI_CALL_FUNC, generic_smp_call_function_interrupt),
	S(IPI_CPU_STOP, ipi_cpu_stop),
#ifdef CONFIG_IRQ_WORK
	S(IPI_IRQ_WORK, irq_work_run),
#endif
	S(IPI_COMPLETION, ipi_complete),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_desc_strings[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		if (ipi_types[i].handler) {
			seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
			for_each_present_cpu(cpu)
				seq_printf(p, "%10u ",
					   __get_irq_stat(cpu, ipi_irqs[i]));
			seq_printf(p, " %s\n", ipi_types[i].desc);
		}
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (arch_irq_work_has_interrupt())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	unsigned int cpu = smp_processor_id();

	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(void)
{
	unsigned int cpu = smp_processor_id();

	complete(per_cpu(cpu_completion, cpu));
}
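/*
 * Illustrative usage sketch (caller-side code, not part of this file):
 * a waiter arms a completion for a target CPU, kicks it, and blocks
 * until ipi_complete() fires on that CPU - the pattern used by e.g. the
 * big.LITTLE switcher:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	smp_cross_call(cpumask_of(cpu),
 *		       register_ipi_completion(&done, cpu));
 *	wait_for_completion(&done);
 */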

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if (ipi_types[ipinr].handler) {
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
		irq_enter();
		(*ipi_types[ipinr].handler)();
		irq_exit();
	} else {
		pr_debug("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
	}

	set_irq_regs(old_regs);
}

/*
 * set_ipi_handler: Interface provided for a kernel module to specify an
 * IPI handler function.
 */
int set_ipi_handler(int ipinr, void *handler, char *desc)
{
	unsigned int cpu = smp_processor_id();

	if (ipi_types[ipinr].handler) {
		pr_crit("CPU%u: IPI handler 0x%x already registered to %pf\n",
			cpu, ipinr, ipi_types[ipinr].handler);
		return -1;
	}

	ipi_types[ipinr].handler = handler;
	ipi_types[ipinr].desc = desc;

	return 0;
}
EXPORT_SYMBOL(set_ipi_handler);

/*
 * clear_ipi_handler: Interface provided for a kernel module to clear an
 * IPI handler function.
 */
void clear_ipi_handler(int ipinr)
{
	ipi_types[ipinr].handler = NULL;
	ipi_types[ipinr].desc = NULL;
}
EXPORT_SYMBOL(clear_ipi_handler);
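/*
 * Illustrative module usage sketch (hypothetical names: IPI_FOO stands in
 * for a slot below NR_IPI that the ipi_types table leaves unused on the
 * platform in question):
 *
 *	static void foo_ipi_handler(void)
 *	{
 *		pr_info("foo IPI on CPU%u\n", smp_processor_id());
 *	}
 *
 *	if (set_ipi_handler(IPI_FOO, foo_ipi_handler, "foo event"))
 *		return -EBUSY;
 *	...
 *	clear_ipi_handler(IPI_FOO);	(on module exit)
 */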

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;
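/*
 * loops_per_jiffy (lpj) calibrates the software delay loop and scales
 * linearly with CPU clock frequency, so on a transition the reference
 * value is rescaled rather than recalibrated:
 *
 *	lpj_new = lpj_ref * freq_new / freq_ref
 *
 * which is what cpufreq_scale() computes below.  The PRE/POST split in
 * the callback keeps lpj pessimistic while a change is in flight: lpj is
 * raised before a frequency increase and lowered only after a decrease,
 * so udelay() can overshoot slightly but never undershoot.
 */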

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif

static void raise_nmi(cpumask_t *mask)
{
	/*
	 * Generate the backtrace directly if we are running in a calling
	 * context that is not preemptible by the backtrace IPI. Note
	 * that nmi_cpu_backtrace() automatically removes the current cpu
	 * from mask.
	 */
	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
		nmi_cpu_backtrace(NULL);

	smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}