/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@fsmlabs.com) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@fsmlabs.com>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif
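
/*
 * secondary_ti points at the thread_info of the next CPU to be
 * started; it is written in cpu_idle_thread_init() and consumed by
 * the secondary entry code to locate its initial stack.
 */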
struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
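
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or limited
 * on the command line.
 */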
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start().
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * It's not spinning there, so it might be soft-unplugged;
	 * let's try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif

	return 0;
}
#endif /* CONFIG_PPC64 */
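
/*
 * Per-message interrupt handlers, used when the platform delivers
 * each SMP message as a separate interrupt; smp_request_message_ipi()
 * below wires these up.
 */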
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif

	return IRQ_HANDLED;
}

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};

const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
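
/*
 * Request the virq for one SMP message.  Platform code calls this for
 * each message it routes as a separate interrupt, e.g.
 * smp_request_message_ipi(virq, PPC_MSG_RESCHEDULE).
 */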
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK)
		return -EINVAL;
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK)
		return 1;
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
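
/*
 * Muxed IPI path: platforms with a single IPI channel implement
 * smp_ops->cause_ipi, and the message types are multiplexed over it,
 * one byte of cpu_messages.messages per message.
 */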
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	int messages;			/* current messages */
	unsigned long data;		/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}
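
/*
 * Each message occupies one byte of the "messages" word, so all
 * pending messages can be drained with a single xchg(); which byte a
 * given message lands in depends on endianness.
 */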
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1 << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = &__get_cpu_var(ipi_message);
	unsigned int all;

	mb();	/* order any irq save */

	do {
		all = xchg(&info->messages, 0);
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
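
/*
 * Deliver one message to one cpu, using the platform's message_pass
 * hook when it has one and the muxed-IPI path otherwise.
 */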
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

struct thread_info *current_set[NR_CPUS];
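
/*
 * Capture per-CPU identification at bringup: the PVR, plus the
 * initial TLBCAM replacement index on FSL Book3E.
 */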
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * smp_prepare_cpus() must run on the boot CPU; everything
	 * below initialises the CPU it is running on.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}
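
/*
 * Wait up to ten seconds (100 x 100ms) for the dying CPU to reach
 * CPU_DEAD, which it signals via generic_mach_cpu_die() or
 * generic_set_cpu_dead().
 */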
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The dying CPU parks once its cpu_state reads CPU_DEAD; flipping the
 * state back to CPU_UP_PREPARE here is what lets a soft-unplugged CPU
 * pass generic_check_cpu_restart() and come back up.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
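
/*
 * Onlining secondary threads is inhibited while KVM is active in HV
 * mode, since KVM HV expects to control the other threads of the
 * (sub)core; __cpu_up() checks this before kicking a thread.
 */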
static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
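
/*
 * Hand the new idle task to the secondary: set its paca pointers (on
 * ppc64) and publish its thread_info through secondary_ti so the
 * early entry code can find its stack.
 */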
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE -
		STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited.
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * Make sure the callin map entry is 0 (it can be left over
	 * from a previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/*
	 * The information for processor bringup must be written out
	 * to main store before we release the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up); at
	 * boot, spin for up to five seconds.
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}

/*
 * Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
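
/*
 * Update the core maps: mark (or unmark, when add is false) cpu and
 * every cpu sharing its "ibm,chip-id" as core siblings of each other.
 */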
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	/* Otherwise, fall back to grouping cpus by shared L2 cache */
	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/*
		 * cpu_core_map should be a superset of cpu_sibling_map
		 * even if we don't have cache information, so update
		 * the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif
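
/* Scheduler topology levels: SMT siblings first, then the whole die. */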
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/*
	 * We want the setup_cpu() here to be called on the boot CPU,
	 * but our init thread may have been "borrowed" by another CPU
	 * in the meantime, so pin ourselves there for a short while.
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif