/*
 * SMP support for ppc.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
#include <linux/pgtable.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct task_struct *secondary_current;
bool has_big_cores;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);

#define MAX_THREAD_LIST_SIZE	8
#define THREAD_GROUP_SHARE_L1	1
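
/*
 * Layout of the "ibm,thread-groups" device tree property, as parsed by
 * parse_thread_groups() below:
 *
 * @property:          which resource the threads in a group share
 *                     (THREAD_GROUP_SHARE_L1 means a shared L1 cache).
 * @nr_groups:         number of thread groups in the core.
 * @threads_per_group: number of hardware threads in each group.
 * @thread_list:       flattened list of the hardware thread ids,
 *                     nr_groups * threads_per_group entries.
 */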
struct thread_groups {
	unsigned int property;
	unsigned int nr_groups;
	unsigned int threads_per_group;
	unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};

/*
 * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
 * the set of its sibling threads that share the L1 cache.
 */
DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/*
	 * Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
			&& cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * OK, it's not there; it may have been soft-unplugged, so
	 * try to bring it back up.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
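
/*
 * Handlers for the individual IPI messages. Platforms whose interrupt
 * controller provides a separate interrupt per message wire these up via
 * smp_request_message_ipi(); platforms with a single IPI demultiplex the
 * messages in smp_ipi_demux() below.
 */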
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	timer_broadcast_interrupt();
	return IRQ_HANDLED;
}
#endif

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for use in debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = "nmi ipi",
#endif
};

int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}
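
/*
 * Muxed IPIs: platforms with only a single hardware IPI multiplex the
 * PPC_MSG_* message types through one per-cpu long, one byte per message.
 * A sender sets its byte and rings the single doorbell/IPI; the receiver
 * atomically swaps the word to zero and handles every message that was
 * set (see smp_muxed_ipi_message_pass() and smp_ipi_demux() below).
 */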
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order previous accesses against the message read below */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			timer_broadcast_interrupt();
#endif
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
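
/*
 * Send a single message to one CPU, using the platform's message_pass
 * hook when it has per-message interrupts, otherwise the muxed IPI path.
 */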
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *) = NULL;
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
		fn = READ_ONCE(nmi_ipi_function);
		WARN_ON_ONCE(!fn);
		ret = 1;
	}
	nmi_ipi_unlock_end(&flags);

	if (fn)
		fn(regs);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu, bool safe)
{
	if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
 */
static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *),
			      u64 delay_us, bool safe)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(!nmi_ipi_busy);
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_busy = true;
	nmi_ipi_function = fn;

	WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}

	nmi_ipi_unlock();

	/* Interrupts remain hard disabled */

	do_smp_send_nmi_ipi(cpu, safe);

	nmi_ipi_lock();
	/* nmi_ipi_busy is set here, so unlock/lock is okay */
	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		nmi_ipi_unlock();
		udelay(1);
		nmi_ipi_lock();
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}

	nmi_ipi_function = NULL;
	nmi_ipi_busy = false;

	nmi_ipi_unlock_end(&flags);

	return ret;
}

int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
}

int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
}
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu, false);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * IRQs are already hard disabled by smp_handle_nmi_ipi().
	 */
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing only once anyway.
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct task_struct *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks,
 * so rather than just passing around a cpumask we pass around a function
 * that returns the cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

/*
 * parse_thread_groups: Parses the "ibm,thread-groups" device tree
 *                      property for the CPU device node @dn and stores
 *                      the parsed output in the thread_groups
 *                      structure @tg if the property value in
 *                      ibm,thread-groups[0] matches @property.
 *
 * @dn: The device node of the CPU device.
 * @tg: Pointer to a thread group structure into which the parsed
 *      output of "ibm,thread-groups" is stored.
 * @property: The property of the thread-group that the caller is
 *            interested in.
 *
 * ibm,thread-groups[0] tells us the property based on which the
 * threads are being grouped together. If this value is 1, it implies
 * that the threads in the same group share the L1 cache.
 *
 * ibm,thread-groups[1] tells us how many such thread groups exist.
 *
 * ibm,thread-groups[2] tells us the number of threads in each such
 * group.
 *
 * ibm,thread-groups[3..N-1] is the list of threads, identified by
 * "ibm,ppc-interrupt-server#s", arranged group by group.
 *
 * Example: "ibm,thread-groups" = [1,2,4,5,6,7,8,9,10,11,12] describes
 * two groups of four threads each that share an L1 cache: hardware
 * threads {5,6,7,8} form the first group and {9,10,11,12} the second.
 *
 * Returns 0 on success, a negative errno if the property is missing,
 * malformed, or does not describe @property.
 */
static int parse_thread_groups(struct device_node *dn,
			       struct thread_groups *tg,
			       unsigned int property)
{
	int i;
	u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
	u32 *thread_list;
	size_t total_threads;
	int ret;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array, 3);
	if (ret)
		return ret;

	tg->property = thread_group_array[0];
	tg->nr_groups = thread_group_array[1];
	tg->threads_per_group = thread_group_array[2];
	if (tg->property != property ||
	    tg->nr_groups < 1 ||
	    tg->threads_per_group < 1)
		return -ENODATA;

	total_threads = tg->nr_groups * tg->threads_per_group;

	ret = of_property_read_u32_array(dn, "ibm,thread-groups",
					 thread_group_array,
					 3 + total_threads);
	if (ret)
		return ret;

	thread_list = &thread_group_array[3];

	for (i = 0; i < total_threads; i++)
		tg->thread_list[i] = thread_list[i];

	return 0;
}

/*
 * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
 *                              that @cpu belongs to.
 *
 * @cpu: The logical CPU whose thread group is being searched.
 * @tg: The thread-group structure of the CPU node which @cpu belongs
 *      to.
 *
 * Returns the index into tg->thread_list that points to the start
 * of the thread group that @cpu belongs to.
 *
 * Returns -1 if @cpu doesn't belong to any of the groups pointed to by
 * tg->thread_list.
 */
static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
{
	int hw_cpu_id = get_hard_smp_processor_id(cpu);
	int i, j;

	for (i = 0; i < tg->nr_groups; i++) {
		int group_start = i * tg->threads_per_group;

		for (j = 0; j < tg->threads_per_group; j++) {
			int idx = group_start + j;

			if (tg->thread_list[idx] == hw_cpu_id)
				return group_start;
		}
	}

	return -1;
}

static int init_cpu_l1_cache_map(int cpu)
{
	struct device_node *dn = of_get_cpu_node(cpu, NULL);
	struct thread_groups tg = {.property = 0,
				   .nr_groups = 0,
				   .threads_per_group = 0};
	int first_thread = cpu_first_thread_sibling(cpu);
	int i, cpu_group_start = -1, err = 0;

	if (!dn)
		return -ENODATA;

	err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
	if (err)
		goto out;

	zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
				GFP_KERNEL,
				cpu_to_node(cpu));

	cpu_group_start = get_cpu_thread_group_start(cpu, &tg);

	if (unlikely(cpu_group_start == -1)) {
		WARN_ON_ONCE(1);
		err = -ENODATA;
		goto out;
	}

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		int i_group_start = get_cpu_thread_group_start(i, &tg);

		if (unlikely(i_group_start == -1)) {
			WARN_ON_ONCE(1);
			err = -ENODATA;
			goto out;
		}

		if (i_group_start == cpu_group_start)
			cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
	}

out:
	of_node_put(dn);
	return err;
}

static int init_big_cores(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int err = init_cpu_l1_cache_map(cpu);

		if (err)
			return err;

		zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
					GFP_KERNEL,
					cpu_to_node(cpu));
	}

	has_big_cores = true;
	return 0;
}
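
/*
 * Allocate the per-cpu topology cpumasks and record NUMA information for
 * every possible CPU before any secondaries are started.
 */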
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	init_big_cores();
	if (has_big_cores) {
		cpumask_set_cpu(boot_cpuid,
				cpu_smallcore_mask(boot_cpuid));
	}

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = current;
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU
	 * is actually already pending on this CPU. If we leave it in that
	 * state the interrupt will never be EOI'ed, and will never fire
	 * again. So temporarily enable interrupts here, to allow any
	 * pending interrupt to be received (and EOI'ed), before we take
	 * this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * Mark a (possibly soft-offlined) CPU as being brought back up, so that
 * generic_check_cpu_restart() will report that it should resume. Called
 * from smp_generic_kick_cpu() among others.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
				 THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	idle->cpu = cpu;
	secondary_current = current_set[cpu] = idle;
}

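/*
 * Bring one secondary CPU online: hand it an idle task, let the platform
 * prepare and kick it, then wait for it to call in from start_secondary()
 * and appear in the online mask.
 */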
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU.
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/*
	 * Make sure the callin-map entry is 0 (can be leftover from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/*
	 * The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up).
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/*
 * Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/*
 * Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
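
/*
 * Set bits in the cpumask returned by @mask_fn for every online CPU that
 * shares an l2-cache device tree node with @cpu. Returns false if @cpu
 * has no l2-cache node.
 */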
static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache)
		return false;

	for_each_cpu(i, cpu_online_mask) {
		/*
		 * when updating the masks the current CPU has not been
		 * marked online, but we still need to update its cache
		 * masks.
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
		if (has_big_cores)
			set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
	}
}
#endif

static inline void add_cpu_to_smallcore_masks(int cpu)
{
	struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
	int i, first_thread = cpu_first_thread_sibling(cpu);

	if (!has_big_cores)
		return;

	cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++) {
		if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
			set_cpus_related(i, cpu, cpu_smallcore_mask);
	}
}

int get_physical_package_id(int cpu)
{
	int pkg_id = cpu_to_chip_id(cpu);

	/*
	 * If the platform is PowerNV or a KVM guest, ibm,chip-id is
	 * defined, so the chip-id returned above is used directly. Under
	 * PAPR (PPC_SPLPAR) it is not available, so fall back to the NUMA
	 * node of the CPU device node.
	 */
	if (pkg_id == -1 && firmware_has_feature(FW_FEATURE_LPAR) &&
	    IS_ENABLED(CONFIG_PPC_SPLPAR)) {
		struct device_node *np = of_get_cpu_node(cpu, NULL);
		pkg_id = of_node_to_nid(np);
		of_node_put(np);
	}

	return pkg_id;
}
EXPORT_SYMBOL_GPL(get_physical_package_id);

static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int pkg_id = get_physical_package_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	add_cpu_to_smallcore_masks(cpu);
	/*
	 * Copy the thread sibling mask into the cache sibling mask
	 * and mark any CPUs that share an L2 with this CPU.
	 */
	for_each_cpu(i, cpu_sibling_mask(cpu))
		set_cpus_related(cpu, i, cpu_l2_cache_mask);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	/*
	 * Copy the cache sibling mask into core sibling mask and mark
	 * any CPUs on the same chip as this CPU.
	 */
	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		set_cpus_related(cpu, i, cpu_core_mask);

	if (pkg_id == -1)
		return;

	for_each_cpu(i, cpu_online_mask)
		if (get_physical_package_id(i) == pkg_id)
			set_cpus_related(cpu, i, cpu_core_mask);
}

static bool shared_caches;

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	if (has_big_cores)
		sibling_mask = cpu_smallcore_mask;
	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
		shared_caches = true;

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	boot_init_stack_canary();

	local_irq_enable();

	/* We can enable ftrace for secondary cpus now */
	this_cpu_enable_ftrace();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
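
/*
 * Two scheduler topologies are defined below: powerpc_topology with the
 * usual SMT and DIE levels, and power9_topology which adds a CACHE level
 * built from cpu_l2_cache_mask for machines where cores share an L2.
 * smp_cpus_done() picks one of them based on shared_caches.
 */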
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent
 * cores because they share a cache, so we want the domain to be marked
 * accordingly.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return cpu_l2_cache_mask(cpu);
}

#ifdef CONFIG_SCHED_SMT
static const struct cpumask *smallcore_smt_mask(int cpu)
{
	return cpu_smallcore_mask(cpu);
}
#endif

static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

#ifdef CONFIG_SCHED_SMT
	if (has_big_cores) {
		pr_info("Big cores detected but using small core scheduling\n");
		power9_topology[0].mask = smallcore_smt_mask;
		powerpc_topology[0].mask = smallcore_smt_mask;
	}
#endif
	/*
	 * If any CPU detects that it's sharing a cache with another CPU then
	 * use the deeper topology that is aware of this sharing.
	 */
	if (shared_caches) {
		pr_info("Using shared cache scheduler topology\n");
		set_sched_topology(power9_topology);
	} else {
		pr_info("Using standard scheduler topology\n");
		set_sched_topology(powerpc_topology);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	this_cpu_disable_ftrace();

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	/*
	 * Disable ftrace on the down path. It will be re-enabled by
	 * start_secondary() via start_secondary_resume() below.
	 */
	this_cpu_disable_ftrace();

	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif