/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@fsmlabs.com) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@fsmlabs.com>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;

DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
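
/*
 * Worked example (values assumed for illustration): booting with
 * smt-enabled=2 on a machine with 4 threads per core leaves
 * smt_enabled_at_boot == 2, so cpu 3 (thread 3 of core 0) has
 * cpu_thread_in_core(3) == 3 >= 2 and is not booted, while threads
 * 0 and 1 of each core come up normally.
 */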

#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	if (nr < 0 || nr >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero. After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca_ptrs[nr]->cpu_start) {
		paca_ptrs[nr]->cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */

static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}

static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}

#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
{
	smp_handle_nmi_ipi(get_irq_regs());
	return IRQ_HANDLED;
}
#endif

static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
	[PPC_MSG_NMI_IPI] = nmi_ipi_action,
#endif
};

/*
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for use in panic or debugging.
 */
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_NMI_IPI] = "nmi ipi",
};

int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_NMI_IPI)
		return -EINVAL;
#ifndef CONFIG_NMI_IPI
	if (msg == PPC_MSG_NMI_IPI)
		return 1;
#endif

	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
		virq, smp_ipi_name[msg], err);

	return err;
}
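
/*
 * Sketch of the expected use by an interrupt controller driver, typically
 * from its smp_ops->probe() hook (the virq mapping shown is illustrative):
 *
 *	virq = irq_create_mapping(domain, hw_ipi);
 *	smp_request_message_ipi(virq, PPC_MSG_CALL_FUNCTION);
 *
 * A positive return value means the message type is compiled out
 * (PPC_MSG_NMI_IPI without CONFIG_NMI_IPI) and can be ignored; a negative
 * value is the request_irq() error.
 */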

#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;			/* current messages */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);

void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}

void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu);
}

#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
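
/*
 * The layout above gives each message type its own byte of info->messages:
 * a sender sets a message with a single byte store and the receiver
 * collects and clears all pending messages with one xchg(). For example,
 * on little-endian and assuming PPC_MSG_RESCHEDULE == 1,
 * IPI_MESSAGE(PPC_MSG_RESCHEDULE) == 1UL << 8, which is exactly the byte
 * that smp_muxed_ipi_set_message() wrote with message[1] = 1.
 */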

irqreturn_t smp_ipi_demux(void)
{
	mb();	/* order any irq clear */

	return smp_ipi_demux_relaxed();
}

/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
{
	struct cpu_messages *info;
	unsigned long all;

	info = this_cpu_ptr(&ipi_message);
	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
#ifdef CONFIG_NMI_IPI
		if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
			nmi_ipi_action(0, NULL);
#endif
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */

static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}

void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}

#ifdef CONFIG_NMI_IPI

/*
 * "NMI IPI" system.
 *
 * NMI IPIs may not be recoverable, so should not be used as ongoing part of
 * a running system. They can be used for crash, debug, halt/reboot, etc.
 *
 * NMI IPIs are globally single threaded. No more than one in progress at
 * any time.
 *
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then the call returns.
 *
 * No new NMI can be initiated until targets exit the handler.
 *
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which would cause a crash.
 */

static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static int nmi_ipi_busy_count = 0;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;

static void nmi_ipi_lock_start(unsigned long *flags)
{
	raw_local_irq_save(*flags);
	hard_irq_disable();
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
		raw_local_irq_restore(*flags);
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static void nmi_ipi_lock(void)
{
	while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
		spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
}

static void nmi_ipi_unlock(void)
{
	smp_mb();
	WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
	atomic_set(&__nmi_ipi_lock, 0);
}

static void nmi_ipi_unlock_end(unsigned long *flags)
{
	nmi_ipi_unlock();
	raw_local_irq_restore(*flags);
}

/*
 * Platform NMI handler calls this to ack
 */
int smp_handle_nmi_ipi(struct pt_regs *regs)
{
	void (*fn)(struct pt_regs *);
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 0;

	/*
	 * Unexpected NMIs are possible here because the interrupt may not
	 * be able to distinguish NMI IPIs from other types of NMIs, or
	 * because the caller may have timed out.
	 */
	nmi_ipi_lock_start(&flags);
	if (!nmi_ipi_busy_count)
		goto out;
	if (!cpumask_test_cpu(me, &nmi_ipi_pending_mask))
		goto out;

	fn = nmi_ipi_function;
	if (!fn)
		goto out;

	cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	ret = 1;

	fn(regs);

	nmi_ipi_lock();
	nmi_ipi_busy_count--;
out:
	nmi_ipi_unlock_end(&flags);

	return ret;
}

static void do_smp_send_nmi_ipi(int cpu)
{
	if (smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
		return;

	if (cpu >= 0) {
		do_message_pass(cpu, PPC_MSG_NMI_IPI);
	} else {
		int c;

		for_each_online_cpu(c) {
			if (c == raw_smp_processor_id())
				continue;
			do_message_pass(c, PPC_MSG_NMI_IPI);
		}
	}
}

/* Wait (spinning) for an in-progress NMI IPI to complete, for up to
 * delay_us microseconds (0 means wait indefinitely).
 */
void smp_flush_nmi_ipi(u64 delay_us)
{
	unsigned long flags;

	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				return;
		}
		nmi_ipi_lock_start(&flags);
	}
	nmi_ipi_unlock_end(&flags);
}

/*
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   enter the handler, == 0 specifies indefinite delay.
 */
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
{
	unsigned long flags;
	int me = raw_smp_processor_id();
	int ret = 1;

	BUG_ON(cpu == me);
	BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);

	if (unlikely(!smp_ops))
		return 0;

	/* Take the nmi_ipi_busy count/lock with interrupts hard disabled */
	nmi_ipi_lock_start(&flags);
	while (nmi_ipi_busy_count) {
		nmi_ipi_unlock_end(&flags);
		spin_until_cond(nmi_ipi_busy_count == 0);
		nmi_ipi_lock_start(&flags);
	}

	nmi_ipi_function = fn;

	if (cpu < 0) {
		/* ALL_OTHERS */
		cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
		cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
	} else {
		/* cpumask of one (specific CPU) */
		cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count++;
	nmi_ipi_unlock();

	do_smp_send_nmi_ipi(cpu);

	while (!cpumask_empty(&nmi_ipi_pending_mask)) {
		udelay(1);
		if (delay_us) {
			delay_us--;
			if (!delay_us)
				break;
		}
	}

	nmi_ipi_lock();
	if (!cpumask_empty(&nmi_ipi_pending_mask)) {
		/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
		ret = 0;
		cpumask_clear(&nmi_ipi_pending_mask);
	}
	nmi_ipi_busy_count--;
	nmi_ipi_unlock_end(&flags);

	return ret;
}
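
/*
 * Example caller (see smp_send_debugger_break() below): pass a handler
 * that receives the interrupted register state, and a timeout in
 * microseconds (0 means wait indefinitely); handler_fn is a placeholder:
 *
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, handler_fn, 1000000);
 *
 * The return value is 1 if every target entered the handler before the
 * timeout expired, 0 otherwise.
 */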
#endif /* CONFIG_NMI_IPI */

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif

#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
{
	debugger_ipi(regs);
}

void smp_send_debugger_break(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
}
#endif

#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	int cpu;

	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
	if (kdump_in_progress() && crash_wake_offline) {
		for_each_present_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			/*
			 * crash_ipi_callback will wait for
			 * all cpus, including offline CPUs.
			 * We don't care about nmi_ipi_function.
			 * Offline cpus will jump straight into
			 * crash_ipi_callback, we can skip the
			 * entire NMI dance and waiting for
			 * cpus to clear pending mask, etc.
			 */
			do_smp_send_nmi_ipi(cpu);
		}
	}
}
#endif

#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
	/*
	 * This is a special case because it never returns, so the NMI IPI
	 * handling would never mark it as done, which makes any later
	 * smp_send_nmi_ipi() call spin forever. Mark it done now.
	 *
	 * IRQs are already hard disabled by smp_handle_nmi_ipi.
	 */
	nmi_ipi_lock();
	nmi_ipi_busy_count--;
	nmi_ipi_unlock();

	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}

#else /* CONFIG_NMI_IPI */

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}

void smp_send_stop(void)
{
	static bool stopped = false;

	/*
	 * Prevent waiting on csd lock from a previous smp_send_stop.
	 * This is racy, but in general callers try to do the right
	 * thing and only fire off one smp_send_stop (e.g., see
	 * kernel/panic.c)
	 */
	if (stopped)
		return;

	stopped = true;

	smp_call_function(stop_this_cpu, NULL, 0);
}
#endif /* CONFIG_NMI_IPI */

struct thread_info *current_set[NR_CPUS];

static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}

/*
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks so
 * rather than just passing around the cpumask we pass around a function that
 * returns that cpumask for the given CPU.
 */
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
{
	cpumask_set_cpu(i, get_cpumask(j));
	cpumask_set_cpu(j, get_cpumask(i));
}
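
/*
 * For example, set_cpus_related(2, 3, cpu_sibling_mask) marks CPUs 2 and 3
 * as thread siblings of each other by setting each CPU in the other's
 * cpu_sibling_map; i == j is also fine and makes a CPU its own sibling.
 */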

#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
			       struct cpumask *(*get_cpumask)(int))
{
	cpumask_clear_cpu(i, get_cpumask(j));
	cpumask_clear_cpu(j, get_cpumask(i));
}
#endif

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	/* Init the cpumasks so the boot CPU is related to itself */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}

void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca_ptrs[boot_cpuid]->__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}

#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	/* Update affinity of all IRQs previously aimed at this CPU */
	irq_migrate_all_off_this_cpu();

	/*
	 * Depending on the details of the interrupt controller, it's possible
	 * that one of the interrupts we just migrated away from this CPU is
	 * actually already pending on this CPU. If we leave it in that state
	 * the interrupt will never be EOI'ed, and will never fire again. So
	 * temporarily enable interrupts here, to allow any pending interrupt
	 * to be received (and EOI'ed), before we take this CPU offline.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	pr_err("CPU%d didn't die...\n", cpu);
}

void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}

/*
 * The cpu_state should be switched to CPU_UP_PREPARE if the cpu is being
 * brought back online (see smp_generic_kick_cpu()); otherwise a dead cpu
 * spinning in generic_check_cpu_restart() would never restart.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}

int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* CONFIG_HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif

static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca_ptrs[cpu]->__current = idle;
	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/*
	 * The platform might need to allocate resources prior to bringing
	 * up the CPU
	 */
	if (smp_ops->prepare_cpu) {
		rc = smp_ops->prepare_cpu(cpu);
		if (rc)
			return rc;
	}

	/* Make sure the callin map entry is 0 (it can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The stores above must be visible to the secondary CPU before it
	 * is kicked.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * Wait to see if the cpu made a callin (is actually up). The
	 * timeout values were found through experimentation.
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case. Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		pr_err("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	spin_until_cond(cpu_online(cpu));

	return 0;
}

/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
EXPORT_SYMBOL_GPL(cpu_to_core_id);

/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);

/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}

static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
{
	struct device_node *l2_cache, *np;
	int i;

	l2_cache = cpu_to_l2cache(cpu);
	if (!l2_cache)
		return false;

	for_each_cpu(i, cpu_online_mask) {
		/*
		 * when updating the marks the current CPU has not been marked
		 * online, but we need to update the cache masks
		 */
		np = cpu_to_l2cache(i);
		if (!np)
			continue;

		if (np == l2_cache)
			set_cpus_related(cpu, i, mask_fn);

		of_node_put(np);
	}
	of_node_put(l2_cache);

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
	int i;

	/* NB: cpu_core_mask is a superset of the others */
	for_each_cpu(i, cpu_core_mask(cpu)) {
		set_cpus_unrelated(cpu, i, cpu_core_mask);
		set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
		set_cpus_unrelated(cpu, i, cpu_sibling_mask);
	}
}
#endif

static void add_cpu_to_masks(int cpu)
{
	int first_thread = cpu_first_thread_sibling(cpu);
	int chipid = cpu_to_chip_id(cpu);
	int i;

	/*
	 * This CPU will not be in the online mask yet so we need to manually
	 * add it to its own thread sibling mask.
	 */
	cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));

	for (i = first_thread; i < first_thread + threads_per_core; i++)
		if (cpu_online(i))
			set_cpus_related(i, cpu, cpu_sibling_mask);

	/*
	 * Copy the thread sibling mask into the cache sibling mask
	 * and mark any CPUs that share an L2 with this CPU.
	 */
	for_each_cpu(i, cpu_sibling_mask(cpu))
		set_cpus_related(cpu, i, cpu_l2_cache_mask);
	update_mask_by_l2(cpu, cpu_l2_cache_mask);

	/*
	 * Copy the cache sibling mask into the core sibling mask and mark
	 * any CPUs on the same chip as this CPU.
	 */
	for_each_cpu(i, cpu_l2_cache_mask(cpu))
		set_cpus_related(cpu, i, cpu_core_mask);

	if (chipid == -1)
		return;

	for_each_cpu(i, cpu_online_mask)
		if (cpu_to_chip_id(i) == chipid)
			set_cpus_related(cpu, i, cpu_core_mask);
}
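
/*
 * Worked example (an assumed POWER9-like layout: 4 threads per core, one
 * L2 shared by a pair of cores): after add_cpu_to_masks(cpu),
 * cpu_sibling_mask(cpu) holds the 4 threads of the core,
 * cpu_l2_cache_mask(cpu) the 8 threads of the core pair, and
 * cpu_core_mask(cpu) every online thread with the same chip-id.
 */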

static bool shared_caches;

/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	/* Update topology CPU masks */
	add_cpu_to_masks(cpu);

	/*
	 * Check for any shared caches. Note that this must be done on a
	 * per-core basis because one core in the pair might be disabled.
	 */
	if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
		shared_caches = true;

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}
	return flags;
}
#endif

static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/*
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent
 * cores because the migrated task remains cache hot. We want to take
 * advantage of this at the scheduler level so an extra topology level is
 * required.
 */
static int powerpc_shared_cache_flags(void)
{
	return SD_SHARE_PKG_RESOURCES;
}

/*
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
 */
static const struct cpumask *shared_cache_mask(int cpu)
{
	return cpu_l2_cache_mask(cpu);
}

static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

void __init smp_cpus_done(unsigned int max_cpus)
{
	/*
	 * We are running pinned to the boot CPU, see rest_init().
	 */
	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	/*
	 * If any CPU detects that it's sharing a cache with another CPU then
	 * use the deeper topology that is aware of this sharing.
	 */
	if (shared_caches) {
		pr_info("Using shared cache scheduler topology\n");
		set_sched_topology(power9_topology);
	} else {
		pr_info("Using standard scheduler topology\n");
		set_sched_topology(powerpc_topology);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	remove_cpu_from_masks(cpu);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}

#endif