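/*
 * x86 SMP boot support: bring-up of secondary CPUs via the INIT/SIPI
 * (and, for a parked CPU0, NMI) protocol, CPU topology setup
 * (SMT/core/LLC sibling maps), and CPU hotplug / play_dead handling.
 */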
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>

#include <asm/smpboot_hooks.h>
#include <asm/i8259.h>

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Serializes CPU hotplug operations driven through sysfs, so that
 * off- and onlining cores cannot race with each other.
 */
static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);

void cpu_hotplug_driver_lock(void)
{
	mutex_lock(&x86_cpu_hotplug_driver_mutex);
}

void cpu_hotplug_driver_unlock(void)
{
	mutex_unlock(&x86_cpu_hotplug_driver_mutex);
}

/* CPU probe/release via sysfs is not implemented on x86 */
ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; }
#endif

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* Representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* Representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* CPUs sharing the last level cache with each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Set once the boot CPU has deasserted INIT during AP bring-up */
atomic_t init_deasserted;
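/*
 * Report back to the Boot Processor during boot time, or to the caller
 * processor during CPU online.
 */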
static void smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If waken up by an INIT in an 82489DX configuration
	 * we may need to ignore the INIT if our CPU is already up;
	 * the boot CPU (CPU#0) never waits here.
	 */
	cpuid = smp_processor_id();
	if (apic->wait_for_init_deassert && cpuid != 0)
		apic->wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();
	if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * Wait up to 2s total for the boot CPU to finish its STARTUP
	 * sequence and mark us in cpu_callout_mask (udelay is not yet
	 * working here, hence the jiffies-based loop).
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpumask_test_cpu(cpuid, cpu_callout_mask))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * The boot CPU has finished the init stage and is spinning on
	 * cpu_callin_mask until we finish. We are free to set up this
	 * CPU: first the APIC.
	 */
	pr_debug("CALLIN, before setup_local_APIC()\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;

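/*
 * Activate a secondary processor: entered from the real-mode trampoline
 * on this CPU's idle task stack.
 */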
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is too
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	/* the NMI wakeup path for CPU0, if it was armed, is done now */
	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id() before cpu_init() */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	/*
	 * We need to hold vector_lock so there the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up an AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(cpu_to_node(cpu1) != cpu_to_node(cpu2),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(_m, c1, c2)						\
do {									\
	cpumask_set_cpu((c1), cpu_##_m##_mask(c2));			\
	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));			\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (cpu_has_topoext) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
		    c->compute_unit_id == o->compute_unit_id)
			return topology_sane(c, o, "smt");

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
		return topology_sane(c, o, "llc");

	return false;
}

static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id) {
		if (cpu_has(c, X86_FEATURE_AMD_DCM))
			return true;

		return topology_sane(c, o, "mc");
	}
	return false;
}

void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(sibling, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(llc_shared, cpu, i);
	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * cpu_sibling_mask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_mc(c, o))) {
			link_mask(core, cpu, i);

			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}
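/*
 * Wake up a CPU by sending it an NMI rather than the INIT/STARTUP
 * sequence. Unlike INIT, this does not reset the chip hard; it is used
 * e.g. to wake a previously offlined CPU0, which parks itself in an
 * NMI-woken loop instead of the real-mode trampoline.
 */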
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip, kick it via NMI */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip, then send the IPI.
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	pr_debug("Deasserting INIT\n");

	/* Target chip, send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 stack_start);

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI: target chip, boot on the stack, kick the
		 * second CPU. The vector field carries start_eip >> 12.
		 */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

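/* Reduce the number of lines printed when booting a large machine */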
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);
	int max_cpu_present = find_last_bit(cpumask_bits(cpu_present_mask), NR_CPUS);

	if (system_state == SYSTEM_BOOTING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont(" OK\n");
			current_node = node;
			pr_info("Booting Node %3d, Processors ", node);
		}
		pr_cont(" #%4d%s", cpu, cpu == max_cpu_present ? " OK\n" : "");
		return;
	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	/*
	 * An NMI sent to a parked CPU0 is the wakeup itself; just claim
	 * it here so it isn't treated as an unknown NMI.
	 */
	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

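/*
 * Wake up an AP by the INIT, INIT, STARTUP sequence.
 *
 * CPU0 is special: once offlined it waits for an NMI rather than for
 * STARTUP (a STARTUP would run the BIOS boot-strap code, which is not
 * desired for the BSP), so waking it registers a temporary NMI handler
 * and sends an NMI instead. This only works for a soft-offlined CPU0.
 */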
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
			int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	/*
	 * Wake up APs by the INIT, INIT, STARTUP sequence.
	 */
	if (cpu)
		return wakeup_secondary_cpu_via_init(apicid, start_ip);

	/*
	 * Wake up CPU0 (the BSP) by NMI: register an NMI handler
	 * to catch the wakeup on CPU0 itself.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

	return boot_error;
}

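/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */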
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	int timeout;
	int cpu0_nmi_registered = 0;

	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	idle->thread.sp = (unsigned long) (((struct pt_regs *)
			  (THREAD_SIZE + task_stack_page(idle))) - 1);
	per_cpu(current_task, cpu) = idle;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	initial_gs = per_cpu_offset(cpu);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	stack_start = idle->thread.sp;

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * Wake up the CPU:
	 * - use the method in the APIC driver if one is defined;
	 * - otherwise use an INIT boot APIC message for APs, or NMI for
	 *   a parked CPU0 (BSP).
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     &cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Allow the AP to start initializing.
		 */
		pr_debug("Before Callout %d\n", cpu);
		cpumask_set_cpu(cpu, cpu_callout_mask);
		pr_debug("After Callout %d\n", cpu);

		/*
		 * Wait 5s total for a response.
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpumask_test_cpu(cpu, cpu_callin_mask))
				break;	/* It has booted */
			udelay(100);
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			schedule();
		}

		if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
			print_cpu_msr(&cpu_data(cpu));
			pr_debug("CPU%d: has booted.\n", cpu);
		} else {
			boot_error = 1;
			if (*trampoline_status == 0xA5A5A5A5)
				/* trampoline started but...? */
				pr_err("CPU%d: Stuck ??\n", cpu);
			else
				/* trampoline code not run */
				pr_err("CPU%d: Not responding\n", cpu);
			if (apic->inquire_remote_apic)
				apic->inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		numa_remove_cpu(cpu); /* was set by numa_add_cpu */

		/* was set by do_boot_cpu() */
		cpumask_clear_cpu(cpu, cpu_callout_mask);

		/* was set by cpu_init() */
		cpumask_clear_cpu(cpu, cpu_initialized_mask);

		set_cpu_present(cpu, false);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}
	/*
	 * Clean up the NMI handler. Do this after the callin and callout
	 * sync to avoid the impact of a possibly long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return boot_error;
}

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* the FPU context is blank, nobody can own it */
	__cpu_disable_lazy_restore(cpu);

	err = do_boot_cpu(apicid, cpu, tidle);
	if (err) {
		pr_debug("do_boot_cpu failed %d\n", err);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

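/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */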
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

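/*
 * Fall back to non-SMP mode after errors: undo just enough of the SMP
 * setup for uniprocessor operation.
 */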
static __init void disable_smp(void)
{
	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));
	smpboot_clear_io_apic_irqs();

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
}

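/*
 * Various sanity checks.
 */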
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		pr_notice("SMP motherboard not detected\n");
		disable_smp();
		if (APIC_init_uniprocessor())
			pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
		return -1;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		if (!disable_apic) {
			pr_err("BIOS bug, local APIC #%d not detected!...\n",
				boot_cpu_physical_apicid);
			pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
		}
		smpboot_clear_io_apic();
		disable_ioapic_support();
		return -1;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		pr_info("SMP mode deactivated\n");
		smpboot_clear_io_apic();

		connect_bsp_APIC();
		setup_local_APIC();
		bsp_end_local_APIC_setup();
		return -1;
	}

	return 0;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}
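/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */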
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	preempt_disable();
	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	current_thread_info()->cpu = 0;  /* needed? */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
		pr_info("SMP disabled\n");
		disable_smp();
		goto out;
	}

	default_setup_apic_routing();

	preempt_disable();
	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}
	preempt_enable();

	connect_bsp_APIC();

	/*
	 * Switch from PIC to APIC mode.
	 */
	setup_local_APIC();

	if (x2apic_mode)
		cpu0_logical_apicid = apic_read(APIC_LDR);
	else
		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));

	/*
	 * Enable IO APIC before setting up error vector.
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		enable_IO_APIC();

	bsp_end_local_APIC_setup();

	if (apic->setup_portio_remap)
		apic->setup_portio_remap();

	smpboot_setup_io_apic();
	/*
	 * Set up local APIC timer on boot CPU.
	 */
	pr_info("CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));
	x86_init.timers.setup_percpu_clockev();

	if (is_uv_system())
		uv_system_init();

	set_mtrr_aps_delayed_init();
out:
	preempt_enable();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

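/*
 * Early setup to make printk work.
 */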
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	nmi_selftest();
	impress_friends();
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

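/*
 * cpu_possible_mask should be static: it cannot change as CPUs are
 * onlined or offlined, because per-cpu data structures are allocated
 * by some modules at init time and are not reallocated on CPU
 * arrival/departure.  cpu_present_mask, on the other hand, can change
 * dynamically.  Without CPU hotplug, possible is capped at the
 * boot-time max_cpus; with hotplug, room for disabled (hot-addable)
 * CPUs is reserved as well.
 */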
__init void prefill_possible_map(void)
{
	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, cpu_core_mask(cpu)) {
		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
	cpumask_clear(cpu_sibling_mask(cpu));
	cpumask_clear(cpu_core_mask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
}

static void __ref remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	clear_local_APIC();

	cpu_disable_common();
	return 0;
}

void native_cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead() by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	pr_err("CPU %u didn't die...\n", cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	amd_e400_remove_cpu(raw_smp_processor_id());

	mb();
	/* Ack it */
	__this_cpu_write(cpu_state, CPU_DEAD);

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

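/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */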
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors.  The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series.  It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		clflush(mwait_ptr);
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

static inline void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on MWAIT-incapable CPUs */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif /* CONFIG_HOTPLUG_CPU */