1
2#define USE_EARLY_PGTABLE_L5
3
4#include <linux/memblock.h>
5#include <linux/linkage.h>
6#include <linux/bitops.h>
7#include <linux/kernel.h>
8#include <linux/export.h>
9#include <linux/percpu.h>
10#include <linux/string.h>
11#include <linux/ctype.h>
12#include <linux/delay.h>
13#include <linux/sched/mm.h>
14#include <linux/sched/clock.h>
15#include <linux/sched/task.h>
16#include <linux/init.h>
17#include <linux/kprobes.h>
18#include <linux/kgdb.h>
19#include <linux/smp.h>
20#include <linux/io.h>
21#include <linux/syscore_ops.h>
22
23#include <asm/stackprotector.h>
24#include <asm/perf_event.h>
25#include <asm/mmu_context.h>
26#include <asm/archrandom.h>
27#include <asm/hypervisor.h>
28#include <asm/processor.h>
29#include <asm/tlbflush.h>
30#include <asm/debugreg.h>
31#include <asm/sections.h>
32#include <asm/vsyscall.h>
33#include <linux/topology.h>
34#include <linux/cpumask.h>
35#include <asm/pgtable.h>
36#include <linux/atomic.h>
37#include <asm/proto.h>
38#include <asm/setup.h>
39#include <asm/apic.h>
40#include <asm/desc.h>
41#include <asm/fpu/internal.h>
42#include <asm/mtrr.h>
43#include <asm/hwcap2.h>
44#include <linux/numa.h>
45#include <asm/numa.h>
46#include <asm/asm.h>
47#include <asm/bugs.h>
48#include <asm/cpu.h>
49#include <asm/mce.h>
50#include <asm/msr.h>
51#include <asm/memtype.h>
52#include <asm/microcode.h>
53#include <asm/microcode_intel.h>
54#include <asm/intel-family.h>
55#include <asm/cpu_device_id.h>
56#include <asm/spec_ctrl.h>
57
58#include <asm/uv/uv.h>
59
60#include "cpu.h"
61
62u32 elf_hwcap2 __read_mostly;
63
64
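/* All of these masks are allocated in setup_cpu_local_masks(). */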
65cpumask_var_t cpu_initialized_mask;
66cpumask_var_t cpu_callout_mask;
67cpumask_var_t cpu_callin_mask;
68
69
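/* Mask of CPUs for which sibling maps can be computed. */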
70cpumask_var_t cpu_sibling_setup_mask;
71
72
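/* Number of sibling threads per physical CPU package. */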
73int smp_num_siblings = 1;
74EXPORT_SYMBOL(smp_num_siblings);
75
76
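/* Last level cache ID of each logical CPU. */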
77DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
78
79
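/* Allocate the CPU bringup masks declared above. */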
80void __init setup_cpu_local_masks(void)
81{
82 alloc_bootmem_cpumask_var(&cpu_initialized_mask);
83 alloc_bootmem_cpumask_var(&cpu_callin_mask);
84 alloc_bootmem_cpumask_var(&cpu_callout_mask);
85 alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
86}
87
88static void default_init(struct cpuinfo_x86 *c)
89{
90#ifdef CONFIG_X86_64
91 cpu_detect_cache_sizes(c);
92#else
93
94
95 if (c->cpuid_level == -1) {
96
97 if (c->x86 == 4)
98 strcpy(c->x86_model_id, "486");
99 else if (c->x86 == 3)
100 strcpy(c->x86_model_id, "386");
101 }
102#endif
103}
104
105static const struct cpu_dev default_cpu = {
106 .c_init = default_init,
107 .c_vendor = "Unknown",
108 .c_x86_vendor = X86_VENDOR_UNKNOWN,
109};
110
111static const struct cpu_dev *this_cpu = &default_cpu;
112
113DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
114#ifdef CONFIG_X86_64
122
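	/*
	 * Even in long mode the kernel needs valid code and data segment
	 * descriptors: IRET checks segment types, and the SYSCALL/SYSRET
	 * MSR_STAR layout fixes the relative order of the user entries
	 * below.  Keep flat 4GB descriptors for all of them.
	 */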
123 [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
124 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
125 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
126 [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
127 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
128 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
129#else
130 [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
131 [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
132 [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
133 [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
139
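	/*
	 * Segments used for calling the PnP BIOS: byte-granular 64k code
	 * and data segments, plus two transfer segments whose limits are
	 * set at run time.
	 */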
140 [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
141
142 [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
143
144 [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
145
146 [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
147
148 [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
153
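	/*
	 * APM BIOS segments: a 32-bit code, a 16-bit code and a data
	 * segment, each with a 64k byte-granular limit.
	 */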
154 [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
155
156 [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
157
158 [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
159
160 [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
161 [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
162 GDT_STACK_CANARY_INIT
163#endif
164} };
165EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
166
167static int __init x86_mpx_setup(char *s)
168{
169
170 if (strlen(s))
171 return 0;
172
173
174 if (!boot_cpu_has(X86_FEATURE_MPX))
175 return 1;
176
177 setup_clear_cpu_cap(X86_FEATURE_MPX);
178 pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
179 return 1;
180}
181__setup("nompx", x86_mpx_setup);
182
183#ifdef CONFIG_X86_64
184static int __init x86_nopcid_setup(char *s)
185{
186
187 if (s)
188 return -EINVAL;
189
190
191 if (!boot_cpu_has(X86_FEATURE_PCID))
192 return 0;
193
194 setup_clear_cpu_cap(X86_FEATURE_PCID);
195 pr_info("nopcid: PCID feature disabled\n");
196 return 0;
197}
198early_param("nopcid", x86_nopcid_setup);
199#endif
200
201static int __init x86_noinvpcid_setup(char *s)
202{
203
204 if (s)
205 return -EINVAL;
206
207
208 if (!boot_cpu_has(X86_FEATURE_INVPCID))
209 return 0;
210
211 setup_clear_cpu_cap(X86_FEATURE_INVPCID);
212 pr_info("noinvpcid: INVPCID feature disabled\n");
213 return 0;
214}
215early_param("noinvpcid", x86_noinvpcid_setup);
216
217#ifdef CONFIG_X86_32
218static int cachesize_override = -1;
219static int disable_x86_serial_nr = 1;
220
221static int __init cachesize_setup(char *str)
222{
223 get_option(&str, &cachesize_override);
224 return 1;
225}
226__setup("cachesize=", cachesize_setup);
227
228static int __init x86_sep_setup(char *s)
229{
230 setup_clear_cpu_cap(X86_FEATURE_SEP);
231 return 1;
232}
233__setup("nosep", x86_sep_setup);
234
235
236static inline int flag_is_changeable_p(u32 flag)
237{
238 u32 f1, f2;
246
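	/*
	 * Flip the ID bit in EFLAGS and check whether the change sticks;
	 * CPUs that lack CPUID cannot toggle it.
	 */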
247 asm volatile ("pushfl \n\t"
248 "pushfl \n\t"
249 "popl %0 \n\t"
250 "movl %0, %1 \n\t"
251 "xorl %2, %0 \n\t"
252 "pushl %0 \n\t"
253 "popfl \n\t"
254 "pushfl \n\t"
255 "popl %0 \n\t"
256 "popfl \n\t"
257
258 : "=&r" (f1), "=&r" (f2)
259 : "ir" (flag));
260
261 return ((f1^f2) & flag) != 0;
262}
263
264
265int have_cpuid_p(void)
266{
267 return flag_is_changeable_p(X86_EFLAGS_ID);
268}
269
270static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
271{
272 unsigned long lo, hi;
273
274 if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
275 return;
276
277
278
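	/* Setting bit 21 in MSR_IA32_BBL_CR_CTL disables the serial number. */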
279 rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
280 lo |= 0x200000;
281 wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
282
283 pr_notice("CPU serial number disabled.\n");
284 clear_cpu_cap(c, X86_FEATURE_PN);
285
286
287 c->cpuid_level = cpuid_eax(0);
288}
289
290static int __init x86_serial_nr_setup(char *s)
291{
292 disable_x86_serial_nr = 0;
293 return 1;
294}
295__setup("serialnumber", x86_serial_nr_setup);
296#else
297static inline int flag_is_changeable_p(u32 flag)
298{
299 return 1;
300}
301static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
302{
303}
304#endif
305
306static __init int setup_disable_smep(char *arg)
307{
308 setup_clear_cpu_cap(X86_FEATURE_SMEP);
309
310 check_mpx_erratum(&boot_cpu_data);
311 return 1;
312}
313__setup("nosmep", setup_disable_smep);
314
315static __always_inline void setup_smep(struct cpuinfo_x86 *c)
316{
317 if (cpu_has(c, X86_FEATURE_SMEP))
318 cr4_set_bits(X86_CR4_SMEP);
319}
320
321static __init int setup_disable_smap(char *arg)
322{
323 setup_clear_cpu_cap(X86_FEATURE_SMAP);
324 return 1;
325}
326__setup("nosmap", setup_disable_smap);
327
328static __always_inline void setup_smap(struct cpuinfo_x86 *c)
329{
330 unsigned long eflags = native_save_fl();
331
332
333 BUG_ON(eflags & X86_EFLAGS_AC);
334
335 if (cpu_has(c, X86_FEATURE_SMAP)) {
336#ifdef CONFIG_X86_SMAP
337 cr4_set_bits(X86_CR4_SMAP);
338#else
339 cr4_clear_bits(X86_CR4_SMAP);
340#endif
341 }
342}
343
344static __always_inline void setup_umip(struct cpuinfo_x86 *c)
345{
346
347 if (!cpu_feature_enabled(X86_FEATURE_UMIP))
348 goto out;
349
350
351 if (!cpu_has(c, X86_FEATURE_UMIP))
352 goto out;
353
354 cr4_set_bits(X86_CR4_UMIP);
355
356 pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
357
358 return;
359
360out:
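	/*
	 * Make sure UMIP is disabled in case it was enabled in a previous
	 * boot (e.g. via kexec).
	 */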
365 cr4_clear_bits(X86_CR4_UMIP);
366}
367
368static __init int x86_nofsgsbase_setup(char *arg)
369{
370
371 if (strlen(arg))
372 return 0;
373
374
375 if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
376 return 1;
377
378 setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
379 pr_info("FSGSBASE disabled via kernel command line\n");
380 return 1;
381}
382__setup("nofsgsbase", x86_nofsgsbase_setup);
383
384
385
386
387static bool pku_disabled;
388
389static __always_inline void setup_pku(struct cpuinfo_x86 *c)
390{
391 struct pkru_state *pk;
392
393
394 if (!cpu_feature_enabled(X86_FEATURE_PKU))
395 return;
396
397 if (!cpu_has(c, X86_FEATURE_PKU))
398 return;
399 if (pku_disabled)
400 return;
401
402 cr4_set_bits(X86_CR4_PKE);
403 pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
404 if (pk)
405 pk->pkru = init_pkru_value;
406
407
408
409
410
411 get_cpu_cap(c);
412}
413
414#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
415static __init int setup_disable_pku(char *arg)
416{
427
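	/*
	 * Note that the X86_FEATURE_PKU bit is deliberately left set here:
	 * setup_pku() checks pku_disabled instead, so the CPU still reports
	 * PKU while the kernel never enables CR4.PKE.
	 */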
428 pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
429 pku_disabled = true;
430 return 1;
431}
432__setup("nopku", setup_disable_pku);
433#endif
434
435
436
437
438
439
440struct cpuid_dependent_feature {
441 u32 feature;
442 u32 level;
443};
444
445static const struct cpuid_dependent_feature
446cpuid_dependent_features[] = {
447 { X86_FEATURE_MWAIT, 0x00000005 },
448 { X86_FEATURE_DCA, 0x00000009 },
449 { X86_FEATURE_XSAVE, 0x0000000d },
450 { 0, 0 }
451};
452
453static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
454{
455 const struct cpuid_dependent_feature *df;
456
457 for (df = cpuid_dependent_features; df->feature; df++) {
458
459 if (!cpu_has(c, df->feature))
460 continue;
461
462
463
464
465
466
467
468 if (!((s32)df->level < 0 ?
469 (u32)df->level > (u32)c->extended_cpuid_level :
470 (s32)df->level > (s32)c->cpuid_level))
471 continue;
472
473 clear_cpu_cap(c, df->feature);
474 if (!warn)
475 continue;
476
477 pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
478 x86_cap_flag(df->feature), df->level);
479 }
480}
481
482
483
484
485
486
487
488
489
490static const char *table_lookup_model(struct cpuinfo_x86 *c)
491{
492#ifdef CONFIG_X86_32
493 const struct legacy_cpu_model_info *info;
494
495 if (c->x86_model >= 16)
496 return NULL;
497
498 if (!this_cpu)
499 return NULL;
500
501 info = this_cpu->legacy_models;
502
503 while (info->family) {
504 if (info->family == c->x86)
505 return info->model_names[c->x86_model];
506 info++;
507 }
508#endif
509 return NULL;
510}
511
512
513__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
514__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
515
516void load_percpu_segment(int cpu)
517{
518#ifdef CONFIG_X86_32
519 loadsegment(fs, __KERNEL_PERCPU);
520#else
521 __loadsegment_simple(gs, 0);
522 wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
523#endif
524 load_stack_canary_segment();
525}
526
527#ifdef CONFIG_X86_32
528
529DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
530#endif
531
532
533void load_direct_gdt(int cpu)
534{
535 struct desc_ptr gdt_descr;
536
537 gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
538 gdt_descr.size = GDT_SIZE - 1;
539 load_gdt(&gdt_descr);
540}
541EXPORT_SYMBOL_GPL(load_direct_gdt);
542
543
544void load_fixmap_gdt(int cpu)
545{
546 struct desc_ptr gdt_descr;
547
548 gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
549 gdt_descr.size = GDT_SIZE - 1;
550 load_gdt(&gdt_descr);
551}
552EXPORT_SYMBOL_GPL(load_fixmap_gdt);
553
554
555
556
557
558void switch_to_new_gdt(int cpu)
559{
560
561 load_direct_gdt(cpu);
562
563 load_percpu_segment(cpu);
564}
565
566static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
567
568static void get_model_name(struct cpuinfo_x86 *c)
569{
570 unsigned int *v;
571 char *p, *q, *s;
572
573 if (c->extended_cpuid_level < 0x80000004)
574 return;
575
576 v = (unsigned int *)c->x86_model_id;
577 cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
578 cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
579 cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
580 c->x86_model_id[48] = 0;
581
582
583 p = q = s = &c->x86_model_id[0];
584
585 while (*p == ' ')
586 p++;
587
588 while (*p) {
589
590 if (!isspace(*p))
591 s = q;
592
593 *q++ = *p++;
594 }
595
596 *(s + 1) = '\0';
597}
598
599void detect_num_cpu_cores(struct cpuinfo_x86 *c)
600{
601 unsigned int eax, ebx, ecx, edx;
602
603 c->x86_max_cores = 1;
604 if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
605 return;
606
607 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
608 if (eax & 0x1f)
609 c->x86_max_cores = (eax >> 26) + 1;
610}
611
612void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
613{
614 unsigned int n, dummy, ebx, ecx, edx, l2size;
615
616 n = c->extended_cpuid_level;
617
618 if (n >= 0x80000005) {
619 cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
620 c->x86_cache_size = (ecx>>24) + (edx>>24);
621#ifdef CONFIG_X86_64
622
623 c->x86_tlbsize = 0;
624#endif
625 }
626
627 if (n < 0x80000006)
628 return;
629
630 cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
631 l2size = ecx >> 16;
632
633#ifdef CONFIG_X86_64
634 c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
635#else
636
637 if (this_cpu->legacy_cache_size)
638 l2size = this_cpu->legacy_cache_size(c, l2size);
639
640
641 if (cachesize_override != -1)
642 l2size = cachesize_override;
643
644 if (l2size == 0)
645 return;
646#endif
647
648 c->x86_cache_size = l2size;
649}
650
651u16 __read_mostly tlb_lli_4k[NR_INFO];
652u16 __read_mostly tlb_lli_2m[NR_INFO];
653u16 __read_mostly tlb_lli_4m[NR_INFO];
654u16 __read_mostly tlb_lld_4k[NR_INFO];
655u16 __read_mostly tlb_lld_2m[NR_INFO];
656u16 __read_mostly tlb_lld_4m[NR_INFO];
657u16 __read_mostly tlb_lld_1g[NR_INFO];
658
659static void cpu_detect_tlb(struct cpuinfo_x86 *c)
660{
661 if (this_cpu->c_detect_tlb)
662 this_cpu->c_detect_tlb(c);
663
664 pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
665 tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
666 tlb_lli_4m[ENTRIES]);
667
668 pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
669 tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
670 tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
671}
672
673int detect_ht_early(struct cpuinfo_x86 *c)
674{
675#ifdef CONFIG_SMP
676 u32 eax, ebx, ecx, edx;
677
678 if (!cpu_has(c, X86_FEATURE_HT))
679 return -1;
680
681 if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
682 return -1;
683
684 if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
685 return -1;
686
687 cpuid(1, &eax, &ebx, &ecx, &edx);
688
689 smp_num_siblings = (ebx & 0xff0000) >> 16;
690 if (smp_num_siblings == 1)
691 pr_info_once("CPU0: Hyper-Threading is disabled\n");
692#endif
693 return 0;
694}
695
696void detect_ht(struct cpuinfo_x86 *c)
697{
698#ifdef CONFIG_SMP
699 int index_msb, core_bits;
700
701 if (detect_ht_early(c) < 0)
702 return;
703
704 index_msb = get_count_order(smp_num_siblings);
705 c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
706
707 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
708
709 index_msb = get_count_order(smp_num_siblings);
710
711 core_bits = get_count_order(c->x86_max_cores);
712
713 c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
714 ((1 << core_bits) - 1);
715#endif
716}
717
718static void get_cpu_vendor(struct cpuinfo_x86 *c)
719{
720 char *v = c->x86_vendor_id;
721 int i;
722
723 for (i = 0; i < X86_VENDOR_NUM; i++) {
724 if (!cpu_devs[i])
725 break;
726
727 if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
728 (cpu_devs[i]->c_ident[1] &&
729 !strcmp(v, cpu_devs[i]->c_ident[1]))) {
730
731 this_cpu = cpu_devs[i];
732 c->x86_vendor = this_cpu->c_x86_vendor;
733 return;
734 }
735 }
736
737 pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
738 "CPU: Your system may be unstable.\n", v);
739
740 c->x86_vendor = X86_VENDOR_UNKNOWN;
741 this_cpu = &default_cpu;
742}
743
744void cpu_detect(struct cpuinfo_x86 *c)
745{
746
747 cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
748 (unsigned int *)&c->x86_vendor_id[0],
749 (unsigned int *)&c->x86_vendor_id[8],
750 (unsigned int *)&c->x86_vendor_id[4]);
751
752 c->x86 = 4;
753
754 if (c->cpuid_level >= 0x00000001) {
755 u32 junk, tfms, cap0, misc;
756
757 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
758 c->x86 = x86_family(tfms);
759 c->x86_model = x86_model(tfms);
760 c->x86_stepping = x86_stepping(tfms);
761
762 if (cap0 & (1<<19)) {
763 c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
764 c->x86_cache_alignment = c->x86_clflush_size;
765 }
766 }
767}
768
769static void apply_forced_caps(struct cpuinfo_x86 *c)
770{
771 int i;
772
773 for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
774 c->x86_capability[i] &= ~cpu_caps_cleared[i];
775 c->x86_capability[i] |= cpu_caps_set[i];
776 }
777}
778
779static void init_speculation_control(struct cpuinfo_x86 *c)
780{
786
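	/*
	 * Map the vendor-specific speculation-control CPUID bits onto the
	 * common synthetic flags: Intel's SPEC_CTRL bit implies IBRS and
	 * IBPB, while AMD enumerates IBRS, IBPB, STIBP and SSBD separately.
	 */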
787 if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
788 set_cpu_cap(c, X86_FEATURE_IBRS);
789 set_cpu_cap(c, X86_FEATURE_IBPB);
790 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
791 }
792
793 if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
794 set_cpu_cap(c, X86_FEATURE_STIBP);
795
796 if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
797 cpu_has(c, X86_FEATURE_VIRT_SSBD))
798 set_cpu_cap(c, X86_FEATURE_SSBD);
799
800 if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
801 set_cpu_cap(c, X86_FEATURE_IBRS);
802 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
803 }
804
805 if (cpu_has(c, X86_FEATURE_AMD_IBPB))
806 set_cpu_cap(c, X86_FEATURE_IBPB);
807
808 if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
809 set_cpu_cap(c, X86_FEATURE_STIBP);
810 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
811 }
812
813 if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
814 set_cpu_cap(c, X86_FEATURE_SSBD);
815 set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
816 clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
817 }
818}
819
820void get_cpu_cap(struct cpuinfo_x86 *c)
821{
822 u32 eax, ebx, ecx, edx;
823
824
825 if (c->cpuid_level >= 0x00000001) {
826 cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
827
828 c->x86_capability[CPUID_1_ECX] = ecx;
829 c->x86_capability[CPUID_1_EDX] = edx;
830 }
831
832
833 if (c->cpuid_level >= 0x00000006)
834 c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
835
836
837 if (c->cpuid_level >= 0x00000007) {
838 cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
839 c->x86_capability[CPUID_7_0_EBX] = ebx;
840 c->x86_capability[CPUID_7_ECX] = ecx;
841 c->x86_capability[CPUID_7_EDX] = edx;
842
843
844 if (eax >= 1) {
845 cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
846 c->x86_capability[CPUID_7_1_EAX] = eax;
847 }
848 }
849
850
851 if (c->cpuid_level >= 0x0000000d) {
852 cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
853
854 c->x86_capability[CPUID_D_1_EAX] = eax;
855 }
856
857
858 eax = cpuid_eax(0x80000000);
859 c->extended_cpuid_level = eax;
860
861 if ((eax & 0xffff0000) == 0x80000000) {
862 if (eax >= 0x80000001) {
863 cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
864
865 c->x86_capability[CPUID_8000_0001_ECX] = ecx;
866 c->x86_capability[CPUID_8000_0001_EDX] = edx;
867 }
868 }
869
870 if (c->extended_cpuid_level >= 0x80000007) {
871 cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
872
873 c->x86_capability[CPUID_8000_0007_EBX] = ebx;
874 c->x86_power = edx;
875 }
876
877 if (c->extended_cpuid_level >= 0x80000008) {
878 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
879 c->x86_capability[CPUID_8000_0008_EBX] = ebx;
880 }
881
882 if (c->extended_cpuid_level >= 0x8000000a)
883 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
884
885 if (c->extended_cpuid_level >= 0x8000001f)
886 c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);
887
888 init_scattered_cpuid_features(c);
889 init_speculation_control(c);
890
891
892
893
894
895
896 apply_forced_caps(c);
897}
898
899static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
900{
901 u32 eax, ebx, ecx, edx;
902
903 if (c->extended_cpuid_level >= 0x80000008) {
904 cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
905
906 c->x86_virt_bits = (eax >> 8) & 0xff;
907 c->x86_phys_bits = eax & 0xff;
908 }
909#ifdef CONFIG_X86_32
910 else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
911 c->x86_phys_bits = 36;
912#endif
913 c->x86_cache_bits = c->x86_phys_bits;
914}
915
916static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
917{
918#ifdef CONFIG_X86_32
919 int i;
920
921
922
923
924
925 if (flag_is_changeable_p(X86_EFLAGS_AC))
926 c->x86 = 4;
927 else
928 c->x86 = 3;
929
930 for (i = 0; i < X86_VENDOR_NUM; i++)
931 if (cpu_devs[i] && cpu_devs[i]->c_identify) {
932 c->x86_vendor_id[0] = 0;
933 cpu_devs[i]->c_identify(c);
934 if (c->x86_vendor_id[0]) {
935 get_cpu_vendor(c);
936 break;
937 }
938 }
939#endif
940}
941
942#define NO_SPECULATION BIT(0)
943#define NO_MELTDOWN BIT(1)
944#define NO_SSB BIT(2)
945#define NO_L1TF BIT(3)
946#define NO_MDS BIT(4)
947#define MSBDS_ONLY BIT(5)
948#define NO_SWAPGS BIT(6)
949#define NO_ITLB_MULTIHIT BIT(7)
950
951#define VULNWL(vendor, family, model, whitelist) \
952 X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
953
954#define VULNWL_INTEL(model, whitelist) \
955 VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
956
957#define VULNWL_AMD(family, whitelist) \
958 VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
959
960static const __initconst struct x86_cpu_id_v2 cpu_vuln_whitelist[] = {
961 VULNWL(ANY, 4, X86_MODEL_ANY, NO_SPECULATION),
962 VULNWL(CENTAUR, 5, X86_MODEL_ANY, NO_SPECULATION),
963 VULNWL(INTEL, 5, X86_MODEL_ANY, NO_SPECULATION),
964 VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
965
966
967 VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
968 VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
969 VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
970 VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
971 VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
972
973 VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
974 VULNWL_INTEL(ATOM_SILVERMONT_D, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
975 VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
976 VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
977 VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
978 VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
979
980 VULNWL_INTEL(CORE_YONAH, NO_SSB),
981
982 VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
983
984 VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
985 VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
986 VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
987
988
989
990
991
992
993
994
995
996 VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT),
997
998
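	/* AMD Family 0xf - 0x12 */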
999 VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1000 VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1001 VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1002 VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1003
1004
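	/* FAMILY_ANY must come last, otherwise it would shadow the 0x0f - 0x12 entries above. */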
1005 VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
1006 {}
1007};
1008
1009#define VULNBL_INTEL_STEPPINGS(model, steppings, issues) \
1010 X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, \
1011 INTEL_FAM6_##model, steppings, \
1012 X86_FEATURE_ANY, issues)
1013
1014#define SRBDS BIT(0)
1015
1016static const struct x86_cpu_id_v2 cpu_vuln_blacklist[] __initconst = {
1017 VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
1018 VULNBL_INTEL_STEPPINGS(HASWELL, X86_STEPPING_ANY, SRBDS),
1019 VULNBL_INTEL_STEPPINGS(HASWELL_L, X86_STEPPING_ANY, SRBDS),
1020 VULNBL_INTEL_STEPPINGS(HASWELL_G, X86_STEPPING_ANY, SRBDS),
1021 VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
1022 VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
1023 VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS),
1024 VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS),
1025 VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xC), SRBDS),
1026 VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xD), SRBDS),
1027 {}
1028};
1029
1030static bool __init cpu_matches(const struct x86_cpu_id_v2 *table, unsigned long which)
1031{
1032 const struct x86_cpu_id_v2 *m = x86_match_cpu_v2(table);
1033
1034 return m && !!(m->driver_data & which);
1035}
1036
1037u64 x86_read_arch_cap_msr(void)
1038{
1039 u64 ia32_cap = 0;
1040
1041 if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
1042 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
1043
1044 return ia32_cap;
1045}
1046
1047static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
1048{
1049 u64 ia32_cap = x86_read_arch_cap_msr();
1050
1051
1052 if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
1053 !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
1054 setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
1055
1056 if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
1057 return;
1058
1059 setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
1060 setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
1061
1062 if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
1063 !(ia32_cap & ARCH_CAP_SSB_NO) &&
1064 !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
1065 setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
1066
1067 if (ia32_cap & ARCH_CAP_IBRS_ALL)
1068 setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
1069
1070 if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
1071 !(ia32_cap & ARCH_CAP_MDS_NO)) {
1072 setup_force_cpu_bug(X86_BUG_MDS);
1073 if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
1074 setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
1075 }
1076
1077 if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
1078 setup_force_cpu_bug(X86_BUG_SWAPGS);
1089
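	/*
	 * Mark the CPU as affected by TAA when it is not enumerated as
	 * mitigated (ARCH_CAP_TAA_NO clear) and it either supports RTM or
	 * exposes the TSX_CTRL MSR (TSX may merely be disabled right now).
	 */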
1090 if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
1091 (cpu_has(c, X86_FEATURE_RTM) ||
1092 (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
1093 setup_force_cpu_bug(X86_BUG_TAA);
1098
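	/*
	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
	 * in the blacklist above.
	 */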
1099 if ((cpu_has(c, X86_FEATURE_RDRAND) ||
1100 cpu_has(c, X86_FEATURE_RDSEED)) &&
1101 cpu_matches(cpu_vuln_blacklist, SRBDS))
1102 setup_force_cpu_bug(X86_BUG_SRBDS);
1103
1104 if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
1105 return;
1106
1107
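	/* CPUs that enumerate RDCL_NO are not affected by Meltdown or L1TF. */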
1108 if (ia32_cap & ARCH_CAP_RDCL_NO)
1109 return;
1110
1111 setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
1112
1113 if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
1114 return;
1115
1116 setup_force_cpu_bug(X86_BUG_L1TF);
1117}
1127
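/*
 * NOPL cannot be detected reliably on 32-bit (early third-party chips and
 * broken virtualizers get it wrong), so assume it is absent there and
 * present on 64-bit.
 */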
1128static void detect_nopl(struct cpuinfo_x86 *c)
1129{
1130#ifdef CONFIG_X86_32
1131 clear_cpu_cap(c, X86_FEATURE_NOPL);
1132#else
1133 set_cpu_cap(c, X86_FEATURE_NOPL);
1134#endif
1135}
1145
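/*
 * Do the minimum CPU detection early: vendor, cpuid level, family, model,
 * stepping and cache alignment.  This runs on the boot CPU only.
 */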
1146static void __init early_identify_cpu(struct cpuinfo_x86 *c)
1147{
1148#ifdef CONFIG_X86_64
1149 c->x86_clflush_size = 64;
1150 c->x86_phys_bits = 36;
1151 c->x86_virt_bits = 48;
1152#else
1153 c->x86_clflush_size = 32;
1154 c->x86_phys_bits = 32;
1155 c->x86_virt_bits = 32;
1156#endif
1157 c->x86_cache_alignment = c->x86_clflush_size;
1158
1159 memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1160 c->extended_cpuid_level = 0;
1161
1162
1163 if (have_cpuid_p()) {
1164 cpu_detect(c);
1165 get_cpu_vendor(c);
1166 get_cpu_cap(c);
1167 get_model_name(c);
1168 get_cpu_address_sizes(c);
1169 setup_force_cpu_cap(X86_FEATURE_CPUID);
1170
1171 if (this_cpu->c_early_init)
1172 this_cpu->c_early_init(c);
1173
1174 c->cpu_index = 0;
1175 filter_cpuid_features(c, false);
1176
1177 if (this_cpu->c_bsp_init)
1178 this_cpu->c_bsp_init(c);
1179 } else {
1180 identify_cpu_without_cpuid(c);
1181 setup_clear_cpu_cap(X86_FEATURE_CPUID);
1182 }
1183
1184 setup_force_cpu_cap(X86_FEATURE_ALWAYS);
1185
1186 cpu_set_bug_bits(c);
1187
1188 sld_setup(c);
1189
1190 fpu__init_system(c);
1191
1192#ifdef CONFIG_X86_32
1196
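	/* PCID cannot be enabled in 32-bit mode even if the CPU enumerates it. */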
1197 setup_clear_cpu_cap(X86_FEATURE_PCID);
1198#endif
1211
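	/*
	 * pgtable_l5_enabled() is queried later via
	 * cpu_feature_enabled(X86_FEATURE_LA57), so clear the feature bit
	 * now if 5-level paging is not actually in use.
	 */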
1212 if (!pgtable_l5_enabled())
1213 setup_clear_cpu_cap(X86_FEATURE_LA57);
1214
1215 detect_nopl(c);
1216}
1217
1218void __init early_cpu_init(void)
1219{
1220 const struct cpu_dev *const *cdev;
1221 int count = 0;
1222
1223#ifdef CONFIG_PROCESSOR_SELECT
1224 pr_info("KERNEL supported cpus:\n");
1225#endif
1226
1227 for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
1228 const struct cpu_dev *cpudev = *cdev;
1229
1230 if (count >= X86_VENDOR_NUM)
1231 break;
1232 cpu_devs[count] = cpudev;
1233 count++;
1234
1235#ifdef CONFIG_PROCESSOR_SELECT
1236 {
1237 unsigned int j;
1238
1239 for (j = 0; j < 2; j++) {
1240 if (!cpudev->c_ident[j])
1241 continue;
1242 pr_info(" %s %s\n", cpudev->c_vendor,
1243 cpudev->c_ident[j]);
1244 }
1245 }
1246#endif
1247 }
1248 early_identify_cpu(&boot_cpu_data);
1249}
1250
1251static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
1252{
1253#ifdef CONFIG_X86_64
1268
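	/*
	 * Detect whether loading a NULL %fs selector clears the FS base
	 * (Intel) or leaves it intact (AMD) by writing a non-zero base,
	 * loading a NULL selector and reading the base back.
	 */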
1269 unsigned long old_base, tmp;
1270 rdmsrl(MSR_FS_BASE, old_base);
1271 wrmsrl(MSR_FS_BASE, 1);
1272 loadsegment(fs, 0);
1273 rdmsrl(MSR_FS_BASE, tmp);
1274 if (tmp != 0)
1275 set_cpu_bug(c, X86_BUG_NULL_SEG);
1276 wrmsrl(MSR_FS_BASE, old_base);
1277#endif
1278}
1279
1280static void generic_identify(struct cpuinfo_x86 *c)
1281{
1282 c->extended_cpuid_level = 0;
1283
1284 if (!have_cpuid_p())
1285 identify_cpu_without_cpuid(c);
1286
1287
1288 if (!have_cpuid_p())
1289 return;
1290
1291 cpu_detect(c);
1292
1293 get_cpu_vendor(c);
1294
1295 get_cpu_cap(c);
1296
1297 get_cpu_address_sizes(c);
1298
1299 if (c->cpuid_level >= 0x00000001) {
1300 c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
1301#ifdef CONFIG_X86_32
1302# ifdef CONFIG_SMP
1303 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1304# else
1305 c->apicid = c->initial_apicid;
1306# endif
1307#endif
1308 c->phys_proc_id = c->initial_apicid;
1309 }
1310
1311 get_model_name(c);
1312
1313 detect_nopl(c);
1314
1315 detect_null_seg_behavior(c);
1329
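	/*
	 * All real 32-bit CPUs have the ESPFIX issue; on paravirt kernels it
	 * only matters when the native IRET path is in use, which is what
	 * the check below tests for.
	 */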
1330#ifdef CONFIG_X86_32
1331# ifdef CONFIG_PARAVIRT
1332 do {
1333 extern void native_iret(void);
1334 if (pv_cpu_ops.iret == native_iret)
1335 set_cpu_bug(c, X86_BUG_ESPFIX);
1336 } while (0);
1337# else
1338 set_cpu_bug(c, X86_BUG_ESPFIX);
1339# endif
1340#endif
1341}
1342
1343
1344
1345
1346
1347static void validate_apic_and_package_id(struct cpuinfo_x86 *c)
1348{
1349#ifdef CONFIG_SMP
1350 unsigned int apicid, cpu = smp_processor_id();
1351
1352 apicid = apic->cpu_present_to_apicid(cpu);
1353
1354 if (apicid != c->apicid) {
		pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x APIC: %x\n",
		       cpu, apicid, c->apicid);
1357 }
1358 BUG_ON(topology_update_package_map(c->phys_proc_id, cpu));
1359 BUG_ON(topology_update_die_map(c->_rh.cpu_die_id, cpu));
1360#else
1361 c->logical_proc_id = 0;
1362#endif
1363}
1364
1365
1366
1367
1368static void identify_cpu(struct cpuinfo_x86 *c)
1369{
1370 int i;
1371
1372 c->loops_per_jiffy = loops_per_jiffy;
1373 c->x86_cache_size = 0;
1374 c->x86_vendor = X86_VENDOR_UNKNOWN;
1375 c->x86_model = c->x86_stepping = 0;
1376 c->x86_vendor_id[0] = '\0';
1377 c->x86_model_id[0] = '\0';
1378 c->x86_max_cores = 1;
1379 c->x86_coreid_bits = 0;
1380 c->cu_id = 0xff;
1381#ifdef CONFIG_X86_64
1382 c->x86_clflush_size = 64;
1383 c->x86_phys_bits = 36;
1384 c->x86_virt_bits = 48;
1385#else
1386 c->cpuid_level = -1;
1387 c->x86_clflush_size = 32;
1388 c->x86_phys_bits = 32;
1389 c->x86_virt_bits = 32;
1390#endif
1391 c->x86_cache_alignment = c->x86_clflush_size;
1392 memset(&c->x86_capability, 0, sizeof(c->x86_capability));
1393#ifdef CONFIG_X86_VMX_FEATURE_NAMES
1394 memset(&c->_rh.vmx_capability, 0, sizeof(c->_rh.vmx_capability));
1395#endif
1396
1397 generic_identify(c);
1398
1399 if (this_cpu->c_identify)
1400 this_cpu->c_identify(c);
1401
1402
1403 apply_forced_caps(c);
1404
1405#ifdef CONFIG_X86_64
1406 c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
1407#endif
1418
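	/*
	 * Vendor-specific initialization: fix up feature flags that the
	 * generic CPUID pass missed or got wrong, so that c->x86_capability
	 * reflects what this CPU really supports.
	 */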
1419 if (this_cpu->c_init)
1420 this_cpu->c_init(c);
1421
1422
1423 squash_the_stupid_serial_number(c);
1424
1425
1426 setup_smep(c);
1427 setup_smap(c);
1428 setup_umip(c);
1429
1430
1431 if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
1432 cr4_set_bits(X86_CR4_FSGSBASE);
1433 elf_hwcap2 |= HWCAP2_FSGSBASE;
1434 }
1435
1436
1437
1438
1439
1440
1441
1442 filter_cpuid_features(c, true);
1443
1444
1445 if (!c->x86_model_id[0]) {
1446 const char *p;
1447 p = table_lookup_model(c);
1448 if (p)
1449 strcpy(c->x86_model_id, p);
1450 else
1451
1452 sprintf(c->x86_model_id, "%02x/%02x",
1453 c->x86, c->x86_model);
1454 }
1455
1456#ifdef CONFIG_X86_64
1457 detect_ht(c);
1458#endif
1459
1460 x86_init_rdrand(c);
1461 setup_pku(c);
1462
1463
1464
1465
1466
1467 apply_forced_caps(c);
1474
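	/*
	 * boot_cpu_data holds the feature set common to all CPUs: AND each
	 * secondary CPU's capabilities into it, and OR the boot CPU's bug
	 * bits back into the secondary.
	 */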
1475 if (c != &boot_cpu_data) {
1476
1477 for (i = 0; i < NCAPINTS; i++)
1478 boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
1479
1480
1481 for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
1482 c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
1483 }
1484
1485
1486 mcheck_cpu_init(c);
1487
1488 select_idle_routine(c);
1489
1490#ifdef CONFIG_NUMA
1491 numa_add_cpu(smp_processor_id());
1492#endif
1493}
1494
1495
1496
1497
1498
1499#ifdef CONFIG_X86_32
1500void enable_sep_cpu(void)
1501{
1502 struct tss_struct *tss;
1503 int cpu;
1504
1505 if (!boot_cpu_has(X86_FEATURE_SEP))
1506 return;
1507
1508 cpu = get_cpu();
1509 tss = &per_cpu(cpu_tss_rw, cpu);
1515
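	/* Cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field. */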
1516 tss->x86_tss.ss1 = __KERNEL_CS;
1517 wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
1518 wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
1519 wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
1520
1521 put_cpu();
1522}
1523#endif
1524
1525void __init identify_boot_cpu(void)
1526{
1527 identify_cpu(&boot_cpu_data);
1528#ifdef CONFIG_X86_32
1529 sysenter_setup();
1530 enable_sep_cpu();
1531#endif
1532 cpu_detect_tlb(&boot_cpu_data);
1533
1534 tsx_init();
1535}
1536
1537void identify_secondary_cpu(struct cpuinfo_x86 *c)
1538{
1539 BUG_ON(c == &boot_cpu_data);
1540 identify_cpu(c);
1541 spec_ctrl_cpu_init();
1542#ifdef CONFIG_X86_32
1543 enable_sep_cpu();
1544#endif
1545 mtrr_ap_init();
1546 validate_apic_and_package_id(c);
1547 x86_spec_ctrl_setup_ap();
1548 update_srbds_msr();
1549}
1550
1551static __init int setup_noclflush(char *arg)
1552{
1553 setup_clear_cpu_cap(X86_FEATURE_CLFLUSH);
1554 setup_clear_cpu_cap(X86_FEATURE_CLFLUSHOPT);
1555 return 1;
1556}
1557__setup("noclflush", setup_noclflush);
1558
1559void print_cpu_info(struct cpuinfo_x86 *c)
1560{
1561 const char *vendor = NULL;
1562
1563 if (c->x86_vendor < X86_VENDOR_NUM) {
1564 vendor = this_cpu->c_vendor;
1565 } else {
1566 if (c->cpuid_level >= 0)
1567 vendor = c->x86_vendor_id;
1568 }
1569
1570 if (vendor && !strstr(c->x86_model_id, vendor))
1571 pr_cont("%s ", vendor);
1572
1573 if (c->x86_model_id[0])
1574 pr_cont("%s", c->x86_model_id);
1575 else
1576 pr_cont("%d86", c->x86);
1577
1578 pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
1579
1580 if (c->x86_stepping || c->cpuid_level >= 0)
1581 pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
1582 else
1583 pr_cont(")\n");
1584}
1585
1586
1587
1588
1589
1590
1591static __init int setup_clearcpuid(char *arg)
1592{
1593 return 1;
1594}
1595__setup("clearcpuid=", setup_clearcpuid);
1596
1597#ifdef CONFIG_X86_64
1598DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
1599 fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
1600EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
1605
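/* Hot per-CPU variable: keep current_task cacheline-aligned. */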
1606DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
1607 &init_task;
1608EXPORT_PER_CPU_SYMBOL(current_task);
1609
1610DEFINE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
1611DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1612
1613DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1614EXPORT_PER_CPU_SYMBOL(__preempt_count);
1615
1616
1617void syscall_init(void)
1618{
1619 wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);
1620 wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);
1621
1622#ifdef CONFIG_IA32_EMULATION
1623 wrmsrl(MSR_CSTAR, (unsigned long)entry_SYSCALL_compat);
1629
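	/*
	 * SYSENTER MSRs for the 32-bit compat entry path.  SYSENTER is only
	 * usable from long mode on Intel CPUs; AMD does not support it there.
	 */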
1630 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
1631 wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
1632 (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
1633 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
1634#else
1635 wrmsrl(MSR_CSTAR, (unsigned long)ignore_sysret);
1636 wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
1637 wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
1638 wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
1639#endif
1640
1641
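	/* Flags to clear on syscall entry. */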
1642 wrmsrl(MSR_SYSCALL_MASK,
1643 X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
1644 X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
1645}
1646
1647DEFINE_PER_CPU(int, debug_stack_usage);
1648DEFINE_PER_CPU(u32, debug_idt_ctr);
1649
1650noinstr void debug_stack_set_zero(void)
1651{
1652 this_cpu_inc(debug_idt_ctr);
1653 load_current_idt();
1654}
1655
1656noinstr void debug_stack_reset(void)
1657{
1658 if (WARN_ON(!this_cpu_read(debug_idt_ctr)))
1659 return;
1660 if (this_cpu_dec_return(debug_idt_ctr) == 0)
1661 load_current_idt();
1662}
1663
1664#else
1665
1666DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
1667EXPORT_PER_CPU_SYMBOL(current_task);
1668DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
1669EXPORT_PER_CPU_SYMBOL(__preempt_count);
1670
1671
1672
1673
1674
1675
1676DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
1677 (unsigned long)&init_thread_union + THREAD_SIZE;
1678EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
1679
1680#ifdef CONFIG_STACKPROTECTOR
1681DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
1682#endif
1683
1684#endif
1685
1686
1687
1688
1689static void clear_all_debug_regs(void)
1690{
1691 int i;
1692
1693 for (i = 0; i < 8; i++) {
1694
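		/* Ignore DR4 and DR5, they are reserved. */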
1695 if ((i == 4) || (i == 5))
1696 continue;
1697
1698 set_debugreg(0, i);
1699 }
1700}
1701
1702#ifdef CONFIG_KGDB
1703
1704
1705
1706
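/* Restore hardware breakpoint state if a kgdb debugger is attached. */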
1707static void dbg_restore_debug_regs(void)
1708{
1709 if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
1710 arch_kgdb_ops.correct_hw_break();
1711}
1712#else
1713#define dbg_restore_debug_regs()
1714#endif
1715
1716static void wait_for_master_cpu(int cpu)
1717{
1718#ifdef CONFIG_SMP
1719
1720
1721
1722
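	/*
	 * Wait for an ACK from the master CPU before continuing with AP
	 * initialization.
	 */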
1723 WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask));
1724 while (!cpumask_test_cpu(cpu, cpu_callout_mask))
1725 cpu_relax();
1726#endif
1727}
1728
1729#ifdef CONFIG_X86_64
1730static inline void setup_getcpu(int cpu)
1731{
1732 unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
1733 struct desc_struct d = { };
1734
1735 if (boot_cpu_has(X86_FEATURE_RDTSCP))
1736 write_rdtscp_aux(cpudata);
1737
1738
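	/* Store CPU and node number in the segment limit. */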
1739 d.limit0 = cpudata;
1740 d.limit1 = cpudata >> 16;
1741
1742 d.type = 5;
1743 d.dpl = 3;
1744 d.s = 1;
1745 d.p = 1;
1746 d.d = 1;
1747
1748 write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
1749}
1750
1751static inline void ucode_cpu_init(int cpu)
1752{
1753 if (cpu)
1754 load_ucode_ap();
1755}
1756
1757static inline void tss_setup_ist(struct tss_struct *tss)
1758{
1759
1760 tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
1761 tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
1762 tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
1763 tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
1764
1765 tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
1766}
1767
1768static inline void gdt_setup_doublefault_tss(int cpu) { }
1769
1770#else
1771
1772static inline void setup_getcpu(int cpu) { }
1773
1774static inline void ucode_cpu_init(int cpu)
1775{
1776 show_ucode_info_early();
1777}
1778
1779static inline void tss_setup_ist(struct tss_struct *tss) { }
1780
1781static inline void gdt_setup_doublefault_tss(int cpu)
1782{
1783#ifdef CONFIG_DOUBLEFAULT
1784
1785 __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
1786#endif
1787}
1788#endif
1789
1790
1791
1792
1793
1794void cpu_init_exception_handling(void)
1795{
1796 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
1797 int cpu = raw_smp_processor_id();
1798
1799
1800 setup_getcpu(cpu);
1801
1802
1803 tss_setup_ist(tss);
1804 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1805 memset(tss->io_bitmap, 0xff, sizeof(tss->io_bitmap));
1806 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1807
1808 load_TR_desc();
1809
1810
1811 load_current_idt();
1812}
1813
1814
1815
1816
1817
1818
1819
1820void cpu_init(void)
1821{
1822 struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
1823 struct task_struct *cur = current;
1824 int cpu = raw_smp_processor_id();
1825
1826 wait_for_master_cpu(cpu);
1827
1828
1829
1830
1831
1832 cr4_init_shadow();
1833
1834 ucode_cpu_init(cpu);
1835
1836#ifdef CONFIG_NUMA
1837 if (this_cpu_read(numa_node) == 0 &&
1838 early_cpu_to_node(cpu) != NUMA_NO_NODE)
1839 set_numa_node(early_cpu_to_node(cpu));
1840#endif
1841 setup_getcpu(cpu);
1842
1843 pr_debug("Initializing CPU#%d\n", cpu);
1844
1845 if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
1846 boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
1847 cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
1848
1849
1850
1851
1852
1853 switch_to_new_gdt(cpu);
1854 load_current_idt();
1855
1856 if (IS_ENABLED(CONFIG_X86_64)) {
1857 loadsegment(fs, 0);
1858 memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
1859 syscall_init();
1860
1861 wrmsrl(MSR_FS_BASE, 0);
1862 wrmsrl(MSR_KERNEL_GS_BASE, 0);
1863 barrier();
1864
1865 x2apic_setup();
1866 }
1867
1868 mmgrab(&init_mm);
1869 cur->active_mm = &init_mm;
1870 BUG_ON(cur->mm);
1871 initialize_tlbstate_and_flush();
1872 enter_lazy_tlb(&init_mm, cur);
1873
1874
1875 tss_setup_ist(tss);
1876 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
1877 memset(tss->io_bitmap, 0xff, sizeof(tss->io_bitmap));
1878 set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
1879
1880 load_TR_desc();
1881
1882
1883
1884
1885 load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));
1886
1887 load_mm_ldt(&init_mm);
1888
1889 clear_all_debug_regs();
1890 dbg_restore_debug_regs();
1891
1892 gdt_setup_doublefault_tss(cpu);
1893
1894 fpu__init_cpu();
1895
1896 if (is_uv_system())
1897 uv_cpu_init();
1898
1899 load_fixmap_gdt(cpu);
1900}
1901
1902static void bsp_resume(void)
1903{
1904 if (this_cpu->c_bsp_resume)
1905 this_cpu->c_bsp_resume(&boot_cpu_data);
1906}
1907
1908static struct syscore_ops cpu_syscore_ops = {
1909 .resume = bsp_resume,
1910};
1911
1912static int __init init_cpu_syscore(void)
1913{
1914 register_syscore_ops(&cpu_syscore_ops);
1915 return 0;
1916}
1917core_initcall(init_cpu_syscore);
1918
1923
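/*
 * Re-check CPU features after a late microcode update and warn if any of
 * them changed, since the new bits may not take effect.
 */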
1924void microcode_check(void)
1925{
1926 struct cpuinfo_x86 info;
1927
1928 perf_check_microcode();
1929
1930
1931 info.cpuid_level = cpuid_eax(0);
1932
1933
1934
1935
1936
1937
1938 memcpy(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability));
1939
1940 get_cpu_cap(&info);
1941
1942 if (!memcmp(&info.x86_capability, &boot_cpu_data.x86_capability, sizeof(info.x86_capability)))
1943 return;
1944
1945 pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
1946 pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
1947}
1948